repo_name (stringlengths 6-100) | path (stringlengths 4-294) | copies (stringlengths 1-5) | size (stringlengths 4-6) | content (stringlengths 606-896k) | license (stringclasses, 15 values)
---|---|---|---|---|---
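The rows below are plain (repo_name, path, copies, size, content, license) records. A minimal sketch of filtering such rows, assuming they have been exported to a JSON Lines file; the filename `rows.jsonl` and the selection criteria are illustrative, not part of the dataset itself:

```python
import json

# Load one JSON object per line; each object mirrors the columns above:
# repo_name, path, copies, size, content, license.
with open("rows.jsonl") as fh:
    rows = [json.loads(line) for line in fh]

# Example filter: permissively licensed Python files under 5 KB.
small_python = [
    r for r in rows
    if r["license"] in ("mit", "bsd-3-clause")
    and r["path"].endswith(".py")
    and int(r["size"]) < 5000
]

for r in small_python:
    print(r["repo_name"], r["path"], r["size"])
```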
pombredanne/Rusthon
|
regtests/test-rust.py
|
4
|
1223
|
import os, sys, subprocess

passed = {}
ignore = ()
TODO_FIX = (
    'ffi_hello.py',  ## libc is unstable as of Rust1.2
    'pointer_syntax.py',
    'rust_select.py',
    'try.py',
)

files = os.listdir('./rust')
files.sort()

for md in files:
    if md in TODO_FIX:
        print 'skip test: %s (TODO fix later)' %md
        continue
    elif not md.endswith('.py'):
        continue
    print md
    if md.startswith( ignore ):
        continue
    subprocess.check_call([
        'python',
        '../rusthon.py',
        '--rust',
        os.path.join('./rust', md)
    ])
    passed[ md ] = open('/tmp/rusthon-build.rs').read().split('/*end-builtins*/')[-1]

report = [
    'Rust Backend Regression Tests',
    '-----------------------------',
    'the following tests compiled, and the binary executed without any errors',
]

print 'TESTS PASSED:'
for md in passed:
    print md
    report.append('* [%s](rust/%s)' %(md,md))
    report.append('')
    report.append('input:')
    report.append('------')
    report.append('```python')
    report.extend( open('./rust/'+md, 'rb').read().splitlines() )
    report.append('```')
    report.append('output:')
    report.append('------')
    report.append('```rust')
    report.extend( passed[md].splitlines() )
    report.append('```')

open('regtest-report-rust.md', 'wb').write('\n'.join(report))
|
bsd-3-clause
|
CryptArc/bitcoin
|
test/functional/interface_rpc.py
|
5
|
2791
|
#!/usr/bin/env python3
# Copyright (c) 2018-2019 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Tests some generic aspects of the RPC interface."""
import os
from test_framework.authproxy import JSONRPCException
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import assert_equal, assert_greater_than_or_equal

def expect_http_status(expected_http_status, expected_rpc_code,
                       fcn, *args):
    try:
        fcn(*args)
        raise AssertionError("Expected RPC error %d, got none" % expected_rpc_code)
    except JSONRPCException as exc:
        assert_equal(exc.error["code"], expected_rpc_code)
        assert_equal(exc.http_status, expected_http_status)


class RPCInterfaceTest(BitcoinTestFramework):
    def set_test_params(self):
        self.num_nodes = 1
        self.setup_clean_chain = True

    def test_getrpcinfo(self):
        self.log.info("Testing getrpcinfo...")

        info = self.nodes[0].getrpcinfo()
        assert_equal(len(info['active_commands']), 1)

        command = info['active_commands'][0]
        assert_equal(command['method'], 'getrpcinfo')
        assert_greater_than_or_equal(command['duration'], 0)
        assert_equal(info['logpath'], os.path.join(self.nodes[0].datadir, 'regtest', 'debug.log'))

    def test_batch_request(self):
        self.log.info("Testing basic JSON-RPC batch request...")

        results = self.nodes[0].batch([
            # A basic request that will work fine.
            {"method": "getblockcount", "id": 1},
            # Request that will fail. The whole batch request should still
            # work fine.
            {"method": "invalidmethod", "id": 2},
            # Another call that should succeed.
            {"method": "getbestblockhash", "id": 3},
        ])

        result_by_id = {}
        for res in results:
            result_by_id[res["id"]] = res

        assert_equal(result_by_id[1]['error'], None)
        assert_equal(result_by_id[1]['result'], 0)
        assert_equal(result_by_id[2]['error']['code'], -32601)
        assert_equal(result_by_id[2]['result'], None)
        assert_equal(result_by_id[3]['error'], None)
        assert result_by_id[3]['result'] is not None

    def test_http_status_codes(self):
        self.log.info("Testing HTTP status codes for JSON-RPC requests...")

        expect_http_status(404, -32601, self.nodes[0].invalidmethod)
        expect_http_status(500, -8, self.nodes[0].getblockhash, 42)

    def run_test(self):
        self.test_getrpcinfo()
        self.test_batch_request()
        self.test_http_status_codes()


if __name__ == '__main__':
    RPCInterfaceTest().main()
|
mit
|
roadmapper/ansible
|
lib/ansible/modules/windows/win_chocolatey_source.py
|
38
|
3691
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2018, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: win_chocolatey_source
version_added: '2.7'
short_description: Manages Chocolatey sources
description:
- Used to manage Chocolatey sources configured on the client.
- Requires Chocolatey to be already installed on the remote host.
options:
  admin_only:
    description:
    - Makes the source visible to Administrators only.
    - Requires Chocolatey >= 0.10.8.
    - When creating a new source, this defaults to C(no).
    type: bool
  allow_self_service:
    description:
    - Allow the source to be used with self-service.
    - Requires Chocolatey >= 0.10.4.
    - When creating a new source, this defaults to C(no).
    type: bool
  bypass_proxy:
    description:
    - Bypass the proxy when using this source.
    - Requires Chocolatey >= 0.10.4.
    - When creating a new source, this defaults to C(no).
    type: bool
  certificate:
    description:
    - The path to a .pfx file to use for X509 authenticated feeds.
    - Requires Chocolatey >= 0.9.10.
    type: str
  certificate_password:
    description:
    - The password for I(certificate) if required.
    - Requires Chocolatey >= 0.9.10.
  name:
    description:
    - The name of the source to configure.
    required: yes
  priority:
    description:
    - The priority order of this source compared to other sources, lower is
      better.
    - All priorities above C(0) will be evaluated first, then zero-based values
      will be evaluated in config file order.
    - Requires Chocolatey >= 0.9.9.9.
    - When creating a new source, this defaults to C(0).
    type: int
  source:
    description:
    - The file/folder/url of the source.
    - Required when I(state) is C(present) or C(disabled) and the source does
      not already exist.
  source_username:
    description:
    - The username used to access I(source).
  source_password:
    description:
    - The password for I(source_username).
    - Required if I(source_username) is set.
  state:
    description:
    - When C(absent), will remove the source.
    - When C(disabled), will ensure the source exists but is disabled.
    - When C(present), will ensure the source exists and is enabled.
    choices:
    - absent
    - disabled
    - present
    default: present
  update_password:
    description:
    - When C(always), the module will always set the password and report a
      change if I(certificate_password) or I(source_password) is set.
    - When C(on_create), the module will only set the password if the source
      is being created.
    choices:
    - always
    - on_create
    default: always
seealso:
- module: win_chocolatey
- module: win_chocolatey_config
- module: win_chocolatey_facts
- module: win_chocolatey_feature
author:
- Jordan Borean (@jborean93)
'''
EXAMPLES = r'''
- name: Remove the default public source
  win_chocolatey_source:
    name: chocolatey
    state: absent

- name: Add new internal source
  win_chocolatey_source:
    name: internal repo
    state: present
    source: http://chocolatey-server/chocolatey

- name: Create HTTP source with credentials
  win_chocolatey_source:
    name: internal repo
    state: present
    source: https://chocolatey-server/chocolatey
    source_username: username
    source_password: password

- name: Disable Chocolatey source
  win_chocolatey_source:
    name: chocolatey
    state: disabled
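
# Illustrative only: combines the priority and update_password options documented
# above; the URL and credential values are placeholders.
- name: Add a prioritised source and only set the password on creation
  win_chocolatey_source:
    name: internal repo
    state: present
    source: https://chocolatey-server/chocolatey
    source_username: username
    source_password: password
    priority: 1
    update_password: on_create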
'''
RETURN = r'''
'''
|
gpl-3.0
|
arokem/nipy
|
examples/formula/parametric_design.py
|
5
|
1927
|
#!/usr/bin/env python
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
"""
In this example, we create a regression model for an event-related design in
which the response to an event at time T[i] is modeled as depending on the
amount of time since the last stimulus T[i-1]
"""
import numpy as np
import sympy
from nipy.algorithms.statistics.api import Formula, make_recarray
from nipy.modalities.fmri import utils, hrf
# Inter-stimulus intervals (time between events)
dt = np.random.uniform(low=0, high=2.5, size=(50,))
# Onset times from the ISIs
t = np.cumsum(dt)
# We're going to model the amplitudes ('a') by dt (the time between events)
a = sympy.Symbol('a')
linear = utils.define('linear', utils.events(t, dt, f=hrf.glover))
quadratic = utils.define('quad', utils.events(t, dt, f=hrf.glover, g=a**2))
cubic = utils.define('cubic', utils.events(t, dt, f=hrf.glover, g=a**3))
f1 = Formula([linear, quadratic, cubic])
# Evaluate this time-based formula at specific times to make the design matrix
tval = make_recarray(np.linspace(0,100, 1001), 't')
X1 = f1.design(tval, return_float=True)
# Now we make a model where the relationship of time between events and signal
# is an exponential with a time constant tau
l = sympy.Symbol('l')
exponential = utils.events(t, dt, f=hrf.glover, g=sympy.exp(-l*a))
f3 = Formula([exponential])
# Make a design matrix by passing in time and required parameters
params = make_recarray([(4.5, 3.5)], ('l', '_b0'))
X3 = f3.design(tval, params, return_float=True)
# The columns of V are d/d_b0 and d/dl
tt = tval.view(np.float)
v1 = np.sum([hrf.glovert(tt - s)*np.exp(-4.5*a) for s,a in zip(t, dt)], 0)
v2 = np.sum([-3.5*a*hrf.glovert(tt - s)*np.exp(-4.5*a) for s,a in zip(t, dt)], 0)
V = np.array([v1,v2]).T
W = V - np.dot(X3, np.dot(np.linalg.pinv(X3), V))
np.testing.assert_almost_equal((W**2).sum() / (V**2).sum(), 0)
|
bsd-3-clause
|
vishnumuthu/procamcalib_proj
|
doc/source/conf.py
|
16
|
8529
|
# -*- coding: utf-8 -*-
#
# rr-init documentation build configuration file, created by
# sphinx-quickstart on Wed Feb 18 16:46:18 2015.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.doctest',
'sphinx.ext.intersphinx',
'sphinx.ext.todo',
'sphinx.ext.coverage',
'sphinx.ext.mathjax',
'sphinx.ext.ifconfig',
'sphinx.ext.viewcode',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'rr-init'
copyright = u'2015, Author1, Author2, Author3'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = 'v'
# The full version, including alpha/beta/rc tags.
release = 'r'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'rr-initdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'rr-init.tex', u'rr-init Documentation',
u'Author1, Author2, Author3', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'rr-init', u'rr-init Documentation',
[u'Author1, Author2, Author3'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'rr-init', u'rr-init Documentation',
u'Author1, Author2, Author3', 'rr-init', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'http://docs.python.org/': None}
|
cc0-1.0
|
ryfeus/lambda-packs
|
HDF4_H5_NETCDF/source2.7/numpy/core/tests/test_half.py
|
12
|
18605
|
from __future__ import division, absolute_import, print_function
import platform
import pytest
import numpy as np
from numpy import uint16, float16, float32, float64
from numpy.testing import assert_, assert_equal
def assert_raises_fpe(strmatch, callable, *args, **kwargs):
try:
callable(*args, **kwargs)
except FloatingPointError as exc:
assert_(str(exc).find(strmatch) >= 0,
"Did not raise floating point %s error" % strmatch)
else:
assert_(False,
"Did not raise floating point %s error" % strmatch)
class TestHalf(object):
def setup(self):
# An array of all possible float16 values
self.all_f16 = np.arange(0x10000, dtype=uint16)
self.all_f16.dtype = float16
self.all_f32 = np.array(self.all_f16, dtype=float32)
self.all_f64 = np.array(self.all_f16, dtype=float64)
# An array of all non-NaN float16 values, in sorted order
self.nonan_f16 = np.concatenate(
(np.arange(0xfc00, 0x7fff, -1, dtype=uint16),
np.arange(0x0000, 0x7c01, 1, dtype=uint16)))
self.nonan_f16.dtype = float16
self.nonan_f32 = np.array(self.nonan_f16, dtype=float32)
self.nonan_f64 = np.array(self.nonan_f16, dtype=float64)
# An array of all finite float16 values, in sorted order
self.finite_f16 = self.nonan_f16[1:-1]
self.finite_f32 = self.nonan_f32[1:-1]
self.finite_f64 = self.nonan_f64[1:-1]
def test_half_conversions(self):
"""Checks that all 16-bit values survive conversion
to/from 32-bit and 64-bit float"""
# Because the underlying routines preserve the NaN bits, every
# value is preserved when converting to/from other floats.
# Convert from float32 back to float16
b = np.array(self.all_f32, dtype=float16)
assert_equal(self.all_f16.view(dtype=uint16),
b.view(dtype=uint16))
# Convert from float64 back to float16
b = np.array(self.all_f64, dtype=float16)
assert_equal(self.all_f16.view(dtype=uint16),
b.view(dtype=uint16))
# Convert float16 to longdouble and back
# This doesn't necessarily preserve the extra NaN bits,
# so exclude NaNs.
a_ld = np.array(self.nonan_f16, dtype=np.longdouble)
b = np.array(a_ld, dtype=float16)
assert_equal(self.nonan_f16.view(dtype=uint16),
b.view(dtype=uint16))
# Check the range for which all integers can be represented
i_int = np.arange(-2048, 2049)
i_f16 = np.array(i_int, dtype=float16)
j = np.array(i_f16, dtype=int)
assert_equal(i_int, j)
def test_nans_infs(self):
with np.errstate(all='ignore'):
# Check some of the ufuncs
assert_equal(np.isnan(self.all_f16), np.isnan(self.all_f32))
assert_equal(np.isinf(self.all_f16), np.isinf(self.all_f32))
assert_equal(np.isfinite(self.all_f16), np.isfinite(self.all_f32))
assert_equal(np.signbit(self.all_f16), np.signbit(self.all_f32))
assert_equal(np.spacing(float16(65504)), np.inf)
# Check comparisons of all values with NaN
nan = float16(np.nan)
assert_(not (self.all_f16 == nan).any())
assert_(not (nan == self.all_f16).any())
assert_((self.all_f16 != nan).all())
assert_((nan != self.all_f16).all())
assert_(not (self.all_f16 < nan).any())
assert_(not (nan < self.all_f16).any())
assert_(not (self.all_f16 <= nan).any())
assert_(not (nan <= self.all_f16).any())
assert_(not (self.all_f16 > nan).any())
assert_(not (nan > self.all_f16).any())
assert_(not (self.all_f16 >= nan).any())
assert_(not (nan >= self.all_f16).any())
def test_half_values(self):
"""Confirms a small number of known half values"""
a = np.array([1.0, -1.0,
2.0, -2.0,
0.0999755859375, 0.333251953125, # 1/10, 1/3
65504, -65504, # Maximum magnitude
2.0**(-14), -2.0**(-14), # Minimum normal
2.0**(-24), -2.0**(-24), # Minimum subnormal
0, -1/1e1000, # Signed zeros
np.inf, -np.inf])
b = np.array([0x3c00, 0xbc00,
0x4000, 0xc000,
0x2e66, 0x3555,
0x7bff, 0xfbff,
0x0400, 0x8400,
0x0001, 0x8001,
0x0000, 0x8000,
0x7c00, 0xfc00], dtype=uint16)
b.dtype = float16
assert_equal(a, b)
def test_half_rounding(self):
"""Checks that rounding when converting to half is correct"""
a = np.array([2.0**-25 + 2.0**-35, # Rounds to minimum subnormal
2.0**-25, # Underflows to zero (nearest even mode)
2.0**-26, # Underflows to zero
1.0+2.0**-11 + 2.0**-16, # rounds to 1.0+2**(-10)
1.0+2.0**-11, # rounds to 1.0 (nearest even mode)
1.0+2.0**-12, # rounds to 1.0
65519, # rounds to 65504
65520], # rounds to inf
dtype=float64)
rounded = [2.0**-24,
0.0,
0.0,
1.0+2.0**(-10),
1.0,
1.0,
65504,
np.inf]
# Check float64->float16 rounding
b = np.array(a, dtype=float16)
assert_equal(b, rounded)
# Check float32->float16 rounding
a = np.array(a, dtype=float32)
b = np.array(a, dtype=float16)
assert_equal(b, rounded)
def test_half_correctness(self):
"""Take every finite float16, and check the casting functions with
a manual conversion."""
# Create an array of all finite float16s
a_bits = self.finite_f16.view(dtype=uint16)
# Convert to 64-bit float manually
a_sgn = (-1.0)**((a_bits & 0x8000) >> 15)
a_exp = np.array((a_bits & 0x7c00) >> 10, dtype=np.int32) - 15
a_man = (a_bits & 0x03ff) * 2.0**(-10)
# Implicit bit of normalized floats
a_man[a_exp != -15] += 1
# Denormalized exponent is -14
a_exp[a_exp == -15] = -14
a_manual = a_sgn * a_man * 2.0**a_exp
a32_fail = np.nonzero(self.finite_f32 != a_manual)[0]
if len(a32_fail) != 0:
bad_index = a32_fail[0]
assert_equal(self.finite_f32, a_manual,
"First non-equal is half value %x -> %g != %g" %
(self.finite_f16[bad_index],
self.finite_f32[bad_index],
a_manual[bad_index]))
a64_fail = np.nonzero(self.finite_f64 != a_manual)[0]
if len(a64_fail) != 0:
bad_index = a64_fail[0]
assert_equal(self.finite_f64, a_manual,
"First non-equal is half value %x -> %g != %g" %
(self.finite_f16[bad_index],
self.finite_f64[bad_index],
a_manual[bad_index]))
def test_half_ordering(self):
"""Make sure comparisons are working right"""
# All non-NaN float16 values in reverse order
a = self.nonan_f16[::-1].copy()
# 32-bit float copy
b = np.array(a, dtype=float32)
# Should sort the same
a.sort()
b.sort()
assert_equal(a, b)
# Comparisons should work
assert_((a[:-1] <= a[1:]).all())
assert_(not (a[:-1] > a[1:]).any())
assert_((a[1:] >= a[:-1]).all())
assert_(not (a[1:] < a[:-1]).any())
# All != except for +/-0
assert_equal(np.nonzero(a[:-1] < a[1:])[0].size, a.size-2)
assert_equal(np.nonzero(a[1:] > a[:-1])[0].size, a.size-2)
def test_half_funcs(self):
"""Test the various ArrFuncs"""
# fill
assert_equal(np.arange(10, dtype=float16),
np.arange(10, dtype=float32))
# fillwithscalar
a = np.zeros((5,), dtype=float16)
a.fill(1)
assert_equal(a, np.ones((5,), dtype=float16))
# nonzero and copyswap
a = np.array([0, 0, -1, -1/1e20, 0, 2.0**-24, 7.629e-6], dtype=float16)
assert_equal(a.nonzero()[0],
[2, 5, 6])
a = a.byteswap().newbyteorder()
assert_equal(a.nonzero()[0],
[2, 5, 6])
# dot
a = np.arange(0, 10, 0.5, dtype=float16)
b = np.ones((20,), dtype=float16)
assert_equal(np.dot(a, b),
95)
# argmax
a = np.array([0, -np.inf, -2, 0.5, 12.55, 7.3, 2.1, 12.4], dtype=float16)
assert_equal(a.argmax(),
4)
a = np.array([0, -np.inf, -2, np.inf, 12.55, np.nan, 2.1, 12.4], dtype=float16)
assert_equal(a.argmax(),
5)
# getitem
a = np.arange(10, dtype=float16)
for i in range(10):
assert_equal(a.item(i), i)
def test_spacing_nextafter(self):
"""Test np.spacing and np.nextafter"""
# All non-negative finite #'s
a = np.arange(0x7c00, dtype=uint16)
hinf = np.array((np.inf,), dtype=float16)
a_f16 = a.view(dtype=float16)
assert_equal(np.spacing(a_f16[:-1]), a_f16[1:]-a_f16[:-1])
assert_equal(np.nextafter(a_f16[:-1], hinf), a_f16[1:])
assert_equal(np.nextafter(a_f16[0], -hinf), -a_f16[1])
assert_equal(np.nextafter(a_f16[1:], -hinf), a_f16[:-1])
# switch to negatives
a |= 0x8000
assert_equal(np.spacing(a_f16[0]), np.spacing(a_f16[1]))
assert_equal(np.spacing(a_f16[1:]), a_f16[:-1]-a_f16[1:])
assert_equal(np.nextafter(a_f16[0], hinf), -a_f16[1])
assert_equal(np.nextafter(a_f16[1:], hinf), a_f16[:-1])
assert_equal(np.nextafter(a_f16[:-1], -hinf), a_f16[1:])
def test_half_ufuncs(self):
"""Test the various ufuncs"""
a = np.array([0, 1, 2, 4, 2], dtype=float16)
b = np.array([-2, 5, 1, 4, 3], dtype=float16)
c = np.array([0, -1, -np.inf, np.nan, 6], dtype=float16)
assert_equal(np.add(a, b), [-2, 6, 3, 8, 5])
assert_equal(np.subtract(a, b), [2, -4, 1, 0, -1])
assert_equal(np.multiply(a, b), [0, 5, 2, 16, 6])
assert_equal(np.divide(a, b), [0, 0.199951171875, 2, 1, 0.66650390625])
assert_equal(np.equal(a, b), [False, False, False, True, False])
assert_equal(np.not_equal(a, b), [True, True, True, False, True])
assert_equal(np.less(a, b), [False, True, False, False, True])
assert_equal(np.less_equal(a, b), [False, True, False, True, True])
assert_equal(np.greater(a, b), [True, False, True, False, False])
assert_equal(np.greater_equal(a, b), [True, False, True, True, False])
assert_equal(np.logical_and(a, b), [False, True, True, True, True])
assert_equal(np.logical_or(a, b), [True, True, True, True, True])
assert_equal(np.logical_xor(a, b), [True, False, False, False, False])
assert_equal(np.logical_not(a), [True, False, False, False, False])
assert_equal(np.isnan(c), [False, False, False, True, False])
assert_equal(np.isinf(c), [False, False, True, False, False])
assert_equal(np.isfinite(c), [True, True, False, False, True])
assert_equal(np.signbit(b), [True, False, False, False, False])
assert_equal(np.copysign(b, a), [2, 5, 1, 4, 3])
assert_equal(np.maximum(a, b), [0, 5, 2, 4, 3])
x = np.maximum(b, c)
assert_(np.isnan(x[3]))
x[3] = 0
assert_equal(x, [0, 5, 1, 0, 6])
assert_equal(np.minimum(a, b), [-2, 1, 1, 4, 2])
x = np.minimum(b, c)
assert_(np.isnan(x[3]))
x[3] = 0
assert_equal(x, [-2, -1, -np.inf, 0, 3])
assert_equal(np.fmax(a, b), [0, 5, 2, 4, 3])
assert_equal(np.fmax(b, c), [0, 5, 1, 4, 6])
assert_equal(np.fmin(a, b), [-2, 1, 1, 4, 2])
assert_equal(np.fmin(b, c), [-2, -1, -np.inf, 4, 3])
assert_equal(np.floor_divide(a, b), [0, 0, 2, 1, 0])
assert_equal(np.remainder(a, b), [0, 1, 0, 0, 2])
assert_equal(np.divmod(a, b), ([0, 0, 2, 1, 0], [0, 1, 0, 0, 2]))
assert_equal(np.square(b), [4, 25, 1, 16, 9])
assert_equal(np.reciprocal(b), [-0.5, 0.199951171875, 1, 0.25, 0.333251953125])
assert_equal(np.ones_like(b), [1, 1, 1, 1, 1])
assert_equal(np.conjugate(b), b)
assert_equal(np.absolute(b), [2, 5, 1, 4, 3])
assert_equal(np.negative(b), [2, -5, -1, -4, -3])
assert_equal(np.positive(b), b)
assert_equal(np.sign(b), [-1, 1, 1, 1, 1])
assert_equal(np.modf(b), ([0, 0, 0, 0, 0], b))
assert_equal(np.frexp(b), ([-0.5, 0.625, 0.5, 0.5, 0.75], [2, 3, 1, 3, 2]))
assert_equal(np.ldexp(b, [0, 1, 2, 4, 2]), [-2, 10, 4, 64, 12])
def test_half_coercion(self):
"""Test that half gets coerced properly with the other types"""
a16 = np.array((1,), dtype=float16)
a32 = np.array((1,), dtype=float32)
b16 = float16(1)
b32 = float32(1)
assert_equal(np.power(a16, 2).dtype, float16)
assert_equal(np.power(a16, 2.0).dtype, float16)
assert_equal(np.power(a16, b16).dtype, float16)
assert_equal(np.power(a16, b32).dtype, float16)
assert_equal(np.power(a16, a16).dtype, float16)
assert_equal(np.power(a16, a32).dtype, float32)
assert_equal(np.power(b16, 2).dtype, float64)
assert_equal(np.power(b16, 2.0).dtype, float64)
assert_equal(np.power(b16, b16).dtype, float16)
assert_equal(np.power(b16, b32).dtype, float32)
assert_equal(np.power(b16, a16).dtype, float16)
assert_equal(np.power(b16, a32).dtype, float32)
assert_equal(np.power(a32, a16).dtype, float32)
assert_equal(np.power(a32, b16).dtype, float32)
assert_equal(np.power(b32, a16).dtype, float16)
assert_equal(np.power(b32, b16).dtype, float32)
@pytest.mark.skipif(platform.machine() == "armv5tel",
reason="See gh-413.")
def test_half_fpe(self):
with np.errstate(all='raise'):
sx16 = np.array((1e-4,), dtype=float16)
bx16 = np.array((1e4,), dtype=float16)
sy16 = float16(1e-4)
by16 = float16(1e4)
# Underflow errors
assert_raises_fpe('underflow', lambda a, b:a*b, sx16, sx16)
assert_raises_fpe('underflow', lambda a, b:a*b, sx16, sy16)
assert_raises_fpe('underflow', lambda a, b:a*b, sy16, sx16)
assert_raises_fpe('underflow', lambda a, b:a*b, sy16, sy16)
assert_raises_fpe('underflow', lambda a, b:a/b, sx16, bx16)
assert_raises_fpe('underflow', lambda a, b:a/b, sx16, by16)
assert_raises_fpe('underflow', lambda a, b:a/b, sy16, bx16)
assert_raises_fpe('underflow', lambda a, b:a/b, sy16, by16)
assert_raises_fpe('underflow', lambda a, b:a/b,
float16(2.**-14), float16(2**11))
assert_raises_fpe('underflow', lambda a, b:a/b,
float16(-2.**-14), float16(2**11))
assert_raises_fpe('underflow', lambda a, b:a/b,
float16(2.**-14+2**-24), float16(2))
assert_raises_fpe('underflow', lambda a, b:a/b,
float16(-2.**-14-2**-24), float16(2))
assert_raises_fpe('underflow', lambda a, b:a/b,
float16(2.**-14+2**-23), float16(4))
# Overflow errors
assert_raises_fpe('overflow', lambda a, b:a*b, bx16, bx16)
assert_raises_fpe('overflow', lambda a, b:a*b, bx16, by16)
assert_raises_fpe('overflow', lambda a, b:a*b, by16, bx16)
assert_raises_fpe('overflow', lambda a, b:a*b, by16, by16)
assert_raises_fpe('overflow', lambda a, b:a/b, bx16, sx16)
assert_raises_fpe('overflow', lambda a, b:a/b, bx16, sy16)
assert_raises_fpe('overflow', lambda a, b:a/b, by16, sx16)
assert_raises_fpe('overflow', lambda a, b:a/b, by16, sy16)
assert_raises_fpe('overflow', lambda a, b:a+b,
float16(65504), float16(17))
assert_raises_fpe('overflow', lambda a, b:a-b,
float16(-65504), float16(17))
assert_raises_fpe('overflow', np.nextafter, float16(65504), float16(np.inf))
assert_raises_fpe('overflow', np.nextafter, float16(-65504), float16(-np.inf))
assert_raises_fpe('overflow', np.spacing, float16(65504))
# Invalid value errors
assert_raises_fpe('invalid', np.divide, float16(np.inf), float16(np.inf))
assert_raises_fpe('invalid', np.spacing, float16(np.inf))
assert_raises_fpe('invalid', np.spacing, float16(np.nan))
assert_raises_fpe('invalid', np.nextafter, float16(np.inf), float16(0))
assert_raises_fpe('invalid', np.nextafter, float16(-np.inf), float16(0))
assert_raises_fpe('invalid', np.nextafter, float16(0), float16(np.nan))
# These should not raise
float16(65472)+float16(32)
float16(2**-13)/float16(2)
float16(2**-14)/float16(2**10)
np.spacing(float16(-65504))
np.nextafter(float16(65504), float16(-np.inf))
np.nextafter(float16(-65504), float16(np.inf))
float16(2**-14)/float16(2**10)
float16(-2**-14)/float16(2**10)
float16(2**-14+2**-23)/float16(2)
float16(-2**-14-2**-23)/float16(2)
def test_half_array_interface(self):
"""Test that half is compatible with __array_interface__"""
class Dummy:
pass
a = np.ones((1,), dtype=float16)
b = Dummy()
b.__array_interface__ = a.__array_interface__
c = np.array(b)
assert_(c.dtype == float16)
assert_equal(a, c)
|
mit
|
ThinkOpen-Solutions/odoo
|
addons/base_gengo/__init__.py
|
377
|
1122
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Openerp sa (<http://openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import res_company
import ir_translation
import wizard
import controller
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
agpl-3.0
|
Ernesto99/odoo
|
addons/report/tests/test_reports.py
|
385
|
2251
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2014-Today OpenERP SA (<http://www.openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import logging
import openerp
import openerp.tests
_logger = logging.getLogger(__name__)

@openerp.tests.common.at_install(False)
@openerp.tests.common.post_install(True)
class TestReports(openerp.tests.TransactionCase):
    def test_reports(self):
        registry, cr, uid = self.registry, self.cr, self.uid
        r_model = registry('ir.actions.report.xml')
        domain = [('report_type', 'like', 'qweb')]
        for r in r_model.browse(cr, uid, r_model.search(cr, uid, domain)):
            report_model = 'report.%s' % r.report_name
            try:
                registry(report_model)
            except KeyError:
                # Only test the generic reports here
                _logger.info("testing report %s", r.report_name)
                report_model = registry(r.model)
                report_model_ids = report_model.search(cr, uid, [], limit=10)
                if not report_model_ids:
                    _logger.info("no record found skipping report %s", r.report_name)
                if not r.multi:
                    report_model_ids = report_model_ids[:1]
                # Test report generation
                registry('report').get_html(cr, uid, report_model_ids, r.report_name)
            else:
                continue
|
agpl-3.0
|
pra85/calibre
|
src/calibre/gui2/dnd.py
|
4
|
10959
|
#!/usr/bin/env python
# vim:fileencoding=UTF-8:ts=4:sw=4:sta:et:sts=4:ai
from __future__ import (unicode_literals, division, absolute_import,
print_function)
__license__ = 'GPL v3'
__copyright__ = '2011, Kovid Goyal <[email protected]>'
__docformat__ = 'restructuredtext en'
import posixpath, os, urllib, re
from urlparse import urlparse, urlunparse
from threading import Thread
from Queue import Queue, Empty
from PyQt4.Qt import QPixmap, Qt, QDialog, QLabel, QVBoxLayout, \
QDialogButtonBox, QProgressBar, QTimer
from calibre.constants import DEBUG, iswindows
from calibre.ptempfile import PersistentTemporaryFile
from calibre import browser, as_unicode, prints
from calibre.gui2 import error_dialog
IMAGE_EXTENSIONS = ['jpg', 'jpeg', 'gif', 'png', 'bmp']
class Worker(Thread): # {{{
def __init__(self, url, fpath, rq):
Thread.__init__(self)
self.url, self.fpath = url, fpath
self.daemon = True
self.rq = rq
self.err = self.tb = None
def run(self):
try:
br = browser()
br.retrieve(self.url, self.fpath, self.callback)
except Exception as e:
self.err = as_unicode(e)
import traceback
self.tb = traceback.format_exc()
def callback(self, a, b, c):
self.rq.put((a, b, c))
# }}}
class DownloadDialog(QDialog): # {{{
def __init__(self, url, fname, parent):
QDialog.__init__(self, parent)
self.setWindowTitle(_('Download %s')%fname)
self.l = QVBoxLayout(self)
self.purl = urlparse(url)
self.msg = QLabel(_('Downloading <b>%(fname)s</b> from %(url)s')%dict(
fname=fname, url=self.purl.netloc))
self.msg.setWordWrap(True)
self.l.addWidget(self.msg)
self.pb = QProgressBar(self)
self.pb.setMinimum(0)
self.pb.setMaximum(0)
self.l.addWidget(self.pb)
self.bb = QDialogButtonBox(QDialogButtonBox.Cancel, Qt.Horizontal, self)
self.l.addWidget(self.bb)
self.bb.rejected.connect(self.reject)
sz = self.sizeHint()
self.resize(max(sz.width(), 400), sz.height())
fpath = PersistentTemporaryFile(os.path.splitext(fname)[1])
fpath.close()
self.fpath = fpath.name
self.worker = Worker(url, self.fpath, Queue())
self.rejected = False
def reject(self):
self.rejected = True
QDialog.reject(self)
def start_download(self):
self.worker.start()
QTimer.singleShot(50, self.update)
self.exec_()
if self.worker.err is not None:
error_dialog(self.parent(), _('Download failed'),
_('Failed to download from %(url)r with error: %(err)s')%dict(
url=self.worker.url, err=self.worker.err),
det_msg=self.worker.tb, show=True)
def update(self):
if self.rejected:
return
try:
progress = self.worker.rq.get_nowait()
except Empty:
pass
else:
self.update_pb(progress)
if not self.worker.is_alive():
return self.accept()
QTimer.singleShot(50, self.update)
def update_pb(self, progress):
transferred, block_size, total = progress
if total == -1:
self.pb.setMaximum(0)
self.pb.setMinimum(0)
self.pb.setValue(0)
else:
so_far = transferred * block_size
self.pb.setMaximum(max(total, so_far))
self.pb.setValue(so_far)
@property
def err(self):
return self.worker.err
# }}}
def dnd_has_image(md):
return md.hasImage()
def data_as_string(f, md):
raw = bytes(md.data(f))
if '/x-moz' in f:
try:
raw = raw.decode('utf-16')
except:
pass
return raw
def path_from_qurl(qurl):
raw = bytes(bytearray(qurl.encodedPath()))
return urllib.unquote(raw).decode('utf-8')
def dnd_has_extension(md, extensions):
if DEBUG:
prints('Debugging DND event')
for f in md.formats():
f = unicode(f)
raw = data_as_string(f, md)
prints(f, len(raw), repr(raw[:300]), '\n')
print ()
if has_firefox_ext(md, extensions):
return True
if md.hasUrls():
urls = [unicode(u.toString()) for u in
md.urls()]
paths = [path_from_qurl(u) for u in md.urls()]
exts = frozenset([posixpath.splitext(u)[1][1:].lower() for u in
paths if u])
if DEBUG:
prints('URLS:', urls)
prints('Paths:', paths)
prints('Extensions:', exts)
return bool(exts.intersection(frozenset(extensions)))
return False
def _u2p(raw):
path = raw
if iswindows and path.startswith('/'):
path = path[1:]
return path.replace('/', os.sep)
def u2p(url):
path = url.path
ans = _u2p(path)
if not os.path.exists(ans):
ans = _u2p(url.path + '#' + url.fragment)
if os.path.exists(ans):
return ans
# Try unquoting the URL
return urllib.unquote(ans)
def dnd_get_image(md, image_exts=IMAGE_EXTENSIONS):
'''
Get the image in the QMimeData object md.
:return: None, None if no image is found
QPixmap, None if an image is found, the pixmap is guaranteed not
null
url, filename if a URL that points to an image is found
'''
if dnd_has_image(md):
for x in md.formats():
x = unicode(x)
if x.startswith('image/'):
cdata = bytes(md.data(x))
pmap = QPixmap()
pmap.loadFromData(cdata)
if not pmap.isNull():
return pmap, None
break
# No image, look for a URL pointing to an image
if md.hasUrls():
urls = [unicode(u.toString()) for u in
md.urls()]
purls = [urlparse(u) for u in urls]
# First look for a local file
images = [u2p(x) for x in purls if x.scheme in ('', 'file')]
images = [x for x in images if
posixpath.splitext(urllib.unquote(x))[1][1:].lower() in
image_exts]
images = [x for x in images if os.path.exists(x)]
p = QPixmap()
for path in images:
try:
with open(path, 'rb') as f:
p.loadFromData(f.read())
except:
continue
if not p.isNull():
return p, None
# No local images, look for remote ones
# First, see if this is from Firefox
rurl, fname = get_firefox_rurl(md, image_exts)
if rurl and fname:
return rurl, fname
# Look through all remaining URLs
remote_urls = [x for x in purls if x.scheme in ('http', 'https',
'ftp') and posixpath.splitext(x.path)[1][1:].lower() in image_exts]
if remote_urls:
rurl = remote_urls[0]
fname = posixpath.basename(urllib.unquote(rurl.path))
return urlunparse(rurl), fname
return None, None
def dnd_get_files(md, exts):
'''
Get the file in the QMimeData object md with an extension that is one of
the extensions in exts.
:return: None, None if no file is found
[paths], None if a local file is found
[urls], [filenames] if URLs that point to a files are found
'''
# Look for a URL pointing to a file
if md.hasUrls():
urls = [unicode(u.toString()) for u in
md.urls()]
purls = [urlparse(u) for u in urls]
# First look for a local file
local_files = [u2p(x) for x in purls if x.scheme in ('', 'file')]
local_files = [p for p in local_files if
posixpath.splitext(urllib.unquote(p))[1][1:].lower() in
exts]
local_files = [x for x in local_files if os.path.exists(x)]
if local_files:
return local_files, None
# No local files, look for remote ones
# First, see if this is from Firefox
rurl, fname = get_firefox_rurl(md, exts)
if rurl and fname:
return [rurl], [fname]
# Look through all remaining URLs
remote_urls = [x for x in purls if x.scheme in ('http', 'https',
'ftp') and posixpath.splitext(x.path)[1][1:].lower() in exts]
if remote_urls:
filenames = [posixpath.basename(urllib.unquote(rurl2.path)) for rurl2 in
remote_urls]
return [urlunparse(x) for x in remote_urls], filenames
return None, None
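# Illustrative caller sketch (not part of the original calibre module): how the
# return conventions documented in dnd_get_files() above might be handled.
#
#   paths, names = dnd_get_files(md, ['epub', 'mobi'])
#   if paths is None:
#       pass                      # nothing usable was dropped
#   elif names is None:
#       open_local_files(paths)   # local file paths (open_local_files is hypothetical)
#   else:
#       download(paths, names)    # remote URLs plus suggested filenames (download is hypothetical)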
def _get_firefox_pair(md, exts, url, fname):
url = bytes(md.data(url)).decode('utf-16')
fname = bytes(md.data(fname)).decode('utf-16')
while url.endswith('\x00'):
url = url[:-1]
while fname.endswith('\x00'):
fname = fname[:-1]
if not url or not fname:
return None, None
ext = posixpath.splitext(fname)[1][1:].lower()
# Weird firefox bug on linux
ext = {'jpe':'jpg', 'epu':'epub', 'mob':'mobi'}.get(ext, ext)
fname = os.path.splitext(fname)[0] + '.' + ext
if DEBUG:
prints('Firefox file promise:', url, fname)
if ext not in exts:
fname = url = None
return url, fname
def get_firefox_rurl(md, exts):
formats = frozenset([unicode(x) for x in md.formats()])
url = fname = None
if 'application/x-moz-file-promise-url' in formats and \
'application/x-moz-file-promise-dest-filename' in formats:
try:
url, fname = _get_firefox_pair(md, exts,
'application/x-moz-file-promise-url',
'application/x-moz-file-promise-dest-filename')
except:
if DEBUG:
import traceback
traceback.print_exc()
if url is None and 'text/x-moz-url-data' in formats and \
'text/x-moz-url-desc' in formats:
try:
url, fname = _get_firefox_pair(md, exts,
'text/x-moz-url-data', 'text/x-moz-url-desc')
except:
if DEBUG:
import traceback
traceback.print_exc()
if url is None and '_NETSCAPE_URL' in formats:
try:
raw = bytes(md.data('_NETSCAPE_URL'))
raw = raw.decode('utf-8')
lines = raw.splitlines()
if len(lines) > 1 and re.match(r'[a-z]+://', lines[1]) is None:
url, fname = lines[:2]
ext = posixpath.splitext(fname)[1][1:].lower()
if ext not in exts:
fname = url = None
except:
if DEBUG:
import traceback
traceback.print_exc()
if DEBUG:
prints('Firefox rurl:', url, fname)
return url, fname
def has_firefox_ext(md, exts):
return bool(get_firefox_rurl(md, exts)[0])
|
gpl-3.0
|
HPPTECH/hpp_IOSTressTest
|
Refer/IOST_OLD_SRC/IOST_0.11/IOST_Basic.py
|
6
|
4169
|
#!/usr/bin/python
#======================================================================
#
# Project : hpp_IOStressTest
# File : IOST_Basic.py
# Date : Oct 17, 2016
# Author : HuuHoang Nguyen
# Contact : [email protected]
# : [email protected]
# License : MIT License
# Copyright : 2016
# Description: The hpp_IOStressTest is under the MIT License; a copy of the license may be found in LICENSE
#
#======================================================================
import io
import os
import re
import operator
import sys
import base64
import time
from IOST_Prepare import IOST_Prepare
from IOST_Config import *
import gtk
import gtk.glade
import gobject
class IOST_Basic(IOST_Prepare):
def __init__(self):
"""
"""
#-----------------------------------------------------------------------
def MsgBox(self, text, parent=None):
""
global IOST_Config
MsgBox = gtk.MessageDialog(parent, gtk.DIALOG_MODAL, gtk.MESSAGE_ERROR, gtk.BUTTONS_OK, text)
MsgBox.set_icon_from_file(IOST_Config["IconPath"])
MsgBox.run()
MsgBox.destroy()
#-----------------------------------------------------------------------
def MsgConfirm(self, text=""):
""
global IOST_Config
MsgBox=gtk.MessageDialog(None, gtk.DIALOG_MODAL, gtk.MESSAGE_QUESTION, gtk.BUTTONS_OK_CANCEL, text)
# MsgBox.set_icon(IOST_Config["IconPath"])
Response = MsgBox.run()
MsgBox.destroy()
return Response
#-----------------------------------------------------------------------
def InputBox(self, title, text, default='', password=False):
""
global IOST_Config
MsgBox = EntryDialog(title, text, default, mask=password)
# MsgBox.set_icon(IOST_Config["IconPath"])
if MsgBox.run() == gtk.RESPONSE_OK:
Response = MsgBox.value
else:
Response = None
MsgBox.destroy()
return Response
#-----------------------------------------------------------------------
def ShowFontDialog(self, parent, title, button=None, action=gtk.FILE_CHOOSER_ACTION_OPEN):
    ""
    # Note: despite its name this method shows a file chooser; `action` was undefined
    # in the original body, so it is taken as a keyword argument with an assumed default.
    Dlg = gtk.FileChooserDialog(title=title, parent=parent, action=action)
    Dlg.add_button(gtk.STOCK_CANCEL, gtk.RESPONSE_CANCEL)
    Dlg.add_button(gtk.STOCK_SAVE if action == gtk.FILE_CHOOSER_ACTION_SAVE else gtk.STOCK_OPEN, gtk.RESPONSE_OK)
    Dlg.set_do_overwrite_confirmation(True)
    if not hasattr(parent, 'lastPath'):
        parent.lastPath = os.path.expanduser("~")
    Dlg.set_current_folder( parent.lastPath )
    if Dlg.run() == gtk.RESPONSE_OK:
        filename = Dlg.get_filename()
        parent.lastPath = os.path.dirname(filename)
    else:
        filename = None
    Dlg.destroy()
    return filename
#-----------------------------------------------------------------------
def GetKeyName(self, event):
""
name = ""
if event.state & 4:
name = name + "CTRL+"
if event.state & 1:
name = name + "SHIFT+"
if event.state & 8:
name = name + "ALT+"
if event.state & 67108864:
name = name + "SUPER+"
return name + gtk.gdk.keyval_name(event.keyval).upper()
#-----------------------------------------------------------------------
def GetUserName(self):
    return os.getenv('USER') or os.getenv('LOGNAME') or os.getenv('USERNAME')
# def IOST_Basic_GetPassword():
#     return get_username() + enc_passwd
#-----------------------------------------------------------------------
def ReadFileJSON(self, file_name=""):
    # json and OrderedDict may come from `from IOST_Config import *`; the local
    # imports below make the method self-contained either way.
    import json
    from collections import OrderedDict
    with open(file_name) as ReadFileName:
        ReadData = json.load(ReadFileName, object_pairs_hook=OrderedDict)
    # if IOST_Config_DebugEnable:
    #     pprint (IOST_Config_ReadData)
    return ReadData
#-----------------------------------------------------------------------
def WriteFileJSON(self, file_name="", data=None):
    import json
    with open(file_name, 'w') as WriteFileName:
        json.dump(data, WriteFileName, indent=4)
# IOST_Basic = IOST_Basic()
|
mit
|
aoom/pattern
|
pattern/text/search.py
|
21
|
44939
|
#### PATTERN | TEXT | PATTERN MATCHING #############################################################
# -*- coding: utf-8 -*-
# Copyright (c) 2010 University of Antwerp, Belgium
# Author: Tom De Smedt <[email protected]>
# License: BSD (see LICENSE.txt for details).
# http://www.clips.ua.ac.be/pages/pattern
####################################################################################################
import re
import itertools
#--- TEXT, SENTENCE AND WORD -----------------------------------------------------------------------
# The search() and match() functions work on Text, Sentence and Word objects (see pattern.text.tree),
# i.e., the parse tree including part-of-speech tags and phrase chunk tags.
# The pattern.text.search Match object will contain matched Word objects,
# emulated with the following classes if the original input was a plain string:
PUNCTUATION = ".,;:!?()[]{}`'\"@#$^&*+-|=~_"
RE_PUNCTUATION = "|".join(map(re.escape, PUNCTUATION))
RE_PUNCTUATION = re.compile("(%s)" % RE_PUNCTUATION)
class Text(list):
def __init__(self, string="", token=["word"]):
""" A list of sentences, where each sentence is separated by a period.
"""
list.__init__(self, (Sentence(s + ".", token) for s in string.split(".")))
@property
def sentences(self):
return self
@property
def words(self):
return list(itertools.chain(*self))
class Sentence(list):
def __init__(self, string="", token=["word"]):
""" A list of words, where punctuation marks are split from words.
"""
s = RE_PUNCTUATION.sub(" \\1 ", string) # Naive tokenization.
s = re.sub(r"\s+", " ", s)
s = re.sub(r" ' (d|m|s|ll|re|ve)", " '\\1", s)
s = s.replace("n ' t", " n't")
s = s.split(" ")
list.__init__(self, (Word(self, w, index=i) for i, w in enumerate(s)))
@property
def string(self):
return " ".join(w.string for w in self)
@property
def words(self):
return self
@property
def chunks(self):
return []
class Word(object):
def __init__(self, sentence, string, tag=None, index=0):
""" A word with a position in a sentence.
"""
self.sentence, self.string, self.tag, self.index = sentence, string, tag, index
def __repr__(self):
return "Word(%s)" % repr(self.string)
def _get_type(self):
return self.tag
def _set_type(self, v):
self.tag = v
type = property(_get_type, _set_type)
@property
def chunk(self):
return None
@property
def lemma(self):
return None
#--- STRING MATCHING -------------------------------------------------------------------------------
WILDCARD = "*"
regexp = type(re.compile(r"."))
def _match(string, pattern):
""" Returns True if the pattern matches the given word string.
The pattern can include a wildcard (*front, back*, *both*, in*side),
or it can be a compiled regular expression.
"""
p = pattern
try:
if p[:1] == WILDCARD and (p[-1:] == WILDCARD and p[1:-1] in string or string.endswith(p[1:])):
return True
if p[-1:] == WILDCARD and not p[-2:-1] == "\\" and string.startswith(p[:-1]):
return True
if p == string:
return True
if WILDCARD in p[1:-1]:
p = p.split(WILDCARD)
return string.startswith(p[0]) and string.endswith(p[-1])
except:
# For performance, calling isinstance() last is 10% faster for plain strings.
if isinstance(p, regexp):
return p.search(string) is not None
return False
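# A few concrete checks of the wildcard forms handled by _match() above, shown for clarity:
#   _match("bobcat", "*cat")       # True: "*front" form, matches strings ending in "cat".
#   _match("cats", "cat*")         # True: "back*" form, matches strings starting with "cat".
#   _match("concatenate", "*cat*") # True: "*both*" form, matches strings containing "cat".
#   _match("inside", "in*de")      # True: "in*side" form, prefix "in" plus suffix "de".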
#--- LIST FUNCTIONS --------------------------------------------------------------------------------
# Search patterns can contain optional constraints,
# so we need to find all possible variations of a pattern.
def unique(iterable):
""" Returns a list copy in which each item occurs only once (in-order).
"""
seen = set()
return [x for x in iterable if x not in seen and not seen.add(x)]
def find(function, iterable):
""" Returns the first item in the list for which function(item) is True, None otherwise.
"""
for x in iterable:
if function(x) is True:
return x
def combinations(iterable, n):
# Backwards compatibility.
return product(iterable, repeat=n)
def product(*args, **kwargs):
""" Yields all permutations with replacement:
list(product("cat", repeat=2)) =>
[("c", "c"),
("c", "a"),
("c", "t"),
("a", "c"),
("a", "a"),
("a", "t"),
("t", "c"),
("t", "a"),
("t", "t")]
"""
p = [[]]
for iterable in map(tuple, args) * kwargs.get("repeat", 1):
p = [x + [y] for x in p for y in iterable]
for p in p:
yield tuple(p)
try: from itertools import product
except:
pass
def variations(iterable, optional=lambda x: False):
""" Returns all possible variations of a sequence with optional items.
"""
# For example: variations(["A?", "B?", "C"], optional=lambda s: s.endswith("?"))
# defines a sequence where constraint A and B are optional:
# [("A?", "B?", "C"), ("B?", "C"), ("A?", "C"), ("C")]
iterable = tuple(iterable)
# Create a boolean sequence where True means optional:
# ("A?", "B?", "C") => [True, True, False]
o = [optional(x) for x in iterable]
# Find all permutations of the boolean sequence:
# [True, False, True], [True, False, False], [False, False, True], [False, False, False].
# Map to sequences of constraints whose index in the boolean sequence yields True.
a = set()
for p in product([False, True], repeat=sum(o)):
p = list(p)
v = [b and (b and p.pop(0)) for b in o]
v = tuple(iterable[i] for i in range(len(v)) if not v[i])
a.add(v)
# Longest-first.
return sorted(a, cmp=lambda x, y: len(y) - len(x))
#### TAXONOMY ######################################################################################
#--- ORDERED DICTIONARY ----------------------------------------------------------------------------
# A taxonomy is based on an ordered dictionary
# (i.e., if a taxonomy term has multiple parents, the most recent parent is the default).
class odict(dict):
def __init__(self, items=[]):
""" A dictionary with ordered keys (first-in last-out).
"""
dict.__init__(self)
self._o = [] # List of ordered keys.
if isinstance(items, dict):
items = reversed(items.items())
for k, v in items:
self.__setitem__(k, v)
@classmethod
def fromkeys(cls, keys=[], v=None):
return cls((k, v) for k in keys)
def push(self, kv):
""" Adds a new item from the given (key, value)-tuple.
If the key exists, pushes the updated item to the head of the dict.
"""
if kv[0] in self:
self.__delitem__(kv[0])
self.__setitem__(kv[0], kv[1])
append = push
def __iter__(self):
return reversed(self._o)
def __setitem__(self, k, v):
if k not in self:
self._o.append(k)
dict.__setitem__(self, k, v)
def __delitem__(self, k):
self._o.remove(k)
dict.__delitem__(self, k)
def update(self, d):
for k, v in reversed(d.items()):
self.__setitem__(k, v)
def setdefault(self, k, v=None):
if not k in self:
self.__setitem__(k, v)
return self[k]
def pop(self, k, *args, **kwargs):
if k in self:
self._o.remove(k)
return dict.pop(self, k, *args, **kwargs)
def popitem(self):
k=self._o[-1] if self._o else None; return (k, self.pop(k))
def clear(self):
self._o=[]; dict.clear(self)
def iterkeys(self):
return reversed(self._o)
def itervalues(self):
return itertools.imap(self.__getitem__, reversed(self._o))
def iteritems(self):
return iter(zip(self.iterkeys(), self.itervalues()))
def keys(self):
return list(self.iterkeys())
def values(self):
return list(self.itervalues())
def items(self):
return list(self.iteritems())
def copy(self):
return self.__class__(reversed(self.items()))
def __repr__(self):
return "{%s}" % ", ".join("%s: %s" % (repr(k), repr(v)) for k, v in self.items())
#--- TAXONOMY --------------------------------------------------------------------------------------
class Taxonomy(dict):
def __init__(self):
""" Hierarchical tree of words classified by semantic type.
For example: "rose" and "daffodil" can be classified as "flower":
>>> taxonomy.append("rose", type="flower")
>>> taxonomy.append("daffodil", type="flower")
>>> print(taxonomy.children("flower"))
Taxonomy terms can be used in a Pattern:
FLOWER will match "flower" as well as "rose" and "daffodil".
The taxonomy is case insensitive by default.
"""
self.case_sensitive = False
self._values = {}
self.classifiers = []
def _normalize(self, term):
try:
return not self.case_sensitive and term.lower() or term
except: # Not a string.
return term
def __contains__(self, term):
# Check if the term is in the dictionary.
# If the term is not in the dictionary, check the classifiers.
term = self._normalize(term)
if dict.__contains__(self, term):
return True
for classifier in self.classifiers:
if classifier.parents(term) \
or classifier.children(term):
return True
return False
def append(self, term, type=None, value=None):
""" Appends the given term to the taxonomy and tags it as the given type.
Optionally, a disambiguation value can be supplied.
For example: taxonomy.append("many", "quantity", "50-200")
"""
term = self._normalize(term)
type = self._normalize(type)
self.setdefault(term, (odict(), odict()))[0].push((type, True))
self.setdefault(type, (odict(), odict()))[1].push((term, True))
self._values[term] = value
def classify(self, term, **kwargs):
""" Returns the (most recently added) semantic type for the given term ("many" => "quantity").
If the term is not in the dictionary, try Taxonomy.classifiers.
"""
term = self._normalize(term)
if dict.__contains__(self, term):
return self[term][0].keys()[-1]
# If the term is not in the dictionary, check the classifiers.
# Returns the first term in the list returned by a classifier.
for classifier in self.classifiers:
# **kwargs are useful if the classifier requests extra information,
# for example the part-of-speech tag.
v = classifier.parents(term, **kwargs)
if v:
return v[0]
def parents(self, term, recursive=False, **kwargs):
""" Returns a list of all semantic types for the given term.
If recursive=True, traverses parents up to the root.
"""
def dfs(term, recursive=False, visited={}, **kwargs):
if term in visited: # Break on cyclic relations.
return []
visited[term], a = True, []
if dict.__contains__(self, term):
a = self[term][0].keys()
for classifier in self.classifiers:
a.extend(classifier.parents(term, **kwargs) or [])
if recursive:
for w in a: a += dfs(w, recursive, visited, **kwargs)
return a
return unique(dfs(self._normalize(term), recursive, {}, **kwargs))
def children(self, term, recursive=False, **kwargs):
""" Returns all terms of the given semantic type: "quantity" => ["many", "lot", "few", ...]
If recursive=True, traverses children down to the leaves.
"""
def dfs(term, recursive=False, visited={}, **kwargs):
if term in visited: # Break on cyclic relations.
return []
visited[term], a = True, []
if dict.__contains__(self, term):
a = self[term][1].keys()
for classifier in self.classifiers:
a.extend(classifier.children(term, **kwargs) or [])
if recursive:
for w in a: a += dfs(w, recursive, visited, **kwargs)
return a
return unique(dfs(self._normalize(term), recursive, {}, **kwargs))
def value(self, term, **kwargs):
""" Returns the value of the given term ("many" => "50-200")
"""
term = self._normalize(term)
if term in self._values:
return self._values[term]
for classifier in self.classifiers:
v = classifier.value(term, **kwargs)
if v is not None:
return v
def remove(self, term):
if dict.__contains__(self, term):
for w in self.parents(term):
self[w][1].pop(term)
dict.pop(self, term)
# Global taxonomy:
TAXONOMY = taxonomy = Taxonomy()
#taxonomy.append("rose", type="flower")
#taxonomy.append("daffodil", type="flower")
#taxonomy.append("flower", type="plant")
#print(taxonomy.classify("rose"))
#print(taxonomy.children("plant", recursive=True))
#c = Classifier(parents=lambda term: term.endswith("ness") and ["quality"] or [])
#taxonomy.classifiers.append(c)
#print(taxonomy.classify("roughness"))
#--- TAXONOMY CLASSIFIER ---------------------------------------------------------------------------
class Classifier(object):
def __init__(self, parents=lambda term: [], children=lambda term: [], value=lambda term: None):
""" A classifier uses a rule-based approach to enrich the taxonomy, for example:
c = Classifier(parents=lambda term: term.endswith("ness") and ["quality"] or [])
taxonomy.classifiers.append(c)
This tags any word ending in -ness as "quality".
This is much shorter than manually adding "roughness", "sharpness", ...
Other examples of useful classifiers: calling en.wordnet.Synset.hyponyms() or en.number().
"""
self.parents = parents
self.children = children
self.value = value
# Classifier(parents=lambda word: word.endswith("ness") and ["quality"] or [])
# Classifier(parents=lambda word, chunk=None: chunk=="VP" and [ACTION] or [])
class WordNetClassifier(Classifier):
def __init__(self, wordnet=None):
if wordnet is None:
try: from pattern.en import wordnet
except:
try: from en import wordnet
except:
pass
Classifier.__init__(self, self._parents, self._children)
self.wordnet = wordnet
def _children(self, word, pos="NN"):
try:
return [w.synonyms[0] for w in self.wordnet.synsets(word, pos[:2])[0].hyponyms()]
except:
pass
def _parents(self, word, pos="NN"):
try:
return [w.synonyms[0] for w in self.wordnet.synsets(word, pos[:2])[0].hypernyms()]
except:
pass
#from en import wordnet
#taxonomy.classifiers.append(WordNetClassifier(wordnet))
#print(taxonomy.parents("ponder", pos="VB"))
#print(taxonomy.children("computer"))
#### PATTERN #######################################################################################
#--- PATTERN CONSTRAINT ----------------------------------------------------------------------------
# Allowed chunk, role and part-of-speech tags (Penn Treebank II):
CHUNKS = dict.fromkeys(["NP", "PP", "VP", "ADVP", "ADJP", "SBAR", "PRT", "INTJ"], True)
ROLES = dict.fromkeys(["SBJ", "OBJ", "PRD", "TMP", "CLR", "LOC", "DIR", "EXT", "PRP"], True)
TAGS = dict.fromkeys(["CC", "CD", "CJ", "DT", "EX", "FW", "IN", "JJ", "JJR", "JJS", "JJ*",
"LS", "MD", "NN", "NNS", "NNP", "NNPS", "NN*", "NO", "PDT", "PR",
"PRP", "PRP$", "PR*", "PRP*", "PT", "RB", "RBR", "RBS", "RB*", "RP",
"SYM", "TO", "UH", "VB", "VBZ", "VBP", "VBD", "VBN", "VBG", "VB*",
"WDT", "WP*", "WRB", "X", ".", ",", ":", "(", ")"], True)
ALPHA = re.compile("[a-zA-Z]")
has_alpha = lambda string: ALPHA.match(string) is not None
class Constraint(object):
def __init__(self, words=[], tags=[], chunks=[], roles=[], taxa=[], optional=False, multiple=False, first=False, taxonomy=TAXONOMY, exclude=None, custom=None):
""" A range of words, tags and taxonomy terms that matches certain words in a sentence.
For example:
Constraint.fromstring("with|of") matches either "with" or "of".
Constraint.fromstring("(JJ)") optionally matches an adjective.
Constraint.fromstring("NP|SBJ") matches subject noun phrases.
Constraint.fromstring("QUANTITY|QUALITY") matches quantity-type and quality-type taxa.
"""
self.index = 0
self.words = list(words) # Allowed words/lemmata (of, with, ...)
self.tags = list(tags) # Allowed parts-of-speech (NN, JJ, ...)
self.chunks = list(chunks) # Allowed chunk types (NP, VP, ...)
self.roles = list(roles) # Allowed chunk roles (SBJ, OBJ, ...)
self.taxa = list(taxa) # Allowed word categories.
self.taxonomy = taxonomy
self.optional = optional
self.multiple = multiple
self.first = first
self.exclude = exclude # Constraint of words that are *not* allowed, or None.
self.custom = custom # Custom function(Word) returns True if word matches constraint.
@classmethod
def fromstring(cls, s, **kwargs):
""" Returns a new Constraint from the given string.
Uppercase words indicate either a tag ("NN", "JJ", "VP")
or a taxonomy term (e.g., "PRODUCT", "PERSON").
Syntax:
( defines an optional constraint, e.g., "(JJ)".
[ defines a constraint with spaces, e.g., "[Mac OS X | Windows Vista]".
_ is converted to spaces, e.g., "Windows_Vista".
| separates different options, e.g., "ADJP|ADVP".
! can be used as a word prefix to disallow it.
* can be used as a wildcard character, e.g., "soft*|JJ*".
? as a suffix defines a constraint that is optional, e.g., "JJ?".
+ as a suffix defines a constraint that can span multiple words, e.g., "JJ+".
^ as a prefix defines a constraint that can only match the first word.
These characters need to be escaped if used as content: "\(".
"""
C = cls(**kwargs)
s = s.strip()
s = s.strip("{}")
s = s.strip()
for i in range(3):
# Wrapping order of control characters is ignored:
# (NN+) == (NN)+ == NN?+ == NN+? == [NN+?] == [NN]+?
if s.startswith("^"):
s = s[1: ]; C.first = True
if s.endswith("+") and not s.endswith("\+"):
s = s[0:-1]; C.multiple = True
if s.endswith("?") and not s.endswith("\?"):
s = s[0:-1]; C.optional = True
if s.startswith("(") and s.endswith(")"):
s = s[1:-1]; C.optional = True
if s.startswith("[") and s.endswith("]"):
s = s[1:-1]
s = re.sub(r"^\\\^", "^", s)
s = re.sub(r"\\\+$", "+", s)
s = s.replace("\_", "&uscore;")
s = s.replace("_"," ")
s = s.replace("&uscore;", "_")
s = s.replace("&lparen;", "(")
s = s.replace("&rparen;", ")")
s = s.replace("[", "[")
s = s.replace("]", "]")
s = s.replace("&lcurly;", "{")
s = s.replace("&rcurly;", "}")
s = s.replace("\(", "(")
s = s.replace("\)", ")")
s = s.replace("\[", "[")
s = s.replace("\]", "]")
s = s.replace("\{", "{")
s = s.replace("\}", "}")
s = s.replace("\*", "*")
s = s.replace("\?", "?")
s = s.replace("\+", "+")
s = s.replace("\^", "^")
s = s.replace("\|", "⊢")
s = s.split("|")
s = [v.replace("⊢", "|").strip() for v in s]
for v in s:
C._append(v)
return C
def _append(self, v):
if v.startswith("!") and self.exclude is None:
self.exclude = Constraint()
if v.startswith("!"):
self.exclude._append(v[1:]); return
if "!" in v:
v = v.replace("\!", "!")
if v != v.upper():
self.words.append(v.lower())
elif v in TAGS:
self.tags.append(v)
elif v in CHUNKS:
self.chunks.append(v)
elif v in ROLES:
self.roles.append(v)
elif v in self.taxonomy or has_alpha(v):
self.taxa.append(v.lower())
else:
# Uppercase words indicate tags or taxonomy terms.
# However, this also matches "*" or "?" or "0.25".
# Unless such punctuation is defined in the taxonomy, it is added to Constraint.words.
self.words.append(v.lower())
def match(self, word):
""" Return True if the given Word is part of the constraint:
- the word (or lemma) occurs in Constraint.words, OR
- the word (or lemma) occurs in Constraint.taxa taxonomy tree, AND
- the word and/or chunk tags match those defined in the constraint.
Individual terms in Constraint.words or the taxonomy can contain wildcards (*).
Some part-of-speech-tags can also contain wildcards: NN*, VB*, JJ*, RB*, PR*.
If the given word contains spaces (e.g., proper noun),
the entire chunk will also be compared.
For example: Constraint(words=["Mac OS X*"])
matches the word "Mac" if the word occurs in a Chunk("Mac OS X 10.5").
"""
# If the constraint has a custom function it must return True.
if self.custom is not None and self.custom(word) is False:
return False
# If the constraint can only match the first word, Word.index must be 0.
if self.first and word.index > 0:
return False
# If the constraint defines excluded options, Word can not match any of these.
if self.exclude and self.exclude.match(word):
return False
# If the constraint defines allowed tags, Word.tag needs to match one of these.
if self.tags:
if find(lambda w: _match(word.tag, w), self.tags) is None:
return False
# If the constraint defines allowed chunks, Word.chunk.tag needs to match one of these.
if self.chunks:
ch = word.chunk and word.chunk.tag or None
if find(lambda w: _match(ch, w), self.chunks) is None:
return False
# If the constraint defines allowed role, Word.chunk.tag needs to match one of these.
if self.roles:
R = word.chunk and [r2 for r1, r2 in word.chunk.relations] or []
if find(lambda w: w in R, self.roles) is None:
return False
# If the constraint defines allowed words,
# Word.string.lower() OR Word.lemma needs to match one of these.
b = True # b==True when word in constraint (or Constraints.words=[]).
if len(self.words) + len(self.taxa) > 0:
s1 = word.string.lower()
s2 = word.lemma
b = False
for w in itertools.chain(self.words, self.taxa):
# If the constraint has a word with spaces (e.g., a proper noun),
# compare it to the entire chunk.
try:
if " " in w and (s1 in w or s2 and s2 in w or "*" in w):
s1 = word.chunk and word.chunk.string.lower() or s1
s2 = word.chunk and " ".join(x or "" for x in word.chunk.lemmata) or s2
except Exception as e:
s1 = s1
s2 = None
# Compare the word to the allowed words (which can contain wildcards).
if _match(s1, w):
b=True; break
# Compare the word lemma to the allowed words, e.g.,
# if "was" is not in the constraint, perhaps "be" is, which is a good match.
if s2 and _match(s2, w):
b=True; break
# If the constraint defines allowed taxonomy terms,
# and the given word did not match an allowed word, traverse the taxonomy.
# The search goes up from the given word to its parents in the taxonomy.
# This is faster than traversing all the children of terms in Constraint.taxa.
# The drawback is that:
# 1) Wildcards in the taxonomy are not detected (use classifiers instead),
# 2) Classifier.children() has no effect, only Classifier.parents().
if self.taxa and (not self.words or (self.words and not b)):
for s in (
word.string, # "ants"
word.lemma, # "ant"
word.chunk and word.chunk.string or None, # "army ants"
word.chunk and " ".join([x or "" for x in word.chunk.lemmata]) or None): # "army ant"
if s is not None:
if self.taxonomy.case_sensitive is False:
s = s.lower()
# Compare ancestors of the word to each term in Constraint.taxa.
for p in self.taxonomy.parents(s, recursive=True):
if find(lambda s: p==s, self.taxa): # No wildcards.
return True
return b
def __repr__(self):
s = []
for k,v in (
( "words", self.words),
( "tags", self.tags),
("chunks", self.chunks),
( "roles", self.roles),
( "taxa", self.taxa)):
if v: s.append("%s=%s" % (k, repr(v)))
return "Constraint(%s)" % ", ".join(s)
@property
def string(self):
a = self.words + self.tags + self.chunks + self.roles + [w.upper() for w in self.taxa]
a = (escape(s) for s in a)
a = (s.replace("\\*", "*") for s in a)
a = [s.replace(" ", "_") for s in a]
if self.exclude:
a.extend("!"+s for s in self.exclude.string[1:-1].split("|"))
return (self.optional and "%s(%s)%s" or "%s[%s]%s") % (
self.first and "^" or "", "|".join(a), self.multiple and "+" or "")
#--- PATTERN ---------------------------------------------------------------------------------------
STRICT = "strict"
GREEDY = "greedy"
class Pattern(object):
def __init__(self, sequence=[], *args, **kwargs):
""" A sequence of constraints that matches certain phrases in a sentence.
The given list of Constraint objects can contain nested lists (groups).
"""
# Parse nested lists and tuples from the sequence into groups.
# [DT [JJ NN]] => Match.group(1) will yield the JJ NN sequences.
def _ungroup(sequence, groups=None):
for v in sequence:
if isinstance(v, (list, tuple)):
if groups is not None:
groups.append(list(_ungroup(v, groups=None)))
for v in _ungroup(v, groups):
yield v
else:
yield v
self.groups = []
self.sequence = list(_ungroup(sequence, groups=self.groups))
# Assign Constraint.index:
i = 0
for constraint in self.sequence:
constraint.index = i; i+=1
# There are two search modes: STRICT and GREEDY.
# - In STRICT, "rabbit" matches only the string "rabbit".
# - In GREEDY, "rabbit|NN" matches the string "rabbit" tagged "NN".
# - In GREEDY, "rabbit" matches "the big white rabbit" (the entire chunk is a match).
# - Pattern.greedy(chunk, constraint) determines (True/False) if a chunk is a match.
self.strict = kwargs.get("strict", STRICT in args and not GREEDY in args)
self.greedy = kwargs.get("greedy", lambda chunk, constraint: True)
def __iter__(self):
return iter(self.sequence)
def __len__(self):
return len(self.sequence)
def __getitem__(self, i):
return self.sequence[i]
@classmethod
def fromstring(cls, s, *args, **kwargs):
""" Returns a new Pattern from the given string.
Constraints are separated by a space.
If a constraint contains a space, it must be wrapped in [].
"""
s = s.replace("\(", "&lparen;")
s = s.replace("\)", "&rparen;")
s = s.replace("\[", "[")
s = s.replace("\]", "]")
s = s.replace("\{", "&lcurly;")
s = s.replace("\}", "&rcurly;")
p = []
i = 0
for m in re.finditer(r"\[.*?\]|\(.*?\)", s):
# Spaces in a range encapsulated in square brackets are encoded.
# "[Windows Vista]" is one range, don't split on space.
p.append(s[i:m.start()])
p.append(s[m.start():m.end()].replace(" ", "&space;")); i=m.end()
p.append(s[i:])
s = "".join(p)
s = s.replace("][", "] [")
s = s.replace(")(", ") (")
s = s.replace("\|", "⊢")
s = re.sub(r"\s+\|\s+", "|", s)
s = re.sub(r"\s+", " ", s)
s = re.sub(r"\{\s+", "{", s)
s = re.sub(r"\s+\}", "}", s)
s = s.split(" ")
s = [v.replace("&space;"," ") for v in s]
P = cls([], *args, **kwargs)
G, O, i = [], [], 0
for s in s:
constraint = Constraint.fromstring(s.strip("{}"), taxonomy=kwargs.get("taxonomy", TAXONOMY))
constraint.index = len(P.sequence)
P.sequence.append(constraint)
# Push a new group on the stack if string starts with "{".
# Parse constraint from string, add it to all open groups.
# Pop latest group from stack if string ends with "}".
# Insert groups in opened-first order (i).
while s.startswith("{"):
s = s[1:]
G.append((i, [])); i+=1
O.append([])
for g in G:
g[1].append(constraint)
while s.endswith("}"):
s = s[:-1]
if G: O[G[-1][0]] = G[-1][1]; G.pop()
P.groups = [g for g in O if g]
return P
def scan(self, string):
""" Returns True if search(Sentence(string)) may yield matches.
It is often faster to scan prior to creating a Sentence and searching it.
"""
# In the following example, first scan the string for "good" and "bad":
# p = Pattern.fromstring("good|bad NN")
# for s in open("parsed.txt"):
# if p.scan(s):
# s = Sentence(s)
# m = p.search(s)
# if m:
# print(m)
w = (constraint.words for constraint in self.sequence if not constraint.optional)
w = itertools.chain(*w)
w = [w.strip(WILDCARD) for w in w if WILDCARD not in w[1:-1]]
if w and not any(w in string.lower() for w in w):
return False
return True
def search(self, sentence):
""" Returns a list of all matches found in the given sentence.
"""
if sentence.__class__.__name__ == "Sentence":
pass
elif isinstance(sentence, list) or sentence.__class__.__name__ == "Text":
a=[]; [a.extend(self.search(s)) for s in sentence]; return a
elif isinstance(sentence, basestring):
sentence = Sentence(sentence)
elif isinstance(sentence, Match) and len(sentence) > 0:
sentence = sentence[0].sentence.slice(sentence[0].index, sentence[-1].index + 1)
a = []
v = self._variations()
u = {}
m = self.match(sentence, _v=v)
while m:
a.append(m)
m = self.match(sentence, start=m.words[-1].index+1, _v=v, _u=u)
return a
def match(self, sentence, start=0, _v=None, _u=None):
""" Returns the first match found in the given sentence, or None.
"""
if sentence.__class__.__name__ == "Sentence":
pass
elif isinstance(sentence, list) or sentence.__class__.__name__ == "Text":
return find(lambda m: m is not None, (self.match(s, start, _v) for s in sentence))
elif isinstance(sentence, basestring):
sentence = Sentence(sentence)
elif isinstance(sentence, Match) and len(sentence) > 0:
sentence = sentence[0].sentence.slice(sentence[0].index, sentence[-1].index + 1)
# Variations (_v) further down the list may match words more to the front.
# We need to check all of them. Unmatched variations are blacklisted (_u).
# Pattern.search() calls Pattern.match() with a persistent blacklist (1.5x faster).
a = []
for sequence in (_v is not None and _v or self._variations()):
if _u is not None and id(sequence) in _u:
continue
m = self._match(sequence, sentence, start)
if m is not None:
a.append((m.words[0].index, len(m.words), m))
if m is not None and m.words[0].index == start:
return m
if m is None and _u is not None:
_u[id(sequence)] = False
# Return the leftmost-longest.
if len(a) > 0:
return sorted(a)[0][-1]
def _variations(self):
v = variations(self.sequence, optional=lambda constraint: constraint.optional)
v = sorted(v, key=len, reverse=True)
return v
def _match(self, sequence, sentence, start=0, i=0, w0=None, map=None, d=0):
# Backtracking tree search.
# Finds the first match in the sentence of the given sequence of constraints.
# start : the current word index.
# i : the current constraint index.
# w0 : the first word that matches a constraint.
# map : a dictionary of (Word index, Constraint) items.
# d : recursion depth.
# XXX - We can probably rewrite all of this using (faster) regular expressions.
if map is None:
map = {}
n = len(sequence)
# --- MATCH ----------
if i == n:
if w0 is not None:
w1 = sentence.words[start-1]
# Greedy algorithm:
# - "cat" matches "the big cat" if "cat" is head of the chunk.
# - "Tom" matches "Tom the cat" if "Tom" is head of the chunk.
# - This behavior is ignored with POS-tag constraints:
# "Tom|NN" can only match single words, not chunks.
# - This is also True for negated POS-tags (e.g., !NN).
w01 = [w0, w1]
for j in (0, -1):
constraint, w = sequence[j], w01[j]
if self.strict is False and w.chunk is not None:
if not constraint.tags:
if not constraint.exclude or not constraint.exclude.tags:
if constraint.match(w.chunk.head):
w01[j] = w.chunk.words[j]
if constraint.exclude and constraint.exclude.match(w.chunk.head):
return None
if self.greedy(w.chunk, constraint) is False: # User-defined.
return None
w0, w1 = w01
# Update map for optional chunk words (see below).
words = sentence.words[w0.index:w1.index+1]
for w in words:
if w.index not in map and w.chunk:
wx = find(lambda w: w.index in map, reversed(w.chunk.words))
if wx:
map[w.index] = map[wx.index]
# Return matched word range, we'll need the map to build Match.constituents().
return Match(self, words, map)
return None
# --- RECURSION --------
constraint = sequence[i]
for w in sentence.words[start:]:
#print(" "*d, "match?", w, sequence[i].string) # DEBUG
if i < n and constraint.match(w):
#print(" "*d, "match!", w, sequence[i].string) # DEBUG
map[w.index] = constraint
if constraint.multiple:
# Next word vs. same constraint if Constraint.multiple=True.
m = self._match(sequence, sentence, w.index+1, i, w0 or w, map, d+1)
if m:
return m
# Next word vs. next constraint.
m = self._match(sequence, sentence, w.index+1, i+1, w0 or w, map, d+1)
if m:
return m
# Chunk words other than the head are optional:
# - Pattern.fromstring("cat") matches "cat" but also "the big cat" (overspecification).
# - Pattern.fromstring("cat|NN") does not match "the big cat" (explicit POS-tag).
if w0 and not constraint.tags:
if not constraint.exclude and not self.strict and w.chunk and w.chunk.head != w:
continue
break
# Part-of-speech tags match one single word.
if w0 and constraint.tags:
break
if w0 and constraint.exclude and constraint.exclude.tags:
break
@property
def string(self):
return " ".join(constraint.string for constraint in self.sequence)
_cache = {}
_CACHE_SIZE = 100 # Number of dynamic Pattern objects to keep in cache.
def compile(pattern, *args, **kwargs):
""" Returns a Pattern from the given string or regular expression.
Recently compiled patterns are kept in cache
(if they do not use taxonomies, which are mutable dicts).
"""
id, p = repr(pattern) + repr(args), pattern
if id in _cache and not kwargs:
return _cache[id]
if isinstance(pattern, basestring):
p = Pattern.fromstring(pattern, *args, **kwargs)
if isinstance(pattern, regexp):
p = Pattern([Constraint(words=[pattern], taxonomy=kwargs.get("taxonomy", TAXONOMY))], *args, **kwargs)
if len(_cache) > _CACHE_SIZE:
_cache.clear()
if isinstance(p, Pattern) and not kwargs:
_cache[id] = p
if isinstance(p, Pattern):
return p
else:
raise TypeError("can't compile '%s' object" % pattern.__class__.__name__)
def scan(pattern, string, *args, **kwargs):
""" Returns True if pattern.search(Sentence(string)) may yield matches.
It is often faster to scan prior to creating a Sentence and searching it.
"""
return compile(pattern, *args, **kwargs).scan(string)
def match(pattern, sentence, *args, **kwargs):
""" Returns the first match found in the given sentence, or None.
"""
return compile(pattern, *args, **kwargs).match(sentence)
def search(pattern, sentence, *args, **kwargs):
""" Returns a list of all matches found in the given sentence.
"""
return compile(pattern, *args, **kwargs).search(sentence)
def escape(string):
""" Returns the string with control characters for Pattern syntax escaped.
For example: "hello!" => "hello\!".
"""
for ch in ("{","}","[","]","(",")","_","|","!","*","+","^"):
string = string.replace(ch, "\\"+ch)
return string
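# Illustrative usage of the helpers above (a sketch; assumes a tagged Sentence,
# e.g., Sentence(parse(...)) as in the Match.group() docstring below):
#s = Sentence(parse("The big black cat sat on the mat."))
#for m in search("JJ+ NN", s): # compile() caches the pattern string between calls.
#    print(m.string) # => "big black cat"
#print(escape("hello!")) # => "hello\!"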
#--- PATTERN MATCH ---------------------------------------------------------------------------------
class Match(object):
def __init__(self, pattern, words=[], map={}):
""" Search result returned from Pattern.match(sentence),
containing a sequence of Word objects.
"""
self.pattern = pattern
self.words = words
self._map1 = dict() # Word index to Constraint.
self._map2 = dict() # Constraint index to list of Word indices.
for w in self.words:
self._map1[w.index] = map[w.index]
for k,v in self._map1.items():
self._map2.setdefault(self.pattern.sequence.index(v),[]).append(k)
for k,v in self._map2.items():
v.sort()
def __len__(self):
return len(self.words)
def __iter__(self):
return iter(self.words)
def __getitem__(self, i):
return self.words.__getitem__(i)
@property
def start(self):
return self.words and self.words[0].index or None
@property
def stop(self):
return self.words and self.words[-1].index+1 or None
def constraint(self, word):
""" Returns the constraint that matches the given Word, or None.
"""
if word.index in self._map1:
return self._map1[word.index]
def constraints(self, chunk):
""" Returns a list of constraints that match the given Chunk.
"""
a = [self._map1[w.index] for w in chunk.words if w.index in self._map1]
b = []; [b.append(constraint) for constraint in a if constraint not in b]
return b
def constituents(self, constraint=None):
""" Returns a list of Word and Chunk objects,
where words have been grouped into their chunks whenever possible.
Optionally, returns only chunks/words that match given constraint(s), or constraint index.
"""
# Select only words that match the given constraint.
# Note: this will only work with constraints from Match.pattern.sequence.
W = self.words
n = len(self.pattern.sequence)
if isinstance(constraint, (int, Constraint)):
if isinstance(constraint, int):
i = constraint
i = i<0 and i%n or i
else:
i = self.pattern.sequence.index(constraint)
W = self._map2.get(i,[])
W = [self.words[i-self.words[0].index] for i in W]
if isinstance(constraint, (list, tuple)):
W = []; [W.extend(self._map2.get(j<0 and j%n or j,[])) for j in constraint]
W = [self.words[i-self.words[0].index] for i in W]
W = unique(W)
a = []
i = 0
while i < len(W):
w = W[i]
if w.chunk and W[i:i+len(w.chunk)] == w.chunk.words:
i += len(w.chunk) - 1
a.append(w.chunk)
else:
a.append(w)
i += 1
return a
def group(self, index, chunked=False):
""" Returns a list of Word objects that match the given group.
With chunked=True, returns a list of Word + Chunk objects - see Match.constituents().
A group consists of consecutive constraints wrapped in { }, e.g.,
search("{JJ JJ} NN", Sentence(parse("big black cat"))).group(1) => big black.
"""
if index < 0 or index > len(self.pattern.groups):
raise IndexError("no such group")
if index > 0 and index <= len(self.pattern.groups):
g = self.pattern.groups[index-1]
if index == 0:
g = self.pattern.sequence
if chunked is True:
return Group(self, self.constituents(constraint=[self.pattern.sequence.index(x) for x in g]))
return Group(self, [w for w in self.words if self.constraint(w) in g])
@property
def string(self):
return " ".join(w.string for w in self.words)
def __repr__(self):
return "Match(words=%s)" % repr(self.words)
#--- PATTERN MATCH GROUP ---------------------------------------------------------------------------
class Group(list):
def __init__(self, match, words):
list.__init__(self, words)
self.match = match
@property
def words(self):
return list(self)
@property
def start(self):
return self and self[0].index or None
@property
def stop(self):
return self and self[-1].index+1 or None
@property
def string(self):
return " ".join(w.string for w in self)
|
bsd-3-clause
|
m4nh/roars
|
scripts/nodes/examples/arp_detector_example.py
|
1
|
2688
|
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
from roars.rosutils.rosnode import RosNode
from roars.vision.cameras import CameraRGB
from roars.vision.arucoutils import MarkerDetector
from roars.vision.arp import ARP
import roars.vision.cvutils as cvutils
import cv2
import numpy as np
import os
import json
#⬢⬢⬢⬢⬢➤ NODE
node = RosNode("rosnode_example")
#⬢⬢⬢⬢⬢➤ Sets HZ from parameters
node.setHz(node.setupParameter("hz", 30))
#⬢⬢⬢⬢⬢➤ Creates Camera Proxy
camera_topic = node.setupParameter(
"camera_topic",
"/camera/rgb/image_raw/compressed"
)
camera_file = node.getFileInPackage(
'roars',
'data/camera_calibrations/asus_xtion.yml'
)
camera = CameraRGB(
configuration_file=camera_file,
rgb_topic=camera_topic,
compressed_image="compressed" in camera_topic
)
#⬢⬢⬢⬢⬢➤ ARP
arp_configuration = node.getFileInPackage(
'roars',
'data/arp_configurations/prototype_configuration.json'
)
arp = ARP(configuration_file=arp_configuration, camera_file=camera_file)
#⬢⬢⬢⬢⬢➤ Points storage
points_per_object = node.setupParameter("points_per_object", 6)
collected_points = []
output_file = node.setupParameter("output_file", "/tmp/arp_objects.json")
#⬢⬢⬢⬢⬢➤ Camera Callback
def cameraCallback(frame):
#⬢⬢⬢⬢⬢➤ Grabs image from Frame
img = frame.rgb_image.copy()
arp_pose = arp.detect(img, debug_draw=True)
if arp_pose:
img_points = cvutils.reproject3DPoint(
arp_pose.p.x(),
arp_pose.p.y(),
arp_pose.p.z(),
camera=camera
)
cv2.circle(
img,
(int(img_points[0]), int(img_points[1])),
5,
(0, 0, 255),
-1
)
#⬢⬢⬢⬢⬢➤ Show
cv2.imshow("output", img)
c = cv2.waitKey(1)
if c == 113:
node.close()
if c == 32 and arp_pose is not None:
print("New Point Added", arp_pose.p)
collected_points.append([
arp_pose.p.x(), arp_pose.p.y(), arp_pose.p.z()
])
if len(collected_points) % points_per_object == 0:
print("New Object Stored")
camera.registerUserCallabck(cameraCallback)
#⬢⬢⬢⬢⬢➤ Main Loop
while node.isActive():
node.tick()
def chunks(l, n):
"""Yield successive n-sized chunks from l."""
for i in range(0, len(l), n):
yield l[i:i + n]
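# Illustrative example of chunks() (values are arbitrary):
# list(chunks([1, 2, 3, 4, 5], 2)) => [[1, 2], [3, 4], [5]]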
probable_objects = list(chunks(collected_points, points_per_object))
objects = []
for o in probable_objects:
if len(o) == points_per_object:
objects.append(o)
with open(output_file, 'w') as handle:
handle.write(json.dumps(objects, indent=4))
|
gpl-3.0
|
jing-bao/pa-chromium
|
sync/tools/testserver/xmppserver.py
|
6
|
19194
|
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""A bare-bones and non-compliant XMPP server.
Just enough of the protocol is implemented to get it to work with
Chrome's sync notification system.
"""
import asynchat
import asyncore
import base64
import re
import socket
from xml.dom import minidom
# pychecker complains about the use of fileno(), which is implemented
# by asyncore by forwarding to an internal object via __getattr__.
__pychecker__ = 'no-classattr'
class Error(Exception):
"""Error class for this module."""
pass
class UnexpectedXml(Error):
"""Raised when an unexpected XML element has been encountered."""
def __init__(self, xml_element):
xml_text = xml_element.toxml()
Error.__init__(self, 'Unexpected XML element', xml_text)
def ParseXml(xml_string):
"""Parses the given string as XML and returns a minidom element
object.
"""
dom = minidom.parseString(xml_string)
# minidom handles xmlns specially, but there's a bug where it sets
# the attribute value to None, which causes toxml() or toprettyxml()
# to break.
def FixMinidomXmlnsBug(xml_element):
if xml_element.getAttribute('xmlns') is None:
xml_element.setAttribute('xmlns', '')
def ApplyToAllDescendantElements(xml_element, fn):
fn(xml_element)
for node in xml_element.childNodes:
if node.nodeType == node.ELEMENT_NODE:
ApplyToAllDescendantElements(node, fn)
root = dom.documentElement
ApplyToAllDescendantElements(root, FixMinidomXmlnsBug)
return root
def CloneXml(xml):
"""Returns a deep copy of the given XML element.
Args:
xml: The XML element, which should be something returned from
ParseXml() (i.e., a root element).
"""
return xml.ownerDocument.cloneNode(True).documentElement
class StanzaParser(object):
"""A hacky incremental XML parser.
StanzaParser consumes data incrementally via FeedString() and feeds
its delegate complete parsed stanzas (i.e., XML documents) via
FeedStanza(). Any stanzas passed to FeedStanza() are unlinked after
the callback is done.
Use like so:
class MyClass(object):
...
def __init__(self, ...):
...
self._parser = StanzaParser(self)
...
def SomeFunction(self, ...):
...
self._parser.FeedString(some_data)
...
def FeedStanza(self, stanza):
...
print stanza.toprettyxml()
...
"""
# NOTE(akalin): The following regexps are naive, but necessary since
# none of the existing Python 2.4/2.5 XML libraries support
# incremental parsing. This works well enough for our purposes.
#
# The regexps below assume that any present XML element starts at
# the beginning of the string, but there may be trailing whitespace.
# Matches an opening stream tag (e.g., '<stream:stream foo="bar">')
# (assumes that the stream XML namespace is defined in the tag).
_stream_re = re.compile(r'^(<stream:stream [^>]*>)\s*')
# Matches an empty element tag (e.g., '<foo bar="baz"/>').
_empty_element_re = re.compile(r'^(<[^>]*/>)\s*')
# Matches a non-empty element (e.g., '<foo bar="baz">quux</foo>').
# Does *not* handle nested elements.
_non_empty_element_re = re.compile(r'^(<([^ >]*)[^>]*>.*?</\2>)\s*')
# The closing tag for a stream tag. We have to insert this
# ourselves since all XML stanzas are children of the stream tag,
# which is never closed until the connection is closed.
_stream_suffix = '</stream:stream>'
def __init__(self, delegate):
self._buffer = ''
self._delegate = delegate
def FeedString(self, data):
"""Consumes the given string data, possibly feeding one or more
stanzas to the delegate.
"""
self._buffer += data
while (self._ProcessBuffer(self._stream_re, self._stream_suffix) or
self._ProcessBuffer(self._empty_element_re) or
self._ProcessBuffer(self._non_empty_element_re)):
pass
def _ProcessBuffer(self, regexp, xml_suffix=''):
"""If the buffer matches the given regexp, removes the match from
the buffer, appends the given suffix, parses it, and feeds it to
the delegate.
Returns:
Whether or not the buffer matched the given regexp.
"""
results = regexp.match(self._buffer)
if not results:
return False
xml_text = self._buffer[:results.end()] + xml_suffix
self._buffer = self._buffer[results.end():]
stanza = ParseXml(xml_text)
self._delegate.FeedStanza(stanza)
# Needed because stanza may have cycles.
stanza.unlink()
return True
class Jid(object):
"""Simple struct for an XMPP jid (essentially an e-mail address with
an optional resource string).
"""
def __init__(self, username, domain, resource=''):
self.username = username
self.domain = domain
self.resource = resource
def __str__(self):
jid_str = "%s@%s" % (self.username, self.domain)
if self.resource:
jid_str += '/' + self.resource
return jid_str
def GetBareJid(self):
return Jid(self.username, self.domain)
class IdGenerator(object):
"""Simple class to generate unique IDs for XMPP messages."""
def __init__(self, prefix):
self._prefix = prefix
self._id = 0
def GetNextId(self):
next_id = "%s.%s" % (self._prefix, self._id)
self._id += 1
return next_id
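# For example (illustrative): IdGenerator('127.0.0.1:5222').GetNextId() returns
# '127.0.0.1:5222.0', then '127.0.0.1:5222.1', and so on.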
class HandshakeTask(object):
"""Class to handle the initial handshake with a connected XMPP
client.
"""
# The handshake states in order.
(_INITIAL_STREAM_NEEDED,
_AUTH_NEEDED,
_AUTH_STREAM_NEEDED,
_BIND_NEEDED,
_SESSION_NEEDED,
_FINISHED) = range(6)
# Used when in the _INITIAL_STREAM_NEEDED and _AUTH_STREAM_NEEDED
# states. Not an XML object as it's only the opening tag.
#
# The from and id attributes are filled in later.
_STREAM_DATA = (
'<stream:stream from="%s" id="%s" '
'version="1.0" xmlns:stream="http://etherx.jabber.org/streams" '
'xmlns="jabber:client">')
# Used when in the _INITIAL_STREAM_NEEDED state.
_AUTH_STANZA = ParseXml(
'<stream:features xmlns:stream="http://etherx.jabber.org/streams">'
' <mechanisms xmlns="urn:ietf:params:xml:ns:xmpp-sasl">'
' <mechanism>PLAIN</mechanism>'
' <mechanism>X-GOOGLE-TOKEN</mechanism>'
' <mechanism>X-OAUTH2</mechanism>'
' </mechanisms>'
'</stream:features>')
# Used when in the _AUTH_NEEDED state.
_AUTH_SUCCESS_STANZA = ParseXml(
'<success xmlns="urn:ietf:params:xml:ns:xmpp-sasl"/>')
# Used when in the _AUTH_NEEDED state.
_AUTH_FAILURE_STANZA = ParseXml(
'<failure xmlns="urn:ietf:params:xml:ns:xmpp-sasl"/>')
# Used when in the _AUTH_STREAM_NEEDED state.
_BIND_STANZA = ParseXml(
'<stream:features xmlns:stream="http://etherx.jabber.org/streams">'
' <bind xmlns="urn:ietf:params:xml:ns:xmpp-bind"/>'
' <session xmlns="urn:ietf:params:xml:ns:xmpp-session"/>'
'</stream:features>')
# Used when in the _BIND_NEEDED state.
#
# The id and jid attributes are filled in later.
_BIND_RESULT_STANZA = ParseXml(
'<iq id="" type="result">'
' <bind xmlns="urn:ietf:params:xml:ns:xmpp-bind">'
' <jid/>'
' </bind>'
'</iq>')
# Used when in the _SESSION_NEEDED state.
#
# The id attribute is filled in later.
_IQ_RESPONSE_STANZA = ParseXml('<iq id="" type="result"/>')
def __init__(self, connection, resource_prefix, authenticated):
self._connection = connection
self._id_generator = IdGenerator(resource_prefix)
self._username = ''
self._domain = ''
self._jid = None
self._authenticated = authenticated
self._resource_prefix = resource_prefix
self._state = self._INITIAL_STREAM_NEEDED
def FeedStanza(self, stanza):
"""Inspects the given stanza and changes the handshake state if needed.
Called when a stanza is received from the client. Inspects the
stanza to make sure it has the expected attributes given the
current state, advances the state if needed, and sends a reply to
the client if needed.
"""
def ExpectStanza(stanza, name):
if stanza.tagName != name:
raise UnexpectedXml(stanza)
def ExpectIq(stanza, type, name):
ExpectStanza(stanza, 'iq')
if (stanza.getAttribute('type') != type or
stanza.firstChild.tagName != name):
raise UnexpectedXml(stanza)
def GetStanzaId(stanza):
return stanza.getAttribute('id')
def HandleStream(stanza):
ExpectStanza(stanza, 'stream:stream')
domain = stanza.getAttribute('to')
if domain:
self._domain = domain
SendStreamData()
def SendStreamData():
next_id = self._id_generator.GetNextId()
stream_data = self._STREAM_DATA % (self._domain, next_id)
self._connection.SendData(stream_data)
def GetUserDomain(stanza):
encoded_username_password = stanza.firstChild.data
username_password = base64.b64decode(encoded_username_password)
(_, username_domain, _) = username_password.split('\0')
# The domain may be omitted.
#
# If we were using python 2.5, we'd be able to do:
#
# username, _, domain = username_domain.partition('@')
# if not domain:
# domain = self._domain
at_pos = username_domain.find('@')
if at_pos != -1:
username = username_domain[:at_pos]
domain = username_domain[at_pos+1:]
else:
username = username_domain
domain = self._domain
return (username, domain)
def Finish():
self._state = self._FINISHED
self._connection.HandshakeDone(self._jid)
if self._state == self._INITIAL_STREAM_NEEDED:
HandleStream(stanza)
self._connection.SendStanza(self._AUTH_STANZA, False)
self._state = self._AUTH_NEEDED
elif self._state == self._AUTH_NEEDED:
ExpectStanza(stanza, 'auth')
(self._username, self._domain) = GetUserDomain(stanza)
if self._authenticated:
self._connection.SendStanza(self._AUTH_SUCCESS_STANZA, False)
self._state = self._AUTH_STREAM_NEEDED
else:
self._connection.SendStanza(self._AUTH_FAILURE_STANZA, False)
Finish()
elif self._state == self._AUTH_STREAM_NEEDED:
HandleStream(stanza)
self._connection.SendStanza(self._BIND_STANZA, False)
self._state = self._BIND_NEEDED
elif self._state == self._BIND_NEEDED:
ExpectIq(stanza, 'set', 'bind')
stanza_id = GetStanzaId(stanza)
resource_element = stanza.getElementsByTagName('resource')[0]
resource = resource_element.firstChild.data
full_resource = '%s.%s' % (self._resource_prefix, resource)
response = CloneXml(self._BIND_RESULT_STANZA)
response.setAttribute('id', stanza_id)
self._jid = Jid(self._username, self._domain, full_resource)
jid_text = response.parentNode.createTextNode(str(self._jid))
response.getElementsByTagName('jid')[0].appendChild(jid_text)
self._connection.SendStanza(response)
self._state = self._SESSION_NEEDED
elif self._state == self._SESSION_NEEDED:
ExpectIq(stanza, 'set', 'session')
stanza_id = GetStanzaId(stanza)
xml = CloneXml(self._IQ_RESPONSE_STANZA)
xml.setAttribute('id', stanza_id)
self._connection.SendStanza(xml)
Finish()
def AddrString(addr):
return '%s:%d' % addr
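# For example (illustrative): AddrString(('127.0.0.1', 5222)) => '127.0.0.1:5222'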
class XmppConnection(asynchat.async_chat):
"""A single XMPP client connection.
This class handles the connection to a single XMPP client (via a
socket). It does the XMPP handshake and also implements the (old)
Google notification protocol.
"""
# Used for acknowledgements to the client.
#
# The from and id attributes are filled in later.
_IQ_RESPONSE_STANZA = ParseXml('<iq from="" id="" type="result"/>')
def __init__(self, sock, socket_map, delegate, addr, authenticated):
"""Starts up the xmpp connection.
Args:
sock: The socket to the client.
socket_map: A map from sockets to their owning objects.
delegate: The delegate, which is notified when the XMPP
handshake is successful, when the connection is closed, and
when a notification has to be broadcast.
addr: The host/port of the client.
"""
# We do this because in versions of python < 2.6,
# async_chat.__init__ doesn't take a map argument nor pass it to
# dispatcher.__init__. We rely on the fact that
# async_chat.__init__ calls dispatcher.__init__ as the last thing
# it does, and that calling dispatcher.__init__ with socket=None
# and map=None is essentially a no-op.
asynchat.async_chat.__init__(self)
asyncore.dispatcher.__init__(self, sock, socket_map)
self.set_terminator(None)
self._delegate = delegate
self._parser = StanzaParser(self)
self._jid = None
self._addr = addr
addr_str = AddrString(self._addr)
self._handshake_task = HandshakeTask(self, addr_str, authenticated)
print 'Starting connection to %s' % self
def __str__(self):
if self._jid:
return str(self._jid)
else:
return AddrString(self._addr)
# async_chat implementation.
def collect_incoming_data(self, data):
self._parser.FeedString(data)
# This is only here to make pychecker happy.
def found_terminator(self):
asynchat.async_chat.found_terminator(self)
def close(self):
print "Closing connection to %s" % self
self._delegate.OnXmppConnectionClosed(self)
asynchat.async_chat.close(self)
# Called by self._parser.FeedString().
def FeedStanza(self, stanza):
if self._handshake_task:
self._handshake_task.FeedStanza(stanza)
elif stanza.tagName == 'iq' and stanza.getAttribute('type') == 'result':
# Ignore all client acks.
pass
elif (stanza.firstChild and
stanza.firstChild.namespaceURI == 'google:push'):
self._HandlePushCommand(stanza)
else:
raise UnexpectedXml(stanza)
# Called by self._handshake_task.
def HandshakeDone(self, jid):
if jid:
self._jid = jid
self._handshake_task = None
self._delegate.OnXmppHandshakeDone(self)
print "Handshake done for %s" % self
else:
print "Handshake failed for %s" % self
self.close()
def _HandlePushCommand(self, stanza):
if stanza.tagName == 'iq' and stanza.firstChild.tagName == 'subscribe':
# Subscription request.
self._SendIqResponseStanza(stanza)
elif stanza.tagName == 'message' and stanza.firstChild.tagName == 'push':
# Send notification request.
self._delegate.ForwardNotification(self, stanza)
else:
raise UnexpectedXml(stanza)
def _SendIqResponseStanza(self, iq):
stanza = CloneXml(self._IQ_RESPONSE_STANZA)
stanza.setAttribute('from', str(self._jid.GetBareJid()))
stanza.setAttribute('id', iq.getAttribute('id'))
self.SendStanza(stanza)
def SendStanza(self, stanza, unlink=True):
"""Sends a stanza to the client.
Args:
stanza: The stanza to send.
unlink: Whether to unlink stanza after sending it. (Pass in
False if stanza is a constant.)
"""
self.SendData(stanza.toxml())
if unlink:
stanza.unlink()
def SendData(self, data):
"""Sends raw data to the client.
"""
# We explicitly encode to ascii as that is what the client expects
# (some minidom library functions return unicode strings).
self.push(data.encode('ascii'))
def ForwardNotification(self, notification_stanza):
"""Forwards a notification to the client."""
notification_stanza.setAttribute('from', str(self._jid.GetBareJid()))
notification_stanza.setAttribute('to', str(self._jid))
self.SendStanza(notification_stanza, False)
class XmppServer(asyncore.dispatcher):
"""The main XMPP server class.
The XMPP server starts accepting connections on the given address
and spawns off XmppConnection objects for each one.
Use like so:
socket_map = {}
xmpp_server = xmppserver.XmppServer(socket_map, ('127.0.0.1', 5222))
asyncore.loop(30.0, False, socket_map)
"""
# Used when sending a notification.
_NOTIFICATION_STANZA = ParseXml(
'<message>'
' <push xmlns="google:push">'
' <data/>'
' </push>'
'</message>')
def __init__(self, socket_map, addr):
asyncore.dispatcher.__init__(self, None, socket_map)
self.create_socket(socket.AF_INET, socket.SOCK_STREAM)
self.set_reuse_addr()
self.bind(addr)
self.listen(5)
self._socket_map = socket_map
self._connections = set()
self._handshake_done_connections = set()
self._notifications_enabled = True
self._authenticated = True
def handle_accept(self):
(sock, addr) = self.accept()
xmpp_connection = XmppConnection(
sock, self._socket_map, self, addr, self._authenticated)
self._connections.add(xmpp_connection)
# Return the new XmppConnection for testing.
return xmpp_connection
def close(self):
# A copy is necessary since calling close on each connection
# removes it from self._connections.
for connection in self._connections.copy():
connection.close()
asyncore.dispatcher.close(self)
def EnableNotifications(self):
self._notifications_enabled = True
def DisableNotifications(self):
self._notifications_enabled = False
def MakeNotification(self, channel, data):
"""Makes a notification from the given channel and encoded data.
Args:
channel: The channel on which to send the notification.
data: The notification payload.
"""
notification_stanza = CloneXml(self._NOTIFICATION_STANZA)
push_element = notification_stanza.getElementsByTagName('push')[0]
push_element.setAttribute('channel', channel)
data_element = push_element.getElementsByTagName('data')[0]
encoded_data = base64.b64encode(data)
data_text = notification_stanza.parentNode.createTextNode(encoded_data)
data_element.appendChild(data_text)
return notification_stanza
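# For example (illustrative): MakeNotification('tango', 'hello') produces a stanza
# equivalent to:
#   <message><push channel="tango" xmlns="google:push"><data>aGVsbG8=</data></push></message>
# where 'aGVsbG8=' is base64.b64encode('hello').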
def SendNotification(self, channel, data):
"""Sends a notification to all connections.
Args:
channel: The channel on which to send the notification.
data: The notification payload.
"""
notification_stanza = self.MakeNotification(channel, data)
self.ForwardNotification(None, notification_stanza)
notification_stanza.unlink()
def SetAuthenticated(self, auth_valid):
self._authenticated = auth_valid
def GetAuthenticated(self):
return self._authenticated
# XmppConnection delegate methods.
def OnXmppHandshakeDone(self, xmpp_connection):
self._handshake_done_connections.add(xmpp_connection)
def OnXmppConnectionClosed(self, xmpp_connection):
self._connections.discard(xmpp_connection)
self._handshake_done_connections.discard(xmpp_connection)
def ForwardNotification(self, unused_xmpp_connection, notification_stanza):
if self._notifications_enabled:
for connection in self._handshake_done_connections:
print 'Sending notification to %s' % connection
connection.ForwardNotification(notification_stanza)
else:
print 'Notifications disabled; dropping notification'
|
bsd-3-clause
|
ConnorDFlynn/Group1PySpider
|
tests/data_test_webpage.py
|
67
|
1106
|
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
# vim: set et sw=4 ts=4 sts=4 ff=unix fenc=utf8:
# Author: Binux<[email protected]>
# http://binux.me
# Created on 2015-01-24 13:44:10
from httpbin import app
@app.route('/pyspider/test.html')
def test_page():
return '''
<a href="/404">404
<a href="/links/10/0">0
<a href="/links/10/1">1
<a href="/links/10/2">2
<a href="/links/10/3">3
<a href="/links/10/4">4
<a href="/gzip">gzip
<a href="/get">get
<a href="/deflate">deflate
<a href="/html">html
<a href="/xml">xml
<a href="/robots.txt">robots
<a href="/cache">cache
<a href="/stream/20">stream
'''
@app.route('/pyspider/ajax.html')
def test_ajax():
return '''
<div class=status>loading...</div>
<div class=ua></div>
<div class=ip></div>
<script>
var xhr = new XMLHttpRequest();
xhr.onload = function() {
var data = JSON.parse(xhr.responseText);
document.querySelector('.status').innerHTML = 'done';
document.querySelector('.ua').innerHTML = data.headers['User-Agent'];
document.querySelector('.ip').innerHTML = data.origin;
}
xhr.open("get", "/get", true);
xhr.send();
</script>
'''
|
apache-2.0
|
a-b/PopClip-Extensions
|
source/InstantTranslate/main.py
|
5
|
1603
|
# coding=utf-8
LANG_CODES = {
"Arabic": "ar",
"Bosnian (Latin)": "bs-Latn",
"Bulgarian": "bg",
"Catalan": "ca",
"Chinese Simplified": "zh-CHS",
"Chinese Traditional": "zh-CHT",
"Croatian": "hr",
"Czech": "cs",
"Danish": "da",
"Dutch": "nl",
"English": "en",
"Estonian": "et",
"Finnish": "fi",
"French": "fr",
"German": "de",
"Greek": "el",
"Haitian Creole": "ht",
"Hebrew": "he",
"Hindi": "hi",
"Hmong Daw": "mww",
"Hungarian": "hu",
"Indonesian": "id",
"Italian": "it",
"Japanese": "ja",
"Klingon": "tlh",
"Klingon (pIqaD)": "tlh-Qaak",
"Korean": "ko",
"Latvian": "lv",
"Lithuanian": "lt",
"Malay": "ms",
"Maltese": "mt",
"Norwegian": "no",
"Persian": "fa",
"Polish": "pl",
"Portuguese": "pt",
"Querétaro Otomi": "otq",
"Romanian": "ro",
"Russian": "ru",
"Serbian (Cyrillic)": "sr-Cyrl",
"Serbian (Latin)": "sr-Latn",
"Slovak": "sk",
"Slovenian": "sl",
"Spanish": "es",
"Swedish": "sv",
"Thai": "th",
"Turkish": "tr",
"Ukrainian": "uk",
"Urdu": "ur",
"Vietnamese": "vi",
"Welsh": "cy",
"Yucatec Maya": "yua"
}
import os, access, mstrans
c = access.get_credentials()
translator = mstrans.Translator(client_id=c[0], client_secret=c[1])
translation = translator.translate_text(text=os.environ['POPCLIP_TEXT'],
from_lang='',
to_lang=LANG_CODES[os.environ['POPCLIP_OPTION_DESTLANG']])
print translation.encode('utf-8')
|
mit
|
Akshay0724/scikit-learn
|
examples/text/hashing_vs_dict_vectorizer.py
|
93
|
3243
|
"""
===========================================
FeatureHasher and DictVectorizer Comparison
===========================================
Compares FeatureHasher and DictVectorizer by using both to vectorize
text documents.
The example demonstrates syntax and speed only; it doesn't actually do
anything useful with the extracted vectors. See the example scripts
{document_classification_20newsgroups,clustering}.py for actual learning
on text documents.
A discrepancy between the number of terms reported for DictVectorizer and
for FeatureHasher is to be expected due to hash collisions.
"""
# Author: Lars Buitinck
# License: BSD 3 clause
from __future__ import print_function
from collections import defaultdict
import re
import sys
from time import time
import numpy as np
from sklearn.datasets import fetch_20newsgroups
from sklearn.feature_extraction import DictVectorizer, FeatureHasher
def n_nonzero_columns(X):
"""Returns the number of non-zero columns in a CSR matrix X."""
return len(np.unique(X.nonzero()[1]))
def tokens(doc):
"""Extract tokens from doc.
This uses a simple regex to break strings into tokens. For a more
principled approach, see CountVectorizer or TfidfVectorizer.
"""
return (tok.lower() for tok in re.findall(r"\w+", doc))
def token_freqs(doc):
"""Extract a dict mapping tokens from doc to their frequencies."""
freq = defaultdict(int)
for tok in tokens(doc):
freq[tok] += 1
return freq
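# For example (illustrative): token_freqs("To be or not to be") yields a mapping
# equivalent to {'to': 2, 'be': 2, 'or': 1, 'not': 1}.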
categories = [
'alt.atheism',
'comp.graphics',
'comp.sys.ibm.pc.hardware',
'misc.forsale',
'rec.autos',
'sci.space',
'talk.religion.misc',
]
# Uncomment the following line to use a larger set (11k+ documents)
#categories = None
print(__doc__)
print("Usage: %s [n_features_for_hashing]" % sys.argv[0])
print(" The default number of features is 2**18.")
print()
try:
n_features = int(sys.argv[1])
except IndexError:
n_features = 2 ** 18
except ValueError:
print("not a valid number of features: %r" % sys.argv[1])
sys.exit(1)
print("Loading 20 newsgroups training data")
raw_data = fetch_20newsgroups(subset='train', categories=categories).data
data_size_mb = sum(len(s.encode('utf-8')) for s in raw_data) / 1e6
print("%d documents - %0.3fMB" % (len(raw_data), data_size_mb))
print()
print("DictVectorizer")
t0 = time()
vectorizer = DictVectorizer()
vectorizer.fit_transform(token_freqs(d) for d in raw_data)
duration = time() - t0
print("done in %fs at %0.3fMB/s" % (duration, data_size_mb / duration))
print("Found %d unique terms" % len(vectorizer.get_feature_names()))
print()
print("FeatureHasher on frequency dicts")
t0 = time()
hasher = FeatureHasher(n_features=n_features)
X = hasher.transform(token_freqs(d) for d in raw_data)
duration = time() - t0
print("done in %fs at %0.3fMB/s" % (duration, data_size_mb / duration))
print("Found %d unique terms" % n_nonzero_columns(X))
print()
print("FeatureHasher on raw tokens")
t0 = time()
hasher = FeatureHasher(n_features=n_features, input_type="string")
X = hasher.transform(tokens(d) for d in raw_data)
duration = time() - t0
print("done in %fs at %0.3fMB/s" % (duration, data_size_mb / duration))
print("Found %d unique terms" % n_nonzero_columns(X))
|
bsd-3-clause
|
jaloren/robotframework
|
utest/reporting/test_stringcache.py
|
9
|
2439
|
import time
import random
import string
import unittest
from robot.reporting.stringcache import StringCache, StringIndex
from robot.utils.asserts import assert_equal, assert_true, assert_false
try:
long
except NameError:
long = int
class TestStringCache(unittest.TestCase):
def setUp(self):
# To make the test reproducible, log the random seed if the test fails
self._seed = long(time.time() * 256)
random.seed(self._seed)
self.cache = StringCache()
def _verify_text(self, string, expected):
self.cache.add(string)
assert_equal(('*', expected), self.cache.dump())
def _compress(self, text):
return self.cache._encode(text)
def test_short_test_is_not_compressed(self):
self._verify_text('short', '*short')
def test_long_test_is_compressed(self):
long_string = 'long'*1000
self._verify_text(long_string, self._compress(long_string))
def test_coded_string_is_at_most_1_characters_longer_than_raw(self):
for i in range(300):
id = self.cache.add(self._generate_random_string(i))
assert_true(i+1 >= len(self.cache.dump()[id]),
'len(self._text_cache.dump()[id]) (%s) > i+1 (%s) [test seed = %s]'
% (len(self.cache.dump()[id]), i+1, self._seed))
def test_long_random_strings_are_compressed(self):
for i in range(30):
value = self._generate_random_string(300)
id = self.cache.add(value)
assert_equal(self._compress(value), self.cache.dump()[id],
msg='Did not compress [test seed = %s]' % self._seed)
def _generate_random_string(self, length):
return ''.join(random.choice(string.digits) for _ in range(length))
def test_indices_reused_instances(self):
strings = ['', 'short', 'long'*1000, '']
indices1 = [self.cache.add(s) for s in strings]
indices2 = [self.cache.add(s) for s in strings]
for i1, i2 in zip(indices1, indices2):
assert_true(i1 is i2, 'not same: %s and %s' % (i1, i2))
class TestStringIndex(unittest.TestCase):
def test_to_string(self):
value = StringIndex(42)
assert_equal(str(value), '42')
def test_truth(self):
assert_true(StringIndex(1))
assert_true(StringIndex(-42))
assert_false(StringIndex(0))
if __name__ == '__main__':
unittest.main()
|
apache-2.0
|
rahushen/ansible
|
lib/ansible/modules/network/aci/aci_interface_selector_to_switch_policy_leaf_profile.py
|
26
|
7567
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2017, Bruno Calogero <[email protected]>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: aci_interface_selector_to_switch_policy_leaf_profile
short_description: Bind interface selector profiles to switch policy leaf profiles (infra:RsAccPortP)
description:
- Bind interface selector profiles to switch policy leaf profiles on Cisco ACI fabrics.
notes:
- This module requires an existing leaf profile; the module M(aci_switch_policy_leaf_profile) can be used to create one.
- More information about the internal APIC class B(infra:RsAccPortP) from
L(the APIC Management Information Model reference,https://developer.cisco.com/docs/apic-mim-ref/).
author:
- Bruno Calogero (@brunocalogero)
version_added: '2.5'
options:
leaf_profile:
description:
- Name of the Leaf Profile to which we add a Selector.
aliases: [ leaf_profile_name ]
interface_selector:
description:
- Name of Interface Profile Selector to be added and associated with the Leaf Profile.
aliases: [ name, interface_selector_name, interface_profile_name ]
state:
description:
- Use C(present) or C(absent) for adding or removing.
- Use C(query) for listing an object or multiple objects.
choices: [ absent, present, query ]
default: present
extends_documentation_fragment: aci
'''
EXAMPLES = r'''
- name: Associating an interface selector profile to a switch policy leaf profile
aci_interface_selector_to_switch_policy_leaf_profile:
host: apic
username: admin
password: SomeSecretPassword
leaf_profile: sw_name
interface_selector: interface_profile_name
state: present
- name: Remove an interface selector profile associated with a switch policy leaf profile
aci_interface_selector_to_switch_policy_leaf_profile:
host: apic
username: admin
password: SomeSecretPassword
leaf_profile: sw_name
interface_selector: interface_profile_name
state: absent
- name: Query an interface selector profile associated with a switch policy leaf profile
aci_interface_selector_to_switch_policy_leaf_profile:
host: apic
username: admin
password: SomeSecretPassword
leaf_profile: sw_name
interface_selector: interface_profile_name
state: query
'''
RETURN = r'''
current:
description: The existing configuration from the APIC after the module has finished
returned: success
type: list
sample:
[
{
"fvTenant": {
"attributes": {
"descr": "Production environment",
"dn": "uni/tn-production",
"name": "production",
"nameAlias": "",
"ownerKey": "",
"ownerTag": ""
}
}
}
]
error:
description: The error information as returned from the APIC
returned: failure
type: dict
sample:
{
"code": "122",
"text": "unknown managed object class foo"
}
raw:
description: The raw output returned by the APIC REST API (xml or json)
returned: parse error
type: string
sample: '<?xml version="1.0" encoding="UTF-8"?><imdata totalCount="1"><error code="122" text="unknown managed object class foo"/></imdata>'
sent:
description: The actual/minimal configuration pushed to the APIC
returned: info
type: list
sample:
{
"fvTenant": {
"attributes": {
"descr": "Production environment"
}
}
}
previous:
description: The original configuration from the APIC before the module has started
returned: info
type: list
sample:
[
{
"fvTenant": {
"attributes": {
"descr": "Production",
"dn": "uni/tn-production",
"name": "production",
"nameAlias": "",
"ownerKey": "",
"ownerTag": ""
}
}
}
]
proposed:
description: The assembled configuration from the user-provided parameters
returned: info
type: dict
sample:
{
"fvTenant": {
"attributes": {
"descr": "Production environment",
"name": "production"
}
}
}
filter_string:
description: The filter string used for the request
returned: failure or debug
type: string
sample: ?rsp-prop-include=config-only
method:
description: The HTTP method used for the request to the APIC
returned: failure or debug
type: string
sample: POST
response:
description: The HTTP response from the APIC
returned: failure or debug
type: string
sample: OK (30 bytes)
status:
description: The HTTP status from the APIC
returned: failure or debug
type: int
sample: 200
url:
description: The HTTP url used for the request to the APIC
returned: failure or debug
type: string
sample: https://10.11.12.13/api/mo/uni/tn-production.json
'''
from ansible.module_utils.network.aci.aci import ACIModule, aci_argument_spec
from ansible.module_utils.basic import AnsibleModule
def main():
argument_spec = aci_argument_spec()
argument_spec.update(
leaf_profile=dict(type='str', aliases=['leaf_profile_name']), # Not required for querying all objects
interface_selector=dict(type='str', aliases=['interface_profile_name', 'interface_selector_name', 'name']), # Not required for querying all objects
state=dict(type='str', default='present', choices=['absent', 'present', 'query'])
)
module = AnsibleModule(
argument_spec=argument_spec,
supports_check_mode=True,
required_if=[
['state', 'absent', ['leaf_profile', 'interface_selector']],
['state', 'present', ['leaf_profile', 'interface_selector']]
],
)
leaf_profile = module.params['leaf_profile']
    # WARNING: interface_selector accepts non-existing interface_profile names; they appear in the APIC GUI with a state of "missing-target"
interface_selector = module.params['interface_selector']
state = module.params['state']
# Defining the interface profile tDn for clarity
interface_selector_tDn = 'uni/infra/accportprof-{0}'.format(interface_selector)
aci = ACIModule(module)
aci.construct_url(
root_class=dict(
aci_class='infraNodeP',
aci_rn='infra/nprof-{0}'.format(leaf_profile),
filter_target='eq(infraNodeP.name, "{0}")'.format(leaf_profile),
module_object=leaf_profile
),
subclass_1=dict(
aci_class='infraRsAccPortP',
aci_rn='rsaccPortP-[{0}]'.format(interface_selector_tDn),
filter_target='eq(infraRsAccPortP.name, "{0}")'.format(interface_selector),
module_object=interface_selector,
)
)
aci.get_existing()
if state == 'present':
aci.payload(
aci_class='infraRsAccPortP',
class_config=dict(tDn=interface_selector_tDn),
)
aci.get_diff(aci_class='infraRsAccPortP')
aci.post_config()
elif state == 'absent':
aci.delete_config()
aci.exit_json()
if __name__ == "__main__":
main()
|
gpl-3.0
|
ryfeus/lambda-packs
|
Sklearn_scipy_numpy/source/sklearn/svm/base.py
|
6
|
36145
|
from __future__ import print_function
import numpy as np
import scipy.sparse as sp
import warnings
from abc import ABCMeta, abstractmethod
from . import libsvm, liblinear
from . import libsvm_sparse
from ..base import BaseEstimator, ClassifierMixin, ChangedBehaviorWarning
from ..preprocessing import LabelEncoder
from ..multiclass import _ovr_decision_function
from ..utils import check_array, check_random_state, column_or_1d
from ..utils import ConvergenceWarning, compute_class_weight, deprecated
from ..utils.extmath import safe_sparse_dot
from ..utils.validation import check_is_fitted, NotFittedError
from ..utils.multiclass import check_classification_targets
from ..externals import six
LIBSVM_IMPL = ['c_svc', 'nu_svc', 'one_class', 'epsilon_svr', 'nu_svr']
def _one_vs_one_coef(dual_coef, n_support, support_vectors):
"""Generate primal coefficients from dual coefficients
for the one-vs-one multi class LibSVM in the case
of a linear kernel."""
# get 1vs1 weights for all n*(n-1) classifiers.
# this is somewhat messy.
# shape of dual_coef_ is nSV * (n_classes -1)
# see docs for details
n_class = dual_coef.shape[0] + 1
# XXX we could do preallocation of coef but
# would have to take care in the sparse case
coef = []
sv_locs = np.cumsum(np.hstack([[0], n_support]))
for class1 in range(n_class):
# SVs for class1:
sv1 = support_vectors[sv_locs[class1]:sv_locs[class1 + 1], :]
for class2 in range(class1 + 1, n_class):
            # SVs for class2:
sv2 = support_vectors[sv_locs[class2]:sv_locs[class2 + 1], :]
# dual coef for class1 SVs:
alpha1 = dual_coef[class2 - 1, sv_locs[class1]:sv_locs[class1 + 1]]
# dual coef for class2 SVs:
alpha2 = dual_coef[class1, sv_locs[class2]:sv_locs[class2 + 1]]
# build weight for class1 vs class2
coef.append(safe_sparse_dot(alpha1, sv1)
+ safe_sparse_dot(alpha2, sv2))
return coef
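# Shape sketch for the helper above: for a 3-class problem, dual_coef has shape
# (n_classes - 1, n_SV) and the returned list holds one primal vector per class
# pair, in the order (0 vs 1), (0 vs 2), (1 vs 2), i.e.
# n_classes * (n_classes - 1) / 2 entries in total.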
class BaseLibSVM(six.with_metaclass(ABCMeta, BaseEstimator)):
"""Base class for estimators that use libsvm as backing library
This implements support vector machine classification and regression.
Parameter documentation is in the derived `SVC` class.
"""
# The order of these must match the integer values in LibSVM.
# XXX These are actually the same in the dense case. Need to factor
# this out.
_sparse_kernels = ["linear", "poly", "rbf", "sigmoid", "precomputed"]
@abstractmethod
def __init__(self, impl, kernel, degree, gamma, coef0,
tol, C, nu, epsilon, shrinking, probability, cache_size,
class_weight, verbose, max_iter, random_state):
if impl not in LIBSVM_IMPL: # pragma: no cover
raise ValueError("impl should be one of %s, %s was given" % (
LIBSVM_IMPL, impl))
# FIXME Remove gamma=0.0 support in 0.18
if gamma == 0:
msg = ("gamma=%s has been deprecated in favor of "
"gamma='%s' as of 0.17. Backward compatibility"
" for gamma=%s will be removed in %s")
invalid_gamma = 0.0
warnings.warn(msg % (invalid_gamma, "auto", invalid_gamma, "0.18"),
DeprecationWarning)
self._impl = impl
self.kernel = kernel
self.degree = degree
self.gamma = gamma
self.coef0 = coef0
self.tol = tol
self.C = C
self.nu = nu
self.epsilon = epsilon
self.shrinking = shrinking
self.probability = probability
self.cache_size = cache_size
self.class_weight = class_weight
self.verbose = verbose
self.max_iter = max_iter
self.random_state = random_state
@property
def _pairwise(self):
# Used by cross_val_score.
kernel = self.kernel
return kernel == "precomputed" or callable(kernel)
def fit(self, X, y, sample_weight=None):
"""Fit the SVM model according to the given training data.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training vectors, where n_samples is the number of samples
and n_features is the number of features.
For kernel="precomputed", the expected shape of X is
(n_samples, n_samples).
y : array-like, shape (n_samples,)
Target values (class labels in classification, real numbers in
regression)
sample_weight : array-like, shape (n_samples,)
Per-sample weights. Rescale C per sample. Higher weights
force the classifier to put more emphasis on these points.
Returns
-------
self : object
Returns self.
Notes
        -----
If X and y are not C-ordered and contiguous arrays of np.float64 and
X is not a scipy.sparse.csr_matrix, X and/or y may be copied.
If X is a dense array, then the other methods will not support sparse
matrices as input.
"""
rnd = check_random_state(self.random_state)
sparse = sp.isspmatrix(X)
if sparse and self.kernel == "precomputed":
raise TypeError("Sparse precomputed kernels are not supported.")
self._sparse = sparse and not callable(self.kernel)
X = check_array(X, accept_sparse='csr', dtype=np.float64, order='C')
y = self._validate_targets(y)
sample_weight = np.asarray([]
if sample_weight is None
else sample_weight, dtype=np.float64)
solver_type = LIBSVM_IMPL.index(self._impl)
# input validation
if solver_type != 2 and X.shape[0] != y.shape[0]:
raise ValueError("X and y have incompatible shapes.\n" +
"X has %s samples, but y has %s." %
(X.shape[0], y.shape[0]))
if self.kernel == "precomputed" and X.shape[0] != X.shape[1]:
raise ValueError("X.shape[0] should be equal to X.shape[1]")
if sample_weight.shape[0] > 0 and sample_weight.shape[0] != X.shape[0]:
raise ValueError("sample_weight and X have incompatible shapes: "
"%r vs %r\n"
"Note: Sparse matrices cannot be indexed w/"
"boolean masks (use `indices=True` in CV)."
% (sample_weight.shape, X.shape))
# FIXME remove (self.gamma == 0) in 0.18
if (self.kernel in ['poly', 'rbf']) and ((self.gamma == 0) or
(self.gamma == 'auto')):
# if custom gamma is not provided ...
self._gamma = 1.0 / X.shape[1]
elif self.gamma == 'auto':
self._gamma = 0.0
else:
self._gamma = self.gamma
kernel = self.kernel
if callable(kernel):
kernel = 'precomputed'
fit = self._sparse_fit if self._sparse else self._dense_fit
if self.verbose: # pragma: no cover
print('[LibSVM]', end='')
seed = rnd.randint(np.iinfo('i').max)
fit(X, y, sample_weight, solver_type, kernel, random_seed=seed)
# see comment on the other call to np.iinfo in this file
self.shape_fit_ = X.shape
# In binary case, we need to flip the sign of coef, intercept and
# decision function. Use self._intercept_ and self._dual_coef_ internally.
self._intercept_ = self.intercept_.copy()
self._dual_coef_ = self.dual_coef_
if self._impl in ['c_svc', 'nu_svc'] and len(self.classes_) == 2:
self.intercept_ *= -1
self.dual_coef_ = -self.dual_coef_
return self
def _validate_targets(self, y):
"""Validation of y and class_weight.
Default implementation for SVR and one-class; overridden in BaseSVC.
"""
# XXX this is ugly.
# Regression models should not have a class_weight_ attribute.
self.class_weight_ = np.empty(0)
return column_or_1d(y, warn=True).astype(np.float64)
def _warn_from_fit_status(self):
assert self.fit_status_ in (0, 1)
if self.fit_status_ == 1:
warnings.warn('Solver terminated early (max_iter=%i).'
' Consider pre-processing your data with'
' StandardScaler or MinMaxScaler.'
% self.max_iter, ConvergenceWarning)
def _dense_fit(self, X, y, sample_weight, solver_type, kernel,
random_seed):
if callable(self.kernel):
# you must store a reference to X to compute the kernel in predict
# TODO: add keyword copy to copy on demand
self.__Xfit = X
X = self._compute_kernel(X)
if X.shape[0] != X.shape[1]:
raise ValueError("X.shape[0] should be equal to X.shape[1]")
libsvm.set_verbosity_wrap(self.verbose)
# we don't pass **self.get_params() to allow subclasses to
# add other parameters to __init__
self.support_, self.support_vectors_, self.n_support_, \
self.dual_coef_, self.intercept_, self.probA_, \
self.probB_, self.fit_status_ = libsvm.fit(
X, y,
svm_type=solver_type, sample_weight=sample_weight,
class_weight=self.class_weight_, kernel=kernel, C=self.C,
nu=self.nu, probability=self.probability, degree=self.degree,
shrinking=self.shrinking, tol=self.tol,
cache_size=self.cache_size, coef0=self.coef0,
gamma=self._gamma, epsilon=self.epsilon,
max_iter=self.max_iter, random_seed=random_seed)
self._warn_from_fit_status()
def _sparse_fit(self, X, y, sample_weight, solver_type, kernel,
random_seed):
X.data = np.asarray(X.data, dtype=np.float64, order='C')
X.sort_indices()
kernel_type = self._sparse_kernels.index(kernel)
libsvm_sparse.set_verbosity_wrap(self.verbose)
self.support_, self.support_vectors_, dual_coef_data, \
self.intercept_, self.n_support_, \
self.probA_, self.probB_, self.fit_status_ = \
libsvm_sparse.libsvm_sparse_train(
X.shape[1], X.data, X.indices, X.indptr, y, solver_type,
kernel_type, self.degree, self._gamma, self.coef0, self.tol,
self.C, self.class_weight_,
sample_weight, self.nu, self.cache_size, self.epsilon,
int(self.shrinking), int(self.probability), self.max_iter,
random_seed)
self._warn_from_fit_status()
if hasattr(self, "classes_"):
n_class = len(self.classes_) - 1
else: # regression
n_class = 1
n_SV = self.support_vectors_.shape[0]
dual_coef_indices = np.tile(np.arange(n_SV), n_class)
dual_coef_indptr = np.arange(0, dual_coef_indices.size + 1,
dual_coef_indices.size / n_class)
self.dual_coef_ = sp.csr_matrix(
(dual_coef_data, dual_coef_indices, dual_coef_indptr),
(n_class, n_SV))
def predict(self, X):
"""Perform regression on samples in X.
        For a one-class model, +1 or -1 is returned.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
For kernel="precomputed", the expected shape of X is
(n_samples_test, n_samples_train).
Returns
-------
y_pred : array, shape (n_samples,)
"""
X = self._validate_for_predict(X)
predict = self._sparse_predict if self._sparse else self._dense_predict
return predict(X)
def _dense_predict(self, X):
n_samples, n_features = X.shape
X = self._compute_kernel(X)
if X.ndim == 1:
X = check_array(X, order='C')
kernel = self.kernel
if callable(self.kernel):
kernel = 'precomputed'
if X.shape[1] != self.shape_fit_[0]:
raise ValueError("X.shape[1] = %d should be equal to %d, "
"the number of samples at training time" %
(X.shape[1], self.shape_fit_[0]))
svm_type = LIBSVM_IMPL.index(self._impl)
return libsvm.predict(
X, self.support_, self.support_vectors_, self.n_support_,
self._dual_coef_, self._intercept_,
self.probA_, self.probB_, svm_type=svm_type, kernel=kernel,
degree=self.degree, coef0=self.coef0, gamma=self._gamma,
cache_size=self.cache_size)
def _sparse_predict(self, X):
# Precondition: X is a csr_matrix of dtype np.float64.
kernel = self.kernel
if callable(kernel):
kernel = 'precomputed'
kernel_type = self._sparse_kernels.index(kernel)
C = 0.0 # C is not useful here
return libsvm_sparse.libsvm_sparse_predict(
X.data, X.indices, X.indptr,
self.support_vectors_.data,
self.support_vectors_.indices,
self.support_vectors_.indptr,
self._dual_coef_.data, self._intercept_,
LIBSVM_IMPL.index(self._impl), kernel_type,
self.degree, self._gamma, self.coef0, self.tol,
C, self.class_weight_,
self.nu, self.epsilon, self.shrinking,
self.probability, self.n_support_,
self.probA_, self.probB_)
def _compute_kernel(self, X):
"""Return the data transformed by a callable kernel"""
if callable(self.kernel):
# in the case of precomputed kernel given as a function, we
# have to compute explicitly the kernel matrix
kernel = self.kernel(X, self.__Xfit)
if sp.issparse(kernel):
kernel = kernel.toarray()
X = np.asarray(kernel, dtype=np.float64, order='C')
return X
@deprecated(" and will be removed in 0.19")
def decision_function(self, X):
"""Distance of the samples X to the separating hyperplane.
Parameters
----------
X : array-like, shape (n_samples, n_features)
For kernel="precomputed", the expected shape of X is
[n_samples_test, n_samples_train].
Returns
-------
X : array-like, shape (n_samples, n_class * (n_class-1) / 2)
Returns the decision function of the sample for each class
in the model.
"""
return self._decision_function(X)
def _decision_function(self, X):
"""Distance of the samples X to the separating hyperplane.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Returns
-------
X : array-like, shape (n_samples, n_class * (n_class-1) / 2)
Returns the decision function of the sample for each class
in the model.
"""
# NOTE: _validate_for_predict contains check for is_fitted
# hence must be placed before any other attributes are used.
X = self._validate_for_predict(X)
X = self._compute_kernel(X)
if self._sparse:
dec_func = self._sparse_decision_function(X)
else:
dec_func = self._dense_decision_function(X)
# In binary case, we need to flip the sign of coef, intercept and
# decision function.
if self._impl in ['c_svc', 'nu_svc'] and len(self.classes_) == 2:
return -dec_func.ravel()
return dec_func
def _dense_decision_function(self, X):
X = check_array(X, dtype=np.float64, order="C")
kernel = self.kernel
if callable(kernel):
kernel = 'precomputed'
return libsvm.decision_function(
X, self.support_, self.support_vectors_, self.n_support_,
self._dual_coef_, self._intercept_,
self.probA_, self.probB_,
svm_type=LIBSVM_IMPL.index(self._impl),
kernel=kernel, degree=self.degree, cache_size=self.cache_size,
coef0=self.coef0, gamma=self._gamma)
def _sparse_decision_function(self, X):
X.data = np.asarray(X.data, dtype=np.float64, order='C')
kernel = self.kernel
if hasattr(kernel, '__call__'):
kernel = 'precomputed'
kernel_type = self._sparse_kernels.index(kernel)
return libsvm_sparse.libsvm_sparse_decision_function(
X.data, X.indices, X.indptr,
self.support_vectors_.data,
self.support_vectors_.indices,
self.support_vectors_.indptr,
self._dual_coef_.data, self._intercept_,
LIBSVM_IMPL.index(self._impl), kernel_type,
self.degree, self._gamma, self.coef0, self.tol,
self.C, self.class_weight_,
self.nu, self.epsilon, self.shrinking,
self.probability, self.n_support_,
self.probA_, self.probB_)
def _validate_for_predict(self, X):
check_is_fitted(self, 'support_')
X = check_array(X, accept_sparse='csr', dtype=np.float64, order="C")
if self._sparse and not sp.isspmatrix(X):
X = sp.csr_matrix(X)
if self._sparse:
X.sort_indices()
if sp.issparse(X) and not self._sparse and not callable(self.kernel):
raise ValueError(
"cannot use sparse input in %r trained on dense data"
% type(self).__name__)
n_samples, n_features = X.shape
if self.kernel == "precomputed":
if X.shape[1] != self.shape_fit_[0]:
raise ValueError("X.shape[1] = %d should be equal to %d, "
"the number of samples at training time" %
(X.shape[1], self.shape_fit_[0]))
elif n_features != self.shape_fit_[1]:
raise ValueError("X.shape[1] = %d should be equal to %d, "
"the number of features at training time" %
(n_features, self.shape_fit_[1]))
return X
@property
def coef_(self):
if self.kernel != 'linear':
raise ValueError('coef_ is only available when using a '
'linear kernel')
coef = self._get_coef()
# coef_ being a read-only property, it's better to mark the value as
# immutable to avoid hiding potential bugs for the unsuspecting user.
if sp.issparse(coef):
            # sparse matrices do not have global flags
coef.data.flags.writeable = False
else:
# regular dense array
coef.flags.writeable = False
return coef
def _get_coef(self):
return safe_sparse_dot(self._dual_coef_, self.support_vectors_)
class BaseSVC(six.with_metaclass(ABCMeta, BaseLibSVM, ClassifierMixin)):
"""ABC for LibSVM-based classifiers."""
@abstractmethod
def __init__(self, impl, kernel, degree, gamma, coef0, tol, C, nu,
shrinking, probability, cache_size, class_weight, verbose,
max_iter, decision_function_shape, random_state):
self.decision_function_shape = decision_function_shape
super(BaseSVC, self).__init__(
impl=impl, kernel=kernel, degree=degree, gamma=gamma, coef0=coef0,
tol=tol, C=C, nu=nu, epsilon=0., shrinking=shrinking,
probability=probability, cache_size=cache_size,
class_weight=class_weight, verbose=verbose, max_iter=max_iter,
random_state=random_state)
def _validate_targets(self, y):
y_ = column_or_1d(y, warn=True)
check_classification_targets(y)
cls, y = np.unique(y_, return_inverse=True)
self.class_weight_ = compute_class_weight(self.class_weight, cls, y_)
if len(cls) < 2:
raise ValueError(
"The number of classes has to be greater than one; got %d"
% len(cls))
self.classes_ = cls
return np.asarray(y, dtype=np.float64, order='C')
def decision_function(self, X):
"""Distance of the samples X to the separating hyperplane.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Returns
-------
X : array-like, shape (n_samples, n_classes * (n_classes-1) / 2)
Returns the decision function of the sample for each class
in the model.
If decision_function_shape='ovr', the shape is (n_samples,
n_classes)
"""
dec = self._decision_function(X)
if self.decision_function_shape is None and len(self.classes_) > 2:
warnings.warn("The decision_function_shape default value will "
"change from 'ovo' to 'ovr' in 0.18. This will change "
"the shape of the decision function returned by "
"SVC.", ChangedBehaviorWarning)
if self.decision_function_shape == 'ovr' and len(self.classes_) > 2:
return _ovr_decision_function(dec < 0, dec, len(self.classes_))
return dec
def predict(self, X):
"""Perform classification on samples in X.
        For a one-class model, +1 or -1 is returned.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
For kernel="precomputed", the expected shape of X is
[n_samples_test, n_samples_train]
Returns
-------
y_pred : array, shape (n_samples,)
Class labels for samples in X.
"""
y = super(BaseSVC, self).predict(X)
return self.classes_.take(np.asarray(y, dtype=np.intp))
# Hacky way of getting predict_proba to raise an AttributeError when
# probability=False using properties. Do not use this in new code; when
# probabilities are not available depending on a setting, introduce two
# estimators.
def _check_proba(self):
if not self.probability:
            raise AttributeError("predict_proba is not available when "
                                 "probability=False")
if self._impl not in ('c_svc', 'nu_svc'):
raise AttributeError("predict_proba only implemented for SVC"
" and NuSVC")
@property
def predict_proba(self):
"""Compute probabilities of possible outcomes for samples in X.
        The model needs to have probability information computed at training
time: fit with attribute `probability` set to True.
Parameters
----------
X : array-like, shape (n_samples, n_features)
For kernel="precomputed", the expected shape of X is
[n_samples_test, n_samples_train]
Returns
-------
T : array-like, shape (n_samples, n_classes)
Returns the probability of the sample for each class in
the model. The columns correspond to the classes in sorted
order, as they appear in the attribute `classes_`.
Notes
-----
The probability model is created using cross validation, so
the results can be slightly different than those obtained by
predict. Also, it will produce meaningless results on very small
datasets.
"""
self._check_proba()
return self._predict_proba
def _predict_proba(self, X):
X = self._validate_for_predict(X)
if self.probA_.size == 0 or self.probB_.size == 0:
raise NotFittedError("predict_proba is not available when fitted "
"with probability=False")
pred_proba = (self._sparse_predict_proba
if self._sparse else self._dense_predict_proba)
return pred_proba(X)
@property
def predict_log_proba(self):
"""Compute log probabilities of possible outcomes for samples in X.
        The model needs to have probability information computed at training
time: fit with attribute `probability` set to True.
Parameters
----------
X : array-like, shape (n_samples, n_features)
For kernel="precomputed", the expected shape of X is
[n_samples_test, n_samples_train]
Returns
-------
T : array-like, shape (n_samples, n_classes)
Returns the log-probabilities of the sample for each class in
the model. The columns correspond to the classes in sorted
order, as they appear in the attribute `classes_`.
Notes
-----
The probability model is created using cross validation, so
the results can be slightly different than those obtained by
predict. Also, it will produce meaningless results on very small
datasets.
"""
self._check_proba()
return self._predict_log_proba
def _predict_log_proba(self, X):
return np.log(self.predict_proba(X))
def _dense_predict_proba(self, X):
X = self._compute_kernel(X)
kernel = self.kernel
if callable(kernel):
kernel = 'precomputed'
svm_type = LIBSVM_IMPL.index(self._impl)
pprob = libsvm.predict_proba(
X, self.support_, self.support_vectors_, self.n_support_,
self._dual_coef_, self._intercept_,
self.probA_, self.probB_,
svm_type=svm_type, kernel=kernel, degree=self.degree,
cache_size=self.cache_size, coef0=self.coef0, gamma=self._gamma)
return pprob
def _sparse_predict_proba(self, X):
X.data = np.asarray(X.data, dtype=np.float64, order='C')
kernel = self.kernel
if callable(kernel):
kernel = 'precomputed'
kernel_type = self._sparse_kernels.index(kernel)
return libsvm_sparse.libsvm_sparse_predict_proba(
X.data, X.indices, X.indptr,
self.support_vectors_.data,
self.support_vectors_.indices,
self.support_vectors_.indptr,
self._dual_coef_.data, self._intercept_,
LIBSVM_IMPL.index(self._impl), kernel_type,
self.degree, self._gamma, self.coef0, self.tol,
self.C, self.class_weight_,
self.nu, self.epsilon, self.shrinking,
self.probability, self.n_support_,
self.probA_, self.probB_)
def _get_coef(self):
if self.dual_coef_.shape[0] == 1:
# binary classifier
coef = safe_sparse_dot(self.dual_coef_, self.support_vectors_)
else:
# 1vs1 classifier
coef = _one_vs_one_coef(self.dual_coef_, self.n_support_,
self.support_vectors_)
if sp.issparse(coef[0]):
coef = sp.vstack(coef).tocsr()
else:
coef = np.vstack(coef)
return coef
def _get_liblinear_solver_type(multi_class, penalty, loss, dual):
"""Find the liblinear magic number for the solver.
This number depends on the values of the following attributes:
- multi_class
- penalty
- loss
- dual
The same number is also internally used by LibLinear to determine
which solver to use.
"""
# nested dicts containing level 1: available loss functions,
    # level2: available penalties for the given loss function,
    # level3: whether the dual solver is available for the specified
# combination of loss function and penalty
_solver_type_dict = {
'logistic_regression': {
'l1': {False: 6},
'l2': {False: 0, True: 7}},
'hinge': {
'l2': {True: 3}},
'squared_hinge': {
'l1': {False: 5},
'l2': {False: 2, True: 1}},
'epsilon_insensitive': {
'l2': {True: 13}},
'squared_epsilon_insensitive': {
'l2': {False: 11, True: 12}},
'crammer_singer': 4
}
if multi_class == 'crammer_singer':
return _solver_type_dict[multi_class]
elif multi_class != 'ovr':
raise ValueError("`multi_class` must be one of `ovr`, "
"`crammer_singer`, got %r" % multi_class)
# FIXME loss.lower() --> loss in 0.18
_solver_pen = _solver_type_dict.get(loss.lower(), None)
if _solver_pen is None:
error_string = ("loss='%s' is not supported" % loss)
else:
        # FIXME penalty.lower() --> penalty in 0.18
_solver_dual = _solver_pen.get(penalty.lower(), None)
if _solver_dual is None:
error_string = ("The combination of penalty='%s' "
"and loss='%s' is not supported"
% (penalty, loss))
else:
solver_num = _solver_dual.get(dual, None)
if solver_num is None:
error_string = ("The combination of penalty='%s' and "
"loss='%s' are not supported when dual=%s"
% (penalty, loss, dual))
else:
return solver_num
raise ValueError('Unsupported set of arguments: %s, '
'Parameters: penalty=%r, loss=%r, dual=%r'
% (error_string, penalty, loss, dual))
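# Illustrative lookups against the table above (a sketch of expected results,
# not an exhaustive test):
#   _get_liblinear_solver_type('ovr', 'l2', 'squared_hinge', True)    -> 1
#   _get_liblinear_solver_type('ovr', 'l2', 'hinge', True)            -> 3
#   _get_liblinear_solver_type('crammer_singer', 'l2', 'hinge', True) -> 4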
def _fit_liblinear(X, y, C, fit_intercept, intercept_scaling, class_weight,
penalty, dual, verbose, max_iter, tol,
random_state=None, multi_class='ovr',
loss='logistic_regression', epsilon=0.1):
"""Used by Logistic Regression (and CV) and LinearSVC.
Preprocessing is done in this function before supplying it to liblinear.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
        Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape (n_samples,)
Target vector relative to X
C : float
        Inverse of regularization strength; the lower the C, the stronger
        the penalization.
fit_intercept : bool
        Whether or not to fit the intercept, that is, to add an intercept
term to the decision function.
intercept_scaling : float
LibLinear internally penalizes the intercept and this term is subject
to regularization just like the other terms of the feature vector.
        In order to avoid this, one should increase the intercept_scaling,
such that the feature vector becomes [x, intercept_scaling].
class_weight : {dict, 'balanced'}, optional
Weights associated with classes in the form ``{class_label: weight}``.
If not given, all classes are supposed to have weight one. For
multi-output problems, a list of dicts can be provided in the same
order as the columns of y.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
penalty : str, {'l1', 'l2'}
The norm of the penalty used in regularization.
dual : bool
Dual or primal formulation,
verbose : int
Set verbose to any positive number for verbosity.
max_iter : int
Number of iterations.
tol : float
Stopping condition.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data.
multi_class : str, {'ovr', 'crammer_singer'}
`ovr` trains n_classes one-vs-rest classifiers, while `crammer_singer`
optimizes a joint objective over all classes.
        While `crammer_singer` is interesting from a theoretical perspective
        as it is consistent, it is seldom used in practice, rarely leads to
        better accuracy, and is more expensive to compute.
If `crammer_singer` is chosen, the options loss, penalty and dual will
be ignored.
loss : str, {'logistic_regression', 'hinge', 'squared_hinge',
        'epsilon_insensitive', 'squared_epsilon_insensitive'}
The loss function used to fit the model.
epsilon : float, optional (default=0.1)
Epsilon parameter in the epsilon-insensitive loss function. Note
that the value of this parameter depends on the scale of the target
variable y. If unsure, set epsilon=0.
Returns
-------
coef_ : ndarray, shape (n_features, n_features + 1)
        The coefficient vector obtained by minimizing the objective function.
intercept_ : float
The intercept term added to the vector.
n_iter_ : int
Maximum number of iterations run across all classes.
"""
# FIXME Remove case insensitivity in 0.18 ---------------------
loss_l, penalty_l = loss.lower(), penalty.lower()
msg = ("loss='%s' has been deprecated in favor of "
"loss='%s' as of 0.16. Backward compatibility"
" for the uppercase notation will be removed in %s")
if (not loss.islower()) and loss_l not in ('l1', 'l2'):
warnings.warn(msg % (loss, loss_l, "0.18"),
DeprecationWarning)
if not penalty.islower():
warnings.warn(msg.replace("loss", "penalty")
% (penalty, penalty_l, "0.18"),
DeprecationWarning)
# -------------------------------------------------------------
# FIXME loss_l --> loss in 0.18
if loss_l not in ['epsilon_insensitive', 'squared_epsilon_insensitive']:
enc = LabelEncoder()
y_ind = enc.fit_transform(y)
classes_ = enc.classes_
if len(classes_) < 2:
raise ValueError("This solver needs samples of at least 2 classes"
" in the data, but the data contains only one"
" class: %r" % classes_[0])
class_weight_ = compute_class_weight(class_weight, classes_, y)
else:
class_weight_ = np.empty(0, dtype=np.float)
y_ind = y
liblinear.set_verbosity_wrap(verbose)
rnd = check_random_state(random_state)
if verbose:
print('[LibLinear]', end='')
# LinearSVC breaks when intercept_scaling is <= 0
bias = -1.0
if fit_intercept:
if intercept_scaling <= 0:
raise ValueError("Intercept scaling is %r but needs to be greater than 0."
" To disable fitting an intercept,"
" set fit_intercept=False." % intercept_scaling)
else:
bias = intercept_scaling
libsvm.set_verbosity_wrap(verbose)
libsvm_sparse.set_verbosity_wrap(verbose)
liblinear.set_verbosity_wrap(verbose)
# LibLinear wants targets as doubles, even for classification
y_ind = np.asarray(y_ind, dtype=np.float64).ravel()
solver_type = _get_liblinear_solver_type(multi_class, penalty, loss, dual)
raw_coef_, n_iter_ = liblinear.train_wrap(
X, y_ind, sp.isspmatrix(X), solver_type, tol, bias, C,
class_weight_, max_iter, rnd.randint(np.iinfo('i').max),
epsilon)
# Regarding rnd.randint(..) in the above signature:
# seed for srand in range [0..INT_MAX); due to limitations in Numpy
# on 32-bit platforms, we can't get to the UINT_MAX limit that
# srand supports
n_iter_ = max(n_iter_)
if n_iter_ >= max_iter and verbose > 0:
warnings.warn("Liblinear failed to converge, increase "
"the number of iterations.", ConvergenceWarning)
if fit_intercept:
coef_ = raw_coef_[:, :-1]
intercept_ = intercept_scaling * raw_coef_[:, -1]
else:
coef_ = raw_coef_
intercept_ = 0.
return coef_, intercept_, n_iter_
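# A minimal usage sketch, assuming X and y are a prepared dense or CSR design
# matrix and label vector; the keyword values below are illustrative defaults,
# not taken from any particular caller:
#
#   coef_, intercept_, n_iter_ = _fit_liblinear(
#       X, y, C=1.0, fit_intercept=True, intercept_scaling=1.0,
#       class_weight=None, penalty='l2', dual=True, verbose=0,
#       max_iter=1000, tol=1e-4, loss='squared_hinge')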
|
mit
|
IFAEControl/pirelay
|
pirelay/server.py
|
1
|
1591
|
#!/usr/bin/env python3
import time
from concurrent import futures
import grpc
from .protos import pirelay_pb2
from .protos import pirelay_pb2_grpc
from .relay import RelaysArray
_ONE_DAY_IN_SECONDS = 60 * 60 * 24
PINS = [21]
class PiRelayServer(pirelay_pb2_grpc.PiRelayServicer):
def __init__(self, bcm_pins=[]):
self._relays = RelaysArray(bcm_pins=bcm_pins)
def Enable(self, request, context):
try:
self._relays.enable(request.channel)
except Exception as ex:
return pirelay_pb2.PiRelaysAnswer(type=pirelay_pb2.Error,
message=str(ex))
else:
return pirelay_pb2.PiRelaysAnswer(type=pirelay_pb2.Ok,
message="")
def Disable(self, request, context):
try:
self._relays.disable(request.channel)
except Exception as ex:
return pirelay_pb2.PiRelaysAnswer(type=pirelay_pb2.Error,
message=str(ex))
else:
return pirelay_pb2.PiRelaysAnswer(type=pirelay_pb2.Ok,
message="")
def serve():
server = grpc.server(futures.ThreadPoolExecutor(max_workers=10))
pirelay_pb2_grpc.add_PiRelayServicer_to_server(PiRelayServer(PINS), server)
server.add_insecure_port('[::]:50051')
server.start()
try:
while True:
time.sleep(_ONE_DAY_IN_SECONDS)
except KeyboardInterrupt:
server.stop(0)
if __name__ == '__main__':
serve()
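# A minimal client-side sketch. The stub and request class names below follow
# the usual grpc codegen conventions and are assumptions, since the .proto file
# is not shown here:
#
#   channel = grpc.insecure_channel('localhost:50051')
#   stub = pirelay_pb2_grpc.PiRelayStub(channel)                 # assumed stub name
#   answer = stub.Enable(pirelay_pb2.PiRelayRequest(channel=0))  # assumed request type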
|
lgpl-3.0
|
mwilliamson/python-mammoth
|
tests/docx/style_map_tests.py
|
1
|
4495
|
import io
from zipfile import ZipFile
from nose.tools import istest, assert_equal
from mammoth.docx.style_map import write_style_map, read_style_map
from mammoth.zips import open_zip
from mammoth.docx import xmlparser as xml
@istest
def reading_embedded_style_map_on_document_without_embedded_style_map_returns_none():
fileobj = _normal_docx()
assert_equal(None, read_style_map(fileobj))
@istest
def writing_style_map_preserves_unrelated_files():
fileobj = _normal_docx()
write_style_map(fileobj, "p => h1")
with open_zip(fileobj, "r") as zip_file:
assert_equal("placeholder", zip_file.read_str("placeholder"))
@istest
def embedded_style_map_can_be_read_after_being_written():
fileobj = _normal_docx()
write_style_map(fileobj, "p => h1")
assert_equal("p => h1", read_style_map(fileobj))
@istest
def embedded_style_map_is_written_to_separate_file():
fileobj = _normal_docx()
write_style_map(fileobj, "p => h1")
with open_zip(fileobj, "r") as zip_file:
assert_equal("p => h1", zip_file.read_str("mammoth/style-map"))
@istest
def embedded_style_map_is_referenced_in_relationships():
fileobj = _normal_docx()
write_style_map(fileobj, "p => h1")
assert_equal(expected_relationships_xml, _read_relationships_xml(fileobj))
@istest
def embedded_style_map_has_override_content_type_in_content_types_xml():
fileobj = _normal_docx()
write_style_map(fileobj, "p => h1")
assert_equal(expected_content_types_xml, _read_content_types_xml(fileobj))
@istest
def can_overwrite_existing_style_map():
fileobj = _normal_docx()
write_style_map(fileobj, "p => h1")
write_style_map(fileobj, "p => h2")
with open_zip(fileobj, "r") as zip_file:
assert_equal("p => h2", read_style_map(fileobj))
_assert_no_duplicates(zip_file._zip_file.namelist())
assert_equal(expected_relationships_xml, _read_relationships_xml(fileobj))
assert_equal(expected_content_types_xml, _read_content_types_xml(fileobj))
def _read_relationships_xml(fileobj):
with open_zip(fileobj, "r") as zip_file:
return xml.parse_xml(
io.StringIO(zip_file.read_str("word/_rels/document.xml.rels")),
[("r", "http://schemas.openxmlformats.org/package/2006/relationships")],
)
def _read_content_types_xml(fileobj):
with open_zip(fileobj, "r") as zip_file:
return xml.parse_xml(
io.StringIO(zip_file.read_str("[Content_Types].xml")),
[("ct", "http://schemas.openxmlformats.org/package/2006/content-types")],
)
original_relationships_xml = ('<?xml version="1.0" encoding="UTF-8" standalone="yes"?>' +
'<Relationships xmlns="http://schemas.openxmlformats.org/package/2006/relationships">' +
'<Relationship Id="rId3" Type="http://schemas.openxmlformats.org/officeDocument/2006/relationships/settings" Target="settings.xml"/>' +
'</Relationships>')
expected_relationships_xml = xml.element("r:Relationships", {}, [
xml.element("r:Relationship", {"Id": "rId3", "Type": "http://schemas.openxmlformats.org/officeDocument/2006/relationships/settings", "Target": "settings.xml"}),
xml.element("r:Relationship", {"Id": "rMammothStyleMap", "Type": "http://schemas.zwobble.org/mammoth/style-map", "Target": "/mammoth/style-map"}),
])
original_content_types_xml = ('<?xml version="1.0" encoding="UTF-8" standalone="yes"?>' +
'<Types xmlns="http://schemas.openxmlformats.org/package/2006/content-types">' +
'<Default Extension="png" ContentType="image/png"/>' +
'</Types>'
)
expected_content_types_xml = xml.element("ct:Types", {}, [
xml.element("ct:Default", {"Extension": "png", "ContentType": "image/png"}),
xml.element("ct:Override", {"PartName": "/mammoth/style-map", "ContentType": "text/prs.mammoth.style-map"}),
])
def _normal_docx():
fileobj = io.BytesIO()
zip_file = ZipFile(fileobj, "w")
try:
zip_file.writestr("placeholder", "placeholder")
zip_file.writestr("word/_rels/document.xml.rels", original_relationships_xml)
zip_file.writestr("[Content_Types].xml", original_content_types_xml)
finally:
zip_file.close()
return fileobj
def _assert_no_duplicates(values):
counts = {}
for value in values:
counts[value] = counts.get(value, 0) + 1
for value, count in counts.items():
if count != 1:
assert False, "{0} has count of {1}".format(value, count)
|
bsd-2-clause
|
bjoshua/ansible
|
lib/ansible/plugins/cache/jsonfile.py
|
47
|
4628
|
# (c) 2014, Brian Coca, Josh Drake, et al
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
import os
import time
import errno
import codecs
try:
import simplejson as json
except ImportError:
import json
from ansible import constants as C
from ansible.errors import *
from ansible.parsing.utils.jsonify import jsonify
from ansible.plugins.cache.base import BaseCacheModule
class CacheModule(BaseCacheModule):
"""
A caching module backed by json files.
"""
def __init__(self, *args, **kwargs):
self._timeout = float(C.CACHE_PLUGIN_TIMEOUT)
self._cache = {}
self._cache_dir = C.CACHE_PLUGIN_CONNECTION # expects a dir path
if not self._cache_dir:
raise AnsibleError("error, fact_caching_connection is not set, cannot use fact cache")
if not os.path.exists(self._cache_dir):
try:
os.makedirs(self._cache_dir)
except (OSError,IOError), e:
self._display.warning("error while trying to create cache dir %s : %s" % (self._cache_dir, str(e)))
return None
def get(self, key):
if key in self._cache:
return self._cache.get(key)
if self.has_expired(key):
raise KeyError
cachefile = "%s/%s" % (self._cache_dir, key)
try:
f = codecs.open(cachefile, 'r', encoding='utf-8')
except (OSError,IOError), e:
self._display.warning("error while trying to read %s : %s" % (cachefile, str(e)))
pass
else:
try:
value = json.load(f)
self._cache[key] = value
return value
except ValueError:
                self._display.warning("error while trying to read %s : invalid JSON" % cachefile)
raise KeyError
finally:
f.close()
def set(self, key, value):
self._cache[key] = value
cachefile = "%s/%s" % (self._cache_dir, key)
try:
f = codecs.open(cachefile, 'w', encoding='utf-8')
except (OSError,IOError), e:
self._display.warning("error while trying to write to %s : %s" % (cachefile, str(e)))
pass
else:
f.write(jsonify(value))
finally:
f.close()
def has_expired(self, key):
cachefile = "%s/%s" % (self._cache_dir, key)
try:
st = os.stat(cachefile)
except (OSError,IOError), e:
if e.errno == errno.ENOENT:
return False
else:
self._display.warning("error while trying to stat %s : %s" % (cachefile, str(e)))
                return False  # cannot stat the cache file; treat it as not expired
if time.time() - st.st_mtime <= self._timeout:
return False
if key in self._cache:
del self._cache[key]
return True
def keys(self):
keys = []
for k in os.listdir(self._cache_dir):
if not (k.startswith('.') or self.has_expired(k)):
keys.append(k)
return keys
def contains(self, key):
cachefile = "%s/%s" % (self._cache_dir, key)
if key in self._cache:
return True
if self.has_expired(key):
return False
try:
st = os.stat(cachefile)
return True
except (OSError,IOError), e:
if e.errno == errno.ENOENT:
return False
else:
self._display.warning("error while trying to stat %s : %s" % (cachefile, str(e)))
pass
def delete(self, key):
del self._cache[key]
try:
os.remove("%s/%s" % (self._cache_dir, key))
except (OSError,IOError), e:
pass #TODO: only pass on non existing?
def flush(self):
self._cache = {}
for key in self.keys():
self.delete(key)
def copy(self):
ret = dict()
for key in self.keys():
ret[key] = self.get(key)
return ret
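# To use this plugin, point Ansible's fact cache at it via the standard
# settings read through C.CACHE_PLUGIN_* above; a typical ansible.cfg fragment
# (path and timeout values are illustrative):
#
#   [defaults]
#   gathering = smart
#   fact_caching = jsonfile
#   fact_caching_connection = /tmp/ansible_facts
#   fact_caching_timeout = 86400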
|
gpl-3.0
|
Jgarcia-IAS/localizacion
|
openerp/addons/mail/mail_group_menu.py
|
334
|
2631
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2012-today OpenERP SA (<http://www.openerp.com>)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>
#
##############################################################################
from openerp.osv import osv
from openerp.osv import fields
class ir_ui_menu(osv.osv):
""" Override of ir.ui.menu class. When adding mail_thread module, each
        new mail.group will create a menu entry. This override checks that
the current user is in the mail.group followers. If not, the menu
entry is taken off the list of menu ids. This way the user will see
menu entries for the mail.group he is following.
"""
_inherit = 'ir.ui.menu'
_columns = {
'mail_group_id': fields.many2one('mail.group', 'Mail Group')
}
def search(self, cr, uid, args, offset=0, limit=None, order=None, context=None, count=False):
""" Remove mail.group menu entries when the user is not a follower."""
ids = super(ir_ui_menu, self).search(cr, uid, args, offset=offset,
limit=limit, order=order,
context=context, count=False)
if ids:
cr.execute("""
SELECT id FROM ir_ui_menu m
WHERE m.mail_group_id IS NULL OR EXISTS (
SELECT 1 FROM mail_followers
WHERE res_model = 'mail.group' AND res_id = m.mail_group_id
AND partner_id = (SELECT partner_id FROM res_users WHERE id = %s)
) AND id in %s
""", (uid, tuple(ids)))
# Preserve original search order
visible_ids = set(x[0] for x in cr.fetchall())
ids = [i for i in ids if i in visible_ids]
if count:
return len(ids)
return ids
|
agpl-3.0
|
atsao72/sympy
|
sympy/combinatorics/prufer.py
|
93
|
11915
|
from __future__ import print_function, division
from sympy.core import Basic
from sympy.core.compatibility import iterable, as_int, range
from sympy.utilities.iterables import flatten
from collections import defaultdict
class Prufer(Basic):
"""
The Prufer correspondence is an algorithm that describes the
bijection between labeled trees and the Prufer code. A Prufer
code of a labeled tree is unique up to isomorphism and has
a length of n - 2.
Prufer sequences were first used by Heinz Prufer to give a
proof of Cayley's formula.
References
==========
.. [1] http://mathworld.wolfram.com/LabeledTree.html
"""
_prufer_repr = None
_tree_repr = None
_nodes = None
_rank = None
@property
def prufer_repr(self):
"""Returns Prufer sequence for the Prufer object.
        This sequence is found by removing the lowest numbered leaf,
recording the node it was attached to, and continuing until only
two vertices remain. The Prufer sequence is the list of recorded nodes.
Examples
========
>>> from sympy.combinatorics.prufer import Prufer
>>> Prufer([[0, 3], [1, 3], [2, 3], [3, 4], [4, 5]]).prufer_repr
[3, 3, 3, 4]
>>> Prufer([1, 0, 0]).prufer_repr
[1, 0, 0]
See Also
========
to_prufer
"""
if self._prufer_repr is None:
self._prufer_repr = self.to_prufer(self._tree_repr[:], self.nodes)
return self._prufer_repr
@property
def tree_repr(self):
"""Returns the tree representation of the Prufer object.
Examples
========
>>> from sympy.combinatorics.prufer import Prufer
>>> Prufer([[0, 3], [1, 3], [2, 3], [3, 4], [4, 5]]).tree_repr
[[0, 3], [1, 3], [2, 3], [3, 4], [4, 5]]
>>> Prufer([1, 0, 0]).tree_repr
[[1, 2], [0, 1], [0, 3], [0, 4]]
See Also
========
to_tree
"""
if self._tree_repr is None:
self._tree_repr = self.to_tree(self._prufer_repr[:])
return self._tree_repr
@property
def nodes(self):
"""Returns the number of nodes in the tree.
Examples
========
>>> from sympy.combinatorics.prufer import Prufer
>>> Prufer([[0, 3], [1, 3], [2, 3], [3, 4], [4, 5]]).nodes
6
>>> Prufer([1, 0, 0]).nodes
5
"""
return self._nodes
@property
def rank(self):
"""Returns the rank of the Prufer sequence.
Examples
========
>>> from sympy.combinatorics.prufer import Prufer
>>> p = Prufer([[0, 3], [1, 3], [2, 3], [3, 4], [4, 5]])
>>> p.rank
778
>>> p.next(1).rank
779
>>> p.prev().rank
777
See Also
========
prufer_rank, next, prev, size
"""
if self._rank is None:
self._rank = self.prufer_rank()
return self._rank
@property
def size(self):
"""Return the number of possible trees of this Prufer object.
Examples
========
>>> from sympy.combinatorics.prufer import Prufer
>>> Prufer([0]*4).size == Prufer([6]*4).size == 1296
True
See Also
========
prufer_rank, rank, next, prev
"""
return self.prev(self.rank).prev().rank + 1
@staticmethod
def to_prufer(tree, n):
"""Return the Prufer sequence for a tree given as a list of edges where
``n`` is the number of nodes in the tree.
Examples
========
>>> from sympy.combinatorics.prufer import Prufer
>>> a = Prufer([[0, 1], [0, 2], [0, 3]])
>>> a.prufer_repr
[0, 0]
>>> Prufer.to_prufer([[0, 1], [0, 2], [0, 3]], 4)
[0, 0]
See Also
========
prufer_repr: returns Prufer sequence of a Prufer object.
"""
d = defaultdict(int)
L = []
for edge in tree:
# Increment the value of the corresponding
# node in the degree list as we encounter an
# edge involving it.
d[edge[0]] += 1
d[edge[1]] += 1
for i in range(n - 2):
# find the smallest leaf
for x in range(n):
if d[x] == 1:
break
# find the node it was connected to
y = None
for edge in tree:
if x == edge[0]:
y = edge[1]
elif x == edge[1]:
y = edge[0]
if y is not None:
break
# record and update
L.append(y)
for j in (x, y):
d[j] -= 1
if not d[j]:
d.pop(j)
tree.remove(edge)
return L
@staticmethod
def to_tree(prufer):
"""Return the tree (as a list of edges) of the given Prufer sequence.
Examples
========
>>> from sympy.combinatorics.prufer import Prufer
>>> a = Prufer([0, 2], 4)
>>> a.tree_repr
[[0, 1], [0, 2], [2, 3]]
>>> Prufer.to_tree([0, 2])
[[0, 1], [0, 2], [2, 3]]
References
==========
- https://hamberg.no/erlend/posts/2010-11-06-prufer-sequence-compact-tree-representation.html
See Also
========
tree_repr: returns tree representation of a Prufer object.
"""
tree = []
last = []
n = len(prufer) + 2
d = defaultdict(lambda: 1)
for p in prufer:
d[p] += 1
for i in prufer:
for j in range(n):
# find the smallest leaf (degree = 1)
if d[j] == 1:
break
# (i, j) is the new edge that we append to the tree
# and remove from the degree dictionary
d[i] -= 1
d[j] -= 1
tree.append(sorted([i, j]))
last = [i for i in range(n) if d[i] == 1] or [0, 1]
tree.append(last)
return tree
@staticmethod
def edges(*runs):
"""Return a list of edges and the number of nodes from the given runs
that connect nodes in an integer-labelled tree.
All node numbers will be shifted so that the minimum node is 0. It is
not a problem if edges are repeated in the runs; only unique edges are
returned. There is no assumption made about what the range of the node
labels should be, but all nodes from the smallest through the largest
must be present.
Examples
========
>>> from sympy.combinatorics.prufer import Prufer
>>> Prufer.edges([1, 2, 3], [2, 4, 5]) # a T
([[0, 1], [1, 2], [1, 3], [3, 4]], 5)
Duplicate edges are removed:
>>> Prufer.edges([0, 1, 2, 3], [1, 4, 5], [1, 4, 6]) # a K
([[0, 1], [1, 2], [1, 4], [2, 3], [4, 5], [4, 6]], 7)
"""
e = set()
nmin = runs[0][0]
for r in runs:
for i in range(len(r) - 1):
a, b = r[i: i + 2]
if b < a:
a, b = b, a
e.add((a, b))
rv = []
got = set()
nmin = nmax = None
for ei in e:
for i in ei:
got.add(i)
nmin = min(ei[0], nmin) if nmin is not None else ei[0]
nmax = max(ei[1], nmax) if nmax is not None else ei[1]
rv.append(list(ei))
missing = set(range(nmin, nmax + 1)) - got
if missing:
missing = [i + nmin for i in missing]
if len(missing) == 1:
msg = 'Node %s is missing.' % missing.pop()
else:
msg = 'Nodes %s are missing.' % list(sorted(missing))
raise ValueError(msg)
if nmin != 0:
for i, ei in enumerate(rv):
rv[i] = [n - nmin for n in ei]
nmax -= nmin
return sorted(rv), nmax + 1
def prufer_rank(self):
"""Computes the rank of a Prufer sequence.
Examples
========
>>> from sympy.combinatorics.prufer import Prufer
>>> a = Prufer([[0, 1], [0, 2], [0, 3]])
>>> a.prufer_rank()
0
See Also
========
rank, next, prev, size
"""
r = 0
p = 1
for i in range(self.nodes - 3, -1, -1):
r += p*self.prufer_repr[i]
p *= self.nodes
return r
@classmethod
def unrank(self, rank, n):
"""Finds the unranked Prufer sequence.
Examples
========
>>> from sympy.combinatorics.prufer import Prufer
>>> Prufer.unrank(0, 4)
Prufer([0, 0])
"""
n, rank = as_int(n), as_int(rank)
L = defaultdict(int)
for i in range(n - 3, -1, -1):
L[i] = rank % n
rank = (rank - L[i])//n
return Prufer([L[i] for i in range(len(L))])
def __new__(cls, *args, **kw_args):
"""The constructor for the Prufer object.
Examples
========
>>> from sympy.combinatorics.prufer import Prufer
A Prufer object can be constructed from a list of edges:
>>> a = Prufer([[0, 1], [0, 2], [0, 3]])
>>> a.prufer_repr
[0, 0]
If the number of nodes is given, no checking of the nodes will
be performed; it will be assumed that nodes 0 through n - 1 are
present:
>>> Prufer([[0, 1], [0, 2], [0, 3]], 4)
Prufer([[0, 1], [0, 2], [0, 3]], 4)
A Prufer object can be constructed from a Prufer sequence:
>>> b = Prufer([1, 3])
>>> b.tree_repr
[[0, 1], [1, 3], [2, 3]]
"""
ret_obj = Basic.__new__(cls, *args, **kw_args)
args = [list(args[0])]
if args[0] and iterable(args[0][0]):
if not args[0][0]:
raise ValueError(
'Prufer expects at least one edge in the tree.')
if len(args) > 1:
nnodes = args[1]
else:
nodes = set(flatten(args[0]))
nnodes = max(nodes) + 1
if nnodes != len(nodes):
missing = set(range(nnodes)) - nodes
if len(missing) == 1:
msg = 'Node %s is missing.' % missing.pop()
else:
msg = 'Nodes %s are missing.' % list(sorted(missing))
raise ValueError(msg)
ret_obj._tree_repr = [list(i) for i in args[0]]
ret_obj._nodes = nnodes
else:
ret_obj._prufer_repr = args[0]
ret_obj._nodes = len(ret_obj._prufer_repr) + 2
return ret_obj
def next(self, delta=1):
"""Generates the Prufer sequence that is delta beyond the current one.
Examples
========
>>> from sympy.combinatorics.prufer import Prufer
>>> a = Prufer([[0, 1], [0, 2], [0, 3]])
>>> b = a.next(1) # == a.next()
>>> b.tree_repr
[[0, 2], [0, 1], [1, 3]]
>>> b.rank
1
See Also
========
prufer_rank, rank, prev, size
"""
return Prufer.unrank(self.rank + delta, self.nodes)
def prev(self, delta=1):
"""Generates the Prufer sequence that is -delta before the current one.
Examples
========
>>> from sympy.combinatorics.prufer import Prufer
>>> a = Prufer([[0, 1], [1, 2], [2, 3], [1, 4]])
>>> a.rank
36
>>> b = a.prev()
>>> b
Prufer([1, 2, 0])
>>> b.rank
35
See Also
========
prufer_rank, rank, next, size
"""
        return Prufer.unrank(self.rank - delta, self.nodes)
|
bsd-3-clause
|
divya-csekar/flask-microblog-server
|
flask/Lib/encodings/iso8859_1.py
|
593
|
13432
|
""" Python Character Mapping Codec iso8859_1 generated from 'MAPPINGS/ISO8859/8859-1.TXT' with gencodec.py.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
def encode(self,input,errors='strict'):
return codecs.charmap_encode(input,errors,encoding_table)
def decode(self,input,errors='strict'):
return codecs.charmap_decode(input,errors,decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
def encode(self, input, final=False):
return codecs.charmap_encode(input,self.errors,encoding_table)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
def decode(self, input, final=False):
return codecs.charmap_decode(input,self.errors,decoding_table)[0]
class StreamWriter(Codec,codecs.StreamWriter):
pass
class StreamReader(Codec,codecs.StreamReader):
pass
### encodings module API
def getregentry():
return codecs.CodecInfo(
name='iso8859-1',
encode=Codec().encode,
decode=Codec().decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamreader=StreamReader,
streamwriter=StreamWriter,
)
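### Usage sketch: once this module is registered on the codec search path (as it
### is in the standard library's encodings package), it is reachable through the
### normal codecs machinery, e.g.
###   u'caf\xe9'.encode('iso8859-1') == 'caf\xe9'
###   'caf\xe9'.decode('iso8859-1') == u'caf\xe9'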
### Decoding Table
decoding_table = (
u'\x00' # 0x00 -> NULL
u'\x01' # 0x01 -> START OF HEADING
u'\x02' # 0x02 -> START OF TEXT
u'\x03' # 0x03 -> END OF TEXT
u'\x04' # 0x04 -> END OF TRANSMISSION
u'\x05' # 0x05 -> ENQUIRY
u'\x06' # 0x06 -> ACKNOWLEDGE
u'\x07' # 0x07 -> BELL
u'\x08' # 0x08 -> BACKSPACE
u'\t' # 0x09 -> HORIZONTAL TABULATION
u'\n' # 0x0A -> LINE FEED
u'\x0b' # 0x0B -> VERTICAL TABULATION
u'\x0c' # 0x0C -> FORM FEED
u'\r' # 0x0D -> CARRIAGE RETURN
u'\x0e' # 0x0E -> SHIFT OUT
u'\x0f' # 0x0F -> SHIFT IN
u'\x10' # 0x10 -> DATA LINK ESCAPE
u'\x11' # 0x11 -> DEVICE CONTROL ONE
u'\x12' # 0x12 -> DEVICE CONTROL TWO
u'\x13' # 0x13 -> DEVICE CONTROL THREE
u'\x14' # 0x14 -> DEVICE CONTROL FOUR
u'\x15' # 0x15 -> NEGATIVE ACKNOWLEDGE
u'\x16' # 0x16 -> SYNCHRONOUS IDLE
u'\x17' # 0x17 -> END OF TRANSMISSION BLOCK
u'\x18' # 0x18 -> CANCEL
u'\x19' # 0x19 -> END OF MEDIUM
u'\x1a' # 0x1A -> SUBSTITUTE
u'\x1b' # 0x1B -> ESCAPE
u'\x1c' # 0x1C -> FILE SEPARATOR
u'\x1d' # 0x1D -> GROUP SEPARATOR
u'\x1e' # 0x1E -> RECORD SEPARATOR
u'\x1f' # 0x1F -> UNIT SEPARATOR
u' ' # 0x20 -> SPACE
u'!' # 0x21 -> EXCLAMATION MARK
u'"' # 0x22 -> QUOTATION MARK
u'#' # 0x23 -> NUMBER SIGN
u'$' # 0x24 -> DOLLAR SIGN
u'%' # 0x25 -> PERCENT SIGN
u'&' # 0x26 -> AMPERSAND
u"'" # 0x27 -> APOSTROPHE
u'(' # 0x28 -> LEFT PARENTHESIS
u')' # 0x29 -> RIGHT PARENTHESIS
u'*' # 0x2A -> ASTERISK
u'+' # 0x2B -> PLUS SIGN
u',' # 0x2C -> COMMA
u'-' # 0x2D -> HYPHEN-MINUS
u'.' # 0x2E -> FULL STOP
u'/' # 0x2F -> SOLIDUS
u'0' # 0x30 -> DIGIT ZERO
u'1' # 0x31 -> DIGIT ONE
u'2' # 0x32 -> DIGIT TWO
u'3' # 0x33 -> DIGIT THREE
u'4' # 0x34 -> DIGIT FOUR
u'5' # 0x35 -> DIGIT FIVE
u'6' # 0x36 -> DIGIT SIX
u'7' # 0x37 -> DIGIT SEVEN
u'8' # 0x38 -> DIGIT EIGHT
u'9' # 0x39 -> DIGIT NINE
u':' # 0x3A -> COLON
u';' # 0x3B -> SEMICOLON
u'<' # 0x3C -> LESS-THAN SIGN
u'=' # 0x3D -> EQUALS SIGN
u'>' # 0x3E -> GREATER-THAN SIGN
u'?' # 0x3F -> QUESTION MARK
u'@' # 0x40 -> COMMERCIAL AT
u'A' # 0x41 -> LATIN CAPITAL LETTER A
u'B' # 0x42 -> LATIN CAPITAL LETTER B
u'C' # 0x43 -> LATIN CAPITAL LETTER C
u'D' # 0x44 -> LATIN CAPITAL LETTER D
u'E' # 0x45 -> LATIN CAPITAL LETTER E
u'F' # 0x46 -> LATIN CAPITAL LETTER F
u'G' # 0x47 -> LATIN CAPITAL LETTER G
u'H' # 0x48 -> LATIN CAPITAL LETTER H
u'I' # 0x49 -> LATIN CAPITAL LETTER I
u'J' # 0x4A -> LATIN CAPITAL LETTER J
u'K' # 0x4B -> LATIN CAPITAL LETTER K
u'L' # 0x4C -> LATIN CAPITAL LETTER L
u'M' # 0x4D -> LATIN CAPITAL LETTER M
u'N' # 0x4E -> LATIN CAPITAL LETTER N
u'O' # 0x4F -> LATIN CAPITAL LETTER O
u'P' # 0x50 -> LATIN CAPITAL LETTER P
u'Q' # 0x51 -> LATIN CAPITAL LETTER Q
u'R' # 0x52 -> LATIN CAPITAL LETTER R
u'S' # 0x53 -> LATIN CAPITAL LETTER S
u'T' # 0x54 -> LATIN CAPITAL LETTER T
u'U' # 0x55 -> LATIN CAPITAL LETTER U
u'V' # 0x56 -> LATIN CAPITAL LETTER V
u'W' # 0x57 -> LATIN CAPITAL LETTER W
u'X' # 0x58 -> LATIN CAPITAL LETTER X
u'Y' # 0x59 -> LATIN CAPITAL LETTER Y
u'Z' # 0x5A -> LATIN CAPITAL LETTER Z
u'[' # 0x5B -> LEFT SQUARE BRACKET
u'\\' # 0x5C -> REVERSE SOLIDUS
u']' # 0x5D -> RIGHT SQUARE BRACKET
u'^' # 0x5E -> CIRCUMFLEX ACCENT
u'_' # 0x5F -> LOW LINE
u'`' # 0x60 -> GRAVE ACCENT
u'a' # 0x61 -> LATIN SMALL LETTER A
u'b' # 0x62 -> LATIN SMALL LETTER B
u'c' # 0x63 -> LATIN SMALL LETTER C
u'd' # 0x64 -> LATIN SMALL LETTER D
u'e' # 0x65 -> LATIN SMALL LETTER E
u'f' # 0x66 -> LATIN SMALL LETTER F
u'g' # 0x67 -> LATIN SMALL LETTER G
u'h' # 0x68 -> LATIN SMALL LETTER H
u'i' # 0x69 -> LATIN SMALL LETTER I
u'j' # 0x6A -> LATIN SMALL LETTER J
u'k' # 0x6B -> LATIN SMALL LETTER K
u'l' # 0x6C -> LATIN SMALL LETTER L
u'm' # 0x6D -> LATIN SMALL LETTER M
u'n' # 0x6E -> LATIN SMALL LETTER N
u'o' # 0x6F -> LATIN SMALL LETTER O
u'p' # 0x70 -> LATIN SMALL LETTER P
u'q' # 0x71 -> LATIN SMALL LETTER Q
u'r' # 0x72 -> LATIN SMALL LETTER R
u's' # 0x73 -> LATIN SMALL LETTER S
u't' # 0x74 -> LATIN SMALL LETTER T
u'u' # 0x75 -> LATIN SMALL LETTER U
u'v' # 0x76 -> LATIN SMALL LETTER V
u'w' # 0x77 -> LATIN SMALL LETTER W
u'x' # 0x78 -> LATIN SMALL LETTER X
u'y' # 0x79 -> LATIN SMALL LETTER Y
u'z' # 0x7A -> LATIN SMALL LETTER Z
u'{' # 0x7B -> LEFT CURLY BRACKET
u'|' # 0x7C -> VERTICAL LINE
u'}' # 0x7D -> RIGHT CURLY BRACKET
u'~' # 0x7E -> TILDE
u'\x7f' # 0x7F -> DELETE
u'\x80' # 0x80 -> <control>
u'\x81' # 0x81 -> <control>
u'\x82' # 0x82 -> <control>
u'\x83' # 0x83 -> <control>
u'\x84' # 0x84 -> <control>
u'\x85' # 0x85 -> <control>
u'\x86' # 0x86 -> <control>
u'\x87' # 0x87 -> <control>
u'\x88' # 0x88 -> <control>
u'\x89' # 0x89 -> <control>
u'\x8a' # 0x8A -> <control>
u'\x8b' # 0x8B -> <control>
u'\x8c' # 0x8C -> <control>
u'\x8d' # 0x8D -> <control>
u'\x8e' # 0x8E -> <control>
u'\x8f' # 0x8F -> <control>
u'\x90' # 0x90 -> <control>
u'\x91' # 0x91 -> <control>
u'\x92' # 0x92 -> <control>
u'\x93' # 0x93 -> <control>
u'\x94' # 0x94 -> <control>
u'\x95' # 0x95 -> <control>
u'\x96' # 0x96 -> <control>
u'\x97' # 0x97 -> <control>
u'\x98' # 0x98 -> <control>
u'\x99' # 0x99 -> <control>
u'\x9a' # 0x9A -> <control>
u'\x9b' # 0x9B -> <control>
u'\x9c' # 0x9C -> <control>
u'\x9d' # 0x9D -> <control>
u'\x9e' # 0x9E -> <control>
u'\x9f' # 0x9F -> <control>
u'\xa0' # 0xA0 -> NO-BREAK SPACE
u'\xa1' # 0xA1 -> INVERTED EXCLAMATION MARK
u'\xa2' # 0xA2 -> CENT SIGN
u'\xa3' # 0xA3 -> POUND SIGN
u'\xa4' # 0xA4 -> CURRENCY SIGN
u'\xa5' # 0xA5 -> YEN SIGN
u'\xa6' # 0xA6 -> BROKEN BAR
u'\xa7' # 0xA7 -> SECTION SIGN
u'\xa8' # 0xA8 -> DIAERESIS
u'\xa9' # 0xA9 -> COPYRIGHT SIGN
u'\xaa' # 0xAA -> FEMININE ORDINAL INDICATOR
u'\xab' # 0xAB -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
u'\xac' # 0xAC -> NOT SIGN
u'\xad' # 0xAD -> SOFT HYPHEN
u'\xae' # 0xAE -> REGISTERED SIGN
u'\xaf' # 0xAF -> MACRON
u'\xb0' # 0xB0 -> DEGREE SIGN
u'\xb1' # 0xB1 -> PLUS-MINUS SIGN
u'\xb2' # 0xB2 -> SUPERSCRIPT TWO
u'\xb3' # 0xB3 -> SUPERSCRIPT THREE
u'\xb4' # 0xB4 -> ACUTE ACCENT
u'\xb5' # 0xB5 -> MICRO SIGN
u'\xb6' # 0xB6 -> PILCROW SIGN
u'\xb7' # 0xB7 -> MIDDLE DOT
u'\xb8' # 0xB8 -> CEDILLA
u'\xb9' # 0xB9 -> SUPERSCRIPT ONE
u'\xba' # 0xBA -> MASCULINE ORDINAL INDICATOR
u'\xbb' # 0xBB -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
u'\xbc' # 0xBC -> VULGAR FRACTION ONE QUARTER
u'\xbd' # 0xBD -> VULGAR FRACTION ONE HALF
u'\xbe' # 0xBE -> VULGAR FRACTION THREE QUARTERS
u'\xbf' # 0xBF -> INVERTED QUESTION MARK
u'\xc0' # 0xC0 -> LATIN CAPITAL LETTER A WITH GRAVE
u'\xc1' # 0xC1 -> LATIN CAPITAL LETTER A WITH ACUTE
u'\xc2' # 0xC2 -> LATIN CAPITAL LETTER A WITH CIRCUMFLEX
u'\xc3' # 0xC3 -> LATIN CAPITAL LETTER A WITH TILDE
u'\xc4' # 0xC4 -> LATIN CAPITAL LETTER A WITH DIAERESIS
u'\xc5' # 0xC5 -> LATIN CAPITAL LETTER A WITH RING ABOVE
u'\xc6' # 0xC6 -> LATIN CAPITAL LETTER AE
u'\xc7' # 0xC7 -> LATIN CAPITAL LETTER C WITH CEDILLA
u'\xc8' # 0xC8 -> LATIN CAPITAL LETTER E WITH GRAVE
u'\xc9' # 0xC9 -> LATIN CAPITAL LETTER E WITH ACUTE
u'\xca' # 0xCA -> LATIN CAPITAL LETTER E WITH CIRCUMFLEX
u'\xcb' # 0xCB -> LATIN CAPITAL LETTER E WITH DIAERESIS
u'\xcc' # 0xCC -> LATIN CAPITAL LETTER I WITH GRAVE
u'\xcd' # 0xCD -> LATIN CAPITAL LETTER I WITH ACUTE
u'\xce' # 0xCE -> LATIN CAPITAL LETTER I WITH CIRCUMFLEX
u'\xcf' # 0xCF -> LATIN CAPITAL LETTER I WITH DIAERESIS
u'\xd0' # 0xD0 -> LATIN CAPITAL LETTER ETH (Icelandic)
u'\xd1' # 0xD1 -> LATIN CAPITAL LETTER N WITH TILDE
u'\xd2' # 0xD2 -> LATIN CAPITAL LETTER O WITH GRAVE
u'\xd3' # 0xD3 -> LATIN CAPITAL LETTER O WITH ACUTE
u'\xd4' # 0xD4 -> LATIN CAPITAL LETTER O WITH CIRCUMFLEX
u'\xd5' # 0xD5 -> LATIN CAPITAL LETTER O WITH TILDE
u'\xd6' # 0xD6 -> LATIN CAPITAL LETTER O WITH DIAERESIS
u'\xd7' # 0xD7 -> MULTIPLICATION SIGN
u'\xd8' # 0xD8 -> LATIN CAPITAL LETTER O WITH STROKE
u'\xd9' # 0xD9 -> LATIN CAPITAL LETTER U WITH GRAVE
u'\xda' # 0xDA -> LATIN CAPITAL LETTER U WITH ACUTE
u'\xdb' # 0xDB -> LATIN CAPITAL LETTER U WITH CIRCUMFLEX
u'\xdc' # 0xDC -> LATIN CAPITAL LETTER U WITH DIAERESIS
u'\xdd' # 0xDD -> LATIN CAPITAL LETTER Y WITH ACUTE
u'\xde' # 0xDE -> LATIN CAPITAL LETTER THORN (Icelandic)
u'\xdf' # 0xDF -> LATIN SMALL LETTER SHARP S (German)
u'\xe0' # 0xE0 -> LATIN SMALL LETTER A WITH GRAVE
u'\xe1' # 0xE1 -> LATIN SMALL LETTER A WITH ACUTE
u'\xe2' # 0xE2 -> LATIN SMALL LETTER A WITH CIRCUMFLEX
u'\xe3' # 0xE3 -> LATIN SMALL LETTER A WITH TILDE
u'\xe4' # 0xE4 -> LATIN SMALL LETTER A WITH DIAERESIS
u'\xe5' # 0xE5 -> LATIN SMALL LETTER A WITH RING ABOVE
u'\xe6' # 0xE6 -> LATIN SMALL LETTER AE
u'\xe7' # 0xE7 -> LATIN SMALL LETTER C WITH CEDILLA
u'\xe8' # 0xE8 -> LATIN SMALL LETTER E WITH GRAVE
u'\xe9' # 0xE9 -> LATIN SMALL LETTER E WITH ACUTE
u'\xea' # 0xEA -> LATIN SMALL LETTER E WITH CIRCUMFLEX
u'\xeb' # 0xEB -> LATIN SMALL LETTER E WITH DIAERESIS
u'\xec' # 0xEC -> LATIN SMALL LETTER I WITH GRAVE
u'\xed' # 0xED -> LATIN SMALL LETTER I WITH ACUTE
u'\xee' # 0xEE -> LATIN SMALL LETTER I WITH CIRCUMFLEX
u'\xef' # 0xEF -> LATIN SMALL LETTER I WITH DIAERESIS
u'\xf0' # 0xF0 -> LATIN SMALL LETTER ETH (Icelandic)
u'\xf1' # 0xF1 -> LATIN SMALL LETTER N WITH TILDE
u'\xf2' # 0xF2 -> LATIN SMALL LETTER O WITH GRAVE
u'\xf3' # 0xF3 -> LATIN SMALL LETTER O WITH ACUTE
u'\xf4' # 0xF4 -> LATIN SMALL LETTER O WITH CIRCUMFLEX
u'\xf5' # 0xF5 -> LATIN SMALL LETTER O WITH TILDE
u'\xf6' # 0xF6 -> LATIN SMALL LETTER O WITH DIAERESIS
u'\xf7' # 0xF7 -> DIVISION SIGN
u'\xf8' # 0xF8 -> LATIN SMALL LETTER O WITH STROKE
u'\xf9' # 0xF9 -> LATIN SMALL LETTER U WITH GRAVE
u'\xfa' # 0xFA -> LATIN SMALL LETTER U WITH ACUTE
u'\xfb' # 0xFB -> LATIN SMALL LETTER U WITH CIRCUMFLEX
u'\xfc' # 0xFC -> LATIN SMALL LETTER U WITH DIAERESIS
u'\xfd' # 0xFD -> LATIN SMALL LETTER Y WITH ACUTE
u'\xfe' # 0xFE -> LATIN SMALL LETTER THORN (Icelandic)
u'\xff' # 0xFF -> LATIN SMALL LETTER Y WITH DIAERESIS
)
### Encoding table
encoding_table=codecs.charmap_build(decoding_table)
|
bsd-3-clause
|
tensorflow/model-optimization
|
tensorflow_model_optimization/g3doc/tools/build_docs.py
|
1
|
3663
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tool to generate open source api_docs for tensorflow_model_optimization.
To use:
1. Install the tensorflow docs package, which is only compatible with Python 3.
python3 -m pip install git+https://github.com/tensorflow/docs
2. Install TensorFlow Model Optimization. The API docs are generated from
the `tfmot` package imported below, based on what is exposed under
https://github.com/tensorflow/model-optimization/tree/master/tensorflow_model_optimization/python/core/api.
See https://www.tensorflow.org/model_optimization/guide/install.
3. Run build_docs.py.
python3 build_docs.py --output_dir=/tmp/model_optimization_api
4. View the generated markdown files on a viewer. One option is to fork
https://github.com/tensorflow/model-optimization/, push a change that
copies the files to tensorflow_model_optimization/g3doc, and then
view the files on Github.
Note:
If duplicate or spurious docs are generated (e.g. internal names), consider
blacklisting them via the `private_map` argument below.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from absl import app
from absl import flags
from tensorflow_docs.api_generator import generate_lib
import tensorflow_model_optimization as tfmot
flags.DEFINE_string("output_dir", "/tmp/model_optimization_api",
"Where to output the docs")
flags.DEFINE_string(
"code_url_prefix",
("https://github.com/tensorflow/model-optimization/blob/master/"
"tensorflow_model_optimization"),
"The url prefix for links to code.")
flags.DEFINE_bool("search_hints", True,
"Include metadata search hints in the generated files")
flags.DEFINE_string("site_path", "model_optimization/api_docs/python",
"Path prefix in the _toc.yaml")
FLAGS = flags.FLAGS
def main(unused_argv):
doc_generator = generate_lib.DocGenerator(
root_title="TensorFlow Model Optimization",
py_modules=[("tfmot", tfmot)],
base_dir=os.path.dirname(tfmot.__file__),
code_url_prefix=FLAGS.code_url_prefix,
search_hints=FLAGS.search_hints,
site_path=FLAGS.site_path,
# TODO(tfmot): remove this once the next release after 0.3.0 happens.
# This is needed in the interim because the API docs reflect
# the latest release and the current release still wildcard imports
# all of the classes below.
private_map={
"tfmot.sparsity.keras": [
# List of internal classes which get exposed when imported.
"InputLayer",
"custom_object_scope",
"pruning_sched",
"pruning_wrapper",
"absolute_import",
"division",
"print_function",
"compat"
]
},
)
doc_generator.build(output_dir=FLAGS.output_dir)
if __name__ == "__main__":
app.run(main)
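# Invocation sketch (not part of the original tool; the flag values below are
# illustrative assumptions): the absl flags defined above can be combined on
# the command line, e.g.
#
#   python3 build_docs.py --output_dir=/tmp/model_optimization_api \
#       --site_path=model_optimization/api_docs/python --nosearch_hints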
|
apache-2.0
|
krisys/django
|
tests/forms_tests/field_tests/test_integerfield.py
|
4
|
5979
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.forms import IntegerField, Textarea, ValidationError
from django.test import SimpleTestCase
from . import FormFieldAssertionsMixin
class IntegerFieldTest(FormFieldAssertionsMixin, SimpleTestCase):
def test_integerfield_1(self):
f = IntegerField()
self.assertWidgetRendersTo(f, '<input type="number" name="f" id="id_f" />')
with self.assertRaisesMessage(ValidationError, "'This field is required.'"):
f.clean('')
with self.assertRaisesMessage(ValidationError, "'This field is required.'"):
f.clean(None)
self.assertEqual(1, f.clean('1'))
self.assertIsInstance(f.clean('1'), int)
self.assertEqual(23, f.clean('23'))
with self.assertRaisesMessage(ValidationError, "'Enter a whole number.'"):
f.clean('a')
self.assertEqual(42, f.clean(42))
with self.assertRaisesMessage(ValidationError, "'Enter a whole number.'"):
f.clean(3.14)
self.assertEqual(1, f.clean('1 '))
self.assertEqual(1, f.clean(' 1'))
self.assertEqual(1, f.clean(' 1 '))
with self.assertRaisesMessage(ValidationError, "'Enter a whole number.'"):
f.clean('1a')
self.assertIsNone(f.max_value)
self.assertIsNone(f.min_value)
def test_integerfield_2(self):
f = IntegerField(required=False)
self.assertIsNone(f.clean(''))
self.assertEqual('None', repr(f.clean('')))
self.assertIsNone(f.clean(None))
self.assertEqual('None', repr(f.clean(None)))
self.assertEqual(1, f.clean('1'))
self.assertIsInstance(f.clean('1'), int)
self.assertEqual(23, f.clean('23'))
with self.assertRaisesMessage(ValidationError, "'Enter a whole number.'"):
f.clean('a')
self.assertEqual(1, f.clean('1 '))
self.assertEqual(1, f.clean(' 1'))
self.assertEqual(1, f.clean(' 1 '))
with self.assertRaisesMessage(ValidationError, "'Enter a whole number.'"):
f.clean('1a')
self.assertIsNone(f.max_value)
self.assertIsNone(f.min_value)
def test_integerfield_3(self):
f = IntegerField(max_value=10)
self.assertWidgetRendersTo(f, '<input max="10" type="number" name="f" id="id_f" />')
with self.assertRaisesMessage(ValidationError, "'This field is required.'"):
f.clean(None)
self.assertEqual(1, f.clean(1))
self.assertEqual(10, f.clean(10))
with self.assertRaisesMessage(ValidationError, "'Ensure this value is less than or equal to 10.'"):
f.clean(11)
self.assertEqual(10, f.clean('10'))
with self.assertRaisesMessage(ValidationError, "'Ensure this value is less than or equal to 10.'"):
f.clean('11')
self.assertEqual(f.max_value, 10)
self.assertIsNone(f.min_value)
def test_integerfield_4(self):
f = IntegerField(min_value=10)
self.assertWidgetRendersTo(f, '<input id="id_f" type="number" name="f" min="10" />')
with self.assertRaisesMessage(ValidationError, "'This field is required.'"):
f.clean(None)
with self.assertRaisesMessage(ValidationError, "'Ensure this value is greater than or equal to 10.'"):
f.clean(1)
self.assertEqual(10, f.clean(10))
self.assertEqual(11, f.clean(11))
self.assertEqual(10, f.clean('10'))
self.assertEqual(11, f.clean('11'))
self.assertIsNone(f.max_value)
self.assertEqual(f.min_value, 10)
def test_integerfield_5(self):
f = IntegerField(min_value=10, max_value=20)
self.assertWidgetRendersTo(f, '<input id="id_f" max="20" type="number" name="f" min="10" />')
with self.assertRaisesMessage(ValidationError, "'This field is required.'"):
f.clean(None)
with self.assertRaisesMessage(ValidationError, "'Ensure this value is greater than or equal to 10.'"):
f.clean(1)
self.assertEqual(10, f.clean(10))
self.assertEqual(11, f.clean(11))
self.assertEqual(10, f.clean('10'))
self.assertEqual(11, f.clean('11'))
self.assertEqual(20, f.clean(20))
with self.assertRaisesMessage(ValidationError, "'Ensure this value is less than or equal to 20.'"):
f.clean(21)
self.assertEqual(f.max_value, 20)
self.assertEqual(f.min_value, 10)
def test_integerfield_localized(self):
"""
A localized IntegerField's widget renders to a text input without any
number input specific attributes.
"""
f1 = IntegerField(localize=True)
self.assertWidgetRendersTo(f1, '<input id="id_f" name="f" type="text" />')
def test_integerfield_float(self):
f = IntegerField()
self.assertEqual(1, f.clean(1.0))
self.assertEqual(1, f.clean('1.0'))
self.assertEqual(1, f.clean(' 1.0 '))
self.assertEqual(1, f.clean('1.'))
self.assertEqual(1, f.clean(' 1. '))
with self.assertRaisesMessage(ValidationError, "'Enter a whole number.'"):
f.clean('1.5')
with self.assertRaisesMessage(ValidationError, "'Enter a whole number.'"):
f.clean('…')
def test_integerfield_big_num(self):
f = IntegerField()
self.assertEqual(9223372036854775808, f.clean(9223372036854775808))
self.assertEqual(9223372036854775808, f.clean('9223372036854775808'))
self.assertEqual(9223372036854775808, f.clean('9223372036854775808.0'))
def test_integerfield_subclass(self):
"""
Class-defined widget is not overwritten by __init__() (#22245).
"""
class MyIntegerField(IntegerField):
widget = Textarea
f = MyIntegerField()
self.assertEqual(f.widget.__class__, Textarea)
f = MyIntegerField(localize=True)
self.assertEqual(f.widget.__class__, Textarea)
|
bsd-3-clause
|
redhat-openstack/ironic
|
ironic/tests/policy_fixture.py
|
8
|
1523
|
# Copyright 2012 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import fixtures
from oslo_config import cfg
from oslo_policy import opts as policy_opts
from ironic.common import policy as ironic_policy
from ironic.tests import fake_policy
CONF = cfg.CONF
class PolicyFixture(fixtures.Fixture):
def __init__(self, compat=None):
self.compat = compat
def setUp(self):
super(PolicyFixture, self).setUp()
self.policy_dir = self.useFixture(fixtures.TempDir())
self.policy_file_name = os.path.join(self.policy_dir.path,
'policy.json')
with open(self.policy_file_name, 'w') as policy_file:
policy_file.write(fake_policy.get_policy_data(self.compat))
policy_opts.set_defaults(CONF)
CONF.set_override('policy_file', self.policy_file_name, 'oslo_policy')
ironic_policy._ENFORCER = None
self.addCleanup(ironic_policy.get_enforcer().clear)
|
apache-2.0
|
billwanjohi/ansible
|
lib/ansible/runner/lookup_plugins/first_found.py
|
33
|
5953
|
# (c) 2013, seth vidal <[email protected]> red hat, inc
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# take a list of files and (optionally) a list of paths
# return the first existing file found in the paths
# [file1, file2, file3], [path1, path2, path3]
# search order is:
# path1/file1
# path1/file2
# path1/file3
# path2/file1
# path2/file2
# path2/file3
# path3/file1
# path3/file2
# path3/file3
# first file found with os.path.exists() is returned
# if no file matches, an ansible error is raised
# EXAMPLES
# - name: copy first existing file found to /some/file
# action: copy src=$item dest=/some/file
# with_first_found:
# - files: foo ${inventory_hostname} bar
# paths: /tmp/production /tmp/staging
# that will look for files in this order:
# /tmp/production/foo
# /tmp/production/${inventory_hostname}
# /tmp/production/bar
# /tmp/staging/foo
# /tmp/staging/${inventory_hostname}
# /tmp/staging/bar
# - name: copy first existing file found to /some/file
# action: copy src=$item dest=/some/file
# with_first_found:
# - files: /some/place/foo ${inventory_hostname} /some/place/else
# that will look for files in this order:
# /some/place/foo
# $relative_path/${inventory_hostname}
# /some/place/else
# example - including tasks:
# tasks:
# - include: $item
# with_first_found:
# - files: generic
# paths: tasks/staging tasks/production
# this will include the tasks in the file generic where it is found first (staging or production)
# example simple file lists
#tasks:
#- name: first found file
# action: copy src=$item dest=/etc/file.cfg
# with_first_found:
# - files: foo.${inventory_hostname} foo
# example skipping if no matched files
# First_found also offers the ability to control whether or not failing
# to find a file returns an error
#
#- name: first found file - or skip
# action: copy src=$item dest=/etc/file.cfg
# with_first_found:
# - files: foo.${inventory_hostname}
# skip: true
# example a role with default configuration and configuration per host
# you can set multiple terms with their own files and paths to look through.
# consider a role that sets some configuration per host falling back on a default config.
#
#- name: some configuration template
# template: src={{ item }} dest=/etc/file.cfg mode=0444 owner=root group=root
# with_first_found:
# - files:
# - ${inventory_hostname}/etc/file.cfg
# paths:
# - ../../../templates.overwrites
# - ../../../templates
# - files:
# - etc/file.cfg
# paths:
# - templates
# the above will return an empty list if the files cannot be found at all
# if skip is unspecified or if it is set to false then it will return an
# error which can be caught by ignore_errors: true for that action.
# finally - if you want you can use it in place of first_available_file:
# you just cannot use the files, paths or skip options. simply replace
# first_available_file with with_first_found and leave the file listing in place
#
#
# - name: with_first_found like first_available_file
# action: copy src=$item dest=/tmp/faftest
# with_first_found:
# - ../files/foo
# - ../files/bar
# - ../files/baz
# ignore_errors: true
from ansible import utils, errors
import os
class LookupModule(object):
def __init__(self, basedir=None, **kwargs):
self.basedir = basedir
def run(self, terms, inject=None, **kwargs):
terms = utils.listify_lookup_plugin_terms(terms, self.basedir, inject)
result = None
anydict = False
skip = False
for term in terms:
if isinstance(term, dict):
anydict = True
total_search = []
if anydict:
for term in terms:
if isinstance(term, dict):
files = term.get('files', [])
paths = term.get('paths', [])
skip = utils.boolean(term.get('skip', False))
filelist = files
if isinstance(files, basestring):
files = files.replace(',', ' ')
files = files.replace(';', ' ')
filelist = files.split(' ')
pathlist = paths
if paths:
if isinstance(paths, basestring):
paths = paths.replace(',', ' ')
paths = paths.replace(':', ' ')
paths = paths.replace(';', ' ')
pathlist = paths.split(' ')
if not pathlist:
total_search = filelist
else:
for path in pathlist:
for fn in filelist:
f = os.path.join(path, fn)
total_search.append(f)
else:
total_search.append(term)
else:
total_search = terms
result = None
for fn in total_search:
path = utils.path_dwim(self.basedir, fn)
if os.path.exists(path):
return [path]
if not result:
if skip:
return []
else:
return [None]
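# Illustrative expansion (a sketch; the term values below are made-up examples,
# not taken from this file):
#   terms = [{'files': 'a.cfg b.cfg', 'paths': '/etc/app /tmp/app'}]
# is searched path-major as
#   /etc/app/a.cfg, /etc/app/b.cfg, /tmp/app/a.cfg, /tmp/app/b.cfg
# and the first candidate for which os.path.exists() is true is returned as a
# one-element list; with skip: true an empty list is returned when nothing matches.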
|
gpl-3.0
|
aricchen/openHR
|
openerp/addons/crm/report/crm_phonecall_report.py
|
48
|
4906
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields,osv
from openerp import tools
from .. import crm
AVAILABLE_STATES = [
('draft','Draft'),
('open','Todo'),
('cancel', 'Cancelled'),
('done', 'Held'),
('pending','Pending')
]
class crm_phonecall_report(osv.osv):
""" Phone calls by user and section """
_name = "crm.phonecall.report"
_description = "Phone calls by user and section"
_auto = False
_columns = {
'name': fields.char('Year', size=64, required=False, readonly=True),
'user_id':fields.many2one('res.users', 'User', readonly=True),
'section_id':fields.many2one('crm.case.section', 'Section', readonly=True),
'priority': fields.selection(crm.AVAILABLE_PRIORITIES, 'Priority'),
'nbr': fields.integer('# of Cases', readonly=True),
'state': fields.selection(AVAILABLE_STATES, 'Status', size=16, readonly=True),
'month':fields.selection([('01', 'January'), ('02', 'February'), \
('03', 'March'), ('04', 'April'),\
('05', 'May'), ('06', 'June'), \
('07', 'July'), ('08', 'August'),\
('09', 'September'), ('10', 'October'),\
('11', 'November'), ('12', 'December')], 'Month', readonly=True),
'create_date': fields.datetime('Create Date', readonly=True, select=True),
'day': fields.char('Day', size=128, readonly=True),
'delay_close': fields.float('Delay to close', digits=(16,2),readonly=True, group_operator="avg",help="Number of Days to close the case"),
'duration': fields.float('Duration', digits=(16,2),readonly=True, group_operator="avg"),
'delay_open': fields.float('Delay to open',digits=(16,2),readonly=True, group_operator="avg",help="Number of Days to open the case"),
'categ_id': fields.many2one('crm.case.categ', 'Category', \
domain="[('section_id','=',section_id),\
('object_id.model', '=', 'crm.phonecall')]"),
'partner_id': fields.many2one('res.partner', 'Partner' , readonly=True),
'company_id': fields.many2one('res.company', 'Company', readonly=True),
'opening_date': fields.date('Opening Date', readonly=True, select=True),
'creation_date': fields.date('Creation Date', readonly=True, select=True),
'date_closed': fields.date('Close Date', readonly=True, select=True),
}
def init(self, cr):
""" Phone Calls By User And Section
@param cr: the current row, from the database cursor,
"""
tools.drop_view_if_exists(cr, 'crm_phonecall_report')
cr.execute("""
create or replace view crm_phonecall_report as (
select
id,
to_char(c.date, 'YYYY') as name,
to_char(c.date, 'MM') as month,
to_char(c.date, 'YYYY-MM-DD') as day,
to_char(c.create_date, 'YYYY-MM-DD') as creation_date,
to_char(c.date_open, 'YYYY-MM-DD') as opening_date,
to_char(c.date_closed, 'YYYY-mm-dd') as date_closed,
c.state,
c.user_id,
c.section_id,
c.categ_id,
c.partner_id,
c.duration,
c.company_id,
c.priority,
1 as nbr,
date_trunc('day',c.create_date) as create_date,
extract('epoch' from (c.date_closed-c.create_date))/(3600*24) as delay_close,
extract('epoch' from (c.date_open-c.create_date))/(3600*24) as delay_open
from
crm_phonecall c
)""")
crm_phonecall_report()
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
agpl-3.0
|
owlabs/incubator-airflow
|
airflow/executors/__init__.py
|
1
|
3891
|
# -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import sys
from airflow.utils.log.logging_mixin import LoggingMixin
from airflow.configuration import conf
from airflow.exceptions import AirflowException
from airflow.executors.base_executor import BaseExecutor # noqa
from airflow.executors.local_executor import LocalExecutor
from airflow.executors.sequential_executor import SequentialExecutor
DEFAULT_EXECUTOR = None
def _integrate_plugins():
"""Integrate plugins to the context."""
from airflow.plugins_manager import executors_modules
for executors_module in executors_modules:
sys.modules[executors_module.__name__] = executors_module
globals()[executors_module._name] = executors_module
def get_default_executor():
"""Creates a new instance of the configured executor if none exists and returns it"""
global DEFAULT_EXECUTOR
if DEFAULT_EXECUTOR is not None:
return DEFAULT_EXECUTOR
executor_name = conf.get('core', 'EXECUTOR')
DEFAULT_EXECUTOR = _get_executor(executor_name)
log = LoggingMixin().log
log.info("Using executor %s", executor_name)
return DEFAULT_EXECUTOR
class Executors:
LocalExecutor = "LocalExecutor"
SequentialExecutor = "SequentialExecutor"
CeleryExecutor = "CeleryExecutor"
DaskExecutor = "DaskExecutor"
MesosExecutor = "MesosExecutor"
KubernetesExecutor = "KubernetesExecutor"
DebugExecutor = "DebugExecutor"
def _get_executor(executor_name):
"""
Creates a new instance of the named executor.
    In case the executor name is not known in airflow,
look for it in the plugins
"""
if executor_name == Executors.LocalExecutor:
return LocalExecutor()
elif executor_name == Executors.SequentialExecutor:
return SequentialExecutor()
elif executor_name == Executors.CeleryExecutor:
from airflow.executors.celery_executor import CeleryExecutor
return CeleryExecutor()
elif executor_name == Executors.DaskExecutor:
from airflow.executors.dask_executor import DaskExecutor
return DaskExecutor()
elif executor_name == Executors.MesosExecutor:
from airflow.contrib.executors.mesos_executor import MesosExecutor
return MesosExecutor()
elif executor_name == Executors.KubernetesExecutor:
from airflow.contrib.executors.kubernetes_executor import KubernetesExecutor
return KubernetesExecutor()
elif executor_name == Executors.DebugExecutor:
from airflow.executors.debug_executor import DebugExecutor
return DebugExecutor()
else:
# Loading plugins
_integrate_plugins()
executor_path = executor_name.split('.')
if len(executor_path) != 2:
raise AirflowException(
"Executor {0} not supported: "
"please specify in format plugin_module.executor".format(executor_name))
if executor_path[0] in globals():
return globals()[executor_path[0]].__dict__[executor_path[1]]()
else:
raise AirflowException("Executor {0} not supported.".format(executor_name))
|
apache-2.0
|
dgquintas/vmcontroller.unstable
|
src/vmcontroller.host/vmcontroller/host/__init__.py
|
2
|
1311
|
"""
VMController Host, agent for host system
"""
__version__ = '0.2.0'
__authors__ = ['"David Garcia Quintas" <[email protected]>',
'"Rohit Yadav" <[email protected]>']
__copyright__ = "Copyright 2010 VMController Authors"
__license__ = """Licensed under the (Simplified) BSD License
you may not use this project "VMController" except in compliance with the License. You may obtain a copy of the License at
http://www.opensource.org/licenses/bsd-license.php
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
try:
from __main__ import *
except ImportError, e:
pass
|
bsd-3-clause
|
aachik/flask-blog-abdulmx
|
env/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.py
|
169
|
117500
|
from __future__ import absolute_import, division, unicode_literals
from pip._vendor.six import with_metaclass
import types
from . import inputstream
from . import tokenizer
from . import treebuilders
from .treebuilders._base import Marker
from . import utils
from . import constants
from .constants import spaceCharacters, asciiUpper2Lower
from .constants import specialElements
from .constants import headingElements
from .constants import cdataElements, rcdataElements
from .constants import tokenTypes, ReparseException, namespaces
from .constants import htmlIntegrationPointElements, mathmlTextIntegrationPointElements
def parse(doc, treebuilder="etree", encoding=None,
namespaceHTMLElements=True):
"""Parse a string or file-like object into a tree"""
tb = treebuilders.getTreeBuilder(treebuilder)
p = HTMLParser(tb, namespaceHTMLElements=namespaceHTMLElements)
return p.parse(doc, encoding=encoding)
def parseFragment(doc, container="div", treebuilder="etree", encoding=None,
namespaceHTMLElements=True):
tb = treebuilders.getTreeBuilder(treebuilder)
p = HTMLParser(tb, namespaceHTMLElements=namespaceHTMLElements)
return p.parseFragment(doc, container=container, encoding=encoding)
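# Usage sketch (illustrative only; the markup strings are made-up examples):
# the module-level helpers above construct an HTMLParser and feed it the input.
#
#   from pip._vendor.html5lib import html5parser
#   document = html5parser.parse("<p>hello <b>world")
#   fragment = html5parser.parseFragment("<td>cell</td>", container="tr")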
def method_decorator_metaclass(function):
class Decorated(type):
def __new__(meta, classname, bases, classDict):
for attributeName, attribute in classDict.items():
if isinstance(attribute, types.FunctionType):
attribute = function(attribute)
classDict[attributeName] = attribute
return type.__new__(meta, classname, bases, classDict)
return Decorated
class HTMLParser(object):
"""HTML parser. Generates a tree structure from a stream of (possibly
malformed) HTML"""
def __init__(self, tree=None, tokenizer=tokenizer.HTMLTokenizer,
strict=False, namespaceHTMLElements=True, debug=False):
"""
strict - raise an exception when a parse error is encountered
tree - a treebuilder class controlling the type of tree that will be
returned. Built in treebuilders can be accessed through
html5lib.treebuilders.getTreeBuilder(treeType)
tokenizer - a class that provides a stream of tokens to the treebuilder.
        This may be replaced with e.g. a sanitizer which converts some tags to
text
"""
# Raise an exception on the first error encountered
self.strict = strict
if tree is None:
tree = treebuilders.getTreeBuilder("etree")
self.tree = tree(namespaceHTMLElements)
self.tokenizer_class = tokenizer
self.errors = []
self.phases = dict([(name, cls(self, self.tree)) for name, cls in
getPhases(debug).items()])
def _parse(self, stream, innerHTML=False, container="div",
encoding=None, parseMeta=True, useChardet=True, **kwargs):
self.innerHTMLMode = innerHTML
self.container = container
self.tokenizer = self.tokenizer_class(stream, encoding=encoding,
parseMeta=parseMeta,
useChardet=useChardet,
parser=self, **kwargs)
self.reset()
while True:
try:
self.mainLoop()
break
except ReparseException:
self.reset()
def reset(self):
self.tree.reset()
self.firstStartTag = False
self.errors = []
self.log = [] # only used with debug mode
# "quirks" / "limited quirks" / "no quirks"
self.compatMode = "no quirks"
if self.innerHTMLMode:
self.innerHTML = self.container.lower()
if self.innerHTML in cdataElements:
self.tokenizer.state = self.tokenizer.rcdataState
elif self.innerHTML in rcdataElements:
self.tokenizer.state = self.tokenizer.rawtextState
elif self.innerHTML == 'plaintext':
self.tokenizer.state = self.tokenizer.plaintextState
else:
# state already is data state
# self.tokenizer.state = self.tokenizer.dataState
pass
self.phase = self.phases["beforeHtml"]
self.phase.insertHtmlElement()
self.resetInsertionMode()
else:
self.innerHTML = False
self.phase = self.phases["initial"]
self.lastPhase = None
self.beforeRCDataPhase = None
self.framesetOK = True
def isHTMLIntegrationPoint(self, element):
if (element.name == "annotation-xml" and
element.namespace == namespaces["mathml"]):
return ("encoding" in element.attributes and
element.attributes["encoding"].translate(
asciiUpper2Lower) in
("text/html", "application/xhtml+xml"))
else:
return (element.namespace, element.name) in htmlIntegrationPointElements
def isMathMLTextIntegrationPoint(self, element):
return (element.namespace, element.name) in mathmlTextIntegrationPointElements
def mainLoop(self):
CharactersToken = tokenTypes["Characters"]
SpaceCharactersToken = tokenTypes["SpaceCharacters"]
StartTagToken = tokenTypes["StartTag"]
EndTagToken = tokenTypes["EndTag"]
CommentToken = tokenTypes["Comment"]
DoctypeToken = tokenTypes["Doctype"]
ParseErrorToken = tokenTypes["ParseError"]
for token in self.normalizedTokens():
new_token = token
while new_token is not None:
currentNode = self.tree.openElements[-1] if self.tree.openElements else None
currentNodeNamespace = currentNode.namespace if currentNode else None
currentNodeName = currentNode.name if currentNode else None
type = new_token["type"]
if type == ParseErrorToken:
self.parseError(new_token["data"], new_token.get("datavars", {}))
new_token = None
else:
if (len(self.tree.openElements) == 0 or
currentNodeNamespace == self.tree.defaultNamespace or
(self.isMathMLTextIntegrationPoint(currentNode) and
((type == StartTagToken and
token["name"] not in frozenset(["mglyph", "malignmark"])) or
type in (CharactersToken, SpaceCharactersToken))) or
(currentNodeNamespace == namespaces["mathml"] and
currentNodeName == "annotation-xml" and
token["name"] == "svg") or
(self.isHTMLIntegrationPoint(currentNode) and
type in (StartTagToken, CharactersToken, SpaceCharactersToken))):
phase = self.phase
else:
phase = self.phases["inForeignContent"]
if type == CharactersToken:
new_token = phase.processCharacters(new_token)
elif type == SpaceCharactersToken:
new_token = phase.processSpaceCharacters(new_token)
elif type == StartTagToken:
new_token = phase.processStartTag(new_token)
elif type == EndTagToken:
new_token = phase.processEndTag(new_token)
elif type == CommentToken:
new_token = phase.processComment(new_token)
elif type == DoctypeToken:
new_token = phase.processDoctype(new_token)
if (type == StartTagToken and token["selfClosing"]
and not token["selfClosingAcknowledged"]):
self.parseError("non-void-element-with-trailing-solidus",
{"name": token["name"]})
# When the loop finishes it's EOF
reprocess = True
phases = []
while reprocess:
phases.append(self.phase)
reprocess = self.phase.processEOF()
if reprocess:
assert self.phase not in phases
def normalizedTokens(self):
for token in self.tokenizer:
yield self.normalizeToken(token)
def parse(self, stream, encoding=None, parseMeta=True, useChardet=True):
"""Parse a HTML document into a well-formed tree
stream - a filelike object or string containing the HTML to be parsed
The optional encoding parameter must be a string that indicates
the encoding. If specified, that encoding will be used,
regardless of any BOM or later declaration (such as in a meta
element)
"""
self._parse(stream, innerHTML=False, encoding=encoding,
parseMeta=parseMeta, useChardet=useChardet)
return self.tree.getDocument()
def parseFragment(self, stream, container="div", encoding=None,
parseMeta=False, useChardet=True):
"""Parse a HTML fragment into a well-formed tree fragment
container - name of the element we're setting the innerHTML property
if set to None, default to 'div'
stream - a filelike object or string containing the HTML to be parsed
The optional encoding parameter must be a string that indicates
the encoding. If specified, that encoding will be used,
regardless of any BOM or later declaration (such as in a meta
element)
"""
self._parse(stream, True, container=container, encoding=encoding)
return self.tree.getFragment()
def parseError(self, errorcode="XXX-undefined-error", datavars={}):
# XXX The idea is to make errorcode mandatory.
self.errors.append((self.tokenizer.stream.position(), errorcode, datavars))
if self.strict:
raise ParseError
def normalizeToken(self, token):
""" HTML5 specific normalizations to the token stream """
if token["type"] == tokenTypes["StartTag"]:
token["data"] = dict(token["data"][::-1])
return token
def adjustMathMLAttributes(self, token):
replacements = {"definitionurl": "definitionURL"}
for k, v in replacements.items():
if k in token["data"]:
token["data"][v] = token["data"][k]
del token["data"][k]
def adjustSVGAttributes(self, token):
replacements = {
"attributename": "attributeName",
"attributetype": "attributeType",
"basefrequency": "baseFrequency",
"baseprofile": "baseProfile",
"calcmode": "calcMode",
"clippathunits": "clipPathUnits",
"contentscripttype": "contentScriptType",
"contentstyletype": "contentStyleType",
"diffuseconstant": "diffuseConstant",
"edgemode": "edgeMode",
"externalresourcesrequired": "externalResourcesRequired",
"filterres": "filterRes",
"filterunits": "filterUnits",
"glyphref": "glyphRef",
"gradienttransform": "gradientTransform",
"gradientunits": "gradientUnits",
"kernelmatrix": "kernelMatrix",
"kernelunitlength": "kernelUnitLength",
"keypoints": "keyPoints",
"keysplines": "keySplines",
"keytimes": "keyTimes",
"lengthadjust": "lengthAdjust",
"limitingconeangle": "limitingConeAngle",
"markerheight": "markerHeight",
"markerunits": "markerUnits",
"markerwidth": "markerWidth",
"maskcontentunits": "maskContentUnits",
"maskunits": "maskUnits",
"numoctaves": "numOctaves",
"pathlength": "pathLength",
"patterncontentunits": "patternContentUnits",
"patterntransform": "patternTransform",
"patternunits": "patternUnits",
"pointsatx": "pointsAtX",
"pointsaty": "pointsAtY",
"pointsatz": "pointsAtZ",
"preservealpha": "preserveAlpha",
"preserveaspectratio": "preserveAspectRatio",
"primitiveunits": "primitiveUnits",
"refx": "refX",
"refy": "refY",
"repeatcount": "repeatCount",
"repeatdur": "repeatDur",
"requiredextensions": "requiredExtensions",
"requiredfeatures": "requiredFeatures",
"specularconstant": "specularConstant",
"specularexponent": "specularExponent",
"spreadmethod": "spreadMethod",
"startoffset": "startOffset",
"stddeviation": "stdDeviation",
"stitchtiles": "stitchTiles",
"surfacescale": "surfaceScale",
"systemlanguage": "systemLanguage",
"tablevalues": "tableValues",
"targetx": "targetX",
"targety": "targetY",
"textlength": "textLength",
"viewbox": "viewBox",
"viewtarget": "viewTarget",
"xchannelselector": "xChannelSelector",
"ychannelselector": "yChannelSelector",
"zoomandpan": "zoomAndPan"
}
for originalName in list(token["data"].keys()):
if originalName in replacements:
svgName = replacements[originalName]
token["data"][svgName] = token["data"][originalName]
del token["data"][originalName]
def adjustForeignAttributes(self, token):
replacements = {
"xlink:actuate": ("xlink", "actuate", namespaces["xlink"]),
"xlink:arcrole": ("xlink", "arcrole", namespaces["xlink"]),
"xlink:href": ("xlink", "href", namespaces["xlink"]),
"xlink:role": ("xlink", "role", namespaces["xlink"]),
"xlink:show": ("xlink", "show", namespaces["xlink"]),
"xlink:title": ("xlink", "title", namespaces["xlink"]),
"xlink:type": ("xlink", "type", namespaces["xlink"]),
"xml:base": ("xml", "base", namespaces["xml"]),
"xml:lang": ("xml", "lang", namespaces["xml"]),
"xml:space": ("xml", "space", namespaces["xml"]),
"xmlns": (None, "xmlns", namespaces["xmlns"]),
"xmlns:xlink": ("xmlns", "xlink", namespaces["xmlns"])
}
for originalName in token["data"].keys():
if originalName in replacements:
foreignName = replacements[originalName]
token["data"][foreignName] = token["data"][originalName]
del token["data"][originalName]
def reparseTokenNormal(self, token):
self.parser.phase()
def resetInsertionMode(self):
# The name of this method is mostly historical. (It's also used in the
# specification.)
last = False
newModes = {
"select": "inSelect",
"td": "inCell",
"th": "inCell",
"tr": "inRow",
"tbody": "inTableBody",
"thead": "inTableBody",
"tfoot": "inTableBody",
"caption": "inCaption",
"colgroup": "inColumnGroup",
"table": "inTable",
"head": "inBody",
"body": "inBody",
"frameset": "inFrameset",
"html": "beforeHead"
}
for node in self.tree.openElements[::-1]:
nodeName = node.name
new_phase = None
if node == self.tree.openElements[0]:
assert self.innerHTML
last = True
nodeName = self.innerHTML
# Check for conditions that should only happen in the innerHTML
# case
if nodeName in ("select", "colgroup", "head", "html"):
assert self.innerHTML
if not last and node.namespace != self.tree.defaultNamespace:
continue
if nodeName in newModes:
new_phase = self.phases[newModes[nodeName]]
break
elif last:
new_phase = self.phases["inBody"]
break
self.phase = new_phase
def parseRCDataRawtext(self, token, contentType):
"""Generic RCDATA/RAWTEXT Parsing algorithm
contentType - RCDATA or RAWTEXT
"""
assert contentType in ("RAWTEXT", "RCDATA")
self.tree.insertElement(token)
if contentType == "RAWTEXT":
self.tokenizer.state = self.tokenizer.rawtextState
else:
self.tokenizer.state = self.tokenizer.rcdataState
self.originalPhase = self.phase
self.phase = self.phases["text"]
def getPhases(debug):
def log(function):
"""Logger that records which phase processes each token"""
type_names = dict((value, key) for key, value in
constants.tokenTypes.items())
def wrapped(self, *args, **kwargs):
if function.__name__.startswith("process") and len(args) > 0:
token = args[0]
try:
info = {"type": type_names[token['type']]}
except:
raise
if token['type'] in constants.tagTokenTypes:
info["name"] = token['name']
self.parser.log.append((self.parser.tokenizer.state.__name__,
self.parser.phase.__class__.__name__,
self.__class__.__name__,
function.__name__,
info))
return function(self, *args, **kwargs)
else:
return function(self, *args, **kwargs)
return wrapped
def getMetaclass(use_metaclass, metaclass_func):
if use_metaclass:
return method_decorator_metaclass(metaclass_func)
else:
return type
class Phase(with_metaclass(getMetaclass(debug, log))):
"""Base class for helper object that implements each phase of processing
"""
def __init__(self, parser, tree):
self.parser = parser
self.tree = tree
def processEOF(self):
raise NotImplementedError
def processComment(self, token):
# For most phases the following is correct. Where it's not it will be
# overridden.
self.tree.insertComment(token, self.tree.openElements[-1])
def processDoctype(self, token):
self.parser.parseError("unexpected-doctype")
def processCharacters(self, token):
self.tree.insertText(token["data"])
def processSpaceCharacters(self, token):
self.tree.insertText(token["data"])
def processStartTag(self, token):
return self.startTagHandler[token["name"]](token)
def startTagHtml(self, token):
if not self.parser.firstStartTag and token["name"] == "html":
self.parser.parseError("non-html-root")
# XXX Need a check here to see if the first start tag token emitted is
# this token... If it's not, invoke self.parser.parseError().
for attr, value in token["data"].items():
if attr not in self.tree.openElements[0].attributes:
self.tree.openElements[0].attributes[attr] = value
self.parser.firstStartTag = False
def processEndTag(self, token):
return self.endTagHandler[token["name"]](token)
class InitialPhase(Phase):
def processSpaceCharacters(self, token):
pass
def processComment(self, token):
self.tree.insertComment(token, self.tree.document)
def processDoctype(self, token):
name = token["name"]
publicId = token["publicId"]
systemId = token["systemId"]
correct = token["correct"]
if (name != "html" or publicId is not None or
systemId is not None and systemId != "about:legacy-compat"):
self.parser.parseError("unknown-doctype")
if publicId is None:
publicId = ""
self.tree.insertDoctype(token)
if publicId != "":
publicId = publicId.translate(asciiUpper2Lower)
if (not correct or token["name"] != "html"
or publicId.startswith(
("+//silmaril//dtd html pro v0r11 19970101//",
"-//advasoft ltd//dtd html 3.0 aswedit + extensions//",
"-//as//dtd html 3.0 aswedit + extensions//",
"-//ietf//dtd html 2.0 level 1//",
"-//ietf//dtd html 2.0 level 2//",
"-//ietf//dtd html 2.0 strict level 1//",
"-//ietf//dtd html 2.0 strict level 2//",
"-//ietf//dtd html 2.0 strict//",
"-//ietf//dtd html 2.0//",
"-//ietf//dtd html 2.1e//",
"-//ietf//dtd html 3.0//",
"-//ietf//dtd html 3.2 final//",
"-//ietf//dtd html 3.2//",
"-//ietf//dtd html 3//",
"-//ietf//dtd html level 0//",
"-//ietf//dtd html level 1//",
"-//ietf//dtd html level 2//",
"-//ietf//dtd html level 3//",
"-//ietf//dtd html strict level 0//",
"-//ietf//dtd html strict level 1//",
"-//ietf//dtd html strict level 2//",
"-//ietf//dtd html strict level 3//",
"-//ietf//dtd html strict//",
"-//ietf//dtd html//",
"-//metrius//dtd metrius presentational//",
"-//microsoft//dtd internet explorer 2.0 html strict//",
"-//microsoft//dtd internet explorer 2.0 html//",
"-//microsoft//dtd internet explorer 2.0 tables//",
"-//microsoft//dtd internet explorer 3.0 html strict//",
"-//microsoft//dtd internet explorer 3.0 html//",
"-//microsoft//dtd internet explorer 3.0 tables//",
"-//netscape comm. corp.//dtd html//",
"-//netscape comm. corp.//dtd strict html//",
"-//o'reilly and associates//dtd html 2.0//",
"-//o'reilly and associates//dtd html extended 1.0//",
"-//o'reilly and associates//dtd html extended relaxed 1.0//",
"-//softquad software//dtd hotmetal pro 6.0::19990601::extensions to html 4.0//",
"-//softquad//dtd hotmetal pro 4.0::19971010::extensions to html 4.0//",
"-//spyglass//dtd html 2.0 extended//",
"-//sq//dtd html 2.0 hotmetal + extensions//",
"-//sun microsystems corp.//dtd hotjava html//",
"-//sun microsystems corp.//dtd hotjava strict html//",
"-//w3c//dtd html 3 1995-03-24//",
"-//w3c//dtd html 3.2 draft//",
"-//w3c//dtd html 3.2 final//",
"-//w3c//dtd html 3.2//",
"-//w3c//dtd html 3.2s draft//",
"-//w3c//dtd html 4.0 frameset//",
"-//w3c//dtd html 4.0 transitional//",
"-//w3c//dtd html experimental 19960712//",
"-//w3c//dtd html experimental 970421//",
"-//w3c//dtd w3 html//",
"-//w3o//dtd w3 html 3.0//",
"-//webtechs//dtd mozilla html 2.0//",
"-//webtechs//dtd mozilla html//"))
or publicId in
("-//w3o//dtd w3 html strict 3.0//en//",
"-/w3c/dtd html 4.0 transitional/en",
"html")
or publicId.startswith(
("-//w3c//dtd html 4.01 frameset//",
"-//w3c//dtd html 4.01 transitional//")) and
systemId is None
or systemId and systemId.lower() == "http://www.ibm.com/data/dtd/v11/ibmxhtml1-transitional.dtd"):
self.parser.compatMode = "quirks"
elif (publicId.startswith(
("-//w3c//dtd xhtml 1.0 frameset//",
"-//w3c//dtd xhtml 1.0 transitional//"))
or publicId.startswith(
("-//w3c//dtd html 4.01 frameset//",
"-//w3c//dtd html 4.01 transitional//")) and
systemId is not None):
self.parser.compatMode = "limited quirks"
self.parser.phase = self.parser.phases["beforeHtml"]
def anythingElse(self):
self.parser.compatMode = "quirks"
self.parser.phase = self.parser.phases["beforeHtml"]
def processCharacters(self, token):
self.parser.parseError("expected-doctype-but-got-chars")
self.anythingElse()
return token
def processStartTag(self, token):
self.parser.parseError("expected-doctype-but-got-start-tag",
{"name": token["name"]})
self.anythingElse()
return token
def processEndTag(self, token):
self.parser.parseError("expected-doctype-but-got-end-tag",
{"name": token["name"]})
self.anythingElse()
return token
def processEOF(self):
self.parser.parseError("expected-doctype-but-got-eof")
self.anythingElse()
return True
class BeforeHtmlPhase(Phase):
# helper methods
def insertHtmlElement(self):
self.tree.insertRoot(impliedTagToken("html", "StartTag"))
self.parser.phase = self.parser.phases["beforeHead"]
# other
def processEOF(self):
self.insertHtmlElement()
return True
def processComment(self, token):
self.tree.insertComment(token, self.tree.document)
def processSpaceCharacters(self, token):
pass
def processCharacters(self, token):
self.insertHtmlElement()
return token
def processStartTag(self, token):
if token["name"] == "html":
self.parser.firstStartTag = True
self.insertHtmlElement()
return token
def processEndTag(self, token):
if token["name"] not in ("head", "body", "html", "br"):
self.parser.parseError("unexpected-end-tag-before-html",
{"name": token["name"]})
else:
self.insertHtmlElement()
return token
class BeforeHeadPhase(Phase):
def __init__(self, parser, tree):
Phase.__init__(self, parser, tree)
self.startTagHandler = utils.MethodDispatcher([
("html", self.startTagHtml),
("head", self.startTagHead)
])
self.startTagHandler.default = self.startTagOther
self.endTagHandler = utils.MethodDispatcher([
(("head", "body", "html", "br"), self.endTagImplyHead)
])
self.endTagHandler.default = self.endTagOther
def processEOF(self):
self.startTagHead(impliedTagToken("head", "StartTag"))
return True
def processSpaceCharacters(self, token):
pass
def processCharacters(self, token):
self.startTagHead(impliedTagToken("head", "StartTag"))
return token
def startTagHtml(self, token):
return self.parser.phases["inBody"].processStartTag(token)
def startTagHead(self, token):
self.tree.insertElement(token)
self.tree.headPointer = self.tree.openElements[-1]
self.parser.phase = self.parser.phases["inHead"]
def startTagOther(self, token):
self.startTagHead(impliedTagToken("head", "StartTag"))
return token
def endTagImplyHead(self, token):
self.startTagHead(impliedTagToken("head", "StartTag"))
return token
def endTagOther(self, token):
self.parser.parseError("end-tag-after-implied-root",
{"name": token["name"]})
class InHeadPhase(Phase):
def __init__(self, parser, tree):
Phase.__init__(self, parser, tree)
self.startTagHandler = utils.MethodDispatcher([
("html", self.startTagHtml),
("title", self.startTagTitle),
(("noscript", "noframes", "style"), self.startTagNoScriptNoFramesStyle),
("script", self.startTagScript),
(("base", "basefont", "bgsound", "command", "link"),
self.startTagBaseLinkCommand),
("meta", self.startTagMeta),
("head", self.startTagHead)
])
self.startTagHandler.default = self.startTagOther
self. endTagHandler = utils.MethodDispatcher([
("head", self.endTagHead),
(("br", "html", "body"), self.endTagHtmlBodyBr)
])
self.endTagHandler.default = self.endTagOther
# the real thing
def processEOF(self):
self.anythingElse()
return True
def processCharacters(self, token):
self.anythingElse()
return token
def startTagHtml(self, token):
return self.parser.phases["inBody"].processStartTag(token)
def startTagHead(self, token):
self.parser.parseError("two-heads-are-not-better-than-one")
def startTagBaseLinkCommand(self, token):
self.tree.insertElement(token)
self.tree.openElements.pop()
token["selfClosingAcknowledged"] = True
def startTagMeta(self, token):
self.tree.insertElement(token)
self.tree.openElements.pop()
token["selfClosingAcknowledged"] = True
attributes = token["data"]
if self.parser.tokenizer.stream.charEncoding[1] == "tentative":
if "charset" in attributes:
self.parser.tokenizer.stream.changeEncoding(attributes["charset"])
elif ("content" in attributes and
"http-equiv" in attributes and
attributes["http-equiv"].lower() == "content-type"):
# Encoding it as UTF-8 here is a hack, as really we should pass
# the abstract Unicode string, and just use the
# ContentAttrParser on that, but using UTF-8 allows all chars
                    # to be encoded and, as an ASCII superset, it works.
data = inputstream.EncodingBytes(attributes["content"].encode("utf-8"))
parser = inputstream.ContentAttrParser(data)
codec = parser.parse()
self.parser.tokenizer.stream.changeEncoding(codec)
def startTagTitle(self, token):
self.parser.parseRCDataRawtext(token, "RCDATA")
def startTagNoScriptNoFramesStyle(self, token):
# Need to decide whether to implement the scripting-disabled case
self.parser.parseRCDataRawtext(token, "RAWTEXT")
def startTagScript(self, token):
self.tree.insertElement(token)
self.parser.tokenizer.state = self.parser.tokenizer.scriptDataState
self.parser.originalPhase = self.parser.phase
self.parser.phase = self.parser.phases["text"]
def startTagOther(self, token):
self.anythingElse()
return token
def endTagHead(self, token):
node = self.parser.tree.openElements.pop()
assert node.name == "head", "Expected head got %s" % node.name
self.parser.phase = self.parser.phases["afterHead"]
def endTagHtmlBodyBr(self, token):
self.anythingElse()
return token
def endTagOther(self, token):
self.parser.parseError("unexpected-end-tag", {"name": token["name"]})
def anythingElse(self):
self.endTagHead(impliedTagToken("head"))
# XXX If we implement a parser for which scripting is disabled we need to
# implement this phase.
#
# class InHeadNoScriptPhase(Phase):
class AfterHeadPhase(Phase):
def __init__(self, parser, tree):
Phase.__init__(self, parser, tree)
self.startTagHandler = utils.MethodDispatcher([
("html", self.startTagHtml),
("body", self.startTagBody),
("frameset", self.startTagFrameset),
(("base", "basefont", "bgsound", "link", "meta", "noframes", "script",
"style", "title"),
self.startTagFromHead),
("head", self.startTagHead)
])
self.startTagHandler.default = self.startTagOther
self.endTagHandler = utils.MethodDispatcher([(("body", "html", "br"),
self.endTagHtmlBodyBr)])
self.endTagHandler.default = self.endTagOther
def processEOF(self):
self.anythingElse()
return True
def processCharacters(self, token):
self.anythingElse()
return token
def startTagHtml(self, token):
return self.parser.phases["inBody"].processStartTag(token)
def startTagBody(self, token):
self.parser.framesetOK = False
self.tree.insertElement(token)
self.parser.phase = self.parser.phases["inBody"]
def startTagFrameset(self, token):
self.tree.insertElement(token)
self.parser.phase = self.parser.phases["inFrameset"]
def startTagFromHead(self, token):
self.parser.parseError("unexpected-start-tag-out-of-my-head",
{"name": token["name"]})
self.tree.openElements.append(self.tree.headPointer)
self.parser.phases["inHead"].processStartTag(token)
for node in self.tree.openElements[::-1]:
if node.name == "head":
self.tree.openElements.remove(node)
break
def startTagHead(self, token):
self.parser.parseError("unexpected-start-tag", {"name": token["name"]})
def startTagOther(self, token):
self.anythingElse()
return token
def endTagHtmlBodyBr(self, token):
self.anythingElse()
return token
def endTagOther(self, token):
self.parser.parseError("unexpected-end-tag", {"name": token["name"]})
def anythingElse(self):
self.tree.insertElement(impliedTagToken("body", "StartTag"))
self.parser.phase = self.parser.phases["inBody"]
self.parser.framesetOK = True
class InBodyPhase(Phase):
# http://www.whatwg.org/specs/web-apps/current-work/#parsing-main-inbody
# the really-really-really-very crazy mode
def __init__(self, parser, tree):
Phase.__init__(self, parser, tree)
# Keep a ref to this for special handling of whitespace in <pre>
self.processSpaceCharactersNonPre = self.processSpaceCharacters
self.startTagHandler = utils.MethodDispatcher([
("html", self.startTagHtml),
(("base", "basefont", "bgsound", "command", "link", "meta",
"noframes", "script", "style", "title"),
self.startTagProcessInHead),
("body", self.startTagBody),
("frameset", self.startTagFrameset),
(("address", "article", "aside", "blockquote", "center", "details",
"details", "dir", "div", "dl", "fieldset", "figcaption", "figure",
"footer", "header", "hgroup", "main", "menu", "nav", "ol", "p",
"section", "summary", "ul"),
self.startTagCloseP),
(headingElements, self.startTagHeading),
(("pre", "listing"), self.startTagPreListing),
("form", self.startTagForm),
(("li", "dd", "dt"), self.startTagListItem),
("plaintext", self.startTagPlaintext),
("a", self.startTagA),
(("b", "big", "code", "em", "font", "i", "s", "small", "strike",
"strong", "tt", "u"), self.startTagFormatting),
("nobr", self.startTagNobr),
("button", self.startTagButton),
(("applet", "marquee", "object"), self.startTagAppletMarqueeObject),
("xmp", self.startTagXmp),
("table", self.startTagTable),
(("area", "br", "embed", "img", "keygen", "wbr"),
self.startTagVoidFormatting),
(("param", "source", "track"), self.startTagParamSource),
("input", self.startTagInput),
("hr", self.startTagHr),
("image", self.startTagImage),
("isindex", self.startTagIsIndex),
("textarea", self.startTagTextarea),
("iframe", self.startTagIFrame),
(("noembed", "noframes", "noscript"), self.startTagRawtext),
("select", self.startTagSelect),
(("rp", "rt"), self.startTagRpRt),
(("option", "optgroup"), self.startTagOpt),
(("math"), self.startTagMath),
(("svg"), self.startTagSvg),
(("caption", "col", "colgroup", "frame", "head",
"tbody", "td", "tfoot", "th", "thead",
"tr"), self.startTagMisplaced)
])
self.startTagHandler.default = self.startTagOther
self.endTagHandler = utils.MethodDispatcher([
("body", self.endTagBody),
("html", self.endTagHtml),
(("address", "article", "aside", "blockquote", "button", "center",
"details", "dialog", "dir", "div", "dl", "fieldset", "figcaption", "figure",
"footer", "header", "hgroup", "listing", "main", "menu", "nav", "ol", "pre",
"section", "summary", "ul"), self.endTagBlock),
("form", self.endTagForm),
("p", self.endTagP),
(("dd", "dt", "li"), self.endTagListItem),
(headingElements, self.endTagHeading),
(("a", "b", "big", "code", "em", "font", "i", "nobr", "s", "small",
"strike", "strong", "tt", "u"), self.endTagFormatting),
(("applet", "marquee", "object"), self.endTagAppletMarqueeObject),
("br", self.endTagBr),
])
self.endTagHandler.default = self.endTagOther
def isMatchingFormattingElement(self, node1, node2):
if node1.name != node2.name or node1.namespace != node2.namespace:
return False
elif len(node1.attributes) != len(node2.attributes):
return False
else:
attributes1 = sorted(node1.attributes.items())
attributes2 = sorted(node2.attributes.items())
for attr1, attr2 in zip(attributes1, attributes2):
if attr1 != attr2:
return False
return True
# helper
def addFormattingElement(self, token):
self.tree.insertElement(token)
element = self.tree.openElements[-1]
matchingElements = []
for node in self.tree.activeFormattingElements[::-1]:
if node is Marker:
break
elif self.isMatchingFormattingElement(node, element):
matchingElements.append(node)
assert len(matchingElements) <= 3
if len(matchingElements) == 3:
self.tree.activeFormattingElements.remove(matchingElements[-1])
self.tree.activeFormattingElements.append(element)
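# Added note (not part of the original source): the three-element cap above is the
# spec's "Noah's Ark" clause: at most three identical formatting elements (same
# name, namespace and attributes) are kept between markers in the list of active
# formatting elements, and the earliest match is dropped when another is pushed.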
# the real deal
def processEOF(self):
allowed_elements = frozenset(("dd", "dt", "li", "p", "tbody", "td",
"tfoot", "th", "thead", "tr", "body",
"html"))
for node in self.tree.openElements[::-1]:
if node.name not in allowed_elements:
self.parser.parseError("expected-closing-tag-but-got-eof")
break
# Stop parsing
def processSpaceCharactersDropNewline(self, token):
# Sometimes (start of <pre>, <listing>, and <textarea> blocks) we
# want to drop leading newlines
data = token["data"]
self.processSpaceCharacters = self.processSpaceCharactersNonPre
if (data.startswith("\n") and
self.tree.openElements[-1].name in ("pre", "listing", "textarea")
and not self.tree.openElements[-1].hasContent()):
data = data[1:]
if data:
self.tree.reconstructActiveFormattingElements()
self.tree.insertText(data)
def processCharacters(self, token):
if token["data"] == "\u0000":
# The tokenizer should always emit null on its own
return
self.tree.reconstructActiveFormattingElements()
self.tree.insertText(token["data"])
# This must be bad for performance
if (self.parser.framesetOK and
any([char not in spaceCharacters
for char in token["data"]])):
self.parser.framesetOK = False
def processSpaceCharacters(self, token):
self.tree.reconstructActiveFormattingElements()
self.tree.insertText(token["data"])
def startTagProcessInHead(self, token):
return self.parser.phases["inHead"].processStartTag(token)
def startTagBody(self, token):
self.parser.parseError("unexpected-start-tag", {"name": "body"})
if (len(self.tree.openElements) == 1
or self.tree.openElements[1].name != "body"):
assert self.parser.innerHTML
else:
self.parser.framesetOK = False
for attr, value in token["data"].items():
if attr not in self.tree.openElements[1].attributes:
self.tree.openElements[1].attributes[attr] = value
def startTagFrameset(self, token):
self.parser.parseError("unexpected-start-tag", {"name": "frameset"})
if (len(self.tree.openElements) == 1 or self.tree.openElements[1].name != "body"):
assert self.parser.innerHTML
elif not self.parser.framesetOK:
pass
else:
if self.tree.openElements[1].parent:
self.tree.openElements[1].parent.removeChild(self.tree.openElements[1])
while self.tree.openElements[-1].name != "html":
self.tree.openElements.pop()
self.tree.insertElement(token)
self.parser.phase = self.parser.phases["inFrameset"]
def startTagCloseP(self, token):
if self.tree.elementInScope("p", variant="button"):
self.endTagP(impliedTagToken("p"))
self.tree.insertElement(token)
def startTagPreListing(self, token):
if self.tree.elementInScope("p", variant="button"):
self.endTagP(impliedTagToken("p"))
self.tree.insertElement(token)
self.parser.framesetOK = False
self.processSpaceCharacters = self.processSpaceCharactersDropNewline
def startTagForm(self, token):
if self.tree.formPointer:
self.parser.parseError("unexpected-start-tag", {"name": "form"})
else:
if self.tree.elementInScope("p", variant="button"):
self.endTagP(impliedTagToken("p"))
self.tree.insertElement(token)
self.tree.formPointer = self.tree.openElements[-1]
def startTagListItem(self, token):
self.parser.framesetOK = False
stopNamesMap = {"li": ["li"],
"dt": ["dt", "dd"],
"dd": ["dt", "dd"]}
stopNames = stopNamesMap[token["name"]]
for node in reversed(self.tree.openElements):
if node.name in stopNames:
self.parser.phase.processEndTag(
impliedTagToken(node.name, "EndTag"))
break
if (node.nameTuple in specialElements and
node.name not in ("address", "div", "p")):
break
if self.tree.elementInScope("p", variant="button"):
self.parser.phase.processEndTag(
impliedTagToken("p", "EndTag"))
self.tree.insertElement(token)
def startTagPlaintext(self, token):
if self.tree.elementInScope("p", variant="button"):
self.endTagP(impliedTagToken("p"))
self.tree.insertElement(token)
self.parser.tokenizer.state = self.parser.tokenizer.plaintextState
def startTagHeading(self, token):
if self.tree.elementInScope("p", variant="button"):
self.endTagP(impliedTagToken("p"))
if self.tree.openElements[-1].name in headingElements:
self.parser.parseError("unexpected-start-tag", {"name": token["name"]})
self.tree.openElements.pop()
self.tree.insertElement(token)
def startTagA(self, token):
afeAElement = self.tree.elementInActiveFormattingElements("a")
if afeAElement:
self.parser.parseError("unexpected-start-tag-implies-end-tag",
{"startName": "a", "endName": "a"})
self.endTagFormatting(impliedTagToken("a"))
if afeAElement in self.tree.openElements:
self.tree.openElements.remove(afeAElement)
if afeAElement in self.tree.activeFormattingElements:
self.tree.activeFormattingElements.remove(afeAElement)
self.tree.reconstructActiveFormattingElements()
self.addFormattingElement(token)
def startTagFormatting(self, token):
self.tree.reconstructActiveFormattingElements()
self.addFormattingElement(token)
def startTagNobr(self, token):
self.tree.reconstructActiveFormattingElements()
if self.tree.elementInScope("nobr"):
self.parser.parseError("unexpected-start-tag-implies-end-tag",
{"startName": "nobr", "endName": "nobr"})
self.processEndTag(impliedTagToken("nobr"))
# XXX Need tests that trigger the following
self.tree.reconstructActiveFormattingElements()
self.addFormattingElement(token)
def startTagButton(self, token):
if self.tree.elementInScope("button"):
self.parser.parseError("unexpected-start-tag-implies-end-tag",
{"startName": "button", "endName": "button"})
self.processEndTag(impliedTagToken("button"))
return token
else:
self.tree.reconstructActiveFormattingElements()
self.tree.insertElement(token)
self.parser.framesetOK = False
def startTagAppletMarqueeObject(self, token):
self.tree.reconstructActiveFormattingElements()
self.tree.insertElement(token)
self.tree.activeFormattingElements.append(Marker)
self.parser.framesetOK = False
def startTagXmp(self, token):
if self.tree.elementInScope("p", variant="button"):
self.endTagP(impliedTagToken("p"))
self.tree.reconstructActiveFormattingElements()
self.parser.framesetOK = False
self.parser.parseRCDataRawtext(token, "RAWTEXT")
def startTagTable(self, token):
if self.parser.compatMode != "quirks":
if self.tree.elementInScope("p", variant="button"):
self.processEndTag(impliedTagToken("p"))
self.tree.insertElement(token)
self.parser.framesetOK = False
self.parser.phase = self.parser.phases["inTable"]
def startTagVoidFormatting(self, token):
self.tree.reconstructActiveFormattingElements()
self.tree.insertElement(token)
self.tree.openElements.pop()
token["selfClosingAcknowledged"] = True
self.parser.framesetOK = False
def startTagInput(self, token):
framesetOK = self.parser.framesetOK
self.startTagVoidFormatting(token)
if ("type" in token["data"] and
token["data"]["type"].translate(asciiUpper2Lower) == "hidden"):
# input type=hidden doesn't change framesetOK
self.parser.framesetOK = framesetOK
def startTagParamSource(self, token):
self.tree.insertElement(token)
self.tree.openElements.pop()
token["selfClosingAcknowledged"] = True
def startTagHr(self, token):
if self.tree.elementInScope("p", variant="button"):
self.endTagP(impliedTagToken("p"))
self.tree.insertElement(token)
self.tree.openElements.pop()
token["selfClosingAcknowledged"] = True
self.parser.framesetOK = False
def startTagImage(self, token):
# No really...
self.parser.parseError("unexpected-start-tag-treated-as",
{"originalName": "image", "newName": "img"})
self.processStartTag(impliedTagToken("img", "StartTag",
attributes=token["data"],
selfClosing=token["selfClosing"]))
def startTagIsIndex(self, token):
self.parser.parseError("deprecated-tag", {"name": "isindex"})
if self.tree.formPointer:
return
form_attrs = {}
if "action" in token["data"]:
form_attrs["action"] = token["data"]["action"]
self.processStartTag(impliedTagToken("form", "StartTag",
attributes=form_attrs))
self.processStartTag(impliedTagToken("hr", "StartTag"))
self.processStartTag(impliedTagToken("label", "StartTag"))
# XXX Localization ...
if "prompt" in token["data"]:
prompt = token["data"]["prompt"]
else:
prompt = "This is a searchable index. Enter search keywords: "
self.processCharacters(
{"type": tokenTypes["Characters"], "data": prompt})
attributes = token["data"].copy()
if "action" in attributes:
del attributes["action"]
if "prompt" in attributes:
del attributes["prompt"]
attributes["name"] = "isindex"
self.processStartTag(impliedTagToken("input", "StartTag",
attributes=attributes,
selfClosing=
token["selfClosing"]))
self.processEndTag(impliedTagToken("label"))
self.processStartTag(impliedTagToken("hr", "StartTag"))
self.processEndTag(impliedTagToken("form"))
def startTagTextarea(self, token):
self.tree.insertElement(token)
self.parser.tokenizer.state = self.parser.tokenizer.rcdataState
self.processSpaceCharacters = self.processSpaceCharactersDropNewline
self.parser.framesetOK = False
def startTagIFrame(self, token):
self.parser.framesetOK = False
self.startTagRawtext(token)
def startTagRawtext(self, token):
"""iframe, noembed noframes, noscript(if scripting enabled)"""
self.parser.parseRCDataRawtext(token, "RAWTEXT")
def startTagOpt(self, token):
if self.tree.openElements[-1].name == "option":
self.parser.phase.processEndTag(impliedTagToken("option"))
self.tree.reconstructActiveFormattingElements()
self.parser.tree.insertElement(token)
def startTagSelect(self, token):
self.tree.reconstructActiveFormattingElements()
self.tree.insertElement(token)
self.parser.framesetOK = False
if self.parser.phase in (self.parser.phases["inTable"],
self.parser.phases["inCaption"],
self.parser.phases["inColumnGroup"],
self.parser.phases["inTableBody"],
self.parser.phases["inRow"],
self.parser.phases["inCell"]):
self.parser.phase = self.parser.phases["inSelectInTable"]
else:
self.parser.phase = self.parser.phases["inSelect"]
def startTagRpRt(self, token):
if self.tree.elementInScope("ruby"):
self.tree.generateImpliedEndTags()
if self.tree.openElements[-1].name != "ruby":
self.parser.parseError()
self.tree.insertElement(token)
def startTagMath(self, token):
self.tree.reconstructActiveFormattingElements()
self.parser.adjustMathMLAttributes(token)
self.parser.adjustForeignAttributes(token)
token["namespace"] = namespaces["mathml"]
self.tree.insertElement(token)
# Need to get the parse error right for the case where the token
# has a namespace not equal to the xmlns attribute
if token["selfClosing"]:
self.tree.openElements.pop()
token["selfClosingAcknowledged"] = True
def startTagSvg(self, token):
self.tree.reconstructActiveFormattingElements()
self.parser.adjustSVGAttributes(token)
self.parser.adjustForeignAttributes(token)
token["namespace"] = namespaces["svg"]
self.tree.insertElement(token)
# Need to get the parse error right for the case where the token
# has a namespace not equal to the xmlns attribute
if token["selfClosing"]:
self.tree.openElements.pop()
token["selfClosingAcknowledged"] = True
def startTagMisplaced(self, token):
""" Elements that should be children of other elements that have a
different insertion mode; here they are ignored
"caption", "col", "colgroup", "frame", "frameset", "head",
"option", "optgroup", "tbody", "td", "tfoot", "th", "thead",
"tr", "noscript"
"""
self.parser.parseError("unexpected-start-tag-ignored", {"name": token["name"]})
def startTagOther(self, token):
self.tree.reconstructActiveFormattingElements()
self.tree.insertElement(token)
def endTagP(self, token):
if not self.tree.elementInScope("p", variant="button"):
self.startTagCloseP(impliedTagToken("p", "StartTag"))
self.parser.parseError("unexpected-end-tag", {"name": "p"})
self.endTagP(impliedTagToken("p", "EndTag"))
else:
self.tree.generateImpliedEndTags("p")
if self.tree.openElements[-1].name != "p":
self.parser.parseError("unexpected-end-tag", {"name": "p"})
node = self.tree.openElements.pop()
while node.name != "p":
node = self.tree.openElements.pop()
def endTagBody(self, token):
if not self.tree.elementInScope("body"):
self.parser.parseError()
return
elif self.tree.openElements[-1].name != "body":
for node in self.tree.openElements[2:]:
if node.name not in frozenset(("dd", "dt", "li", "optgroup",
"option", "p", "rp", "rt",
"tbody", "td", "tfoot",
"th", "thead", "tr", "body",
"html")):
# Not sure this is the correct name for the parse error
self.parser.parseError(
"expected-one-end-tag-but-got-another",
{"expectedName": "body", "gotName": node.name})
break
self.parser.phase = self.parser.phases["afterBody"]
def endTagHtml(self, token):
# We repeat the test for the body end tag token being ignored here
if self.tree.elementInScope("body"):
self.endTagBody(impliedTagToken("body"))
return token
def endTagBlock(self, token):
# Put us back in the right whitespace handling mode
if token["name"] == "pre":
self.processSpaceCharacters = self.processSpaceCharactersNonPre
inScope = self.tree.elementInScope(token["name"])
if inScope:
self.tree.generateImpliedEndTags()
if self.tree.openElements[-1].name != token["name"]:
self.parser.parseError("end-tag-too-early", {"name": token["name"]})
if inScope:
node = self.tree.openElements.pop()
while node.name != token["name"]:
node = self.tree.openElements.pop()
def endTagForm(self, token):
node = self.tree.formPointer
self.tree.formPointer = None
if node is None or not self.tree.elementInScope(node):
self.parser.parseError("unexpected-end-tag",
{"name": "form"})
else:
self.tree.generateImpliedEndTags()
if self.tree.openElements[-1] != node:
self.parser.parseError("end-tag-too-early-ignored",
{"name": "form"})
self.tree.openElements.remove(node)
def endTagListItem(self, token):
if token["name"] == "li":
variant = "list"
else:
variant = None
if not self.tree.elementInScope(token["name"], variant=variant):
self.parser.parseError("unexpected-end-tag", {"name": token["name"]})
else:
self.tree.generateImpliedEndTags(exclude=token["name"])
if self.tree.openElements[-1].name != token["name"]:
self.parser.parseError(
"end-tag-too-early",
{"name": token["name"]})
node = self.tree.openElements.pop()
while node.name != token["name"]:
node = self.tree.openElements.pop()
def endTagHeading(self, token):
for item in headingElements:
if self.tree.elementInScope(item):
self.tree.generateImpliedEndTags()
break
if self.tree.openElements[-1].name != token["name"]:
self.parser.parseError("end-tag-too-early", {"name": token["name"]})
for item in headingElements:
if self.tree.elementInScope(item):
item = self.tree.openElements.pop()
while item.name not in headingElements:
item = self.tree.openElements.pop()
break
def endTagFormatting(self, token):
"""The much-feared adoption agency algorithm"""
# http://svn.whatwg.org/webapps/complete.html#adoptionAgency revision 7867
# XXX Better parseError messages appreciated.
# Step 1
outerLoopCounter = 0
# Step 2
while outerLoopCounter < 8:
# Step 3
outerLoopCounter += 1
# Step 4:
# Let the formatting element be the last element in
# the list of active formatting elements that:
# - is between the end of the list and the last scope
# marker in the list, if any, or the start of the list
# otherwise, and
# - has the same tag name as the token.
formattingElement = self.tree.elementInActiveFormattingElements(
token["name"])
if (not formattingElement or
(formattingElement in self.tree.openElements and
not self.tree.elementInScope(formattingElement.name))):
# If there is no such node, then abort these steps
# and instead act as described in the "any other
# end tag" entry below.
self.endTagOther(token)
return
# Otherwise, if there is such a node, but that node is
# not in the stack of open elements, then this is a
# parse error; remove the element from the list, and
# abort these steps.
elif formattingElement not in self.tree.openElements:
self.parser.parseError("adoption-agency-1.2", {"name": token["name"]})
self.tree.activeFormattingElements.remove(formattingElement)
return
# Otherwise, if there is such a node, and that node is
# also in the stack of open elements, but the element
# is not in scope, then this is a parse error; ignore
# the token, and abort these steps.
elif not self.tree.elementInScope(formattingElement.name):
self.parser.parseError("adoption-agency-4.4", {"name": token["name"]})
return
# Otherwise, there is a formatting element and that
# element is in the stack and is in scope. If the
# element is not the current node, this is a parse
# error. In any case, proceed with the algorithm as
# written in the following steps.
else:
if formattingElement != self.tree.openElements[-1]:
self.parser.parseError("adoption-agency-1.3", {"name": token["name"]})
# Step 5:
# Let the furthest block be the topmost node in the
# stack of open elements that is lower in the stack
# than the formatting element, and is an element in
# the special category. There might not be one.
afeIndex = self.tree.openElements.index(formattingElement)
furthestBlock = None
for element in self.tree.openElements[afeIndex:]:
if element.nameTuple in specialElements:
furthestBlock = element
break
# Step 6:
# If there is no furthest block, then the UA must
# first pop all the nodes from the bottom of the stack
# of open elements, from the current node up to and
# including the formatting element, then remove the
# formatting element from the list of active
# formatting elements, and finally abort these steps.
if furthestBlock is None:
element = self.tree.openElements.pop()
while element != formattingElement:
element = self.tree.openElements.pop()
self.tree.activeFormattingElements.remove(element)
return
# Step 7
commonAncestor = self.tree.openElements[afeIndex - 1]
# Step 8:
# The bookmark is supposed to help us identify where to reinsert
# nodes in step 15. We have to ensure that we reinsert nodes after
# the node before the active formatting element. Note the bookmark
# can move in step 9.7
bookmark = self.tree.activeFormattingElements.index(formattingElement)
# Step 9
lastNode = node = furthestBlock
innerLoopCounter = 0
index = self.tree.openElements.index(node)
while innerLoopCounter < 3:
innerLoopCounter += 1
# Node is element before node in open elements
index -= 1
node = self.tree.openElements[index]
if node not in self.tree.activeFormattingElements:
self.tree.openElements.remove(node)
continue
# Step 9.6
if node == formattingElement:
break
# Step 9.7
if lastNode == furthestBlock:
bookmark = self.tree.activeFormattingElements.index(node) + 1
# Step 9.8
clone = node.cloneNode()
# Replace node with clone
self.tree.activeFormattingElements[
self.tree.activeFormattingElements.index(node)] = clone
self.tree.openElements[
self.tree.openElements.index(node)] = clone
node = clone
# Step 9.9
# Remove lastNode from its parents, if any
if lastNode.parent:
lastNode.parent.removeChild(lastNode)
node.appendChild(lastNode)
# Step 9.10
lastNode = node
# Step 10
# Foster parent lastNode if commonAncestor is a
# table, tbody, tfoot, thead, or tr we need to foster
# parent the lastNode
if lastNode.parent:
lastNode.parent.removeChild(lastNode)
if commonAncestor.name in frozenset(("table", "tbody", "tfoot", "thead", "tr")):
parent, insertBefore = self.tree.getTableMisnestedNodePosition()
parent.insertBefore(lastNode, insertBefore)
else:
commonAncestor.appendChild(lastNode)
# Step 11
clone = formattingElement.cloneNode()
# Step 12
furthestBlock.reparentChildren(clone)
# Step 13
furthestBlock.appendChild(clone)
# Step 14
self.tree.activeFormattingElements.remove(formattingElement)
self.tree.activeFormattingElements.insert(bookmark, clone)
# Step 15
self.tree.openElements.remove(formattingElement)
self.tree.openElements.insert(
self.tree.openElements.index(furthestBlock) + 1, clone)
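# Added note (not part of the original source): this method runs for end tags of
# formatting elements (</a>, </b>, </nobr>, ...). For misnested input such as
# "<b>1<p>2</b>3" the steps above clone the formatting element and reparent the
# furthest block's children into the clone, so the block element ends up holding
# a properly nested copy of the formatting element.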
def endTagAppletMarqueeObject(self, token):
if self.tree.elementInScope(token["name"]):
self.tree.generateImpliedEndTags()
if self.tree.openElements[-1].name != token["name"]:
self.parser.parseError("end-tag-too-early", {"name": token["name"]})
if self.tree.elementInScope(token["name"]):
element = self.tree.openElements.pop()
while element.name != token["name"]:
element = self.tree.openElements.pop()
self.tree.clearActiveFormattingElements()
def endTagBr(self, token):
self.parser.parseError("unexpected-end-tag-treated-as",
{"originalName": "br", "newName": "br element"})
self.tree.reconstructActiveFormattingElements()
self.tree.insertElement(impliedTagToken("br", "StartTag"))
self.tree.openElements.pop()
def endTagOther(self, token):
for node in self.tree.openElements[::-1]:
if node.name == token["name"]:
self.tree.generateImpliedEndTags(exclude=token["name"])
if self.tree.openElements[-1].name != token["name"]:
self.parser.parseError("unexpected-end-tag", {"name": token["name"]})
while self.tree.openElements.pop() != node:
pass
break
else:
if node.nameTuple in specialElements:
self.parser.parseError("unexpected-end-tag", {"name": token["name"]})
break
class TextPhase(Phase):
def __init__(self, parser, tree):
Phase.__init__(self, parser, tree)
self.startTagHandler = utils.MethodDispatcher([])
self.startTagHandler.default = self.startTagOther
self.endTagHandler = utils.MethodDispatcher([
("script", self.endTagScript)])
self.endTagHandler.default = self.endTagOther
def processCharacters(self, token):
self.tree.insertText(token["data"])
def processEOF(self):
self.parser.parseError("expected-named-closing-tag-but-got-eof",
{"name": self.tree.openElements[-1].name})
self.tree.openElements.pop()
self.parser.phase = self.parser.originalPhase
return True
def startTagOther(self, token):
assert False, "Tried to process start tag %s in RCDATA/RAWTEXT mode" % token['name']
def endTagScript(self, token):
node = self.tree.openElements.pop()
assert node.name == "script"
self.parser.phase = self.parser.originalPhase
# The rest of this method is all stuff that only happens if
# document.write works
def endTagOther(self, token):
self.tree.openElements.pop()
self.parser.phase = self.parser.originalPhase
class InTablePhase(Phase):
# http://www.whatwg.org/specs/web-apps/current-work/#in-table
def __init__(self, parser, tree):
Phase.__init__(self, parser, tree)
self.startTagHandler = utils.MethodDispatcher([
("html", self.startTagHtml),
("caption", self.startTagCaption),
("colgroup", self.startTagColgroup),
("col", self.startTagCol),
(("tbody", "tfoot", "thead"), self.startTagRowGroup),
(("td", "th", "tr"), self.startTagImplyTbody),
("table", self.startTagTable),
(("style", "script"), self.startTagStyleScript),
("input", self.startTagInput),
("form", self.startTagForm)
])
self.startTagHandler.default = self.startTagOther
self.endTagHandler = utils.MethodDispatcher([
("table", self.endTagTable),
(("body", "caption", "col", "colgroup", "html", "tbody", "td",
"tfoot", "th", "thead", "tr"), self.endTagIgnore)
])
self.endTagHandler.default = self.endTagOther
# helper methods
def clearStackToTableContext(self):
# "clear the stack back to a table context"
while self.tree.openElements[-1].name not in ("table", "html"):
# self.parser.parseError("unexpected-implied-end-tag-in-table",
# {"name": self.tree.openElements[-1].name})
self.tree.openElements.pop()
# When the current node is <html> it's an innerHTML case
# processing methods
def processEOF(self):
if self.tree.openElements[-1].name != "html":
self.parser.parseError("eof-in-table")
else:
assert self.parser.innerHTML
# Stop parsing
def processSpaceCharacters(self, token):
originalPhase = self.parser.phase
self.parser.phase = self.parser.phases["inTableText"]
self.parser.phase.originalPhase = originalPhase
self.parser.phase.processSpaceCharacters(token)
def processCharacters(self, token):
originalPhase = self.parser.phase
self.parser.phase = self.parser.phases["inTableText"]
self.parser.phase.originalPhase = originalPhase
self.parser.phase.processCharacters(token)
def insertText(self, token):
# If we get here there must be at least one non-whitespace character
# Do the table magic!
self.tree.insertFromTable = True
self.parser.phases["inBody"].processCharacters(token)
self.tree.insertFromTable = False
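# Added note (not part of the original source): setting insertFromTable makes the
# tree builder "foster parent" the inserted content, i.e. place it before the
# current table rather than inside it, which is how the spec handles stray
# character data encountered while a table is open.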
def startTagCaption(self, token):
self.clearStackToTableContext()
self.tree.activeFormattingElements.append(Marker)
self.tree.insertElement(token)
self.parser.phase = self.parser.phases["inCaption"]
def startTagColgroup(self, token):
self.clearStackToTableContext()
self.tree.insertElement(token)
self.parser.phase = self.parser.phases["inColumnGroup"]
def startTagCol(self, token):
self.startTagColgroup(impliedTagToken("colgroup", "StartTag"))
return token
def startTagRowGroup(self, token):
self.clearStackToTableContext()
self.tree.insertElement(token)
self.parser.phase = self.parser.phases["inTableBody"]
def startTagImplyTbody(self, token):
self.startTagRowGroup(impliedTagToken("tbody", "StartTag"))
return token
def startTagTable(self, token):
self.parser.parseError("unexpected-start-tag-implies-end-tag",
{"startName": "table", "endName": "table"})
self.parser.phase.processEndTag(impliedTagToken("table"))
if not self.parser.innerHTML:
return token
def startTagStyleScript(self, token):
return self.parser.phases["inHead"].processStartTag(token)
def startTagInput(self, token):
if ("type" in token["data"] and
token["data"]["type"].translate(asciiUpper2Lower) == "hidden"):
self.parser.parseError("unexpected-hidden-input-in-table")
self.tree.insertElement(token)
# XXX associate with form
self.tree.openElements.pop()
else:
self.startTagOther(token)
def startTagForm(self, token):
self.parser.parseError("unexpected-form-in-table")
if self.tree.formPointer is None:
self.tree.insertElement(token)
self.tree.formPointer = self.tree.openElements[-1]
self.tree.openElements.pop()
def startTagOther(self, token):
self.parser.parseError("unexpected-start-tag-implies-table-voodoo", {"name": token["name"]})
# Do the table magic!
self.tree.insertFromTable = True
self.parser.phases["inBody"].processStartTag(token)
self.tree.insertFromTable = False
def endTagTable(self, token):
if self.tree.elementInScope("table", variant="table"):
self.tree.generateImpliedEndTags()
if self.tree.openElements[-1].name != "table":
self.parser.parseError("end-tag-too-early-named",
{"gotName": "table",
"expectedName": self.tree.openElements[-1].name})
while self.tree.openElements[-1].name != "table":
self.tree.openElements.pop()
self.tree.openElements.pop()
self.parser.resetInsertionMode()
else:
# innerHTML case
assert self.parser.innerHTML
self.parser.parseError()
def endTagIgnore(self, token):
self.parser.parseError("unexpected-end-tag", {"name": token["name"]})
def endTagOther(self, token):
self.parser.parseError("unexpected-end-tag-implies-table-voodoo", {"name": token["name"]})
# Do the table magic!
self.tree.insertFromTable = True
self.parser.phases["inBody"].processEndTag(token)
self.tree.insertFromTable = False
class InTableTextPhase(Phase):
def __init__(self, parser, tree):
Phase.__init__(self, parser, tree)
self.originalPhase = None
self.characterTokens = []
def flushCharacters(self):
data = "".join([item["data"] for item in self.characterTokens])
if any([item not in spaceCharacters for item in data]):
token = {"type": tokenTypes["Characters"], "data": data}
self.parser.phases["inTable"].insertText(token)
elif data:
self.tree.insertText(data)
self.characterTokens = []
def processComment(self, token):
self.flushCharacters()
self.parser.phase = self.originalPhase
return token
def processEOF(self):
self.flushCharacters()
self.parser.phase = self.originalPhase
return True
def processCharacters(self, token):
if token["data"] == "\u0000":
return
self.characterTokens.append(token)
def processSpaceCharacters(self, token):
# pretty sure we should never reach here
self.characterTokens.append(token)
# assert False
def processStartTag(self, token):
self.flushCharacters()
self.parser.phase = self.originalPhase
return token
def processEndTag(self, token):
self.flushCharacters()
self.parser.phase = self.originalPhase
return token
class InCaptionPhase(Phase):
# http://www.whatwg.org/specs/web-apps/current-work/#in-caption
def __init__(self, parser, tree):
Phase.__init__(self, parser, tree)
self.startTagHandler = utils.MethodDispatcher([
("html", self.startTagHtml),
(("caption", "col", "colgroup", "tbody", "td", "tfoot", "th",
"thead", "tr"), self.startTagTableElement)
])
self.startTagHandler.default = self.startTagOther
self.endTagHandler = utils.MethodDispatcher([
("caption", self.endTagCaption),
("table", self.endTagTable),
(("body", "col", "colgroup", "html", "tbody", "td", "tfoot", "th",
"thead", "tr"), self.endTagIgnore)
])
self.endTagHandler.default = self.endTagOther
def ignoreEndTagCaption(self):
return not self.tree.elementInScope("caption", variant="table")
def processEOF(self):
self.parser.phases["inBody"].processEOF()
def processCharacters(self, token):
return self.parser.phases["inBody"].processCharacters(token)
def startTagTableElement(self, token):
self.parser.parseError()
# XXX Have to duplicate logic here to find out if the tag is ignored
ignoreEndTag = self.ignoreEndTagCaption()
self.parser.phase.processEndTag(impliedTagToken("caption"))
if not ignoreEndTag:
return token
def startTagOther(self, token):
return self.parser.phases["inBody"].processStartTag(token)
def endTagCaption(self, token):
if not self.ignoreEndTagCaption():
# AT this code is quite similar to endTagTable in "InTable"
self.tree.generateImpliedEndTags()
if self.tree.openElements[-1].name != "caption":
self.parser.parseError("expected-one-end-tag-but-got-another",
{"gotName": "caption",
"expectedName": self.tree.openElements[-1].name})
while self.tree.openElements[-1].name != "caption":
self.tree.openElements.pop()
self.tree.openElements.pop()
self.tree.clearActiveFormattingElements()
self.parser.phase = self.parser.phases["inTable"]
else:
# innerHTML case
assert self.parser.innerHTML
self.parser.parseError()
def endTagTable(self, token):
self.parser.parseError()
ignoreEndTag = self.ignoreEndTagCaption()
self.parser.phase.processEndTag(impliedTagToken("caption"))
if not ignoreEndTag:
return token
def endTagIgnore(self, token):
self.parser.parseError("unexpected-end-tag", {"name": token["name"]})
def endTagOther(self, token):
return self.parser.phases["inBody"].processEndTag(token)
class InColumnGroupPhase(Phase):
# http://www.whatwg.org/specs/web-apps/current-work/#in-column
def __init__(self, parser, tree):
Phase.__init__(self, parser, tree)
self.startTagHandler = utils.MethodDispatcher([
("html", self.startTagHtml),
("col", self.startTagCol)
])
self.startTagHandler.default = self.startTagOther
self.endTagHandler = utils.MethodDispatcher([
("colgroup", self.endTagColgroup),
("col", self.endTagCol)
])
self.endTagHandler.default = self.endTagOther
def ignoreEndTagColgroup(self):
return self.tree.openElements[-1].name == "html"
def processEOF(self):
if self.tree.openElements[-1].name == "html":
assert self.parser.innerHTML
return
else:
ignoreEndTag = self.ignoreEndTagColgroup()
self.endTagColgroup(impliedTagToken("colgroup"))
if not ignoreEndTag:
return True
def processCharacters(self, token):
ignoreEndTag = self.ignoreEndTagColgroup()
self.endTagColgroup(impliedTagToken("colgroup"))
if not ignoreEndTag:
return token
def startTagCol(self, token):
self.tree.insertElement(token)
self.tree.openElements.pop()
def startTagOther(self, token):
ignoreEndTag = self.ignoreEndTagColgroup()
self.endTagColgroup(impliedTagToken("colgroup"))
if not ignoreEndTag:
return token
def endTagColgroup(self, token):
if self.ignoreEndTagColgroup():
# innerHTML case
assert self.parser.innerHTML
self.parser.parseError()
else:
self.tree.openElements.pop()
self.parser.phase = self.parser.phases["inTable"]
def endTagCol(self, token):
self.parser.parseError("no-end-tag", {"name": "col"})
def endTagOther(self, token):
ignoreEndTag = self.ignoreEndTagColgroup()
self.endTagColgroup(impliedTagToken("colgroup"))
if not ignoreEndTag:
return token
class InTableBodyPhase(Phase):
# http://www.whatwg.org/specs/web-apps/current-work/#in-table0
def __init__(self, parser, tree):
Phase.__init__(self, parser, tree)
self.startTagHandler = utils.MethodDispatcher([
("html", self.startTagHtml),
("tr", self.startTagTr),
(("td", "th"), self.startTagTableCell),
(("caption", "col", "colgroup", "tbody", "tfoot", "thead"),
self.startTagTableOther)
])
self.startTagHandler.default = self.startTagOther
self.endTagHandler = utils.MethodDispatcher([
(("tbody", "tfoot", "thead"), self.endTagTableRowGroup),
("table", self.endTagTable),
(("body", "caption", "col", "colgroup", "html", "td", "th",
"tr"), self.endTagIgnore)
])
self.endTagHandler.default = self.endTagOther
# helper methods
def clearStackToTableBodyContext(self):
while self.tree.openElements[-1].name not in ("tbody", "tfoot",
"thead", "html"):
# self.parser.parseError("unexpected-implied-end-tag-in-table",
# {"name": self.tree.openElements[-1].name})
self.tree.openElements.pop()
if self.tree.openElements[-1].name == "html":
assert self.parser.innerHTML
# the rest
def processEOF(self):
self.parser.phases["inTable"].processEOF()
def processSpaceCharacters(self, token):
return self.parser.phases["inTable"].processSpaceCharacters(token)
def processCharacters(self, token):
return self.parser.phases["inTable"].processCharacters(token)
def startTagTr(self, token):
self.clearStackToTableBodyContext()
self.tree.insertElement(token)
self.parser.phase = self.parser.phases["inRow"]
def startTagTableCell(self, token):
self.parser.parseError("unexpected-cell-in-table-body",
{"name": token["name"]})
self.startTagTr(impliedTagToken("tr", "StartTag"))
return token
def startTagTableOther(self, token):
# XXX AT Any ideas on how to share this with endTagTable?
if (self.tree.elementInScope("tbody", variant="table") or
self.tree.elementInScope("thead", variant="table") or
self.tree.elementInScope("tfoot", variant="table")):
self.clearStackToTableBodyContext()
self.endTagTableRowGroup(
impliedTagToken(self.tree.openElements[-1].name))
return token
else:
# innerHTML case
assert self.parser.innerHTML
self.parser.parseError()
def startTagOther(self, token):
return self.parser.phases["inTable"].processStartTag(token)
def endTagTableRowGroup(self, token):
if self.tree.elementInScope(token["name"], variant="table"):
self.clearStackToTableBodyContext()
self.tree.openElements.pop()
self.parser.phase = self.parser.phases["inTable"]
else:
self.parser.parseError("unexpected-end-tag-in-table-body",
{"name": token["name"]})
def endTagTable(self, token):
if (self.tree.elementInScope("tbody", variant="table") or
self.tree.elementInScope("thead", variant="table") or
self.tree.elementInScope("tfoot", variant="table")):
self.clearStackToTableBodyContext()
self.endTagTableRowGroup(
impliedTagToken(self.tree.openElements[-1].name))
return token
else:
# innerHTML case
assert self.parser.innerHTML
self.parser.parseError()
def endTagIgnore(self, token):
self.parser.parseError("unexpected-end-tag-in-table-body",
{"name": token["name"]})
def endTagOther(self, token):
return self.parser.phases["inTable"].processEndTag(token)
class InRowPhase(Phase):
# http://www.whatwg.org/specs/web-apps/current-work/#in-row
def __init__(self, parser, tree):
Phase.__init__(self, parser, tree)
self.startTagHandler = utils.MethodDispatcher([
("html", self.startTagHtml),
(("td", "th"), self.startTagTableCell),
(("caption", "col", "colgroup", "tbody", "tfoot", "thead",
"tr"), self.startTagTableOther)
])
self.startTagHandler.default = self.startTagOther
self.endTagHandler = utils.MethodDispatcher([
("tr", self.endTagTr),
("table", self.endTagTable),
(("tbody", "tfoot", "thead"), self.endTagTableRowGroup),
(("body", "caption", "col", "colgroup", "html", "td", "th"),
self.endTagIgnore)
])
self.endTagHandler.default = self.endTagOther
# helper methods (XXX unify this with other table helper methods)
def clearStackToTableRowContext(self):
while self.tree.openElements[-1].name not in ("tr", "html"):
self.parser.parseError("unexpected-implied-end-tag-in-table-row",
{"name": self.tree.openElements[-1].name})
self.tree.openElements.pop()
def ignoreEndTagTr(self):
return not self.tree.elementInScope("tr", variant="table")
# the rest
def processEOF(self):
self.parser.phases["inTable"].processEOF()
def processSpaceCharacters(self, token):
return self.parser.phases["inTable"].processSpaceCharacters(token)
def processCharacters(self, token):
return self.parser.phases["inTable"].processCharacters(token)
def startTagTableCell(self, token):
self.clearStackToTableRowContext()
self.tree.insertElement(token)
self.parser.phase = self.parser.phases["inCell"]
self.tree.activeFormattingElements.append(Marker)
def startTagTableOther(self, token):
ignoreEndTag = self.ignoreEndTagTr()
self.endTagTr(impliedTagToken("tr"))
# XXX how are we sure it's always ignored in the innerHTML case?
if not ignoreEndTag:
return token
def startTagOther(self, token):
return self.parser.phases["inTable"].processStartTag(token)
def endTagTr(self, token):
if not self.ignoreEndTagTr():
self.clearStackToTableRowContext()
self.tree.openElements.pop()
self.parser.phase = self.parser.phases["inTableBody"]
else:
# innerHTML case
assert self.parser.innerHTML
self.parser.parseError()
def endTagTable(self, token):
ignoreEndTag = self.ignoreEndTagTr()
self.endTagTr(impliedTagToken("tr"))
# Reprocess the current tag if the tr end tag was not ignored
# XXX how are we sure it's always ignored in the innerHTML case?
if not ignoreEndTag:
return token
def endTagTableRowGroup(self, token):
if self.tree.elementInScope(token["name"], variant="table"):
self.endTagTr(impliedTagToken("tr"))
return token
else:
self.parser.parseError()
def endTagIgnore(self, token):
self.parser.parseError("unexpected-end-tag-in-table-row",
{"name": token["name"]})
def endTagOther(self, token):
return self.parser.phases["inTable"].processEndTag(token)
class InCellPhase(Phase):
# http://www.whatwg.org/specs/web-apps/current-work/#in-cell
def __init__(self, parser, tree):
Phase.__init__(self, parser, tree)
self.startTagHandler = utils.MethodDispatcher([
("html", self.startTagHtml),
(("caption", "col", "colgroup", "tbody", "td", "tfoot", "th",
"thead", "tr"), self.startTagTableOther)
])
self.startTagHandler.default = self.startTagOther
self.endTagHandler = utils.MethodDispatcher([
(("td", "th"), self.endTagTableCell),
(("body", "caption", "col", "colgroup", "html"), self.endTagIgnore),
(("table", "tbody", "tfoot", "thead", "tr"), self.endTagImply)
])
self.endTagHandler.default = self.endTagOther
# helper
def closeCell(self):
if self.tree.elementInScope("td", variant="table"):
self.endTagTableCell(impliedTagToken("td"))
elif self.tree.elementInScope("th", variant="table"):
self.endTagTableCell(impliedTagToken("th"))
# the rest
def processEOF(self):
self.parser.phases["inBody"].processEOF()
def processCharacters(self, token):
return self.parser.phases["inBody"].processCharacters(token)
def startTagTableOther(self, token):
if (self.tree.elementInScope("td", variant="table") or
self.tree.elementInScope("th", variant="table")):
self.closeCell()
return token
else:
# innerHTML case
assert self.parser.innerHTML
self.parser.parseError()
def startTagOther(self, token):
return self.parser.phases["inBody"].processStartTag(token)
def endTagTableCell(self, token):
if self.tree.elementInScope(token["name"], variant="table"):
self.tree.generateImpliedEndTags(token["name"])
if self.tree.openElements[-1].name != token["name"]:
self.parser.parseError("unexpected-cell-end-tag",
{"name": token["name"]})
while True:
node = self.tree.openElements.pop()
if node.name == token["name"]:
break
else:
self.tree.openElements.pop()
self.tree.clearActiveFormattingElements()
self.parser.phase = self.parser.phases["inRow"]
else:
self.parser.parseError("unexpected-end-tag", {"name": token["name"]})
def endTagIgnore(self, token):
self.parser.parseError("unexpected-end-tag", {"name": token["name"]})
def endTagImply(self, token):
if self.tree.elementInScope(token["name"], variant="table"):
self.closeCell()
return token
else:
# sometimes innerHTML case
self.parser.parseError()
def endTagOther(self, token):
return self.parser.phases["inBody"].processEndTag(token)
class InSelectPhase(Phase):
def __init__(self, parser, tree):
Phase.__init__(self, parser, tree)
self.startTagHandler = utils.MethodDispatcher([
("html", self.startTagHtml),
("option", self.startTagOption),
("optgroup", self.startTagOptgroup),
("select", self.startTagSelect),
(("input", "keygen", "textarea"), self.startTagInput),
("script", self.startTagScript)
])
self.startTagHandler.default = self.startTagOther
self.endTagHandler = utils.MethodDispatcher([
("option", self.endTagOption),
("optgroup", self.endTagOptgroup),
("select", self.endTagSelect)
])
self.endTagHandler.default = self.endTagOther
# http://www.whatwg.org/specs/web-apps/current-work/#in-select
def processEOF(self):
if self.tree.openElements[-1].name != "html":
self.parser.parseError("eof-in-select")
else:
assert self.parser.innerHTML
def processCharacters(self, token):
if token["data"] == "\u0000":
return
self.tree.insertText(token["data"])
def startTagOption(self, token):
# We need to imply </option> if <option> is the current node.
if self.tree.openElements[-1].name == "option":
self.tree.openElements.pop()
self.tree.insertElement(token)
def startTagOptgroup(self, token):
if self.tree.openElements[-1].name == "option":
self.tree.openElements.pop()
if self.tree.openElements[-1].name == "optgroup":
self.tree.openElements.pop()
self.tree.insertElement(token)
def startTagSelect(self, token):
self.parser.parseError("unexpected-select-in-select")
self.endTagSelect(impliedTagToken("select"))
def startTagInput(self, token):
self.parser.parseError("unexpected-input-in-select")
if self.tree.elementInScope("select", variant="select"):
self.endTagSelect(impliedTagToken("select"))
return token
else:
assert self.parser.innerHTML
def startTagScript(self, token):
return self.parser.phases["inHead"].processStartTag(token)
def startTagOther(self, token):
self.parser.parseError("unexpected-start-tag-in-select",
{"name": token["name"]})
def endTagOption(self, token):
if self.tree.openElements[-1].name == "option":
self.tree.openElements.pop()
else:
self.parser.parseError("unexpected-end-tag-in-select",
{"name": "option"})
def endTagOptgroup(self, token):
# </optgroup> implicitly closes <option>
if (self.tree.openElements[-1].name == "option" and
self.tree.openElements[-2].name == "optgroup"):
self.tree.openElements.pop()
# It also closes </optgroup>
if self.tree.openElements[-1].name == "optgroup":
self.tree.openElements.pop()
# But nothing else
else:
self.parser.parseError("unexpected-end-tag-in-select",
{"name": "optgroup"})
def endTagSelect(self, token):
if self.tree.elementInScope("select", variant="select"):
node = self.tree.openElements.pop()
while node.name != "select":
node = self.tree.openElements.pop()
self.parser.resetInsertionMode()
else:
# innerHTML case
assert self.parser.innerHTML
self.parser.parseError()
def endTagOther(self, token):
self.parser.parseError("unexpected-end-tag-in-select",
{"name": token["name"]})
class InSelectInTablePhase(Phase):
def __init__(self, parser, tree):
Phase.__init__(self, parser, tree)
self.startTagHandler = utils.MethodDispatcher([
(("caption", "table", "tbody", "tfoot", "thead", "tr", "td", "th"),
self.startTagTable)
])
self.startTagHandler.default = self.startTagOther
self.endTagHandler = utils.MethodDispatcher([
(("caption", "table", "tbody", "tfoot", "thead", "tr", "td", "th"),
self.endTagTable)
])
self.endTagHandler.default = self.endTagOther
def processEOF(self):
self.parser.phases["inSelect"].processEOF()
def processCharacters(self, token):
return self.parser.phases["inSelect"].processCharacters(token)
def startTagTable(self, token):
self.parser.parseError("unexpected-table-element-start-tag-in-select-in-table", {"name": token["name"]})
self.endTagOther(impliedTagToken("select"))
return token
def startTagOther(self, token):
return self.parser.phases["inSelect"].processStartTag(token)
def endTagTable(self, token):
self.parser.parseError("unexpected-table-element-end-tag-in-select-in-table", {"name": token["name"]})
if self.tree.elementInScope(token["name"], variant="table"):
self.endTagOther(impliedTagToken("select"))
return token
def endTagOther(self, token):
return self.parser.phases["inSelect"].processEndTag(token)
class InForeignContentPhase(Phase):
breakoutElements = frozenset(["b", "big", "blockquote", "body", "br",
"center", "code", "dd", "div", "dl", "dt",
"em", "embed", "h1", "h2", "h3",
"h4", "h5", "h6", "head", "hr", "i", "img",
"li", "listing", "menu", "meta", "nobr",
"ol", "p", "pre", "ruby", "s", "small",
"span", "strong", "strike", "sub", "sup",
"table", "tt", "u", "ul", "var"])
def __init__(self, parser, tree):
Phase.__init__(self, parser, tree)
def adjustSVGTagNames(self, token):
replacements = {"altglyph": "altGlyph",
"altglyphdef": "altGlyphDef",
"altglyphitem": "altGlyphItem",
"animatecolor": "animateColor",
"animatemotion": "animateMotion",
"animatetransform": "animateTransform",
"clippath": "clipPath",
"feblend": "feBlend",
"fecolormatrix": "feColorMatrix",
"fecomponenttransfer": "feComponentTransfer",
"fecomposite": "feComposite",
"feconvolvematrix": "feConvolveMatrix",
"fediffuselighting": "feDiffuseLighting",
"fedisplacementmap": "feDisplacementMap",
"fedistantlight": "feDistantLight",
"feflood": "feFlood",
"fefunca": "feFuncA",
"fefuncb": "feFuncB",
"fefuncg": "feFuncG",
"fefuncr": "feFuncR",
"fegaussianblur": "feGaussianBlur",
"feimage": "feImage",
"femerge": "feMerge",
"femergenode": "feMergeNode",
"femorphology": "feMorphology",
"feoffset": "feOffset",
"fepointlight": "fePointLight",
"fespecularlighting": "feSpecularLighting",
"fespotlight": "feSpotLight",
"fetile": "feTile",
"feturbulence": "feTurbulence",
"foreignobject": "foreignObject",
"glyphref": "glyphRef",
"lineargradient": "linearGradient",
"radialgradient": "radialGradient",
"textpath": "textPath"}
if token["name"] in replacements:
token["name"] = replacements[token["name"]]
def processCharacters(self, token):
if token["data"] == "\u0000":
token["data"] = "\uFFFD"
elif (self.parser.framesetOK and
any(char not in spaceCharacters for char in token["data"])):
self.parser.framesetOK = False
Phase.processCharacters(self, token)
def processStartTag(self, token):
currentNode = self.tree.openElements[-1]
if (token["name"] in self.breakoutElements or
(token["name"] == "font" and
set(token["data"].keys()) & set(["color", "face", "size"]))):
self.parser.parseError("unexpected-html-element-in-foreign-content",
{"name": token["name"]})
while (self.tree.openElements[-1].namespace !=
self.tree.defaultNamespace and
not self.parser.isHTMLIntegrationPoint(self.tree.openElements[-1]) and
not self.parser.isMathMLTextIntegrationPoint(self.tree.openElements[-1])):
self.tree.openElements.pop()
return token
else:
if currentNode.namespace == namespaces["mathml"]:
self.parser.adjustMathMLAttributes(token)
elif currentNode.namespace == namespaces["svg"]:
self.adjustSVGTagNames(token)
self.parser.adjustSVGAttributes(token)
self.parser.adjustForeignAttributes(token)
token["namespace"] = currentNode.namespace
self.tree.insertElement(token)
if token["selfClosing"]:
self.tree.openElements.pop()
token["selfClosingAcknowledged"] = True
def processEndTag(self, token):
nodeIndex = len(self.tree.openElements) - 1
node = self.tree.openElements[-1]
if node.name != token["name"]:
self.parser.parseError("unexpected-end-tag", {"name": token["name"]})
while True:
if node.name.translate(asciiUpper2Lower) == token["name"]:
# XXX this isn't in the spec but it seems necessary
if self.parser.phase == self.parser.phases["inTableText"]:
self.parser.phase.flushCharacters()
self.parser.phase = self.parser.phase.originalPhase
while self.tree.openElements.pop() != node:
assert self.tree.openElements
new_token = None
break
nodeIndex -= 1
node = self.tree.openElements[nodeIndex]
if node.namespace != self.tree.defaultNamespace:
continue
else:
new_token = self.parser.phase.processEndTag(token)
break
return new_token
class AfterBodyPhase(Phase):
def __init__(self, parser, tree):
Phase.__init__(self, parser, tree)
self.startTagHandler = utils.MethodDispatcher([
("html", self.startTagHtml)
])
self.startTagHandler.default = self.startTagOther
self.endTagHandler = utils.MethodDispatcher([("html", self.endTagHtml)])
self.endTagHandler.default = self.endTagOther
def processEOF(self):
# Stop parsing
pass
def processComment(self, token):
# This is needed because data is to be appended to the <html> element
# here and not to whatever is currently open.
self.tree.insertComment(token, self.tree.openElements[0])
def processCharacters(self, token):
self.parser.parseError("unexpected-char-after-body")
self.parser.phase = self.parser.phases["inBody"]
return token
def startTagHtml(self, token):
return self.parser.phases["inBody"].processStartTag(token)
def startTagOther(self, token):
self.parser.parseError("unexpected-start-tag-after-body",
{"name": token["name"]})
self.parser.phase = self.parser.phases["inBody"]
return token
def endTagHtml(self, token):
if self.parser.innerHTML:
self.parser.parseError("unexpected-end-tag-after-body-innerhtml")
else:
self.parser.phase = self.parser.phases["afterAfterBody"]
def endTagOther(self, token):
self.parser.parseError("unexpected-end-tag-after-body",
{"name": token["name"]})
self.parser.phase = self.parser.phases["inBody"]
return token
class InFramesetPhase(Phase):
# http://www.whatwg.org/specs/web-apps/current-work/#in-frameset
def __init__(self, parser, tree):
Phase.__init__(self, parser, tree)
self.startTagHandler = utils.MethodDispatcher([
("html", self.startTagHtml),
("frameset", self.startTagFrameset),
("frame", self.startTagFrame),
("noframes", self.startTagNoframes)
])
self.startTagHandler.default = self.startTagOther
self.endTagHandler = utils.MethodDispatcher([
("frameset", self.endTagFrameset)
])
self.endTagHandler.default = self.endTagOther
def processEOF(self):
if self.tree.openElements[-1].name != "html":
self.parser.parseError("eof-in-frameset")
else:
assert self.parser.innerHTML
def processCharacters(self, token):
self.parser.parseError("unexpected-char-in-frameset")
def startTagFrameset(self, token):
self.tree.insertElement(token)
def startTagFrame(self, token):
self.tree.insertElement(token)
self.tree.openElements.pop()
def startTagNoframes(self, token):
return self.parser.phases["inBody"].processStartTag(token)
def startTagOther(self, token):
self.parser.parseError("unexpected-start-tag-in-frameset",
{"name": token["name"]})
def endTagFrameset(self, token):
if self.tree.openElements[-1].name == "html":
# innerHTML case
self.parser.parseError("unexpected-frameset-in-frameset-innerhtml")
else:
self.tree.openElements.pop()
if (not self.parser.innerHTML and
self.tree.openElements[-1].name != "frameset"):
# If we're not in innerHTML mode and the current node is not a
# "frameset" element (anymore) then switch.
self.parser.phase = self.parser.phases["afterFrameset"]
def endTagOther(self, token):
self.parser.parseError("unexpected-end-tag-in-frameset",
{"name": token["name"]})
class AfterFramesetPhase(Phase):
# http://www.whatwg.org/specs/web-apps/current-work/#after3
def __init__(self, parser, tree):
Phase.__init__(self, parser, tree)
self.startTagHandler = utils.MethodDispatcher([
("html", self.startTagHtml),
("noframes", self.startTagNoframes)
])
self.startTagHandler.default = self.startTagOther
self.endTagHandler = utils.MethodDispatcher([
("html", self.endTagHtml)
])
self.endTagHandler.default = self.endTagOther
def processEOF(self):
# Stop parsing
pass
def processCharacters(self, token):
self.parser.parseError("unexpected-char-after-frameset")
def startTagNoframes(self, token):
return self.parser.phases["inHead"].processStartTag(token)
def startTagOther(self, token):
self.parser.parseError("unexpected-start-tag-after-frameset",
{"name": token["name"]})
def endTagHtml(self, token):
self.parser.phase = self.parser.phases["afterAfterFrameset"]
def endTagOther(self, token):
self.parser.parseError("unexpected-end-tag-after-frameset",
{"name": token["name"]})
class AfterAfterBodyPhase(Phase):
def __init__(self, parser, tree):
Phase.__init__(self, parser, tree)
self.startTagHandler = utils.MethodDispatcher([
("html", self.startTagHtml)
])
self.startTagHandler.default = self.startTagOther
def processEOF(self):
pass
def processComment(self, token):
self.tree.insertComment(token, self.tree.document)
def processSpaceCharacters(self, token):
return self.parser.phases["inBody"].processSpaceCharacters(token)
def processCharacters(self, token):
self.parser.parseError("expected-eof-but-got-char")
self.parser.phase = self.parser.phases["inBody"]
return token
def startTagHtml(self, token):
return self.parser.phases["inBody"].processStartTag(token)
def startTagOther(self, token):
self.parser.parseError("expected-eof-but-got-start-tag",
{"name": token["name"]})
self.parser.phase = self.parser.phases["inBody"]
return token
def processEndTag(self, token):
self.parser.parseError("expected-eof-but-got-end-tag",
{"name": token["name"]})
self.parser.phase = self.parser.phases["inBody"]
return token
class AfterAfterFramesetPhase(Phase):
def __init__(self, parser, tree):
Phase.__init__(self, parser, tree)
self.startTagHandler = utils.MethodDispatcher([
("html", self.startTagHtml),
("noframes", self.startTagNoFrames)
])
self.startTagHandler.default = self.startTagOther
def processEOF(self):
pass
def processComment(self, token):
self.tree.insertComment(token, self.tree.document)
def processSpaceCharacters(self, token):
return self.parser.phases["inBody"].processSpaceCharacters(token)
def processCharacters(self, token):
self.parser.parseError("expected-eof-but-got-char")
def startTagHtml(self, token):
return self.parser.phases["inBody"].processStartTag(token)
def startTagNoFrames(self, token):
return self.parser.phases["inHead"].processStartTag(token)
def startTagOther(self, token):
self.parser.parseError("expected-eof-but-got-start-tag",
{"name": token["name"]})
def processEndTag(self, token):
self.parser.parseError("expected-eof-but-got-end-tag",
{"name": token["name"]})
return {
"initial": InitialPhase,
"beforeHtml": BeforeHtmlPhase,
"beforeHead": BeforeHeadPhase,
"inHead": InHeadPhase,
# XXX "inHeadNoscript": InHeadNoScriptPhase,
"afterHead": AfterHeadPhase,
"inBody": InBodyPhase,
"text": TextPhase,
"inTable": InTablePhase,
"inTableText": InTableTextPhase,
"inCaption": InCaptionPhase,
"inColumnGroup": InColumnGroupPhase,
"inTableBody": InTableBodyPhase,
"inRow": InRowPhase,
"inCell": InCellPhase,
"inSelect": InSelectPhase,
"inSelectInTable": InSelectInTablePhase,
"inForeignContent": InForeignContentPhase,
"afterBody": AfterBodyPhase,
"inFrameset": InFramesetPhase,
"afterFrameset": AfterFramesetPhase,
"afterAfterBody": AfterAfterBodyPhase,
"afterAfterFrameset": AfterAfterFramesetPhase,
# XXX after after frameset
}
def impliedTagToken(name, type="EndTag", attributes=None,
selfClosing=False):
if attributes is None:
attributes = {}
return {"type": tokenTypes[type], "name": name, "data": attributes,
"selfClosing": selfClosing}
class ParseError(Exception):
"""Error in parsed document"""
pass
|
mit
|
gentledevil/ansible
|
lib/ansible/module_utils/ec2.py
|
67
|
8639
|
# This code is part of Ansible, but is an independent component.
# This particular file snippet, and this file snippet only, is BSD licensed.
# Modules you write using this snippet, which is embedded dynamically by Ansible
# still belong to the author of the module, and may assign their own license
# to the complete work.
#
# Copyright (c), Michael DeHaan <[email protected]>, 2012-2013
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# NOTE: this snippet is embedded into AWS modules at build time; it assumes the
# embedding module (or surrounding code) already provides `os`, `boto`,
# `boto.ec2` and, when boto3_conn() is used, `boto3`.
try:
from distutils.version import LooseVersion
HAS_LOOSE_VERSION = True
except ImportError:
HAS_LOOSE_VERSION = False
def boto3_conn(module, conn_type=None, resource=None, region=None, endpoint=None, **params):
if conn_type not in ['both', 'resource', 'client']:
module.fail_json(msg='There is an issue in the code of the module. You must specify either both, resource or client to the conn_type parameter in the boto3_conn function call')
resource = boto3.session.Session().resource(resource, region_name=region, endpoint_url=endpoint, **params)
client = resource.meta.client
if conn_type == 'resource':
return resource
elif conn_type == 'client':
return client
else:
return client, resource
def aws_common_argument_spec():
return dict(
ec2_url=dict(),
aws_secret_key=dict(aliases=['ec2_secret_key', 'secret_key'], no_log=True),
aws_access_key=dict(aliases=['ec2_access_key', 'access_key']),
validate_certs=dict(default=True, type='bool'),
security_token=dict(aliases=['access_token'], no_log=True),
profile=dict(),
)
def ec2_argument_spec():
spec = aws_common_argument_spec()
spec.update(
dict(
region=dict(aliases=['aws_region', 'ec2_region']),
)
)
return spec
def boto_supports_profile_name():
return hasattr(boto.ec2.EC2Connection, 'profile_name')
def get_aws_connection_info(module, boto3=False):
# Check module args for credentials, then check environment vars
# access_key
ec2_url = module.params.get('ec2_url')
access_key = module.params.get('aws_access_key')
secret_key = module.params.get('aws_secret_key')
security_token = module.params.get('security_token')
region = module.params.get('region')
profile_name = module.params.get('profile')
validate_certs = module.params.get('validate_certs')
if not ec2_url:
if 'AWS_URL' in os.environ:
ec2_url = os.environ['AWS_URL']
elif 'EC2_URL' in os.environ:
ec2_url = os.environ['EC2_URL']
if not access_key:
if 'AWS_ACCESS_KEY_ID' in os.environ:
access_key = os.environ['AWS_ACCESS_KEY_ID']
elif 'AWS_ACCESS_KEY' in os.environ:
access_key = os.environ['AWS_ACCESS_KEY']
elif 'EC2_ACCESS_KEY' in os.environ:
access_key = os.environ['EC2_ACCESS_KEY']
else:
# in case access_key came in as empty string
access_key = None
if not secret_key:
if 'AWS_SECRET_ACCESS_KEY' in os.environ:
secret_key = os.environ['AWS_SECRET_ACCESS_KEY']
elif 'AWS_SECRET_KEY' in os.environ:
secret_key = os.environ['AWS_SECRET_KEY']
elif 'EC2_SECRET_KEY' in os.environ:
secret_key = os.environ['EC2_SECRET_KEY']
else:
# in case secret_key came in as empty string
secret_key = None
if not region:
if 'AWS_REGION' in os.environ:
region = os.environ['AWS_REGION']
elif 'EC2_REGION' in os.environ:
region = os.environ['EC2_REGION']
else:
# boto.config.get returns None if config not found
region = boto.config.get('Boto', 'aws_region')
if not region:
region = boto.config.get('Boto', 'ec2_region')
if not security_token:
if 'AWS_SECURITY_TOKEN' in os.environ:
security_token = os.environ['AWS_SECURITY_TOKEN']
elif 'EC2_SECURITY_TOKEN' in os.environ:
security_token = os.environ['EC2_SECURITY_TOKEN']
else:
# in case security_token came in as empty string
security_token = None
if boto3:
boto_params = dict(aws_access_key_id=access_key,
aws_secret_access_key=secret_key,
aws_session_token=security_token)
if validate_certs:
boto_params['verify'] = validate_certs
if profile_name:
boto_params['profile_name'] = profile_name
else:
boto_params = dict(aws_access_key_id=access_key,
aws_secret_access_key=secret_key,
security_token=security_token)
# profile_name only works as a key in boto >= 2.24
# so only set profile_name if passed as an argument
if profile_name:
if not boto_supports_profile_name():
module.fail_json("boto does not support profile_name before 2.24")
boto_params['profile_name'] = profile_name
if validate_certs and HAS_LOOSE_VERSION and LooseVersion(boto.Version) >= LooseVersion("2.6.0"):
boto_params['validate_certs'] = validate_certs
return region, ec2_url, boto_params
def get_ec2_creds(module):
''' for compatibility mode with old modules that don't/can't yet
use ec2_connect method '''
region, ec2_url, boto_params = get_aws_connection_info(module)
return ec2_url, boto_params['aws_access_key_id'], boto_params['aws_secret_access_key'], region
def boto_fix_security_token_in_profile(conn, profile_name):
''' monkey patch for boto issue boto/boto#2100 '''
profile = 'profile ' + profile_name
if boto.config.has_option(profile, 'aws_security_token'):
conn.provider.set_security_token(boto.config.get(profile, 'aws_security_token'))
return conn
def connect_to_aws(aws_module, region, **params):
conn = aws_module.connect_to_region(region, **params)
if not conn:
if region not in [aws_module_region.name for aws_module_region in aws_module.regions()]:
raise StandardError("Region %s does not seem to be available for aws module %s. If the region definitely exists, you may need to upgrade boto or extend with endpoints_path" % (region, aws_module.__name__))
else:
raise StandardError("Unknown problem connecting to region %s for aws module %s." % (region, aws_module.__name__))
if params.get('profile_name'):
conn = boto_fix_security_token_in_profile(conn, params['profile_name'])
return conn
def ec2_connect(module):
""" Return an ec2 connection"""
region, ec2_url, boto_params = get_aws_connection_info(module)
# If we have a region specified, connect to its endpoint.
if region:
try:
ec2 = connect_to_aws(boto.ec2, region, **boto_params)
except (boto.exception.NoAuthHandlerFound, StandardError), e:
module.fail_json(msg=str(e))
# Otherwise, no region so we fallback to the old connection method
elif ec2_url:
try:
ec2 = boto.connect_ec2_endpoint(ec2_url, **boto_params)
except (boto.exception.NoAuthHandlerFound, StandardError), e:
module.fail_json(msg=str(e))
else:
module.fail_json(msg="Either region or ec2_url must be specified")
return ec2
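# Illustrative usage from a module that embeds this snippet (AnsibleModule is
# assumed to come from ansible.module_utils.basic and is not part of this file):
#   module = AnsibleModule(argument_spec=ec2_argument_spec())
#   region, ec2_url, boto_params = get_aws_connection_info(module)
#   ec2 = ec2_connect(module)
# ec2_connect() re-reads the connection info itself, so passing the module is enough.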
|
gpl-3.0
|
lmiccini/sos
|
sos/plugins/iprconfig.py
|
3
|
4670
|
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
# This plugin enables collection of logs for Power systems
import os
import re
from sos.plugins import Plugin, RedHatPlugin, UbuntuPlugin, DebianPlugin
from sos.utilities import is_executable
class IprConfig(Plugin, RedHatPlugin, UbuntuPlugin, DebianPlugin):
"""IBM Power RAID storage adapter configuration information
"""
plugin_name = 'iprconfig'
def check_enabled(self):
arch = self.policy().get_arch()
return arch == "ppc64" and is_executable("iprconfig")
def setup(self):
self.add_cmd_output([
"iprconfig -c show-config",
"iprconfig -c show-alt-config",
"iprconfig -c show-arrays",
"iprconfig -c show-jbod-disks",
"iprconfig -c show-ioas",
])
show_ioas = self.call_ext_prog("iprconfig -c show-ioas")
if not show_ioas['status'] == 0:
return
devices = []
if show_ioas['output']:
p = re.compile('sg')
for line in show_ioas['output'].splitlines():
temp = line.split(' ')
# temp[0] holds the device name
if p.search(temp[0]):
devices.append(temp[0])
for device in devices:
self.add_cmd_output("iprconfig -c show-details %s" % (device,))
# Look for IBM Power RAID enclosures (iprconfig lists them)
show_config = self.call_ext_prog("iprconfig -c show-config")
if not show_config['status'] == 0:
return
if not show_config['output']:
return
# iprconfig -c show-config
# Name PCI/SCSI Location Description Status
# ------ ------------------------- ------------------------- -----------------
# 0005:60:00.0/0: PCI-E SAS RAID Adapter Operational
# sda 0005:60:00.0/0:0:0:0 Physical Disk Active
# sdb 0005:60:00.0/0:1:0:0 Physical Disk Active
# sdc 0005:60:00.0/0:2:0:0 Physical Disk Active
# sdd 0005:60:00.0/0:3:0:0 Physical Disk Active
# sde 0005:60:00.0/0:4:0:0 Physical Disk Active
# sdf 0005:60:00.0/0:5:0:0 Physical Disk Active
# 0005:60:00.0/0:8:0:0 Enclosure Active
# 0005:60:00.0/0:8:1:0 Enclosure Active
show_alt_config = "iprconfig -c show-alt-config"
altconfig = self.call_ext_prog(show_alt_config)
if not (altconfig['status'] == 0):
return
if not altconfig['output']:
return
# iprconfig -c show-alt-config
# Name Resource Path/Address Vendor Product ID Status
# ------ -------------------------- -------- ---------------- -----------------
# sg9 0: IBM 57C7001SISIOA Operational
# sg0 0:0:0:0 IBM MBF2300RC Active
# sg1 0:1:0:0 IBM MBF2300RC Active
# sg2 0:2:0:0 IBM HUC106030CSS600 Active
# sg3 0:3:0:0 IBM HUC106030CSS600 Active
# sg4 0:4:0:0 IBM HUC106030CSS600 Active
# sg5 0:5:0:0 IBM HUC106030CSS600 Active
# sg7 0:8:0:0 IBM VSBPD6E4A 3GSAS Active
# sg8 0:8:1:0 IBM VSBPD6E4B 3GSAS Active
for line in show_config['output'].splitlines():
if "Enclosure" in line:
temp = re.split(r'\s+', line)
# temp[1] holds the PCI/SCSI location
pci, scsi = temp[1].split('/')
for line in altconfig['output'].splitlines():
if scsi in line:
temp = line.split(' ')
# temp[0] holds device name
self.add_cmd_output("iprconfig -c "
"query-ses-mode %s" % (temp[0],))
|
gpl-2.0
|
Threew/python-oauth2
|
example/server.py
|
375
|
7669
|
"""
The MIT License
Copyright (c) 2007 Leah Culver
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
from BaseHTTPServer import BaseHTTPRequestHandler, HTTPServer
import urllib
import oauth.oauth as oauth
# fake urls for the test server
REQUEST_TOKEN_URL = 'https://photos.example.net/request_token'
ACCESS_TOKEN_URL = 'https://photos.example.net/access_token'
AUTHORIZATION_URL = 'https://photos.example.net/authorize'
CALLBACK_URL = 'http://printer.example.com/request_token_ready'
RESOURCE_URL = 'http://photos.example.net/photos'
REALM = 'http://photos.example.net/'
VERIFIER = 'verifier'
# example store for one of each thing
class MockOAuthDataStore(oauth.OAuthDataStore):
def __init__(self):
self.consumer = oauth.OAuthConsumer('key', 'secret')
self.request_token = oauth.OAuthToken('requestkey', 'requestsecret')
self.access_token = oauth.OAuthToken('accesskey', 'accesssecret')
self.nonce = 'nonce'
self.verifier = VERIFIER
def lookup_consumer(self, key):
if key == self.consumer.key:
return self.consumer
return None
def lookup_token(self, token_type, token):
token_attrib = getattr(self, '%s_token' % token_type)
if token == token_attrib.key:
## HACK
token_attrib.set_callback(CALLBACK_URL)
return token_attrib
return None
def lookup_nonce(self, oauth_consumer, oauth_token, nonce):
if oauth_token and oauth_consumer.key == self.consumer.key and (oauth_token.key == self.request_token.key or oauth_token.key == self.access_token.key) and nonce == self.nonce:
return self.nonce
return None
def fetch_request_token(self, oauth_consumer, oauth_callback):
if oauth_consumer.key == self.consumer.key:
if oauth_callback:
# want to check here if callback is sensible
# for mock store, we assume it is
self.request_token.set_callback(oauth_callback)
return self.request_token
return None
def fetch_access_token(self, oauth_consumer, oauth_token, oauth_verifier):
if oauth_consumer.key == self.consumer.key and oauth_token.key == self.request_token.key and oauth_verifier == self.verifier:
# want to check here if token is authorized
# for mock store, we assume it is
return self.access_token
return None
def authorize_request_token(self, oauth_token, user):
if oauth_token.key == self.request_token.key:
# authorize the request token in the store
# for mock store, do nothing
return self.request_token
return None
class RequestHandler(BaseHTTPRequestHandler):
def __init__(self, *args, **kwargs):
self.oauth_server = oauth.OAuthServer(MockOAuthDataStore())
self.oauth_server.add_signature_method(oauth.OAuthSignatureMethod_PLAINTEXT())
self.oauth_server.add_signature_method(oauth.OAuthSignatureMethod_HMAC_SHA1())
BaseHTTPRequestHandler.__init__(self, *args, **kwargs)
# example way to send an oauth error
def send_oauth_error(self, err=None):
# send a 401 error
self.send_error(401, str(err.message))
# return the authenticate header
header = oauth.build_authenticate_header(realm=REALM)
for k, v in header.iteritems():
self.send_header(k, v)
def do_GET(self):
# debug info
#print self.command, self.path, self.headers
# get the post data (if any)
postdata = None
if self.command == 'POST':
try:
length = int(self.headers.getheader('content-length'))
postdata = self.rfile.read(length)
except:
pass
# construct the oauth request from the request parameters
oauth_request = oauth.OAuthRequest.from_request(self.command, self.path, headers=self.headers, query_string=postdata)
# request token
if self.path.startswith(REQUEST_TOKEN_URL):
try:
# create a request token
token = self.oauth_server.fetch_request_token(oauth_request)
# send okay response
self.send_response(200, 'OK')
self.end_headers()
# return the token
self.wfile.write(token.to_string())
except oauth.OAuthError, err:
self.send_oauth_error(err)
return
# user authorization
if self.path.startswith(AUTHORIZATION_URL):
try:
# get the request token
token = self.oauth_server.fetch_request_token(oauth_request)
# authorize the token (kind of does nothing for now)
token = self.oauth_server.authorize_token(token, None)
token.set_verifier(VERIFIER)
# send okay response
self.send_response(200, 'OK')
self.end_headers()
# return the callback url (to show server has it)
self.wfile.write(token.get_callback_url())
except oauth.OAuthError, err:
self.send_oauth_error(err)
return
# access token
if self.path.startswith(ACCESS_TOKEN_URL):
try:
# create an access token
token = self.oauth_server.fetch_access_token(oauth_request)
# send okay response
self.send_response(200, 'OK')
self.end_headers()
# return the token
self.wfile.write(token.to_string())
except oauth.OAuthError, err:
self.send_oauth_error(err)
return
# protected resources
if self.path.startswith(RESOURCE_URL):
try:
# verify the request has been oauth authorized
consumer, token, params = self.oauth_server.verify_request(oauth_request)
# send okay response
self.send_response(200, 'OK')
self.end_headers()
# return the extra parameters - just for something to return
self.wfile.write(str(params))
except oauth.OAuthError, err:
self.send_oauth_error(err)
return
def do_POST(self):
return self.do_GET()
def main():
try:
server = HTTPServer(('', 8080), RequestHandler)
print 'Test server running...'
server.serve_forever()
except KeyboardInterrupt:
server.socket.close()
if __name__ == '__main__':
main()
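# To exercise the stub (illustrative): run `python server.py` and point an OAuth
# 1.0 client at port 8080. Note that the handlers match the request path against
# the full fake URLs above via startswith(), so the client must request the
# complete URL (proxy-style). The mock credentials are consumer 'key'/'secret',
# request token 'requestkey'/'requestsecret' and access token 'accesskey'/'accesssecret'.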
|
mit
|
suutari/shoop
|
shuup/notify/template.py
|
1
|
3011
|
# -*- coding: utf-8 -*-
# This file is part of Shuup.
#
# Copyright (c) 2012-2016, Shoop Commerce Ltd. All rights reserved.
#
# This source code is licensed under the AGPLv3 license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import unicode_literals
from django.utils.encoding import force_text
from jinja2.sandbox import SandboxedEnvironment
class NoLanguageMatches(Exception):
pass
def render_in_context(context, template_text, html_intent=False):
"""
Render the given Jinja2 template text in the script context.
:param context: Script context.
:type context: shuup.notify.script.Context
:param template_text: Jinja2 template text.
:type template_text: str
:param html_intent: Is the template text intended for HTML output?
This currently turns on autoescaping.
:type html_intent: bool
:return: Rendered template text
:rtype: str
:raises: Whatever Jinja2 might happen to raise
"""
# TODO: Add some filters/globals into this environment?
env = SandboxedEnvironment(autoescape=html_intent)
template = env.from_string(template_text)
return template.render(context.get_variables())
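# A minimal illustration (hypothetical context object, not part of this module):
# if context.get_variables() returned {"name": "World"}, then
#   render_in_context(context, "Hello {{ name }}!")
# would render to "Hello World!" inside the sandboxed Jinja2 environment, and
# with html_intent=True markup in the variables would be autoescaped.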
class Template(object):
def __init__(self, context, data):
"""
:param context: Script context
:type context: shuup.notify.script.Context
:param data: Template data dictionary
:type data: dict
"""
self.context = context
self.data = data
def _get_language_data(self, language):
return self.data.get(force_text(language).lower(), {})
def has_language(self, language, fields):
data = self._get_language_data(language)
return set(data.keys()) >= set(fields)
def render(self, language, fields):
"""
Render this template in the given language,
returning the given fields.
:param language: Language code (ISO 639-1 or ISO 639-2)
:type language: str
:param fields: Desired fields to render.
:type fields: list[str]
:return: Dict of field -> rendered content.
:rtype: dict[str, str]
"""
data = self._get_language_data(language)
rendered = {}
for field in fields:
field_template = data.get(field)
if field_template: # pragma: no branch
rendered[field] = render_in_context(self.context, field_template, html_intent=False)
return rendered
def render_first_match(self, language_preferences, fields):
"""
Render the first language in `language_preferences` that has data for
all of the requested `fields`.
:return: Dict of field -> rendered content, with the matched language
added under the "_language" key.
:raises NoLanguageMatches: if no language covers all requested fields.
"""
for language in language_preferences:
if self.has_language(language, fields):
rendered = self.render(language=language, fields=fields)
rendered["_language"] = language
return rendered
raise NoLanguageMatches("No language in template matches any of languages %r for fields %r" % (
language_preferences, fields
))
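# Illustrative sketch (assumed data shape, not from this module):
#   template = Template(context, {"en": {"subject": "Hi {{ name }}"}, "fi": {}})
#   template.render_first_match(["fi", "en"], ["subject"])
# skips "fi" (it lacks the "subject" field), renders "en", and returns something
# like {"subject": "Hi ...", "_language": "en"}; with no matching language it
# raises NoLanguageMatches instead.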
|
agpl-3.0
|
leelasd/OPLS-AAM_for_Gromacs
|
GMX_TEST/GXG/N/NAMD_GMX_DIFF.py
|
45
|
1077
|
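# Term-by-term comparison of single-point energies between GROMACS and NAMD:
# reads the last frame of energy.xvg (kJ/mol, converted to kcal/mol via /4.184)
# and the "ENERGY: 0" line of LOG_NAMD, then prints the per-term differences.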
import os
from collections import OrderedDict
import sys
fil = open('energy.xvg').readlines()
GMX_dat = [float(f)/4.184 for f in fil[-1].split()[1:-1]]
nfil = open('LOG_NAMD').readlines()
for line in nfil:
if 'ENERGY: 0' in line:
NAMD_DAT = [float(f) for f in line.split()[2:12]]
break
#print(NAMD_DAT)
#print(GMX_dat)
print('GMX: %6.3f NAMD: %6.3f BOND_DIFF: %5.5f'%(GMX_dat[0],NAMD_DAT[0],GMX_dat[0]-NAMD_DAT[0]))
print('GMX: %6.3f NAMD: %6.3f ANGL_DIFF: %5.5f'%(GMX_dat[1],NAMD_DAT[1],GMX_dat[1]-NAMD_DAT[1]))
print('GMX: %6.3f NAMD: %6.3f TORS_DIFF: %5.5f'%(GMX_dat[2],NAMD_DAT[2],GMX_dat[2]-NAMD_DAT[2]))
print('GMX: %6.3f NAMD: %6.3f IMPR_DIFF: %5.5f'%(GMX_dat[3],NAMD_DAT[3],GMX_dat[3]-NAMD_DAT[3]))
print('GMX: %6.3f NAMD: %6.3f ELEC_DIFF: %5.5f'%(GMX_dat[5]+GMX_dat[7],NAMD_DAT[4],(GMX_dat[5]+GMX_dat[7])-(NAMD_DAT[4])))
print('GMX: %6.3f NAMD: %6.3f VDWL_DIFF: %5.5f'%(GMX_dat[4]+GMX_dat[6],NAMD_DAT[5],GMX_dat[4]+GMX_dat[6]-NAMD_DAT[5]))
print('GMX: %6.3f NAMD: %6.3f TOTL_DIFF: %5.5f'%(GMX_dat[8],NAMD_DAT[9],GMX_dat[8]-NAMD_DAT[9]))
|
mit
|
Jgarcia-IAS/ReporsitorioVacioOdoo
|
openerp/addons/hr_contract/__init__.py
|
381
|
1107
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-Today OpenERP SA (<http://www.openerp.com>)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import hr_contract
import base_action_rule
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
agpl-3.0
|
pomegranited/edx-platform
|
common/djangoapps/terrain/stubs/ora.py
|
119
|
17323
|
"""
Stub implementation of ORA service.
This is an extremely simple version of the service, with most
business logic removed. In particular, the stub:
1) Provides an infinite number of peer and calibration essays,
with dummy data.
2) Simulates a set number of pending submissions for each student;
grades submitted by one student are not used for any other student.
3) Ignores the scores/feedback students submit.
4) Ignores problem location: an essay graded for *any* problem is graded
for *every* problem.
Basically, the stub tracks only the *number* of peer/calibration essays
submitted by each student.
"""
import json
import pkg_resources
from .http import StubHttpRequestHandler, StubHttpService, require_params
class StudentState(object):
"""
Store state about the student that the stub
ORA implementation needs to keep track of.
"""
INITIAL_ESSAYS_AVAILABLE = 3
NUM_ESSAYS_REQUIRED = 1
NUM_CALIBRATION_REQUIRED = 1
def __init__(self):
self.num_graded = 0
self.num_calibrated = 0
def grade_peer_essay(self):
self.num_graded += 1
def grade_calibration_essay(self):
self.num_calibrated += 1
@property
def num_pending(self):
return max(self.INITIAL_ESSAYS_AVAILABLE - self.num_graded, 0)
@property
def num_required(self):
return max(self.NUM_ESSAYS_REQUIRED - self.num_graded, 0)
@property
def is_calibrated(self):
return self.num_calibrated >= self.NUM_CALIBRATION_REQUIRED
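# Illustrative behaviour of the counters above: a fresh StudentState() reports
# num_pending == 3 (INITIAL_ESSAYS_AVAILABLE) and num_required == 1; after one
# grade_peer_essay() call num_required drops to 0 and num_pending to 2, and
# is_calibrated flips to True once grade_calibration_essay() has been called
# NUM_CALIBRATION_REQUIRED (1) time.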
class StubOraHandler(StubHttpRequestHandler):
"""
Handler for ORA requests.
"""
GET_URL_HANDLERS = {
'/peer_grading/get_next_submission': '_get_next_submission',
'/peer_grading/is_student_calibrated': '_is_student_calibrated',
'/peer_grading/show_calibration_essay': '_show_calibration_essay',
'/peer_grading/get_notifications': '_get_notifications',
'/peer_grading/get_data_for_location': '_get_data_for_location',
'/peer_grading/get_problem_list': '_get_problem_list',
}
POST_URL_HANDLERS = {
'/peer_grading/save_grade': '_save_grade',
'/peer_grading/save_calibration_essay': '_save_calibration_essay',
# Test-specific, used by the XQueue stub to register a new submission,
# which we use to discover valid problem locations in the LMS
'/test/register_submission': '_register_submission'
}
def do_GET(self):
"""
Handle GET methods to the ORA API stub.
"""
self._send_handler_response('GET')
def do_POST(self):
"""
Handle POST methods to the ORA API stub.
"""
self._send_handler_response('POST')
def _send_handler_response(self, method):
"""
Delegate response to handler methods.
If no handler defined, send a 404 response.
"""
# Choose the list of handlers based on the HTTP method
if method == 'GET':
handler_list = self.GET_URL_HANDLERS
elif method == 'POST':
handler_list = self.POST_URL_HANDLERS
else:
self.log_error('Unrecognized method "{method}"'.format(method=method))
return
# Check the path (without querystring params) against our list of handlers
handler_name = handler_list.get(self.path_only)
if handler_name is not None:
handler = getattr(self, handler_name, None)
else:
handler = None
# Delegate to the handler to send a response
if handler is not None:
handler()
# If we don't have a handler for this URL and/or HTTP method,
# respond with a 404. This is the same behavior as the ORA API.
else:
self.send_response(404)
@require_params('GET', 'student_id', 'problem_id')
def _is_student_calibrated(self):
"""
Query whether the student has completed enough calibration
essays to begin peer grading.
Method: GET
Params:
- student_id
- problem_id
Result (JSON):
- success (bool)
- total_calibrated_on_so_far (int)
- calibrated (bool)
"""
student = self._student('GET')
if student is None:
self._error_response()
else:
self._success_response({
'total_calibrated_on_so_far': student.num_calibrated,
'calibrated': student.is_calibrated
})
@require_params('GET', 'student_id', 'problem_id')
def _show_calibration_essay(self):
"""
Retrieve a calibration essay for the student to grade.
Method: GET
Params:
- student_id
- problem_id
Result (JSON):
- success (bool)
- submission_id (str)
- submission_key (str)
- student_response (str)
- prompt (str)
- rubric (str)
- max_score (int)
"""
self._success_response({
'submission_id': self.server.DUMMY_DATA['submission_id'],
'submission_key': self.server.DUMMY_DATA['submission_key'],
'student_response': self.server.DUMMY_DATA['student_response'],
'prompt': self.server.DUMMY_DATA['prompt'],
'rubric': self.server.DUMMY_DATA['rubric'],
'max_score': self.server.DUMMY_DATA['max_score']
})
@require_params('GET', 'student_id', 'course_id')
def _get_notifications(self):
"""
Query counts of submitted, required, graded, and available peer essays
for a particular student.
Method: GET
Params:
- student_id
- course_id
Result (JSON):
- success (bool)
- student_sub_count (int)
- count_required (int)
- count_graded (int)
- count_available (int)
"""
student = self._student('GET')
if student is None:
self._error_response()
else:
self._success_response({
'student_sub_count': self.server.DUMMY_DATA['student_sub_count'],
'count_required': student.num_required,
'count_graded': student.num_graded,
'count_available': student.num_pending
})
@require_params('GET', 'student_id', 'location')
def _get_data_for_location(self):
"""
Query counts of submitted, required, graded, and available peer essays
for a problem location.
This will send an error response if the problem has not
been registered at the given `location`. This allows us
to ignore problems that are self- or ai-graded.
Method: GET
Params:
- student_id
- location
Result (JSON):
- success (bool)
- student_sub_count (int)
- count_required (int)
- count_graded (int)
- count_available (int)
"""
student = self._student('GET')
location = self.get_params.get('location')
# Do not return data if we're missing the student param
# or the problem has not yet been registered.
if student is None or location not in self.server.problems:
self._error_response()
else:
self._success_response({
'student_sub_count': self.server.DUMMY_DATA['student_sub_count'],
'count_required': student.num_required,
'count_graded': student.num_graded,
'count_available': student.num_pending
})
@require_params('GET', 'grader_id', 'location')
def _get_next_submission(self):
"""
Retrieve the next submission for the student to peer-grade.
Method: GET
Params:
- grader_id
- location
Result (JSON):
- success (bool)
- submission_id (str)
- submission_key (str)
- student_response (str)
- prompt (str, HTML)
- rubric (str, XML)
- max_score (int)
"""
self._success_response({
'submission_id': self.server.DUMMY_DATA['submission_id'],
'submission_key': self.server.DUMMY_DATA['submission_key'],
'student_response': self.server.DUMMY_DATA['student_response'],
'prompt': self.server.DUMMY_DATA['prompt'],
'rubric': self.server.DUMMY_DATA['rubric'],
'max_score': self.server.DUMMY_DATA['max_score']
})
@require_params('GET', 'course_id')
def _get_problem_list(self):
"""
Retrieve the list of problems available for peer grading.
Method: GET
Params:
- course_id
Result (JSON):
- success (bool)
- problem_list (list)
where `problem_list` is a list of dictionaries with keys:
- location (str)
- problem_name (str)
- num_graded (int)
- num_pending (int)
- num_required (int)
"""
self._success_response({'problem_list': self.server.problem_list})
@require_params('POST', 'grader_id', 'location', 'submission_id', 'score', 'feedback', 'submission_key')
def _save_grade(self):
"""
Save a score and feedback for an essay the student has graded.
Method: POST
Params:
- grader_id
- location
- submission_id
- score
- feedback
- submission_key
Result (JSON):
- success (bool)
"""
student = self._student('POST', key='grader_id')
if student is None:
self._error_response()
else:
# Update the number of essays the student has graded
student.grade_peer_essay()
return self._success_response({})
@require_params('POST', 'student_id', 'location', 'calibration_essay_id', 'score', 'feedback', 'submission_key')
def _save_calibration_essay(self):
"""
Save a score and feedback for a calibration essay the student has graded.
Returns the scores/feedback that the instructor gave for the essay.
Method: POST
Params:
- student_id
- location
- calibration_essay_id
- score
- feedback
- submission_key
Result (JSON):
- success (bool)
- message (str)
- actual_score (int)
- actual_rubric (str, XML)
- actual_feedback (str)
"""
student = self._student('POST')
if student is None:
self._error_response()
else:
# Increment the student calibration count
student.grade_calibration_essay()
self._success_response({
'message': self.server.DUMMY_DATA['message'],
'actual_score': self.server.DUMMY_DATA['actual_score'],
'actual_rubric': self.server.DUMMY_DATA['actual_rubric'],
'actual_feedback': self.server.DUMMY_DATA['actual_feedback']
})
@require_params('POST', 'grader_payload')
def _register_submission(self):
"""
Test-specific method to register a new submission.
This is used by `get_problem_list` to return valid locations in the LMS courseware.
In tests, this end-point gets called by the XQueue stub when it receives new submissions,
much like ORA discovers locations when students submit peer-graded problems to the XQueue.
Since the LMS sends *all* open-ended problems to the XQueue (including self- and ai-graded),
we have to ignore everything except peer-graded problems. We do so by looking
for the text 'peer' in the problem's name. This is a little bit of a hack,
but it makes the implementation much simpler.
Method: POST
Params:
- grader_payload (JSON dict)
Result: Empty
The only keys we use in `grader_payload` are 'location' and 'problem_id'.
"""
# Since this is a required param, we know it is in the post dict
try:
payload = json.loads(self.post_dict['grader_payload'])
except ValueError:
self.log_message(
"Could not decode grader payload as JSON: '{0}'".format(
self.post_dict['grader_payload']))
self.send_response(400)
else:
location = payload.get('location')
name = payload.get('problem_id')
if location is not None and name is not None:
if "peer" in name.lower():
self.server.register_problem(location, name)
self.send_response(200)
else:
self.log_message(
"Problem '{0}' does not have 'peer' in its name. Ignoring...".format(name)
)
self.send_response(200)
else:
self.log_message(
"Grader payload should contain 'location' and 'problem_id' keys: {0}".format(payload)
)
self.send_response(400)
def _student(self, method, key='student_id'):
"""
Return the `StudentState` instance for the student ID given
in the request parameters.
`method` is the HTTP request method (either "GET" or "POST")
and `key` is the parameter key.
"""
if method == 'GET':
student_id = self.get_params.get(key)
elif method == 'POST':
student_id = self.post_dict.get(key)
else:
self.log_error("Unrecognized method '{method}'".format(method=method))
return None
if student_id is None:
self.log_error("Could not get student ID from parameters")
return None
return self.server.student_state(student_id)
def _success_response(self, response_dict):
"""
Send a success response.
`response_dict` is a Python dictionary to JSON-encode.
"""
response_dict['success'] = True
response_dict['version'] = 1
self.send_response(
200, content=json.dumps(response_dict),
headers={'Content-type': 'application/json'}
)
def _error_response(self):
"""
Send an error response.
"""
response_dict = {'success': False, 'version': 1}
self.send_response(
400, content=json.dumps(response_dict),
headers={'Content-type': 'application/json'}
)
class StubOraService(StubHttpService):
"""
Stub ORA service.
"""
HANDLER_CLASS = StubOraHandler
DUMMY_DATA = {
'submission_id': 1,
'submission_key': 'test key',
'student_response': 'Test response',
'prompt': 'Test prompt',
'rubric': pkg_resources.resource_string(__name__, "data/ora_rubric.xml"),
'max_score': 2,
'message': 'Successfully saved calibration record.',
'actual_score': 2,
'actual_rubric': pkg_resources.resource_string(__name__, "data/ora_graded_rubric.xml"),
'actual_feedback': 'Great job!',
'student_sub_count': 1,
'problem_name': 'test problem',
'problem_list_num_graded': 1,
'problem_list_num_pending': 1,
'problem_list_num_required': 0,
}
def __init__(self, *args, **kwargs):
"""
Initialize student submission state.
"""
super(StubOraService, self).__init__(*args, **kwargs)
# Create a dict to map student ID's to their state
self._students = dict()
# By default, no problems are available for peer grading
# You can add to this list using the `register_location` HTTP end-point
# This is a dict mapping problem locations to problem names
self.problems = dict()
def student_state(self, student_id):
"""
Return the `StudentState` instance for the student
with ID `student_id`. The student state can be modified by the caller.
"""
# Create the student state if it does not already exist
if student_id not in self._students:
student = StudentState()
self._students[student_id] = student
# Retrieve the student state
return self._students[student_id]
@property
def problem_list(self):
"""
Return a list of problems available for peer grading.
"""
return [{
'location': location, 'problem_name': name,
'num_graded': self.DUMMY_DATA['problem_list_num_graded'],
'num_pending': self.DUMMY_DATA['problem_list_num_pending'],
'num_required': self.DUMMY_DATA['problem_list_num_required']
} for location, name in self.problems.items()]
def register_problem(self, location, name):
"""
Register a new problem with `location` and `name` for peer grading.
"""
self.problems[location] = name
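# Sketch of the registration flow (hypothetical location string): after
#   service.register_problem('i4x://org/course/combinedopenended/peer_1', 'Peer essay')
# problem_list contains a single entry with that location and name plus the
# DUMMY_DATA counts (num_graded=1, num_pending=1, num_required=0), which is what
# the LMS sees when it asks the stub for gradable problems.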
|
agpl-3.0
|
GbalsaC/bitnamiP
|
venv/lib/python2.7/site-packages/social/backends/exacttarget.py
|
70
|
4029
|
"""
ExactTarget OAuth support.
Support Authentication from IMH using JWT token and pre-shared key.
Requires package pyjwt
"""
from datetime import timedelta, datetime
import jwt
from social.exceptions import AuthFailed, AuthCanceled
from social.backends.oauth import BaseOAuth2
class ExactTargetOAuth2(BaseOAuth2):
name = 'exacttarget'
def get_user_details(self, response):
"""Use the email address of the user, suffixed by _et"""
user = response.get('token', {})\
.get('request', {})\
.get('user', {})
if 'email' in user:
user['username'] = user['email']
return user
def get_user_id(self, details, response):
"""
Create a user ID from the ET user ID. Uses details rather than the
default response, as only the token is available in response. details
is much richer:
{
'expiresIn': 1200,
'username': '[email protected]',
'refreshToken': '1234567890abcdef',
'internalOauthToken': 'jwttoken.......',
'oauthToken': 'yetanothertoken',
'id': 123456,
'culture': 'en-US',
'timezone': {
'shortName': 'CST',
'offset': -6.0,
'dst': False,
'longName': '(GMT-06:00) Central Time (No Daylight Saving)'
},
'email': '[email protected]'
}
"""
return '{0}'.format(details.get('id'))
def uses_redirect(self):
return False
def auth_url(self):
return None
def process_error(self, data):
if data.get('error'):
error = self.data.get('error_description') or self.data['error']
raise AuthFailed(self, error)
def do_auth(self, token, *args, **kwargs):
dummy, secret = self.get_key_and_secret()
try: # Decode the token, using the Application Signature from settings
decoded = jwt.decode(token, secret, algorithms=['HS256'])
except jwt.DecodeError: # Wrong signature, fail authentication
raise AuthCanceled(self)
kwargs.update({'response': {'token': decoded}, 'backend': self})
return self.strategy.authenticate(*args, **kwargs)
def auth_complete(self, *args, **kwargs):
"""Completes login process, must return user instance"""
token = self.data.get('jwt', {})
if not token:
raise AuthFailed(self, 'Authentication Failed')
return self.do_auth(token, *args, **kwargs)
def extra_data(self, user, uid, response, details=None, *args, **kwargs):
"""Load extra details from the JWT token"""
data = {
'id': details.get('id'),
'email': details.get('email'),
# OAuth token, for use with legacy SOAP API calls:
# http://bit.ly/13pRHfo
'internalOauthToken': details.get('internalOauthToken'),
# Token for use with the Application ClientID for the FUEL API
'oauthToken': details.get('oauthToken'),
# If the token has expired, use the FUEL API to get a new token see
# http://bit.ly/10v1K5l and http://bit.ly/11IbI6F - set legacy=1
'refreshToken': details.get('refreshToken'),
}
# The expiresIn value determines how long the tokens are valid for.
# Take a bit off, then convert to an int timestamp
expiresSeconds = details.get('expiresIn', 0) - 30
expires = datetime.utcnow() + timedelta(seconds=expiresSeconds)
data['expires'] = (expires - datetime(1970, 1, 1)).total_seconds()
if response.get('token'):
token = response['token']
org = token.get('request', {}).get('organization')
if org:
data['stack'] = org.get('stackKey')
data['enterpriseId'] = org.get('enterpriseId')
return data
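# Illustrative round trip (assumed payload and application signature, not part
# of this backend): a token produced with
#   jwt.encode({'request': {'user': {'id': 123, 'email': '[email protected]'}}},
#              'app-signature', algorithm='HS256')
# and posted as the 'jwt' form field would be decoded by do_auth() with the same
# secret (the backend's configured application signature) and handed to
# strategy.authenticate() as response['token'].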
|
agpl-3.0
|
hoho/dosido
|
nodejs/deps/v8/third_party/jinja2/compiler.py
|
335
|
63846
|
# -*- coding: utf-8 -*-
"""
jinja2.compiler
~~~~~~~~~~~~~~~
Compiles nodes into python code.
:copyright: (c) 2010 by the Jinja Team.
:license: BSD, see LICENSE for more details.
"""
from itertools import chain
from copy import deepcopy
from keyword import iskeyword as is_python_keyword
from jinja2 import nodes
from jinja2.nodes import EvalContext
from jinja2.visitor import NodeVisitor
from jinja2.exceptions import TemplateAssertionError
from jinja2.utils import Markup, concat, escape
from jinja2._compat import range_type, text_type, string_types, \
iteritems, NativeStringIO, imap
operators = {
'eq': '==',
'ne': '!=',
'gt': '>',
'gteq': '>=',
'lt': '<',
'lteq': '<=',
'in': 'in',
'notin': 'not in'
}
# which method do we want to use for dict iteration in generated code?
# on 2.x let's go with iteritems, on 3.x with items
if hasattr(dict, 'iteritems'):
dict_item_iter = 'iteritems'
else:
dict_item_iter = 'items'
# does if 0: dummy(x) get us x into the scope?
def unoptimize_before_dead_code():
x = 42
def f():
if 0: dummy(x)
return f
# The getattr is necessary for pypy which does not set this attribute if
# no closure is on the function
unoptimize_before_dead_code = bool(
getattr(unoptimize_before_dead_code(), '__closure__', None))
def generate(node, environment, name, filename, stream=None,
defer_init=False):
"""Generate the python source for a node tree."""
if not isinstance(node, nodes.Template):
raise TypeError('Can\'t compile non template nodes')
generator = environment.code_generator_class(environment, name, filename,
stream, defer_init)
generator.visit(node)
if stream is None:
return generator.stream.getvalue()
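# Minimal usage sketch (assuming an already configured jinja2.Environment named
# env, which is not defined here):
#   source = generate(env.parse('Hello {{ name }}!'), env, 'tmpl', 'tmpl.html')
# returns the generated Python module source as a string, because no stream was
# passed; this is essentially what Environment.compile() drives internally.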
def has_safe_repr(value):
"""Does the node have a safe representation?"""
if value is None or value is NotImplemented or value is Ellipsis:
return True
if isinstance(value, (bool, int, float, complex, range_type,
Markup) + string_types):
return True
if isinstance(value, (tuple, list, set, frozenset)):
for item in value:
if not has_safe_repr(item):
return False
return True
elif isinstance(value, dict):
for key, value in iteritems(value):
if not has_safe_repr(key):
return False
if not has_safe_repr(value):
return False
return True
return False
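# For illustration: has_safe_repr(('a', 1, True)) and has_safe_repr({'x': [1, 2]})
# are True, while has_safe_repr(object()) is False; the compiler uses this to
# decide whether a constant can safely be written out via repr().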
def find_undeclared(nodes, names):
"""Check if the names passed are accessed undeclared. The return value
is a set of all the undeclared names from the sequence of names found.
"""
visitor = UndeclaredNameVisitor(names)
try:
for node in nodes:
visitor.visit(node)
except VisitorExit:
pass
return visitor.undeclared
class Identifiers(object):
"""Tracks the status of identifiers in frames."""
def __init__(self):
# variables that are known to be declared (probably from outer
# frames or because they are special for the frame)
self.declared = set()
# undeclared variables from outer scopes
self.outer_undeclared = set()
# names that are accessed without being explicitly declared by
# this one or any of the outer scopes. Names can appear both in
# declared and undeclared.
self.undeclared = set()
# names that are declared locally
self.declared_locally = set()
# names that are declared by parameters
self.declared_parameter = set()
def add_special(self, name):
"""Register a special name like `loop`."""
self.undeclared.discard(name)
self.declared.add(name)
def is_declared(self, name):
"""Check if a name is declared in this or an outer scope."""
if name in self.declared_locally or name in self.declared_parameter:
return True
return name in self.declared
def copy(self):
return deepcopy(self)
class Frame(object):
"""Holds compile time information for us."""
def __init__(self, eval_ctx, parent=None):
self.eval_ctx = eval_ctx
self.identifiers = Identifiers()
# a toplevel frame is the root + soft frames such as if conditions.
self.toplevel = False
# the root frame is basically just the outermost frame, so no if
# conditions. This information is used to optimize inheritance
# situations.
self.rootlevel = False
# in some dynamic inheritance situations the compiler needs to add
# write tests around output statements.
self.require_output_check = parent and parent.require_output_check
# inside some tags we are using a buffer rather than yield statements.
# this for example affects {% filter %} or {% macro %}. If a frame
# is buffered this variable points to the name of the list used as
# buffer.
self.buffer = None
# the name of the block we're in, otherwise None.
self.block = parent and parent.block or None
# a set of actually assigned names
self.assigned_names = set()
# the parent of this frame
self.parent = parent
if parent is not None:
self.identifiers.declared.update(
parent.identifiers.declared |
parent.identifiers.declared_parameter |
parent.assigned_names
)
self.identifiers.outer_undeclared.update(
parent.identifiers.undeclared -
self.identifiers.declared
)
self.buffer = parent.buffer
def copy(self):
"""Create a copy of the current one."""
rv = object.__new__(self.__class__)
rv.__dict__.update(self.__dict__)
rv.identifiers = object.__new__(self.identifiers.__class__)
rv.identifiers.__dict__.update(self.identifiers.__dict__)
return rv
def inspect(self, nodes):
"""Walk the node and check for identifiers. If the scope is hard (eg:
enforce on a python level) overrides from outer scopes are tracked
differently.
"""
visitor = FrameIdentifierVisitor(self.identifiers)
for node in nodes:
visitor.visit(node)
def find_shadowed(self, extra=()):
"""Find all the shadowed names. extra is an iterable of variables
that may be defined with `add_special` and which may occur scoped.
"""
i = self.identifiers
return (i.declared | i.outer_undeclared) & \
(i.declared_locally | i.declared_parameter) | \
set(x for x in extra if i.is_declared(x))
def inner(self):
"""Return an inner frame."""
return Frame(self.eval_ctx, self)
def soft(self):
"""Return a soft frame. A soft frame may not be modified as
standalone thing as it shares the resources with the frame it
was created of, but it's not a rootlevel frame any longer.
"""
rv = self.copy()
rv.rootlevel = False
return rv
__copy__ = copy
class VisitorExit(RuntimeError):
"""Exception used by the `UndeclaredNameVisitor` to signal a stop."""
class DependencyFinderVisitor(NodeVisitor):
"""A visitor that collects filter and test calls."""
def __init__(self):
self.filters = set()
self.tests = set()
def visit_Filter(self, node):
self.generic_visit(node)
self.filters.add(node.name)
def visit_Test(self, node):
self.generic_visit(node)
self.tests.add(node.name)
def visit_Block(self, node):
"""Stop visiting at blocks."""
class UndeclaredNameVisitor(NodeVisitor):
"""A visitor that checks if a name is accessed without being
declared. This is different from the frame visitor as it will
not stop at closure frames.
"""
def __init__(self, names):
self.names = set(names)
self.undeclared = set()
def visit_Name(self, node):
if node.ctx == 'load' and node.name in self.names:
self.undeclared.add(node.name)
if self.undeclared == self.names:
raise VisitorExit()
else:
self.names.discard(node.name)
def visit_Block(self, node):
"""Stop visiting a blocks."""
class FrameIdentifierVisitor(NodeVisitor):
"""A visitor for `Frame.inspect`."""
def __init__(self, identifiers):
self.identifiers = identifiers
def visit_Name(self, node):
"""All assignments to names go through this function."""
if node.ctx == 'store':
self.identifiers.declared_locally.add(node.name)
elif node.ctx == 'param':
self.identifiers.declared_parameter.add(node.name)
elif node.ctx == 'load' and not \
self.identifiers.is_declared(node.name):
self.identifiers.undeclared.add(node.name)
def visit_If(self, node):
self.visit(node.test)
real_identifiers = self.identifiers
old_names = real_identifiers.declared_locally | \
real_identifiers.declared_parameter
def inner_visit(nodes):
if not nodes:
return set()
self.identifiers = real_identifiers.copy()
for subnode in nodes:
self.visit(subnode)
rv = self.identifiers.declared_locally - old_names
# we have to remember the undeclared variables of this branch
# because we will have to pull them.
real_identifiers.undeclared.update(self.identifiers.undeclared)
self.identifiers = real_identifiers
return rv
body = inner_visit(node.body)
else_ = inner_visit(node.else_ or ())
# the differences between the two branches are also pulled as
# undeclared variables
real_identifiers.undeclared.update(body.symmetric_difference(else_) -
real_identifiers.declared)
# remember those that are declared.
real_identifiers.declared_locally.update(body | else_)
def visit_Macro(self, node):
self.identifiers.declared_locally.add(node.name)
def visit_Import(self, node):
self.generic_visit(node)
self.identifiers.declared_locally.add(node.target)
def visit_FromImport(self, node):
self.generic_visit(node)
for name in node.names:
if isinstance(name, tuple):
self.identifiers.declared_locally.add(name[1])
else:
self.identifiers.declared_locally.add(name)
def visit_Assign(self, node):
"""Visit assignments in the correct order."""
self.visit(node.node)
self.visit(node.target)
def visit_For(self, node):
"""Visiting stops at for blocks. However the block sequence
is visited as part of the outer scope.
"""
self.visit(node.iter)
def visit_CallBlock(self, node):
self.visit(node.call)
def visit_FilterBlock(self, node):
self.visit(node.filter)
def visit_AssignBlock(self, node):
"""Stop visiting at block assigns."""
def visit_Scope(self, node):
"""Stop visiting at scopes."""
def visit_Block(self, node):
"""Stop visiting at blocks."""
class CompilerExit(Exception):
"""Raised if the compiler encountered a situation where it just
doesn't make sense to further process the code. Any block that
raises such an exception is not further processed.
"""
class CodeGenerator(NodeVisitor):
def __init__(self, environment, name, filename, stream=None,
defer_init=False):
if stream is None:
stream = NativeStringIO()
self.environment = environment
self.name = name
self.filename = filename
self.stream = stream
self.created_block_context = False
self.defer_init = defer_init
# aliases for imports
self.import_aliases = {}
# a registry for all blocks. Because blocks are moved out
# into the global python scope they are registered here
self.blocks = {}
# the number of extends statements so far
self.extends_so_far = 0
# some templates have a rootlevel extends. In this case we
# can safely assume that we're a child template and do some
# more optimizations.
self.has_known_extends = False
# the current line number
self.code_lineno = 1
# registry of all filters and tests (global, not block local)
self.tests = {}
self.filters = {}
# the debug information
self.debug_info = []
self._write_debug_info = None
# the number of new lines before the next write()
self._new_lines = 0
# the line number of the last written statement
self._last_line = 0
# true if nothing was written so far.
self._first_write = True
# used by the `temporary_identifier` method to get new
# unique, temporary identifier
self._last_identifier = 0
# the current indentation
self._indentation = 0
# -- Various compilation helpers
def fail(self, msg, lineno):
"""Fail with a :exc:`TemplateAssertionError`."""
raise TemplateAssertionError(msg, lineno, self.name, self.filename)
def temporary_identifier(self):
"""Get a new unique identifier."""
self._last_identifier += 1
return 't_%d' % self._last_identifier
def buffer(self, frame):
"""Enable buffering for the frame from that point onwards."""
frame.buffer = self.temporary_identifier()
self.writeline('%s = []' % frame.buffer)
def return_buffer_contents(self, frame):
"""Return the buffer contents of the frame."""
if frame.eval_ctx.volatile:
self.writeline('if context.eval_ctx.autoescape:')
self.indent()
self.writeline('return Markup(concat(%s))' % frame.buffer)
self.outdent()
self.writeline('else:')
self.indent()
self.writeline('return concat(%s)' % frame.buffer)
self.outdent()
elif frame.eval_ctx.autoescape:
self.writeline('return Markup(concat(%s))' % frame.buffer)
else:
self.writeline('return concat(%s)' % frame.buffer)
def indent(self):
"""Indent by one."""
self._indentation += 1
def outdent(self, step=1):
"""Outdent by step."""
self._indentation -= step
def start_write(self, frame, node=None):
"""Yield or write into the frame buffer."""
if frame.buffer is None:
self.writeline('yield ', node)
else:
self.writeline('%s.append(' % frame.buffer, node)
def end_write(self, frame):
"""End the writing process started by `start_write`."""
if frame.buffer is not None:
self.write(')')
def simple_write(self, s, frame, node=None):
"""Simple shortcut for start_write + write + end_write."""
self.start_write(frame, node)
self.write(s)
self.end_write(frame)
def blockvisit(self, nodes, frame):
"""Visit a list of nodes as block in a frame. If the current frame
is no buffer a dummy ``if 0: yield None`` is written automatically
unless the force_generator parameter is set to False.
"""
if frame.buffer is None:
self.writeline('if 0: yield None')
else:
self.writeline('pass')
try:
for node in nodes:
self.visit(node, frame)
except CompilerExit:
pass
def write(self, x):
"""Write a string into the output stream."""
if self._new_lines:
if not self._first_write:
self.stream.write('\n' * self._new_lines)
self.code_lineno += self._new_lines
if self._write_debug_info is not None:
self.debug_info.append((self._write_debug_info,
self.code_lineno))
self._write_debug_info = None
self._first_write = False
self.stream.write(' ' * self._indentation)
self._new_lines = 0
self.stream.write(x)
def writeline(self, x, node=None, extra=0):
"""Combination of newline and write."""
self.newline(node, extra)
self.write(x)
def newline(self, node=None, extra=0):
"""Add one or more newlines before the next write."""
self._new_lines = max(self._new_lines, 1 + extra)
if node is not None and node.lineno != self._last_line:
self._write_debug_info = node.lineno
self._last_line = node.lineno
def signature(self, node, frame, extra_kwargs=None):
"""Writes a function call to the stream for the current node.
A leading comma is added automatically. The extra keyword
arguments may not include python keywords, otherwise a syntax
error could occur. The extra keyword arguments should be given
as a python dict.
"""
# if any of the given keyword arguments is a python keyword
# we have to make sure that no invalid call is created.
kwarg_workaround = False
for kwarg in chain((x.key for x in node.kwargs), extra_kwargs or ()):
if is_python_keyword(kwarg):
kwarg_workaround = True
break
for arg in node.args:
self.write(', ')
self.visit(arg, frame)
if not kwarg_workaround:
for kwarg in node.kwargs:
self.write(', ')
self.visit(kwarg, frame)
if extra_kwargs is not None:
for key, value in iteritems(extra_kwargs):
self.write(', %s=%s' % (key, value))
if node.dyn_args:
self.write(', *')
self.visit(node.dyn_args, frame)
if kwarg_workaround:
if node.dyn_kwargs is not None:
self.write(', **dict({')
else:
self.write(', **{')
for kwarg in node.kwargs:
self.write('%r: ' % kwarg.key)
self.visit(kwarg.value, frame)
self.write(', ')
if extra_kwargs is not None:
for key, value in iteritems(extra_kwargs):
self.write('%r: %s, ' % (key, value))
if node.dyn_kwargs is not None:
self.write('}, **')
self.visit(node.dyn_kwargs, frame)
self.write(')')
else:
self.write('}')
elif node.dyn_kwargs is not None:
self.write(', **')
self.visit(node.dyn_kwargs, frame)
def pull_locals(self, frame):
"""Pull all the references identifiers into the local scope."""
for name in frame.identifiers.undeclared:
self.writeline('l_%s = context.resolve(%r)' % (name, name))
def pull_dependencies(self, nodes):
"""Pull all the dependencies."""
visitor = DependencyFinderVisitor()
for node in nodes:
visitor.visit(node)
for dependency in 'filters', 'tests':
mapping = getattr(self, dependency)
for name in getattr(visitor, dependency):
if name not in mapping:
mapping[name] = self.temporary_identifier()
self.writeline('%s = environment.%s[%r]' %
(mapping[name], dependency, name))
def unoptimize_scope(self, frame):
"""Disable Python optimizations for the frame."""
# XXX: this is not that nice but it has no real overhead. It
# mainly works because python finds the locals before dead code
# is removed. If that breaks we have to add a dummy function
# that just accepts the arguments and does nothing.
if frame.identifiers.declared:
self.writeline('%sdummy(%s)' % (
unoptimize_before_dead_code and 'if 0: ' or '',
', '.join('l_' + name for name in frame.identifiers.declared)
))
def push_scope(self, frame, extra_vars=()):
"""This function returns all the shadowed variables in a dict
in the form name: alias and will write the required assignments
into the current scope. No indentation takes place.
This also predefines locally declared variables from the loop
body because under some circumstances it may be the case that
`extra_vars` is passed to `Frame.find_shadowed`.
"""
aliases = {}
for name in frame.find_shadowed(extra_vars):
aliases[name] = ident = self.temporary_identifier()
self.writeline('%s = l_%s' % (ident, name))
to_declare = set()
for name in frame.identifiers.declared_locally:
if name not in aliases:
to_declare.add('l_' + name)
if to_declare:
self.writeline(' = '.join(to_declare) + ' = missing')
return aliases
def pop_scope(self, aliases, frame):
"""Restore all aliases and delete unused variables."""
for name, alias in iteritems(aliases):
self.writeline('l_%s = %s' % (name, alias))
to_delete = set()
for name in frame.identifiers.declared_locally:
if name not in aliases:
to_delete.add('l_' + name)
if to_delete:
# we cannot use the del statement here because enclosed
# scopes can trigger a SyntaxError:
# a = 42; b = lambda: a; del a
self.writeline(' = '.join(to_delete) + ' = missing')
def function_scoping(self, node, frame, children=None,
find_special=True):
"""In Jinja a few statements require the help of anonymous
functions. Those are currently macros and call blocks and in
the future also recursive loops. As there is currently
        a technical limitation that doesn't allow reading and writing a
variable in a scope where the initial value is coming from an
outer scope, this function tries to fall back with a common
error message. Additionally the frame passed is modified so
        that the arguments are collected and callers are looked up.
This will return the modified frame.
"""
# we have to iterate twice over it, make sure that works
if children is None:
children = node.iter_child_nodes()
children = list(children)
func_frame = frame.inner()
func_frame.inspect(children)
# variables that are undeclared (accessed before declaration) and
# declared locally *and* part of an outside scope raise a template
# assertion error. Reason: we can't generate reasonable code from
# it without aliasing all the variables.
# this could be fixed in Python 3 where we have the nonlocal
# keyword or if we switch to bytecode generation
overridden_closure_vars = (
func_frame.identifiers.undeclared &
func_frame.identifiers.declared &
(func_frame.identifiers.declared_locally |
func_frame.identifiers.declared_parameter)
)
if overridden_closure_vars:
self.fail('It\'s not possible to set and access variables '
'derived from an outer scope! (affects: %s)' %
', '.join(sorted(overridden_closure_vars)), node.lineno)
# remove variables from a closure from the frame's undeclared
# identifiers.
func_frame.identifiers.undeclared -= (
func_frame.identifiers.undeclared &
func_frame.identifiers.declared
)
# no special variables for this scope, abort early
if not find_special:
return func_frame
func_frame.accesses_kwargs = False
func_frame.accesses_varargs = False
func_frame.accesses_caller = False
func_frame.arguments = args = ['l_' + x.name for x in node.args]
undeclared = find_undeclared(children, ('caller', 'kwargs', 'varargs'))
if 'caller' in undeclared:
func_frame.accesses_caller = True
func_frame.identifiers.add_special('caller')
args.append('l_caller')
if 'kwargs' in undeclared:
func_frame.accesses_kwargs = True
func_frame.identifiers.add_special('kwargs')
args.append('l_kwargs')
if 'varargs' in undeclared:
func_frame.accesses_varargs = True
func_frame.identifiers.add_special('varargs')
args.append('l_varargs')
return func_frame
def macro_body(self, node, frame, children=None):
"""Dump the function def of a macro or call block."""
frame = self.function_scoping(node, frame, children)
# macros are delayed, they never require output checks
frame.require_output_check = False
args = frame.arguments
# XXX: this is an ugly fix for the loop nesting bug
# (tests.test_old_bugs.test_loop_call_bug). This works around
        # an identifier nesting problem we have in general. It's just more
# likely to happen in loops which is why we work around it. The
# real solution would be "nonlocal" all the identifiers that are
# leaking into a new python frame and might be used both unassigned
# and assigned.
if 'loop' in frame.identifiers.declared:
args = args + ['l_loop=l_loop']
self.writeline('def macro(%s):' % ', '.join(args), node)
self.indent()
self.buffer(frame)
self.pull_locals(frame)
self.blockvisit(node.body, frame)
self.return_buffer_contents(frame)
self.outdent()
return frame
def macro_def(self, node, frame):
"""Dump the macro definition for the def created by macro_body."""
arg_tuple = ', '.join(repr(x.name) for x in node.args)
name = getattr(node, 'name', None)
if len(node.args) == 1:
arg_tuple += ','
self.write('Macro(environment, macro, %r, (%s), (' %
(name, arg_tuple))
for arg in node.defaults:
self.visit(arg, frame)
self.write(', ')
self.write('), %r, %r, %r)' % (
bool(frame.accesses_kwargs),
bool(frame.accesses_varargs),
bool(frame.accesses_caller)
))
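    # Illustrative sketch (assumption, not part of the original source): for
    # ``{% macro greet(name) %}...{% endmacro %}`` with no defaults, macro_body
    # and macro_def together produce code along the lines of
    #     def macro(l_name):
    #         ...
    #     Macro(environment, macro, 'greet', ('name',), (), False, False, False)
    # where the three trailing booleans record kwargs/varargs/caller access.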
def position(self, node):
"""Return a human readable position for the node."""
rv = 'line %d' % node.lineno
if self.name is not None:
rv += ' in ' + repr(self.name)
return rv
# -- Statement Visitors
def visit_Template(self, node, frame=None):
assert frame is None, 'no root frame allowed'
eval_ctx = EvalContext(self.environment, self.name)
from jinja2.runtime import __all__ as exported
self.writeline('from __future__ import division')
self.writeline('from jinja2.runtime import ' + ', '.join(exported))
if not unoptimize_before_dead_code:
self.writeline('dummy = lambda *x: None')
# if we want a deferred initialization we cannot move the
# environment into a local name
envenv = not self.defer_init and ', environment=environment' or ''
# do we have an extends tag at all? If not, we can save some
# overhead by just not processing any inheritance code.
have_extends = node.find(nodes.Extends) is not None
# find all blocks
for block in node.find_all(nodes.Block):
if block.name in self.blocks:
self.fail('block %r defined twice' % block.name, block.lineno)
self.blocks[block.name] = block
# find all imports and import them
for import_ in node.find_all(nodes.ImportedName):
if import_.importname not in self.import_aliases:
imp = import_.importname
self.import_aliases[imp] = alias = self.temporary_identifier()
if '.' in imp:
module, obj = imp.rsplit('.', 1)
self.writeline('from %s import %s as %s' %
(module, obj, alias))
else:
self.writeline('import %s as %s' % (imp, alias))
# add the load name
self.writeline('name = %r' % self.name)
# generate the root render function.
self.writeline('def root(context%s):' % envenv, extra=1)
# process the root
frame = Frame(eval_ctx)
frame.inspect(node.body)
frame.toplevel = frame.rootlevel = True
frame.require_output_check = have_extends and not self.has_known_extends
self.indent()
if have_extends:
self.writeline('parent_template = None')
if 'self' in find_undeclared(node.body, ('self',)):
frame.identifiers.add_special('self')
self.writeline('l_self = TemplateReference(context)')
self.pull_locals(frame)
self.pull_dependencies(node.body)
self.blockvisit(node.body, frame)
self.outdent()
# make sure that the parent root is called.
if have_extends:
if not self.has_known_extends:
self.indent()
self.writeline('if parent_template is not None:')
self.indent()
self.writeline('for event in parent_template.'
'root_render_func(context):')
self.indent()
self.writeline('yield event')
self.outdent(2 + (not self.has_known_extends))
# at this point we now have the blocks collected and can visit them too.
for name, block in iteritems(self.blocks):
block_frame = Frame(eval_ctx)
block_frame.inspect(block.body)
block_frame.block = name
self.writeline('def block_%s(context%s):' % (name, envenv),
block, 1)
self.indent()
undeclared = find_undeclared(block.body, ('self', 'super'))
if 'self' in undeclared:
block_frame.identifiers.add_special('self')
self.writeline('l_self = TemplateReference(context)')
if 'super' in undeclared:
block_frame.identifiers.add_special('super')
self.writeline('l_super = context.super(%r, '
'block_%s)' % (name, name))
self.pull_locals(block_frame)
self.pull_dependencies(block.body)
self.blockvisit(block.body, block_frame)
self.outdent()
self.writeline('blocks = {%s}' % ', '.join('%r: block_%s' % (x, x)
for x in self.blocks),
extra=1)
# add a function that returns the debug info
self.writeline('debug_info = %r' % '&'.join('%s=%s' % x for x
in self.debug_info))
def visit_Block(self, node, frame):
"""Call a block and register it for the template."""
level = 1
if frame.toplevel:
# if we know that we are a child template, there is no need to
# check if we are one
if self.has_known_extends:
return
if self.extends_so_far > 0:
self.writeline('if parent_template is None:')
self.indent()
level += 1
context = node.scoped and 'context.derived(locals())' or 'context'
self.writeline('for event in context.blocks[%r][0](%s):' % (
node.name, context), node)
self.indent()
self.simple_write('event', frame)
self.outdent(level)
def visit_Extends(self, node, frame):
"""Calls the extender."""
if not frame.toplevel:
self.fail('cannot use extend from a non top-level scope',
node.lineno)
# if the number of extends statements in general is zero so
# far, we don't have to add a check if something extended
# the template before this one.
if self.extends_so_far > 0:
# if we have a known extends we just add a template runtime
# error into the generated code. We could catch that at compile
            # time too, but I prefer not to confuse users by throwing the
# same error at different times just "because we can".
if not self.has_known_extends:
self.writeline('if parent_template is not None:')
self.indent()
self.writeline('raise TemplateRuntimeError(%r)' %
'extended multiple times')
# if we have a known extends already we don't need that code here
# as we know that the template execution will end here.
if self.has_known_extends:
raise CompilerExit()
else:
self.outdent()
self.writeline('parent_template = environment.get_template(', node)
self.visit(node.template, frame)
self.write(', %r)' % self.name)
self.writeline('for name, parent_block in parent_template.'
'blocks.%s():' % dict_item_iter)
self.indent()
self.writeline('context.blocks.setdefault(name, []).'
'append(parent_block)')
self.outdent()
# if this extends statement was in the root level we can take
# advantage of that information and simplify the generated code
# in the top level from this point onwards
if frame.rootlevel:
self.has_known_extends = True
# and now we have one more
self.extends_so_far += 1
def visit_Include(self, node, frame):
"""Handles includes."""
if node.with_context:
self.unoptimize_scope(frame)
if node.ignore_missing:
self.writeline('try:')
self.indent()
func_name = 'get_or_select_template'
if isinstance(node.template, nodes.Const):
if isinstance(node.template.value, string_types):
func_name = 'get_template'
elif isinstance(node.template.value, (tuple, list)):
func_name = 'select_template'
elif isinstance(node.template, (nodes.Tuple, nodes.List)):
func_name = 'select_template'
self.writeline('template = environment.%s(' % func_name, node)
self.visit(node.template, frame)
self.write(', %r)' % self.name)
if node.ignore_missing:
self.outdent()
self.writeline('except TemplateNotFound:')
self.indent()
self.writeline('pass')
self.outdent()
self.writeline('else:')
self.indent()
if node.with_context:
self.writeline('for event in template.root_render_func('
'template.new_context(context.parent, True, '
'locals())):')
else:
self.writeline('for event in template.module._body_stream:')
self.indent()
self.simple_write('event', frame)
self.outdent()
if node.ignore_missing:
self.outdent()
def visit_Import(self, node, frame):
"""Visit regular imports."""
if node.with_context:
self.unoptimize_scope(frame)
self.writeline('l_%s = ' % node.target, node)
if frame.toplevel:
self.write('context.vars[%r] = ' % node.target)
self.write('environment.get_template(')
self.visit(node.template, frame)
self.write(', %r).' % self.name)
if node.with_context:
self.write('make_module(context.parent, True, locals())')
else:
self.write('module')
if frame.toplevel and not node.target.startswith('_'):
self.writeline('context.exported_vars.discard(%r)' % node.target)
frame.assigned_names.add(node.target)
def visit_FromImport(self, node, frame):
"""Visit named imports."""
self.newline(node)
self.write('included_template = environment.get_template(')
self.visit(node.template, frame)
self.write(', %r).' % self.name)
if node.with_context:
self.write('make_module(context.parent, True)')
else:
self.write('module')
var_names = []
discarded_names = []
for name in node.names:
if isinstance(name, tuple):
name, alias = name
else:
alias = name
self.writeline('l_%s = getattr(included_template, '
'%r, missing)' % (alias, name))
self.writeline('if l_%s is missing:' % alias)
self.indent()
self.writeline('l_%s = environment.undefined(%r %% '
'included_template.__name__, '
'name=%r)' %
(alias, 'the template %%r (imported on %s) does '
'not export the requested name %s' % (
self.position(node),
repr(name)
), name))
self.outdent()
if frame.toplevel:
var_names.append(alias)
if not alias.startswith('_'):
discarded_names.append(alias)
frame.assigned_names.add(alias)
if var_names:
if len(var_names) == 1:
name = var_names[0]
self.writeline('context.vars[%r] = l_%s' % (name, name))
else:
self.writeline('context.vars.update({%s})' % ', '.join(
'%r: l_%s' % (name, name) for name in var_names
))
if discarded_names:
if len(discarded_names) == 1:
self.writeline('context.exported_vars.discard(%r)' %
discarded_names[0])
else:
self.writeline('context.exported_vars.difference_'
'update((%s))' % ', '.join(imap(repr, discarded_names)))
def visit_For(self, node, frame):
# when calculating the nodes for the inner frame we have to exclude
# the iterator contents from it
children = node.iter_child_nodes(exclude=('iter',))
if node.recursive:
loop_frame = self.function_scoping(node, frame, children,
find_special=False)
else:
loop_frame = frame.inner()
loop_frame.inspect(children)
# try to figure out if we have an extended loop. An extended loop
        # is necessary if the loop is in recursive mode or if the special loop
# variable is accessed in the body.
extended_loop = node.recursive or 'loop' in \
find_undeclared(node.iter_child_nodes(
only=('body',)), ('loop',))
        # if we don't have a recursive loop we have to find the shadowed
# variables at that point. Because loops can be nested but the loop
# variable is a special one we have to enforce aliasing for it.
if not node.recursive:
aliases = self.push_scope(loop_frame, ('loop',))
# otherwise we set up a buffer and add a function def
else:
self.writeline('def loop(reciter, loop_render_func, depth=0):', node)
self.indent()
self.buffer(loop_frame)
aliases = {}
# make sure the loop variable is a special one and raise a template
# assertion error if a loop tries to write to loop
if extended_loop:
self.writeline('l_loop = missing')
loop_frame.identifiers.add_special('loop')
for name in node.find_all(nodes.Name):
if name.ctx == 'store' and name.name == 'loop':
self.fail('Can\'t assign to special loop variable '
'in for-loop target', name.lineno)
self.pull_locals(loop_frame)
if node.else_:
iteration_indicator = self.temporary_identifier()
self.writeline('%s = 1' % iteration_indicator)
# Create a fake parent loop if the else or test section of a
# loop is accessing the special loop variable and no parent loop
# exists.
if 'loop' not in aliases and 'loop' in find_undeclared(
node.iter_child_nodes(only=('else_', 'test')), ('loop',)):
self.writeline("l_loop = environment.undefined(%r, name='loop')" %
("'loop' is undefined. the filter section of a loop as well "
"as the else block don't have access to the special 'loop'"
" variable of the current loop. Because there is no parent "
"loop it's undefined. Happened in loop on %s" %
self.position(node)))
self.writeline('for ', node)
self.visit(node.target, loop_frame)
self.write(extended_loop and ', l_loop in LoopContext(' or ' in ')
        # if we have an extended loop and a node test, we filter in the
# "outer frame".
if extended_loop and node.test is not None:
self.write('(')
self.visit(node.target, loop_frame)
self.write(' for ')
self.visit(node.target, loop_frame)
self.write(' in ')
if node.recursive:
self.write('reciter')
else:
self.visit(node.iter, loop_frame)
self.write(' if (')
test_frame = loop_frame.copy()
self.visit(node.test, test_frame)
self.write('))')
elif node.recursive:
self.write('reciter')
else:
self.visit(node.iter, loop_frame)
if node.recursive:
self.write(', loop_render_func, depth):')
else:
self.write(extended_loop and '):' or ':')
        # tests in non-extended loops become a continue
if not extended_loop and node.test is not None:
self.indent()
self.writeline('if not ')
self.visit(node.test, loop_frame)
self.write(':')
self.indent()
self.writeline('continue')
self.outdent(2)
self.indent()
self.blockvisit(node.body, loop_frame)
if node.else_:
self.writeline('%s = 0' % iteration_indicator)
self.outdent()
if node.else_:
self.writeline('if %s:' % iteration_indicator)
self.indent()
self.blockvisit(node.else_, loop_frame)
self.outdent()
# reset the aliases if there are any.
if not node.recursive:
self.pop_scope(aliases, loop_frame)
# if the node was recursive we have to return the buffer contents
# and start the iteration code
if node.recursive:
self.return_buffer_contents(loop_frame)
self.outdent()
self.start_write(frame, node)
self.write('loop(')
self.visit(node.iter, frame)
self.write(', loop)')
self.end_write(frame)
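    # Illustrative sketch (assumption, not part of the original source): a plain
    # non-recursive ``{% for item in seq %}`` whose body never touches the
    # special ``loop`` variable compiles to roughly
    #     for l_item in l_seq:
    #         <body>
    # whereas an extended loop is wrapped in the runtime helper:
    #     for l_item, l_loop in LoopContext(l_seq):
    #         <body>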
def visit_If(self, node, frame):
if_frame = frame.soft()
self.writeline('if ', node)
self.visit(node.test, if_frame)
self.write(':')
self.indent()
self.blockvisit(node.body, if_frame)
self.outdent()
if node.else_:
self.writeline('else:')
self.indent()
self.blockvisit(node.else_, if_frame)
self.outdent()
def visit_Macro(self, node, frame):
macro_frame = self.macro_body(node, frame)
self.newline()
if frame.toplevel:
if not node.name.startswith('_'):
self.write('context.exported_vars.add(%r)' % node.name)
self.writeline('context.vars[%r] = ' % node.name)
self.write('l_%s = ' % node.name)
self.macro_def(node, macro_frame)
frame.assigned_names.add(node.name)
def visit_CallBlock(self, node, frame):
children = node.iter_child_nodes(exclude=('call',))
call_frame = self.macro_body(node, frame, children)
self.writeline('caller = ')
self.macro_def(node, call_frame)
self.start_write(frame, node)
self.visit_Call(node.call, call_frame, forward_caller=True)
self.end_write(frame)
def visit_FilterBlock(self, node, frame):
filter_frame = frame.inner()
filter_frame.inspect(node.iter_child_nodes())
aliases = self.push_scope(filter_frame)
self.pull_locals(filter_frame)
self.buffer(filter_frame)
self.blockvisit(node.body, filter_frame)
self.start_write(frame, node)
self.visit_Filter(node.filter, filter_frame)
self.end_write(frame)
self.pop_scope(aliases, filter_frame)
def visit_ExprStmt(self, node, frame):
self.newline(node)
self.visit(node.node, frame)
def visit_Output(self, node, frame):
# if we have a known extends statement, we don't output anything
# if we are in a require_output_check section
if self.has_known_extends and frame.require_output_check:
return
allow_constant_finalize = True
if self.environment.finalize:
func = self.environment.finalize
if getattr(func, 'contextfunction', False) or \
getattr(func, 'evalcontextfunction', False):
allow_constant_finalize = False
elif getattr(func, 'environmentfunction', False):
finalize = lambda x: text_type(
self.environment.finalize(self.environment, x))
else:
finalize = lambda x: text_type(self.environment.finalize(x))
else:
finalize = text_type
# if we are inside a frame that requires output checking, we do so
outdent_later = False
if frame.require_output_check:
self.writeline('if parent_template is None:')
self.indent()
outdent_later = True
# try to evaluate as many chunks as possible into a static
# string at compile time.
body = []
for child in node.nodes:
try:
if not allow_constant_finalize:
raise nodes.Impossible()
const = child.as_const(frame.eval_ctx)
except nodes.Impossible:
body.append(child)
continue
            # the frame can't be volatile here, because otherwise the
# as_const() function would raise an Impossible exception
# at that point.
try:
if frame.eval_ctx.autoescape:
if hasattr(const, '__html__'):
const = const.__html__()
else:
const = escape(const)
const = finalize(const)
except Exception:
# if something goes wrong here we evaluate the node
# at runtime for easier debugging
body.append(child)
continue
if body and isinstance(body[-1], list):
body[-1].append(const)
else:
body.append([const])
# if we have less than 3 nodes or a buffer we yield or extend/append
if len(body) < 3 or frame.buffer is not None:
if frame.buffer is not None:
# for one item we append, for more we extend
if len(body) == 1:
self.writeline('%s.append(' % frame.buffer)
else:
self.writeline('%s.extend((' % frame.buffer)
self.indent()
for item in body:
if isinstance(item, list):
val = repr(concat(item))
if frame.buffer is None:
self.writeline('yield ' + val)
else:
self.writeline(val + ', ')
else:
if frame.buffer is None:
self.writeline('yield ', item)
else:
self.newline(item)
close = 1
if frame.eval_ctx.volatile:
self.write('(context.eval_ctx.autoescape and'
' escape or to_string)(')
elif frame.eval_ctx.autoescape:
self.write('escape(')
else:
self.write('to_string(')
if self.environment.finalize is not None:
self.write('environment.finalize(')
if getattr(self.environment.finalize,
"contextfunction", False):
self.write('context, ')
close += 1
self.visit(item, frame)
self.write(')' * close)
if frame.buffer is not None:
self.write(', ')
if frame.buffer is not None:
# close the open parentheses
self.outdent()
self.writeline(len(body) == 1 and ')' or '))')
# otherwise we create a format string as this is faster in that case
else:
format = []
arguments = []
for item in body:
if isinstance(item, list):
format.append(concat(item).replace('%', '%%'))
else:
format.append('%s')
arguments.append(item)
self.writeline('yield ')
self.write(repr(concat(format)) + ' % (')
self.indent()
for argument in arguments:
self.newline(argument)
close = 0
if frame.eval_ctx.volatile:
self.write('(context.eval_ctx.autoescape and'
' escape or to_string)(')
close += 1
elif frame.eval_ctx.autoescape:
self.write('escape(')
close += 1
if self.environment.finalize is not None:
self.write('environment.finalize(')
if getattr(self.environment.finalize,
'contextfunction', False):
self.write('context, ')
elif getattr(self.environment.finalize,
'evalcontextfunction', False):
self.write('context.eval_ctx, ')
elif getattr(self.environment.finalize,
'environmentfunction', False):
self.write('environment, ')
close += 1
self.visit(argument, frame)
self.write(')' * close + ', ')
self.outdent()
self.writeline(')')
if outdent_later:
self.outdent()
def make_assignment_frame(self, frame):
# toplevel assignments however go into the local namespace and
# the current template's context. We create a copy of the frame
# here and add a set so that the Name visitor can add the assigned
# names here.
if not frame.toplevel:
return frame
assignment_frame = frame.copy()
assignment_frame.toplevel_assignments = set()
return assignment_frame
def export_assigned_vars(self, frame, assignment_frame):
if not frame.toplevel:
return
public_names = [x for x in assignment_frame.toplevel_assignments
if not x.startswith('_')]
if len(assignment_frame.toplevel_assignments) == 1:
name = next(iter(assignment_frame.toplevel_assignments))
self.writeline('context.vars[%r] = l_%s' % (name, name))
else:
self.writeline('context.vars.update({')
for idx, name in enumerate(assignment_frame.toplevel_assignments):
if idx:
self.write(', ')
self.write('%r: l_%s' % (name, name))
self.write('})')
if public_names:
if len(public_names) == 1:
self.writeline('context.exported_vars.add(%r)' %
public_names[0])
else:
self.writeline('context.exported_vars.update((%s))' %
', '.join(imap(repr, public_names)))
def visit_Assign(self, node, frame):
self.newline(node)
assignment_frame = self.make_assignment_frame(frame)
self.visit(node.target, assignment_frame)
self.write(' = ')
self.visit(node.node, frame)
self.export_assigned_vars(frame, assignment_frame)
def visit_AssignBlock(self, node, frame):
block_frame = frame.inner()
block_frame.inspect(node.body)
aliases = self.push_scope(block_frame)
self.pull_locals(block_frame)
self.buffer(block_frame)
self.blockvisit(node.body, block_frame)
self.pop_scope(aliases, block_frame)
assignment_frame = self.make_assignment_frame(frame)
self.newline(node)
self.visit(node.target, assignment_frame)
self.write(' = concat(%s)' % block_frame.buffer)
self.export_assigned_vars(frame, assignment_frame)
# -- Expression Visitors
def visit_Name(self, node, frame):
if node.ctx == 'store' and frame.toplevel:
frame.toplevel_assignments.add(node.name)
self.write('l_' + node.name)
frame.assigned_names.add(node.name)
def visit_Const(self, node, frame):
val = node.value
if isinstance(val, float):
self.write(str(val))
else:
self.write(repr(val))
def visit_TemplateData(self, node, frame):
try:
self.write(repr(node.as_const(frame.eval_ctx)))
except nodes.Impossible:
self.write('(context.eval_ctx.autoescape and Markup or identity)(%r)'
% node.data)
def visit_Tuple(self, node, frame):
self.write('(')
idx = -1
for idx, item in enumerate(node.items):
if idx:
self.write(', ')
self.visit(item, frame)
self.write(idx == 0 and ',)' or ')')
def visit_List(self, node, frame):
self.write('[')
for idx, item in enumerate(node.items):
if idx:
self.write(', ')
self.visit(item, frame)
self.write(']')
def visit_Dict(self, node, frame):
self.write('{')
for idx, item in enumerate(node.items):
if idx:
self.write(', ')
self.visit(item.key, frame)
self.write(': ')
self.visit(item.value, frame)
self.write('}')
def binop(operator, interceptable=True):
def visitor(self, node, frame):
if self.environment.sandboxed and \
operator in self.environment.intercepted_binops:
self.write('environment.call_binop(context, %r, ' % operator)
self.visit(node.left, frame)
self.write(', ')
self.visit(node.right, frame)
else:
self.write('(')
self.visit(node.left, frame)
self.write(' %s ' % operator)
self.visit(node.right, frame)
self.write(')')
return visitor
def uaop(operator, interceptable=True):
def visitor(self, node, frame):
if self.environment.sandboxed and \
operator in self.environment.intercepted_unops:
self.write('environment.call_unop(context, %r, ' % operator)
self.visit(node.node, frame)
else:
self.write('(' + operator)
self.visit(node.node, frame)
self.write(')')
return visitor
visit_Add = binop('+')
visit_Sub = binop('-')
visit_Mul = binop('*')
visit_Div = binop('/')
visit_FloorDiv = binop('//')
visit_Pow = binop('**')
visit_Mod = binop('%')
visit_And = binop('and', interceptable=False)
visit_Or = binop('or', interceptable=False)
visit_Pos = uaop('+')
visit_Neg = uaop('-')
visit_Not = uaop('not ', interceptable=False)
del binop, uaop
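    # Illustrative sketch (assumption, not part of the original source): the
    # generated visitors write plain operator expressions, e.g. visit_Add emits
    #     (l_a + l_b)
    # but in a sandboxed environment that lists '+' in intercepted_binops the
    # same node is routed through
    #     environment.call_binop(context, '+', l_a, l_b)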
def visit_Concat(self, node, frame):
if frame.eval_ctx.volatile:
func_name = '(context.eval_ctx.volatile and' \
' markup_join or unicode_join)'
elif frame.eval_ctx.autoescape:
func_name = 'markup_join'
else:
func_name = 'unicode_join'
self.write('%s((' % func_name)
for arg in node.nodes:
self.visit(arg, frame)
self.write(', ')
self.write('))')
def visit_Compare(self, node, frame):
self.visit(node.expr, frame)
for op in node.ops:
self.visit(op, frame)
def visit_Operand(self, node, frame):
self.write(' %s ' % operators[node.op])
self.visit(node.expr, frame)
def visit_Getattr(self, node, frame):
self.write('environment.getattr(')
self.visit(node.node, frame)
self.write(', %r)' % node.attr)
def visit_Getitem(self, node, frame):
# slices bypass the environment getitem method.
if isinstance(node.arg, nodes.Slice):
self.visit(node.node, frame)
self.write('[')
self.visit(node.arg, frame)
self.write(']')
else:
self.write('environment.getitem(')
self.visit(node.node, frame)
self.write(', ')
self.visit(node.arg, frame)
self.write(')')
def visit_Slice(self, node, frame):
if node.start is not None:
self.visit(node.start, frame)
self.write(':')
if node.stop is not None:
self.visit(node.stop, frame)
if node.step is not None:
self.write(':')
self.visit(node.step, frame)
def visit_Filter(self, node, frame):
self.write(self.filters[node.name] + '(')
func = self.environment.filters.get(node.name)
if func is None:
self.fail('no filter named %r' % node.name, node.lineno)
if getattr(func, 'contextfilter', False):
self.write('context, ')
elif getattr(func, 'evalcontextfilter', False):
self.write('context.eval_ctx, ')
elif getattr(func, 'environmentfilter', False):
self.write('environment, ')
# if the filter node is None we are inside a filter block
# and want to write to the current buffer
if node.node is not None:
self.visit(node.node, frame)
elif frame.eval_ctx.volatile:
self.write('(context.eval_ctx.autoescape and'
' Markup(concat(%s)) or concat(%s))' %
(frame.buffer, frame.buffer))
elif frame.eval_ctx.autoescape:
self.write('Markup(concat(%s))' % frame.buffer)
else:
self.write('concat(%s)' % frame.buffer)
self.signature(node, frame)
self.write(')')
def visit_Test(self, node, frame):
self.write(self.tests[node.name] + '(')
if node.name not in self.environment.tests:
self.fail('no test named %r' % node.name, node.lineno)
self.visit(node.node, frame)
self.signature(node, frame)
self.write(')')
def visit_CondExpr(self, node, frame):
def write_expr2():
if node.expr2 is not None:
return self.visit(node.expr2, frame)
self.write('environment.undefined(%r)' % ('the inline if-'
'expression on %s evaluated to false and '
'no else section was defined.' % self.position(node)))
self.write('(')
self.visit(node.expr1, frame)
self.write(' if ')
self.visit(node.test, frame)
self.write(' else ')
write_expr2()
self.write(')')
def visit_Call(self, node, frame, forward_caller=False):
if self.environment.sandboxed:
self.write('environment.call(context, ')
else:
self.write('context.call(')
self.visit(node.node, frame)
extra_kwargs = forward_caller and {'caller': 'caller'} or None
self.signature(node, frame, extra_kwargs)
self.write(')')
def visit_Keyword(self, node, frame):
self.write(node.key + '=')
self.visit(node.value, frame)
# -- Unused nodes for extensions
def visit_MarkSafe(self, node, frame):
self.write('Markup(')
self.visit(node.expr, frame)
self.write(')')
def visit_MarkSafeIfAutoescape(self, node, frame):
self.write('(context.eval_ctx.autoescape and Markup or identity)(')
self.visit(node.expr, frame)
self.write(')')
def visit_EnvironmentAttribute(self, node, frame):
self.write('environment.' + node.name)
def visit_ExtensionAttribute(self, node, frame):
self.write('environment.extensions[%r].%s' % (node.identifier, node.name))
def visit_ImportedName(self, node, frame):
self.write(self.import_aliases[node.importname])
def visit_InternalName(self, node, frame):
self.write(node.name)
def visit_ContextReference(self, node, frame):
self.write('context')
def visit_Continue(self, node, frame):
self.writeline('continue', node)
def visit_Break(self, node, frame):
self.writeline('break', node)
def visit_Scope(self, node, frame):
scope_frame = frame.inner()
scope_frame.inspect(node.iter_child_nodes())
aliases = self.push_scope(scope_frame)
self.pull_locals(scope_frame)
self.blockvisit(node.body, scope_frame)
self.pop_scope(aliases, scope_frame)
def visit_EvalContextModifier(self, node, frame):
for keyword in node.options:
self.writeline('context.eval_ctx.%s = ' % keyword.key)
self.visit(keyword.value, frame)
try:
val = keyword.value.as_const(frame.eval_ctx)
except nodes.Impossible:
frame.eval_ctx.volatile = True
else:
setattr(frame.eval_ctx, keyword.key, val)
def visit_ScopedEvalContextModifier(self, node, frame):
old_ctx_name = self.temporary_identifier()
safed_ctx = frame.eval_ctx.save()
self.writeline('%s = context.eval_ctx.save()' % old_ctx_name)
self.visit_EvalContextModifier(node, frame)
for child in node.body:
self.visit(child, frame)
frame.eval_ctx.revert(safed_ctx)
self.writeline('context.eval_ctx.revert(%s)' % old_ctx_name)
|
mit
|
murphy-wang/aws-ec2
|
lib/boto-2.34.0/boto/pyami/helloworld.py
|
153
|
1238
|
# Copyright (c) 2006,2007 Mitch Garnaat http://garnaat.org/
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
from boto.pyami.scriptbase import ScriptBase
class HelloWorld(ScriptBase):
def main(self):
self.log('Hello World!!!')
|
apache-2.0
|
yxl/emscripten-calligra-mobile
|
plan/plugins/scripting/tests/project_access.py
|
7
|
3268
|
#!/usr/bin/env kross
# -*- coding: utf-8 -*-
import traceback
import Kross
import Plan
import TestResult
TestResult.setResult( True )
asserttext = "Test of property '{0}' failed:\n Expected: '{2}'\n Result: '{1}'"
asserttext2 = "Failed to set property '{0}' to '{1}'. Result: {2}"
try:
project = Plan.project()
assert project is not None
property = 'Name'
data = "Project name"
before = project.name()
Plan.beginCommand( "Set data" );
res = project.setData(project, property, data)
text = asserttext2.format(property, data, res)
assert res == 'Success', text
result = project.name()
text = asserttext.format(property, result, data)
assert result == data, text
Plan.revertCommand()
result = project.name()
text = asserttext.format(property, result, before)
assert result == before, text
property = 'Name'
data = "Project name"
before = project.data(project, property)
Plan.beginCommand( "Set data" );
res = project.setData(project, property, data)
text = asserttext2.format(property, data, res)
assert res == 'Success', text
result = project.data(project, property)
text = asserttext.format(property, result, data)
assert result == data, text
Plan.revertCommand()
result = project.data(project, property)
text = asserttext.format(property, result, before)
assert result == before, text
property = 'Responsible'
data = "Project responsible"
before = project.data(project, property)
Plan.beginCommand( "Set data" );
res = project.setData(project, property, data)
text = asserttext2.format(property, data, res)
assert res == 'Success', text
result = project.data(project, property)
text = asserttext.format(property, result, data)
assert result == data, text
Plan.revertCommand()
result = project.data(project, property)
text = asserttext.format(property, result, before)
assert result == before, text
property = 'ConstraintStart'
data = "2011-08-01T10:00:00"
before = project.data(project, property)
Plan.beginCommand( "Set data" );
res = project.setData(project, property, data)
text = asserttext2.format(property, data, res)
assert res == 'Success', text
result = project.data(project, property, 'ProgramRole')
text = asserttext.format(property, result, data)
assert result == data, text
Plan.revertCommand()
result = project.data(project, property)
text = asserttext.format(property, result, before)
assert result == before, text
property = 'ConstraintEnd'
data = "2011-08-01T11:00:00"
before = project.data(project, property)
Plan.beginCommand( "Set data" );
res = project.setData(project, property, data)
text = asserttext2.format(property, data, res)
assert res == 'Success', text
result = project.data(project, property, 'ProgramRole')
text = asserttext.format(property, result, data)
assert result == data, text
Plan.revertCommand()
result = project.data(project, property)
text = asserttext.format(property, result, before)
assert result == before, text
except:
TestResult.setResult( False )
TestResult.setMessage("\n" + traceback.format_exc(1))
|
gpl-2.0
|
ashang/calibre
|
src/calibre/utils/complete.py
|
14
|
5285
|
#!/usr/bin/env python2
# vim:fileencoding=UTF-8:ts=4:sw=4:sta:et:sts=4:ai
from __future__ import with_statement
__license__ = 'GPL v3'
__copyright__ = '2009, Kovid Goyal <[email protected]>'
__docformat__ = 'restructuredtext en'
'''
BASH completion for calibre commands that are too complex for simple
completion.
'''
import sys, os, shlex, glob, re, cPickle
def prints(*args, **kwargs):
'''
Print unicode arguments safely by encoding them to preferred_encoding
Has the same signature as the print function from Python 3, except for the
additional keyword argument safe_encode, which if set to True will cause the
function to use repr when encoding fails.
'''
file = kwargs.get('file', sys.stdout)
sep = kwargs.get('sep', ' ')
end = kwargs.get('end', '\n')
enc = 'utf-8'
safe_encode = kwargs.get('safe_encode', False)
for i, arg in enumerate(args):
if isinstance(arg, unicode):
try:
arg = arg.encode(enc)
except UnicodeEncodeError:
if not safe_encode:
raise
arg = repr(arg)
if not isinstance(arg, str):
try:
arg = str(arg)
except ValueError:
arg = unicode(arg)
if isinstance(arg, unicode):
try:
arg = arg.encode(enc)
except UnicodeEncodeError:
if not safe_encode:
raise
arg = repr(arg)
file.write(arg)
if i != len(args)-1:
file.write(sep)
file.write(end)
def split(src):
try:
return shlex.split(src)
except ValueError:
try:
return shlex.split(src+'"')
except ValueError:
return shlex.split(src+"'")
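# Illustrative sketch (assumption, not part of the original file): split()
# tolerates the unbalanced quoting that is typical while a command line is
# still being typed, e.g.
#     split('ebook-convert "My Book')   # -> ['ebook-convert', 'My Book']
# because the ValueError raised by shlex is retried with a closing quote.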
def files_and_dirs(prefix, allowed_exts=[]):
prefix = os.path.expanduser(prefix)
for i in glob.iglob(prefix+'*'):
_, ext = os.path.splitext(i)
ext = ext.lower().replace('.', '')
if os.path.isdir(i):
yield i+os.sep
elif allowed_exts is None or ext in allowed_exts:
yield i+' '
def get_opts_from_parser(parser, prefix):
def do_opt(opt):
for x in opt._long_opts:
if x.startswith(prefix):
yield x
for x in opt._short_opts:
if x.startswith(prefix):
yield x
for o in parser.option_list:
for x in do_opt(o): yield x+' '
for g in parser.option_groups:
for o in g.option_list:
for x in do_opt(o): yield x+' '
def send(ans):
pat = re.compile('([^0-9a-zA-Z_./-])')
for x in sorted(set(ans)):
x = pat.sub(lambda m : '\\'+m.group(1), x)
if x.endswith('\\ '):
x = x[:-2]+' '
prints(x)
class EbookConvert(object):
def __init__(self, comp_line, pos):
words = split(comp_line[:pos])
char_before = comp_line[pos-1]
prefix = words[-1] if words[-1].endswith(char_before) else ''
wc = len(words)
if not prefix:
wc += 1
self.words = words
self.prefix = prefix
self.previous = words[-2 if prefix else -1]
self.cache = cPickle.load(open(os.path.join(sys.resources_location,
'ebook-convert-complete.pickle'), 'rb'))
self.complete(wc)
def complete(self, wc):
if wc == 2:
self.complete_input()
elif wc == 3:
self.complete_output()
else:
q = list(self.words[1:3])
q = [os.path.splitext(x)[0 if x.startswith('.') else 1].partition('.')[-1].lower() for x in q]
if not q[1]:
q[1] = 'oeb'
q = tuple(q)
if q in self.cache:
ans = [x for x in self.cache[q] if x.startswith(self.prefix)]
else:
from calibre.ebooks.conversion.cli import create_option_parser
from calibre.utils.logging import Log
log = Log()
log.outputs = []
ans = []
if not self.prefix or self.prefix.startswith('-'):
try:
parser, _ = create_option_parser(self.words[:3], log)
ans += list(get_opts_from_parser(parser, self.prefix))
except:
pass
if self.previous.startswith('-'):
ans += list(files_and_dirs(self.prefix, None))
send(ans)
def complete_input(self):
ans = list(files_and_dirs(self.prefix, self.cache['input_fmts']))
ans += [t for t in self.cache['input_recipes'] if
t.startswith(self.prefix)]
send(ans)
def complete_output(self):
fmts = self.cache['output']
ans = list(files_and_dirs(self.prefix, fmts))
ans += ['.'+x+' ' for x in fmts if ('.'+x).startswith(self.prefix)]
send(ans)
def main(args=sys.argv):
comp_line, pos = os.environ['COMP_LINE'], int(os.environ['COMP_POINT'])
module = split(comp_line)[0].split(os.sep)[-1]
if module == 'ebook-convert':
EbookConvert(comp_line, pos)
return 0
if __name__ == '__main__':
raise sys.exit(main())
|
gpl-3.0
|
pekermert/django-socketio
|
django_socketio/views.py
|
10
|
2745
|
from django.http import HttpResponse
from django_socketio import events
from django_socketio.channels import SocketIOChannelProxy
from django_socketio.clients import client_start, client_end
from django_socketio.utils import format_log
def socketio(request):
"""
Socket.IO handler - maintains the lifecycle of a Socket.IO
    request, sending each of the events. Also handles
adding/removing request/socket pairs to the CLIENTS dict
which is used for sending on_finish events when the server
stops.
"""
context = {}
socket = SocketIOChannelProxy(request.environ["socketio"])
client_start(request, socket, context)
try:
if socket.on_connect():
events.on_connect.send(request, socket, context)
while True:
messages = socket.recv()
if not messages and not socket.connected():
events.on_disconnect.send(request, socket, context)
break
# Subscribe and unsubscribe messages are in two parts, the
# name of either and the channel, so we use an iterator that
# lets us jump a step in iteration to grab the channel name
# for these.
messages = iter(messages)
for message in messages:
if message == "__subscribe__":
message = messages.next()
message_type = "subscribe"
socket.subscribe(message)
events.on_subscribe.send(request, socket, context, message)
elif message == "__unsubscribe__":
message = messages.next()
message_type = "unsubscribe"
socket.unsubscribe(message)
events.on_unsubscribe.send(request, socket, context, message)
else:
# Socket.IO sends arrays as individual messages, so
# they're put into an object in socketio_scripts.html
# and given the __array__ key so that they can be
# handled consistently in the on_message event.
message_type = "message"
if message == "__array__":
message = messages.next()
events.on_message.send(request, socket, context, message)
log_message = format_log(request, message_type, message)
if log_message:
socket.handler.server.log.write(log_message)
except Exception, exception:
from traceback import print_exc
print_exc()
events.on_error.send(request, socket, context, exception)
client_end(request, socket, context)
return HttpResponse("")
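# Illustrative sketch (assumption, not part of the original file): with the
# two-part protocol handled above, a received batch such as
#     ["__subscribe__", "room-1", "hello"]
# results in socket.subscribe("room-1") plus the on_subscribe event, followed
# by a regular on_message event for "hello".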
|
bsd-2-clause
|
cloudbase/nova-virtualbox
|
nova/tests/unit/keymgr/test_single_key_mgr.py
|
78
|
2448
|
# Copyright (c) 2013 The Johns Hopkins University/Applied Physics Laboratory
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Test cases for the single key manager.
"""
import array
from nova import exception
from nova.keymgr import key
from nova.keymgr import single_key_mgr
from nova.tests.unit.keymgr import test_mock_key_mgr
class SingleKeyManagerTestCase(test_mock_key_mgr.MockKeyManagerTestCase):
def _create_key_manager(self):
return single_key_mgr.SingleKeyManager()
def setUp(self):
super(SingleKeyManagerTestCase, self).setUp()
self.key_id = '00000000-0000-0000-0000-000000000000'
encoded = array.array('B', ('0' * 64).decode('hex')).tolist()
self.key = key.SymmetricKey('AES', encoded)
def test___init__(self):
self.assertEqual(self.key,
self.key_mgr.get_key(self.ctxt, self.key_id))
def test_create_key(self):
key_id_1 = self.key_mgr.create_key(self.ctxt)
key_id_2 = self.key_mgr.create_key(self.ctxt)
# ensure that the UUIDs are the same
self.assertEqual(key_id_1, key_id_2)
def test_create_key_with_length(self):
pass
def test_store_null_context(self):
self.assertRaises(exception.Forbidden,
self.key_mgr.store_key, None, self.key)
def test_copy_key(self):
key_id = self.key_mgr.create_key(self.ctxt)
key = self.key_mgr.get_key(self.ctxt, key_id)
copied_key_id = self.key_mgr.copy_key(self.ctxt, key_id)
copied_key = self.key_mgr.get_key(self.ctxt, copied_key_id)
self.assertEqual(key_id, copied_key_id)
self.assertEqual(key, copied_key)
def test_delete_key(self):
pass
def test_delete_unknown_key(self):
self.assertRaises(exception.KeyManagerError,
self.key_mgr.delete_key, self.ctxt, None)
|
apache-2.0
|
elkingtonmcb/scikit-learn
|
sklearn/gaussian_process/gaussian_process.py
|
78
|
34552
|
# -*- coding: utf-8 -*-
# Author: Vincent Dubourg <[email protected]>
# (mostly translation, see implementation details)
# Licence: BSD 3 clause
from __future__ import print_function
import numpy as np
from scipy import linalg, optimize
from ..base import BaseEstimator, RegressorMixin
from ..metrics.pairwise import manhattan_distances
from ..utils import check_random_state, check_array, check_X_y
from ..utils.validation import check_is_fitted
from . import regression_models as regression
from . import correlation_models as correlation
MACHINE_EPSILON = np.finfo(np.double).eps
def l1_cross_distances(X):
"""
Computes the nonzero componentwise L1 cross-distances between the vectors
in X.
Parameters
----------
X: array_like
An array with shape (n_samples, n_features)
Returns
-------
D: array with shape (n_samples * (n_samples - 1) / 2, n_features)
The array of componentwise L1 cross-distances.
ij: arrays with shape (n_samples * (n_samples - 1) / 2, 2)
The indices i and j of the vectors in X associated to the cross-
        distances in D: D[k] = np.abs(X[ij[k, 0]] - X[ij[k, 1]]).
"""
X = check_array(X)
n_samples, n_features = X.shape
n_nonzero_cross_dist = n_samples * (n_samples - 1) // 2
ij = np.zeros((n_nonzero_cross_dist, 2), dtype=np.int)
D = np.zeros((n_nonzero_cross_dist, n_features))
ll_1 = 0
for k in range(n_samples - 1):
ll_0 = ll_1
ll_1 = ll_0 + n_samples - k - 1
ij[ll_0:ll_1, 0] = k
ij[ll_0:ll_1, 1] = np.arange(k + 1, n_samples)
D[ll_0:ll_1] = np.abs(X[k] - X[(k + 1):n_samples])
return D, ij
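# Illustrative sketch (assumption, not part of the original file): for
# X = [[0.], [1.], [3.]] the function returns
#     D  = [[1.], [3.], [2.]]          # componentwise |x_i - x_j|
#     ij = [[0, 1], [0, 2], [1, 2]]    # index pairs, upper-triangular order
# i.e. n_samples * (n_samples - 1) / 2 rows.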
class GaussianProcess(BaseEstimator, RegressorMixin):
"""The Gaussian Process model class.
Read more in the :ref:`User Guide <gaussian_process>`.
Parameters
----------
regr : string or callable, optional
A regression function returning an array of outputs of the linear
regression functional basis. The number of observations n_samples
should be greater than the size p of this basis.
Default assumes a simple constant regression trend.
Available built-in regression models are::
'constant', 'linear', 'quadratic'
corr : string or callable, optional
A stationary autocorrelation function returning the autocorrelation
between two points x and x'.
Default assumes a squared-exponential autocorrelation model.
Built-in correlation models are::
'absolute_exponential', 'squared_exponential',
'generalized_exponential', 'cubic', 'linear'
beta0 : double array_like, optional
The regression weight vector to perform Ordinary Kriging (OK).
Default assumes Universal Kriging (UK) so that the vector beta of
regression weights is estimated using the maximum likelihood
principle.
storage_mode : string, optional
A string specifying whether the Cholesky decomposition of the
correlation matrix should be stored in the class (storage_mode =
'full') or not (storage_mode = 'light').
Default assumes storage_mode = 'full', so that the
Cholesky decomposition of the correlation matrix is stored.
This might be a useful parameter when one is not interested in the
        MSE and only plans to estimate the BLUP, for which the correlation
matrix is not required.
verbose : boolean, optional
A boolean specifying the verbose level.
Default is verbose = False.
theta0 : double array_like, optional
An array with shape (n_features, ) or (1, ).
The parameters in the autocorrelation model.
If thetaL and thetaU are also specified, theta0 is considered as
the starting point for the maximum likelihood estimation of the
best set of parameters.
Default assumes isotropic autocorrelation model with theta0 = 1e-1.
thetaL : double array_like, optional
An array with shape matching theta0's.
Lower bound on the autocorrelation parameters for maximum
likelihood estimation.
Default is None, so that it skips maximum likelihood estimation and
it uses theta0.
thetaU : double array_like, optional
An array with shape matching theta0's.
Upper bound on the autocorrelation parameters for maximum
likelihood estimation.
Default is None, so that it skips maximum likelihood estimation and
it uses theta0.
normalize : boolean, optional
Input X and observations y are centered and reduced wrt
means and standard deviations estimated from the n_samples
observations provided.
Default is normalize = True so that data is normalized to ease
maximum likelihood estimation.
nugget : double or ndarray, optional
Introduce a nugget effect to allow smooth predictions from noisy
data. If nugget is an ndarray, it must be the same length as the
number of data points used for the fit.
The nugget is added to the diagonal of the assumed training covariance;
in this way it acts as a Tikhonov regularization in the problem. In
the special case of the squared exponential correlation function, the
nugget mathematically represents the variance of the input values.
Default assumes a nugget close to machine precision for the sake of
robustness (nugget = 10. * MACHINE_EPSILON).
optimizer : string, optional
A string specifying the optimization algorithm to be used.
Default uses 'fmin_cobyla' algorithm from scipy.optimize.
Available optimizers are::
'fmin_cobyla', 'Welch'
        'Welch' optimizer is due to Welch et al., see reference [WBSWM1992]_.
It consists in iterating over several one-dimensional optimizations
instead of running one single multi-dimensional optimization.
random_start : int, optional
The number of times the Maximum Likelihood Estimation should be
performed from a random starting point.
The first MLE always uses the specified starting point (theta0),
the next starting points are picked at random according to an
exponential distribution (log-uniform on [thetaL, thetaU]).
Default does not use random starting point (random_start = 1).
random_state: integer or numpy.RandomState, optional
The generator used to shuffle the sequence of coordinates of theta in
the Welch optimizer. If an integer is given, it fixes the seed.
Defaults to the global numpy random number generator.
Attributes
----------
theta_ : array
Specified theta OR the best set of autocorrelation parameters (the \
sought maximizer of the reduced likelihood function).
reduced_likelihood_function_value_ : array
The optimal reduced likelihood function value.
Examples
--------
>>> import numpy as np
>>> from sklearn.gaussian_process import GaussianProcess
>>> X = np.array([[1., 3., 5., 6., 7., 8.]]).T
>>> y = (X * np.sin(X)).ravel()
>>> gp = GaussianProcess(theta0=0.1, thetaL=.001, thetaU=1.)
>>> gp.fit(X, y) # doctest: +ELLIPSIS
GaussianProcess(beta0=None...
...
Notes
-----
    The present implementation is based on a translation of the DACE
Matlab toolbox, see reference [NLNS2002]_.
References
----------
.. [NLNS2002] `H.B. Nielsen, S.N. Lophaven, H. B. Nielsen and J.
Sondergaard. DACE - A MATLAB Kriging Toolbox.` (2002)
http://www2.imm.dtu.dk/~hbn/dace/dace.pdf
.. [WBSWM1992] `W.J. Welch, R.J. Buck, J. Sacks, H.P. Wynn, T.J. Mitchell,
and M.D. Morris (1992). Screening, predicting, and computer
experiments. Technometrics, 34(1) 15--25.`
http://www.jstor.org/pss/1269548
"""
_regression_types = {
'constant': regression.constant,
'linear': regression.linear,
'quadratic': regression.quadratic}
_correlation_types = {
'absolute_exponential': correlation.absolute_exponential,
'squared_exponential': correlation.squared_exponential,
'generalized_exponential': correlation.generalized_exponential,
'cubic': correlation.cubic,
'linear': correlation.linear}
_optimizer_types = [
'fmin_cobyla',
'Welch']
def __init__(self, regr='constant', corr='squared_exponential', beta0=None,
storage_mode='full', verbose=False, theta0=1e-1,
thetaL=None, thetaU=None, optimizer='fmin_cobyla',
random_start=1, normalize=True,
nugget=10. * MACHINE_EPSILON, random_state=None):
self.regr = regr
self.corr = corr
self.beta0 = beta0
self.storage_mode = storage_mode
self.verbose = verbose
self.theta0 = theta0
self.thetaL = thetaL
self.thetaU = thetaU
self.normalize = normalize
self.nugget = nugget
self.optimizer = optimizer
self.random_start = random_start
self.random_state = random_state
def fit(self, X, y):
"""
The Gaussian Process model fitting method.
Parameters
----------
X : double array_like
An array with shape (n_samples, n_features) with the input at which
observations were made.
y : double array_like
An array with shape (n_samples, ) or shape (n_samples, n_targets)
with the observations of the output to be predicted.
Returns
-------
gp : self
A fitted Gaussian Process model object awaiting data to perform
predictions.
"""
# Run input checks
self._check_params()
self.random_state = check_random_state(self.random_state)
# Force data to 2D numpy.array
X, y = check_X_y(X, y, multi_output=True, y_numeric=True)
self.y_ndim_ = y.ndim
if y.ndim == 1:
y = y[:, np.newaxis]
# Check shapes of DOE & observations
n_samples, n_features = X.shape
_, n_targets = y.shape
# Run input checks
self._check_params(n_samples)
# Normalize data or don't
if self.normalize:
X_mean = np.mean(X, axis=0)
X_std = np.std(X, axis=0)
y_mean = np.mean(y, axis=0)
y_std = np.std(y, axis=0)
X_std[X_std == 0.] = 1.
y_std[y_std == 0.] = 1.
# center and scale X if necessary
X = (X - X_mean) / X_std
y = (y - y_mean) / y_std
else:
X_mean = np.zeros(1)
X_std = np.ones(1)
y_mean = np.zeros(1)
y_std = np.ones(1)
# Calculate matrix of distances D between samples
D, ij = l1_cross_distances(X)
if (np.min(np.sum(D, axis=1)) == 0.
and self.corr != correlation.pure_nugget):
raise Exception("Multiple input features cannot have the same"
" target value.")
# Regression matrix and parameters
F = self.regr(X)
n_samples_F = F.shape[0]
if F.ndim > 1:
p = F.shape[1]
else:
p = 1
if n_samples_F != n_samples:
raise Exception("Number of rows in F and X do not match. Most "
"likely something is going wrong with the "
"regression model.")
if p > n_samples_F:
            raise Exception(("Ordinary least squares problem is underdetermined "
"n_samples=%d must be greater than the "
"regression model size p=%d.") % (n_samples, p))
if self.beta0 is not None:
if self.beta0.shape[0] != p:
raise Exception("Shapes of beta0 and F do not match.")
# Set attributes
self.X = X
self.y = y
self.D = D
self.ij = ij
self.F = F
self.X_mean, self.X_std = X_mean, X_std
self.y_mean, self.y_std = y_mean, y_std
# Determine Gaussian Process model parameters
if self.thetaL is not None and self.thetaU is not None:
# Maximum Likelihood Estimation of the parameters
if self.verbose:
print("Performing Maximum Likelihood Estimation of the "
"autocorrelation parameters...")
self.theta_, self.reduced_likelihood_function_value_, par = \
self._arg_max_reduced_likelihood_function()
if np.isinf(self.reduced_likelihood_function_value_):
raise Exception("Bad parameter region. "
"Try increasing upper bound")
else:
# Given parameters
if self.verbose:
print("Given autocorrelation parameters. "
"Computing Gaussian Process model parameters...")
self.theta_ = self.theta0
self.reduced_likelihood_function_value_, par = \
self.reduced_likelihood_function()
if np.isinf(self.reduced_likelihood_function_value_):
raise Exception("Bad point. Try increasing theta0.")
self.beta = par['beta']
self.gamma = par['gamma']
self.sigma2 = par['sigma2']
self.C = par['C']
self.Ft = par['Ft']
self.G = par['G']
if self.storage_mode == 'light':
# Delete heavy data (it will be computed again if required)
# (it is required only when MSE is wanted in self.predict)
if self.verbose:
print("Light storage mode specified. "
"Flushing autocorrelation matrix...")
self.D = None
self.ij = None
self.F = None
self.C = None
self.Ft = None
self.G = None
return self
def predict(self, X, eval_MSE=False, batch_size=None):
"""
This function evaluates the Gaussian Process model at x.
Parameters
----------
X : array_like
An array with shape (n_eval, n_features) giving the point(s) at
which the prediction(s) should be made.
eval_MSE : boolean, optional
A boolean specifying whether the Mean Squared Error should be
evaluated or not.
            Default assumes eval_MSE = False and evaluates only the BLUP (mean
prediction).
batch_size : integer, optional
An integer giving the maximum number of points that can be
evaluated simultaneously (depending on the available memory).
Default is None so that all given points are evaluated at the same
time.
Returns
-------
y : array_like, shape (n_samples, ) or (n_samples, n_targets)
An array with shape (n_eval, ) if the Gaussian Process was trained
on an array of shape (n_samples, ) or an array with shape
(n_eval, n_targets) if the Gaussian Process was trained on an array
of shape (n_samples, n_targets) with the Best Linear Unbiased
Prediction at x.
MSE : array_like, optional (if eval_MSE == True)
An array with shape (n_eval, ) or (n_eval, n_targets) as with y,
with the Mean Squared Error at x.
"""
check_is_fitted(self, "X")
# Check input shapes
X = check_array(X)
n_eval, _ = X.shape
n_samples, n_features = self.X.shape
n_samples_y, n_targets = self.y.shape
# Run input checks
self._check_params(n_samples)
if X.shape[1] != n_features:
raise ValueError(("The number of features in X (X.shape[1] = %d) "
"should match the number of features used "
"for fit() "
"which is %d.") % (X.shape[1], n_features))
if batch_size is None:
# No memory management
# (evaluates all given points in a single batch run)
# Normalize input
X = (X - self.X_mean) / self.X_std
# Initialize output
y = np.zeros(n_eval)
if eval_MSE:
MSE = np.zeros(n_eval)
# Get pairwise componentwise L1-distances to the input training set
dx = manhattan_distances(X, Y=self.X, sum_over_features=False)
# Get regression function and correlation
f = self.regr(X)
r = self.corr(self.theta_, dx).reshape(n_eval, n_samples)
# Scaled predictor
y_ = np.dot(f, self.beta) + np.dot(r, self.gamma)
# Predictor
y = (self.y_mean + self.y_std * y_).reshape(n_eval, n_targets)
if self.y_ndim_ == 1:
y = y.ravel()
# Mean Squared Error
if eval_MSE:
C = self.C
if C is None:
# Light storage mode (need to recompute C, F, Ft and G)
if self.verbose:
print("This GaussianProcess used 'light' storage mode "
"at instantiation. Need to recompute "
"autocorrelation matrix...")
reduced_likelihood_function_value, par = \
self.reduced_likelihood_function()
self.C = par['C']
self.Ft = par['Ft']
self.G = par['G']
rt = linalg.solve_triangular(self.C, r.T, lower=True)
if self.beta0 is None:
# Universal Kriging
u = linalg.solve_triangular(self.G.T,
np.dot(self.Ft.T, rt) - f.T,
lower=True)
else:
# Ordinary Kriging
u = np.zeros((n_targets, n_eval))
MSE = np.dot(self.sigma2.reshape(n_targets, 1),
(1. - (rt ** 2.).sum(axis=0)
+ (u ** 2.).sum(axis=0))[np.newaxis, :])
MSE = np.sqrt((MSE ** 2.).sum(axis=0) / n_targets)
# Mean Squared Error might be slightly negative depending on
# machine precision: force to zero!
MSE[MSE < 0.] = 0.
if self.y_ndim_ == 1:
MSE = MSE.ravel()
return y, MSE
else:
return y
else:
# Memory management
if type(batch_size) is not int or batch_size <= 0:
raise Exception("batch_size must be a positive integer")
if eval_MSE:
y, MSE = np.zeros(n_eval), np.zeros(n_eval)
                for k in range(int(np.ceil(n_eval / float(batch_size)))):
                    batch_from = k * batch_size
                    batch_to = min((k + 1) * batch_size, n_eval)
y[batch_from:batch_to], MSE[batch_from:batch_to] = \
self.predict(X[batch_from:batch_to],
eval_MSE=eval_MSE, batch_size=None)
return y, MSE
else:
y = np.zeros(n_eval)
                for k in range(int(np.ceil(n_eval / float(batch_size)))):
                    batch_from = k * batch_size
                    batch_to = min((k + 1) * batch_size, n_eval)
y[batch_from:batch_to] = \
self.predict(X[batch_from:batch_to],
eval_MSE=eval_MSE, batch_size=None)
return y
def reduced_likelihood_function(self, theta=None):
"""
This function determines the BLUP parameters and evaluates the reduced
likelihood function for the given autocorrelation parameters theta.
Maximizing this function wrt the autocorrelation parameters theta is
equivalent to maximizing the likelihood of the assumed joint Gaussian
distribution of the observations y evaluated onto the design of
experiments X.
Parameters
----------
theta : array_like, optional
An array containing the autocorrelation parameters at which the
Gaussian Process model parameters should be determined.
Default uses the built-in autocorrelation parameters
(ie ``theta = self.theta_``).
Returns
-------
reduced_likelihood_function_value : double
The value of the reduced likelihood function associated to the
given autocorrelation parameters theta.
par : dict
A dictionary containing the requested Gaussian Process model
parameters:
sigma2
Gaussian Process variance.
beta
Generalized least-squares regression weights for
Universal Kriging or given beta0 for Ordinary
Kriging.
gamma
Gaussian Process weights.
C
Cholesky decomposition of the correlation matrix [R].
Ft
Solution of the linear equation system : [R] x Ft = F
G
QR decomposition of the matrix Ft.
"""
check_is_fitted(self, "X")
if theta is None:
# Use built-in autocorrelation parameters
theta = self.theta_
# Initialize output
reduced_likelihood_function_value = - np.inf
par = {}
# Retrieve data
n_samples = self.X.shape[0]
D = self.D
ij = self.ij
F = self.F
if D is None:
# Light storage mode (need to recompute D, ij and F)
D, ij = l1_cross_distances(self.X)
if (np.min(np.sum(D, axis=1)) == 0.
and self.corr != correlation.pure_nugget):
raise Exception("Multiple X are not allowed")
F = self.regr(self.X)
# Set up R
r = self.corr(theta, D)
R = np.eye(n_samples) * (1. + self.nugget)
R[ij[:, 0], ij[:, 1]] = r
R[ij[:, 1], ij[:, 0]] = r
# Cholesky decomposition of R
try:
C = linalg.cholesky(R, lower=True)
except linalg.LinAlgError:
return reduced_likelihood_function_value, par
# Get generalized least squares solution
Ft = linalg.solve_triangular(C, F, lower=True)
        try:
            # SciPy <= 0.7 only exposed the economy-size QR factorization
            # through the (since removed) ``econ`` keyword argument.
            Q, G = linalg.qr(Ft, econ=True)
        except TypeError:
            # Newer SciPy rejects ``econ``; mode='economic' is the
            # equivalent spelling.
            Q, G = linalg.qr(Ft, mode='economic')
sv = linalg.svd(G, compute_uv=False)
rcondG = sv[-1] / sv[0]
if rcondG < 1e-10:
# Check F
sv = linalg.svd(F, compute_uv=False)
condF = sv[0] / sv[-1]
if condF > 1e15:
raise Exception("F is too ill conditioned. Poor combination "
"of regression model and observations.")
else:
# Ft is too ill conditioned, get out (try different theta)
return reduced_likelihood_function_value, par
Yt = linalg.solve_triangular(C, self.y, lower=True)
if self.beta0 is None:
# Universal Kriging
beta = linalg.solve_triangular(G, np.dot(Q.T, Yt))
else:
# Ordinary Kriging
beta = np.array(self.beta0)
rho = Yt - np.dot(Ft, beta)
sigma2 = (rho ** 2.).sum(axis=0) / n_samples
# The determinant of R is equal to the squared product of the diagonal
# elements of its Cholesky decomposition C
detR = (np.diag(C) ** (2. / n_samples)).prod()
# Compute/Organize output
reduced_likelihood_function_value = - sigma2.sum() * detR
par['sigma2'] = sigma2 * self.y_std ** 2.
par['beta'] = beta
par['gamma'] = linalg.solve_triangular(C.T, rho)
par['C'] = C
par['Ft'] = Ft
par['G'] = G
return reduced_likelihood_function_value, par
def _arg_max_reduced_likelihood_function(self):
"""
This function estimates the autocorrelation parameters theta as the
maximizer of the reduced likelihood function.
(Minimization of the opposite reduced likelihood function is used for
convenience)
Parameters
----------
self : All parameters are stored in the Gaussian Process model object.
Returns
-------
optimal_theta : array_like
The best set of autocorrelation parameters (the sought maximizer of
the reduced likelihood function).
optimal_reduced_likelihood_function_value : double
The optimal reduced likelihood function value.
optimal_par : dict
The BLUP parameters associated to thetaOpt.
"""
# Initialize output
best_optimal_theta = []
best_optimal_rlf_value = []
best_optimal_par = []
if self.verbose:
print("The chosen optimizer is: " + str(self.optimizer))
if self.random_start > 1:
print(str(self.random_start) + " random starts are required.")
percent_completed = 0.
# Force optimizer to fmin_cobyla if the model is meant to be isotropic
if self.optimizer == 'Welch' and self.theta0.size == 1:
self.optimizer = 'fmin_cobyla'
if self.optimizer == 'fmin_cobyla':
def minus_reduced_likelihood_function(log10t):
return - self.reduced_likelihood_function(
theta=10. ** log10t)[0]
constraints = []
for i in range(self.theta0.size):
constraints.append(lambda log10t, i=i:
log10t[i] - np.log10(self.thetaL[0, i]))
constraints.append(lambda log10t, i=i:
np.log10(self.thetaU[0, i]) - log10t[i])
for k in range(self.random_start):
if k == 0:
# Use specified starting point as first guess
theta0 = self.theta0
else:
# Generate a random starting point log10-uniformly
# distributed between bounds
log10theta0 = (np.log10(self.thetaL)
+ self.random_state.rand(*self.theta0.shape)
* np.log10(self.thetaU / self.thetaL))
theta0 = 10. ** log10theta0
# Run Cobyla
try:
log10_optimal_theta = \
optimize.fmin_cobyla(minus_reduced_likelihood_function,
np.log10(theta0).ravel(), constraints,
iprint=0)
except ValueError as ve:
print("Optimization failed. Try increasing the ``nugget``")
raise ve
optimal_theta = 10. ** log10_optimal_theta
optimal_rlf_value, optimal_par = \
self.reduced_likelihood_function(theta=optimal_theta)
# Compare the new optimizer to the best previous one
if k > 0:
if optimal_rlf_value > best_optimal_rlf_value:
best_optimal_rlf_value = optimal_rlf_value
best_optimal_par = optimal_par
best_optimal_theta = optimal_theta
else:
best_optimal_rlf_value = optimal_rlf_value
best_optimal_par = optimal_par
best_optimal_theta = optimal_theta
if self.verbose and self.random_start > 1:
if (20 * k) / self.random_start > percent_completed:
percent_completed = (20 * k) / self.random_start
print("%s completed" % (5 * percent_completed))
optimal_rlf_value = best_optimal_rlf_value
optimal_par = best_optimal_par
optimal_theta = best_optimal_theta
elif self.optimizer == 'Welch':
            # Backup of the given attributes
theta0, thetaL, thetaU = self.theta0, self.thetaL, self.thetaU
corr = self.corr
verbose = self.verbose
# This will iterate over fmin_cobyla optimizer
self.optimizer = 'fmin_cobyla'
self.verbose = False
# Initialize under isotropy assumption
if verbose:
print("Initialize under isotropy assumption...")
self.theta0 = check_array(self.theta0.min())
self.thetaL = check_array(self.thetaL.min())
self.thetaU = check_array(self.thetaU.max())
theta_iso, optimal_rlf_value_iso, par_iso = \
self._arg_max_reduced_likelihood_function()
optimal_theta = theta_iso + np.zeros(theta0.shape)
# Iterate over all dimensions of theta allowing for anisotropy
if verbose:
print("Now improving allowing for anisotropy...")
for i in self.random_state.permutation(theta0.size):
if verbose:
print("Proceeding along dimension %d..." % (i + 1))
self.theta0 = check_array(theta_iso)
self.thetaL = check_array(thetaL[0, i])
self.thetaU = check_array(thetaU[0, i])
def corr_cut(t, d):
return corr(check_array(np.hstack([optimal_theta[0][0:i],
t[0],
optimal_theta[0][(i +
1)::]])),
d)
self.corr = corr_cut
optimal_theta[0, i], optimal_rlf_value, optimal_par = \
self._arg_max_reduced_likelihood_function()
            # Restore the given attributes
self.theta0, self.thetaL, self.thetaU = theta0, thetaL, thetaU
self.corr = corr
self.optimizer = 'Welch'
self.verbose = verbose
else:
raise NotImplementedError("This optimizer ('%s') is not "
"implemented yet. Please contribute!"
% self.optimizer)
return optimal_theta, optimal_rlf_value, optimal_par
def _check_params(self, n_samples=None):
# Check regression model
if not callable(self.regr):
if self.regr in self._regression_types:
self.regr = self._regression_types[self.regr]
else:
raise ValueError("regr should be one of %s or callable, "
"%s was given."
% (self._regression_types.keys(), self.regr))
# Check regression weights if given (Ordinary Kriging)
if self.beta0 is not None:
self.beta0 = np.atleast_2d(self.beta0)
if self.beta0.shape[1] != 1:
# Force to column vector
self.beta0 = self.beta0.T
# Check correlation model
if not callable(self.corr):
if self.corr in self._correlation_types:
self.corr = self._correlation_types[self.corr]
else:
raise ValueError("corr should be one of %s or callable, "
"%s was given."
% (self._correlation_types.keys(), self.corr))
# Check storage mode
if self.storage_mode != 'full' and self.storage_mode != 'light':
raise ValueError("Storage mode should either be 'full' or "
"'light', %s was given." % self.storage_mode)
# Check correlation parameters
self.theta0 = np.atleast_2d(self.theta0)
lth = self.theta0.size
if self.thetaL is not None and self.thetaU is not None:
self.thetaL = np.atleast_2d(self.thetaL)
self.thetaU = np.atleast_2d(self.thetaU)
if self.thetaL.size != lth or self.thetaU.size != lth:
raise ValueError("theta0, thetaL and thetaU must have the "
"same length.")
if np.any(self.thetaL <= 0) or np.any(self.thetaU < self.thetaL):
raise ValueError("The bounds must satisfy O < thetaL <= "
"thetaU.")
elif self.thetaL is None and self.thetaU is None:
if np.any(self.theta0 <= 0):
raise ValueError("theta0 must be strictly positive.")
elif self.thetaL is None or self.thetaU is None:
raise ValueError("thetaL and thetaU should either be both or "
"neither specified.")
# Force verbose type to bool
self.verbose = bool(self.verbose)
# Force normalize type to bool
self.normalize = bool(self.normalize)
# Check nugget value
self.nugget = np.asarray(self.nugget)
        if np.any(self.nugget < 0.):
raise ValueError("nugget must be positive or zero.")
if (n_samples is not None
and self.nugget.shape not in [(), (n_samples,)]):
raise ValueError("nugget must be either a scalar "
"or array of length n_samples.")
# Check optimizer
if self.optimizer not in self._optimizer_types:
raise ValueError("optimizer should be one of %s"
% self._optimizer_types)
# Force random_start type to int
self.random_start = int(self.random_start)
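# --- Hedged usage sketch (not part of the original source) ------------------
# A minimal example of how the estimator defined above is typically driven:
# fit() normalizes the data and estimates theta_ by maximizing the reduced
# likelihood, and predict(..., eval_MSE=True) returns the BLUP mean together
# with its mean squared error. The class name ``GaussianProcess`` is assumed
# from context (this looks like the legacy scikit-learn implementation); the
# data and hyper-parameter values below are illustrative only.
def _example_gp_fit_predict():
    rng = np.random.RandomState(0)
    X = np.atleast_2d(np.linspace(0., 10., 20)).T
    y = np.sin(X).ravel() + 0.1 * rng.randn(X.shape[0])
    gp = GaussianProcess(theta0=1e-1, thetaL=1e-3, thetaU=1., nugget=1e-8)
    gp.fit(X, y)
    x_eval = np.atleast_2d(np.linspace(0., 10., 100)).T
    y_pred, mse = gp.predict(x_eval, eval_MSE=True)
    return y_pred, np.sqrt(mse)  # prediction and its pointwise uncertainty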
|
bsd-3-clause
|
danic96/Practica1
|
Practica1/Aplicacio/views.py
|
1
|
4321
|
# from django.shortcuts import render
# Create your views here.
# from django.http import HttpResponse
from django.utils.decorators import method_decorator
from django.contrib.auth.decorators import login_required
from django.views.generic.edit import CreateView, UpdateView
from django.views.generic import DetailView, DeleteView
from django.core.exceptions import PermissionDenied
from rest_framework import generics
from models import Movie, Character, Team, Power, Location
from forms import MovieForm, CharacterForm, TeamForm, PowerForm, LocationForm
from Practica1.serializers import MovieSerializer
# Security Mixins
class LoginRequiredMixin(object):
@method_decorator(login_required())
def dispatch(self, *args, **kwargs):
return super(LoginRequiredMixin, self).dispatch(*args, **kwargs)
class CheckIsOwnerMixin(object):
def get_object(self, *args, **kwargs):
obj = super(CheckIsOwnerMixin, self).get_object(*args, **kwargs)
if not obj.user == self.request.user:
raise PermissionDenied
return obj
class LoginRequiredCheckIsOwnerUpdateView(LoginRequiredMixin, CheckIsOwnerMixin, UpdateView):
template_name = 'Aplicacio/form.html'
class MovieCreate(LoginRequiredMixin, CreateView):
model = Movie
template_name = 'Aplicacio/form.html'
form_class = MovieForm
def form_valid(self, form):
form.instance.user = self.request.user
return super(MovieCreate, self).form_valid(form)
class CharacterCreate(LoginRequiredMixin, CreateView):
model = Character
template_name = 'Aplicacio/form.html'
form_class = CharacterForm
def form_valid(self, form):
form.instance.user = self.request.user
return super(CharacterCreate, self).form_valid(form)
class TeamCreate(LoginRequiredMixin, CreateView):
model = Team
template_name = 'Aplicacio/form.html'
form_class = TeamForm
def form_valid(self, form):
form.instance.user = self.request.user
return super(TeamCreate, self).form_valid(form)
class PowerCreate(LoginRequiredMixin, CreateView):
model = Power
template_name = 'Aplicacio/form.html'
form_class = PowerForm
def form_valid(self, form):
form.instance.user = self.request.user
return super(PowerCreate, self).form_valid(form)
class LocationCreate(LoginRequiredMixin, CreateView):
model = Location
template_name = 'Aplicacio/form.html'
form_class = LocationForm
def form_valid(self, form):
form.instance.user = self.request.user
return super(LocationCreate, self).form_valid(form)
"""
class LocationDelete(LoginRequiredMixin, CreateView):
model = Location
template_name = 'Aplicacio/form.html'
form_class = LocationForm
def form_valid(self, form):
form.instance.user = self.request.user
return super(LocationDelete, self).form_valid(form)
"""
"""
class Delete(DeleteView):
model = Location
success_url = reverse_lazy('all_locations') # This is where this view will
# redirect the user
template_name = 'Aplicacio/delete_location.html'
"""
class MovieDetail(DetailView):
model = Movie
template_name = 'Aplicacio/movie_detail.html'
"""
def get_context_data(self, **kwargs):
context = super(MovieDetail, self).get_context_data(**kwargs)
context['RATING_CHOICES'] = RestaurantReview.RATING_CHOICES
return context
"""
class CharacterDetail(DetailView):
model = Character
template_name = 'Aplicacio/character_detail.html'
class TeamDetail(DetailView):
model = Team
template_name = 'Aplicacio/team_detail.html'
class PowerDetail(DetailView):
model = Power
template_name = 'Aplicacio/power_detail.html'
class LocationDetail(DetailView):
model = Location
template_name = 'Aplicacio/location_detail.html'
def form_valid(self, form):
form.instance.user = self.request.user
        return super(LocationDetail, self).form_valid(form)
### RESTful API views ###
class APIMovieList(generics.ListCreateAPIView):
model = Movie
queryset = Movie.objects.all()
serializer_class = MovieSerializer
class APIMovieDetail(generics.RetrieveUpdateDestroyAPIView):
model = Movie
queryset = Movie.objects.all()
serializer_class = MovieSerializer
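# --- Hedged wiring sketch (not part of the original source) -----------------
# One plausible way to route the class-based and DRF views above from a
# urls.py. The URL patterns and names are assumptions made for illustration;
# the project's actual routes may differ. ``django.conf.urls.url`` is the
# pre-Django-4.0 API this code base appears to target.
from django.conf.urls import url
example_urlpatterns = [
    url(r'^movies/create/$', MovieCreate.as_view(), name='movie_create'),
    url(r'^movies/(?P<pk>\d+)/$', MovieDetail.as_view(), name='movie_detail'),
    url(r'^api/movies/$', APIMovieList.as_view(), name='api_movie_list'),
    url(r'^api/movies/(?P<pk>\d+)/$', APIMovieDetail.as_view(),
        name='api_movie_detail'),
]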
|
mit
|
cristiana214/cristianachavez214-cristianachavez
|
python/src/Lib/test/test_difflib.py
|
74
|
5281
|
import difflib
from test.test_support import run_unittest, findfile
import unittest
import doctest
import sys
class TestSFbugs(unittest.TestCase):
def test_ratio_for_null_seqn(self):
# Check clearing of SF bug 763023
s = difflib.SequenceMatcher(None, [], [])
self.assertEqual(s.ratio(), 1)
self.assertEqual(s.quick_ratio(), 1)
self.assertEqual(s.real_quick_ratio(), 1)
def test_comparing_empty_lists(self):
# Check fix for bug #979794
group_gen = difflib.SequenceMatcher(None, [], []).get_grouped_opcodes()
self.assertRaises(StopIteration, group_gen.next)
diff_gen = difflib.unified_diff([], [])
self.assertRaises(StopIteration, diff_gen.next)
patch914575_from1 = """
1. Beautiful is beTTer than ugly.
2. Explicit is better than implicit.
3. Simple is better than complex.
4. Complex is better than complicated.
"""
patch914575_to1 = """
1. Beautiful is better than ugly.
3. Simple is better than complex.
4. Complicated is better than complex.
5. Flat is better than nested.
"""
patch914575_from2 = """
\t\tLine 1: preceeded by from:[tt] to:[ssss]
\t\tLine 2: preceeded by from:[sstt] to:[sssst]
\t \tLine 3: preceeded by from:[sstst] to:[ssssss]
Line 4: \thas from:[sst] to:[sss] after :
Line 5: has from:[t] to:[ss] at end\t
"""
patch914575_to2 = """
Line 1: preceeded by from:[tt] to:[ssss]
\tLine 2: preceeded by from:[sstt] to:[sssst]
Line 3: preceeded by from:[sstst] to:[ssssss]
Line 4: has from:[sst] to:[sss] after :
Line 5: has from:[t] to:[ss] at end
"""
patch914575_from3 = """line 0
1234567890123456789012345689012345
line 1
line 2
line 3
line 4 changed
line 5 changed
line 6 changed
line 7
line 8 subtracted
line 9
1234567890123456789012345689012345
short line
just fits in!!
just fits in two lines yup!!
the end"""
patch914575_to3 = """line 0
1234567890123456789012345689012345
line 1
line 2 added
line 3
line 4 chanGEd
line 5a chanGed
line 6a changEd
line 7
line 8
line 9
1234567890
another long line that needs to be wrapped
just fitS in!!
just fits in two lineS yup!!
the end"""
class TestSFpatches(unittest.TestCase):
def test_html_diff(self):
# Check SF patch 914575 for generating HTML differences
f1a = ((patch914575_from1 + '123\n'*10)*3)
t1a = (patch914575_to1 + '123\n'*10)*3
f1b = '456\n'*10 + f1a
t1b = '456\n'*10 + t1a
f1a = f1a.splitlines()
t1a = t1a.splitlines()
f1b = f1b.splitlines()
t1b = t1b.splitlines()
f2 = patch914575_from2.splitlines()
t2 = patch914575_to2.splitlines()
f3 = patch914575_from3
t3 = patch914575_to3
i = difflib.HtmlDiff()
j = difflib.HtmlDiff(tabsize=2)
k = difflib.HtmlDiff(wrapcolumn=14)
full = i.make_file(f1a,t1a,'from','to',context=False,numlines=5)
tables = '\n'.join(
[
'<h2>Context (first diff within numlines=5(default))</h2>',
i.make_table(f1a,t1a,'from','to',context=True),
'<h2>Context (first diff after numlines=5(default))</h2>',
i.make_table(f1b,t1b,'from','to',context=True),
'<h2>Context (numlines=6)</h2>',
i.make_table(f1a,t1a,'from','to',context=True,numlines=6),
'<h2>Context (numlines=0)</h2>',
i.make_table(f1a,t1a,'from','to',context=True,numlines=0),
'<h2>Same Context</h2>',
i.make_table(f1a,f1a,'from','to',context=True),
'<h2>Same Full</h2>',
i.make_table(f1a,f1a,'from','to',context=False),
'<h2>Empty Context</h2>',
i.make_table([],[],'from','to',context=True),
'<h2>Empty Full</h2>',
i.make_table([],[],'from','to',context=False),
'<h2>tabsize=2</h2>',
j.make_table(f2,t2),
'<h2>tabsize=default</h2>',
i.make_table(f2,t2),
'<h2>Context (wrapcolumn=14,numlines=0)</h2>',
k.make_table(f3.splitlines(),t3.splitlines(),context=True,numlines=0),
'<h2>wrapcolumn=14,splitlines()</h2>',
k.make_table(f3.splitlines(),t3.splitlines()),
'<h2>wrapcolumn=14,splitlines(True)</h2>',
k.make_table(f3.splitlines(True),t3.splitlines(True)),
])
actual = full.replace('</body>','\n%s\n</body>' % tables)
# temporarily uncomment next three lines to baseline this test
#f = open('test_difflib_expect.html','w')
#f.write(actual)
#f.close()
expect = open(findfile('test_difflib_expect.html')).read()
self.assertEqual(actual,expect)
def test_recursion_limit(self):
# Check if the problem described in patch #1413711 exists.
limit = sys.getrecursionlimit()
old = [(i%2 and "K:%d" or "V:A:%d") % i for i in range(limit*2)]
new = [(i%2 and "K:%d" or "V:B:%d") % i for i in range(limit*2)]
difflib.SequenceMatcher(None, old, new).get_opcodes()
def test_main():
difflib.HtmlDiff._default_prefix = 0
Doctests = doctest.DocTestSuite(difflib)
run_unittest(TestSFpatches, TestSFbugs, Doctests)
if __name__ == '__main__':
test_main()
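# --- Hedged usage sketch (not part of the original source) ------------------
# A minimal, self-contained illustration of the difflib APIs exercised by the
# tests above; the inputs and file labels are made up for the example.
def _example_html_diff():
    before = ['one\n', 'two\n', 'three\n']
    after = ['one\n', 'too\n', 'three\n', 'four\n']
    table = difflib.HtmlDiff(wrapcolumn=40).make_table(
        before, after, 'before.txt', 'after.txt', context=True, numlines=2)
    ratio = difflib.SequenceMatcher(None, ''.join(before), ''.join(after)).ratio()
    return table, ratio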
|
apache-2.0
|
EdgarSun/Django-Demo
|
django/conf/locale/ko/formats.py
|
313
|
2016
|
# -*- encoding: utf-8 -*-
# This file is distributed under the same license as the Django package.
#
# The *_FORMAT strings use the Django date format syntax,
# see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATE_FORMAT = 'Y년 n월 j일'
TIME_FORMAT = 'A g:i:s'
DATETIME_FORMAT = 'Y년 n월 j일 g:i:s A'
YEAR_MONTH_FORMAT = 'Y년 F월'
MONTH_DAY_FORMAT = 'F월 j일'
SHORT_DATE_FORMAT = 'Y-n-j.'
SHORT_DATETIME_FORMAT = 'Y-n-j H:i'
# FIRST_DAY_OF_WEEK =
# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see http://docs.python.org/library/datetime.html#strftime-strptime-behavior
DATE_INPUT_FORMATS = (
'%Y-%m-%d', '%m/%d/%Y', '%m/%d/%y', # '2006-10-25', '10/25/2006', '10/25/06'
# '%b %d %Y', '%b %d, %Y', # 'Oct 25 2006', 'Oct 25, 2006'
# '%d %b %Y', '%d %b, %Y', # '25 Oct 2006', '25 Oct, 2006'
# '%B %d %Y', '%B %d, %Y', # 'October 25 2006', 'October 25, 2006'
# '%d %B %Y', '%d %B, %Y', # '25 October 2006', '25 October, 2006'
'%Y년 %m월 %d일', # '2006년 10월 25일', with localized suffix.
)
TIME_INPUT_FORMATS = (
'%H:%M:%S', # '14:30:59'
'%H:%M', # '14:30'
'%H시 %M분 %S초', # '14시 30분 59초'
'%H시 %M분', # '14시 30분'
)
DATETIME_INPUT_FORMATS = (
'%Y-%m-%d %H:%M:%S', # '2006-10-25 14:30:59'
'%Y-%m-%d %H:%M', # '2006-10-25 14:30'
'%Y-%m-%d', # '2006-10-25'
'%m/%d/%Y %H:%M:%S', # '10/25/2006 14:30:59'
'%m/%d/%Y %H:%M', # '10/25/2006 14:30'
'%m/%d/%Y', # '10/25/2006'
'%m/%d/%y %H:%M:%S', # '10/25/06 14:30:59'
'%m/%d/%y %H:%M', # '10/25/06 14:30'
'%m/%d/%y', # '10/25/06'
'%Y년 %m월 %d일 %H시 %M분 %S초', # '2006년 10월 25일 14시 30분 59초'
'%Y년 %m월 %d일 %H시 %M분', # '2006년 10월 25일 14시 30분'
)
DECIMAL_SEPARATOR = '.'
THOUSAND_SEPARATOR = ','
NUMBER_GROUPING = 3
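# --- Hedged usage sketch (not part of the original source) ------------------
# How these constants are typically consumed: with localization enabled and
# Korean active, Django resolves the names above through django.utils.formats.
# The call below assumes a configured settings module and is illustrative
# only; it is not part of this locale definition file.
def _example_format_date():
    import datetime
    from django.utils import formats, translation
    with translation.override('ko'):
        # Renders 2006-10-25 using DATE_FORMAT defined above ('Y년 n월 j일').
        return formats.date_format(datetime.date(2006, 10, 25), 'DATE_FORMAT')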
|
mit
|
android-ia/platform_external_chromium_org
|
media/tools/layout_tests/trend_graph.py
|
174
|
3309
|
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""A module for manipulating trend graph with analyzer result history."""
import os
import layouttest_analyzer_helpers
DEFAULT_TREND_GRAPH_PATH = os.path.join('graph', 'graph.html')
# The following is necessary to decide the point to insert.
LINE_INSERT_POINT_FOR_NUMBERS = r'// insert 1'
LINE_INSERT_POINT_FOR_PASSING_RATE = r'// insert 2'
class TrendGraph(object):
"""A class to manage trend graph which is using Google Visualization APIs.
Google Visualization API (http://code.google.com/apis/chart/interactive/docs/
gallery/annotatedtimeline.html) is used to present the historical analyzer
result. Currently, data is directly written to JavaScript file using file
in-place replacement for simplicity.
TODO(imasaki): use GoogleSpreadsheet to store the analyzer result.
"""
def __init__(self, location=DEFAULT_TREND_GRAPH_PATH):
"""Initialize this object with the location of trend graph."""
self._location = location
def Update(self, datetime_string, data_map):
"""Update trend graphs using |datetime_string| and |data_map|.
There are two kinds of graphs to be updated (one is for numbers and the
other is for passing rates).
Args:
datetime_string: a datetime string delimited by ','
          (e.g., '2008,1,1,13,45,00'). For example, in the case of the year
2008, this ranges from '2008,1,1,0,0,00' to '2008,12,31,23,59,99'.
data_map: a dictionary containing 'whole', 'skip' , 'nonskip',
'passingrate' as its keys and (number, tile, text) string tuples
as values for graph annotation.
"""
joined_str = ''
# For a date format in GViz, month is shifted (e.g., '2008,2,1' means
# March 1, 2008). So, the input parameter |datetime_string| (before this
# conversion) must be shifted in order to show the date properly on GViz.
# After the below conversion, for example, in the case of the year 2008,
# |datetime_string| ranges from '2008,0,1,0,0,00' to '2008,11,31,23,59,99'.
str_list = datetime_string.split(',')
str_list[1] = str(int(str_list[1])-1) # Month
datetime_string = ','.join(str_list)
for key in ['whole', 'skip', 'nonskip']:
joined_str += str(len(data_map[key][0])) + ','
joined_str += ','.join(data_map[key][1:]) + ','
new_line_for_numbers = ' [new Date(%s),%s],\n' % (datetime_string,
joined_str)
new_line_for_numbers += ' %s\n' % (
LINE_INSERT_POINT_FOR_NUMBERS)
layouttest_analyzer_helpers.ReplaceLineInFile(
self._location, LINE_INSERT_POINT_FOR_NUMBERS,
new_line_for_numbers)
joined_str = '%s,%s,%s' % (
str(data_map['passingrate'][0]), data_map['nonskip'][1],
data_map['nonskip'][2])
new_line_for_passingrate = ' [new Date(%s),%s],\n' % (
datetime_string, joined_str)
new_line_for_passingrate += ' %s\n' % (
LINE_INSERT_POINT_FOR_PASSING_RATE)
layouttest_analyzer_helpers.ReplaceLineInFile(
self._location, LINE_INSERT_POINT_FOR_PASSING_RATE,
new_line_for_passingrate)
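# --- Hedged usage sketch (not part of the original source) ------------------
# Illustrates the expected shape of the Update() arguments, inferred from the
# docstring above: 'whole', 'skip' and 'nonskip' map to (test_list, title,
# text) tuples and 'passingrate' carries the rate as its first element. The
# graph file and helper module must exist for this to actually run; the
# values below are made up.
def _example_update_trend_graph():
  graph = TrendGraph('graph/graph.html')  # assumed location of the template
  data_map = {
      'whole': (['test1.html', 'test2.html'], 'all tests', 'details'),
      'skip': (['test2.html'], 'skipped tests', 'details'),
      'nonskip': (['test1.html'], 'analyzed tests', 'details'),
      'passingrate': ('97', '', ''),
  }
  graph.Update('2012,5,15,13,45,00', data_map)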
|
bsd-3-clause
|
rbalda/neural_ocr
|
env/lib/python2.7/site-packages/numpy/lib/npyio.py
|
42
|
71218
|
from __future__ import division, absolute_import, print_function
import sys
import os
import re
import itertools
import warnings
import weakref
from operator import itemgetter
import numpy as np
from . import format
from ._datasource import DataSource
from numpy.core.multiarray import packbits, unpackbits
from ._iotools import (
LineSplitter, NameValidator, StringConverter, ConverterError,
ConverterLockError, ConversionWarning, _is_string_like, has_nested_fields,
flatten_dtype, easy_dtype, _bytes_to_name
)
from numpy.compat import (
asbytes, asstr, asbytes_nested, bytes, basestring, unicode
)
if sys.version_info[0] >= 3:
import pickle
else:
import cPickle as pickle
from future_builtins import map
loads = pickle.loads
__all__ = [
'savetxt', 'loadtxt', 'genfromtxt', 'ndfromtxt', 'mafromtxt',
'recfromtxt', 'recfromcsv', 'load', 'loads', 'save', 'savez',
'savez_compressed', 'packbits', 'unpackbits', 'fromregex', 'DataSource'
]
class BagObj(object):
"""
BagObj(obj)
Convert attribute look-ups to getitems on the object passed in.
Parameters
----------
obj : class instance
Object on which attribute look-up is performed.
Examples
--------
>>> from numpy.lib.npyio import BagObj as BO
>>> class BagDemo(object):
... def __getitem__(self, key): # An instance of BagObj(BagDemo)
... # will call this method when any
... # attribute look-up is required
... result = "Doesn't matter what you want, "
... return result + "you're gonna get this"
...
>>> demo_obj = BagDemo()
>>> bagobj = BO(demo_obj)
>>> bagobj.hello_there
"Doesn't matter what you want, you're gonna get this"
>>> bagobj.I_can_be_anything
"Doesn't matter what you want, you're gonna get this"
"""
def __init__(self, obj):
# Use weakref to make NpzFile objects collectable by refcount
self._obj = weakref.proxy(obj)
def __getattribute__(self, key):
try:
return object.__getattribute__(self, '_obj')[key]
except KeyError:
raise AttributeError(key)
def __dir__(self):
"""
Enables dir(bagobj) to list the files in an NpzFile.
This also enables tab-completion in an interpreter or IPython.
"""
return object.__getattribute__(self, '_obj').keys()
def zipfile_factory(*args, **kwargs):
import zipfile
kwargs['allowZip64'] = True
return zipfile.ZipFile(*args, **kwargs)
class NpzFile(object):
"""
NpzFile(fid)
A dictionary-like object with lazy-loading of files in the zipped
archive provided on construction.
`NpzFile` is used to load files in the NumPy ``.npz`` data archive
format. It assumes that files in the archive have a ``.npy`` extension,
other files are ignored.
The arrays and file strings are lazily loaded on either
getitem access using ``obj['key']`` or attribute lookup using
``obj.f.key``. A list of all files (without ``.npy`` extensions) can
be obtained with ``obj.files`` and the ZipFile object itself using
``obj.zip``.
Attributes
----------
files : list of str
List of all files in the archive with a ``.npy`` extension.
zip : ZipFile instance
The ZipFile object initialized with the zipped archive.
f : BagObj instance
An object on which attribute can be performed as an alternative
to getitem access on the `NpzFile` instance itself.
allow_pickle : bool, optional
Allow loading pickled data. Default: True
pickle_kwargs : dict, optional
Additional keyword arguments to pass on to pickle.load.
These are only useful when loading object arrays saved on
Python 2 when using Python 3.
Parameters
----------
fid : file or str
The zipped archive to open. This is either a file-like object
or a string containing the path to the archive.
own_fid : bool, optional
Whether NpzFile should close the file handle.
Requires that `fid` is a file-like object.
Examples
--------
>>> from tempfile import TemporaryFile
>>> outfile = TemporaryFile()
>>> x = np.arange(10)
>>> y = np.sin(x)
>>> np.savez(outfile, x=x, y=y)
>>> outfile.seek(0)
>>> npz = np.load(outfile)
>>> isinstance(npz, np.lib.io.NpzFile)
True
>>> npz.files
['y', 'x']
>>> npz['x'] # getitem access
array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
>>> npz.f.x # attribute lookup
array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
"""
def __init__(self, fid, own_fid=False, allow_pickle=True,
pickle_kwargs=None):
# Import is postponed to here since zipfile depends on gzip, an
# optional component of the so-called standard library.
_zip = zipfile_factory(fid)
self._files = _zip.namelist()
self.files = []
self.allow_pickle = allow_pickle
self.pickle_kwargs = pickle_kwargs
for x in self._files:
if x.endswith('.npy'):
self.files.append(x[:-4])
else:
self.files.append(x)
self.zip = _zip
self.f = BagObj(self)
if own_fid:
self.fid = fid
else:
self.fid = None
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
self.close()
def close(self):
"""
Close the file.
"""
if self.zip is not None:
self.zip.close()
self.zip = None
if self.fid is not None:
self.fid.close()
self.fid = None
self.f = None # break reference cycle
def __del__(self):
self.close()
def __getitem__(self, key):
# FIXME: This seems like it will copy strings around
# more than is strictly necessary. The zipfile
# will read the string and then
# the format.read_array will copy the string
# to another place in memory.
# It would be better if the zipfile could read
# (or at least uncompress) the data
# directly into the array memory.
member = 0
if key in self._files:
member = 1
elif key in self.files:
member = 1
key += '.npy'
if member:
bytes = self.zip.open(key)
magic = bytes.read(len(format.MAGIC_PREFIX))
bytes.close()
if magic == format.MAGIC_PREFIX:
bytes = self.zip.open(key)
return format.read_array(bytes,
allow_pickle=self.allow_pickle,
pickle_kwargs=self.pickle_kwargs)
else:
return self.zip.read(key)
else:
raise KeyError("%s is not a file in the archive" % key)
def __iter__(self):
return iter(self.files)
def items(self):
"""
Return a list of tuples, with each tuple (filename, array in file).
"""
return [(f, self[f]) for f in self.files]
def iteritems(self):
"""Generator that returns tuples (filename, array in file)."""
for f in self.files:
yield (f, self[f])
def keys(self):
"""Return files in the archive with a ``.npy`` extension."""
return self.files
def iterkeys(self):
"""Return an iterator over the files in the archive."""
return self.__iter__()
def __contains__(self, key):
return self.files.__contains__(key)
def load(file, mmap_mode=None, allow_pickle=True, fix_imports=True,
encoding='ASCII'):
"""
Load arrays or pickled objects from ``.npy``, ``.npz`` or pickled files.
Parameters
----------
file : file-like object or string
The file to read. File-like objects must support the
``seek()`` and ``read()`` methods. Pickled files require that the
file-like object support the ``readline()`` method as well.
mmap_mode : {None, 'r+', 'r', 'w+', 'c'}, optional
If not None, then memory-map the file, using the given mode (see
`numpy.memmap` for a detailed description of the modes). A
memory-mapped array is kept on disk. However, it can be accessed
and sliced like any ndarray. Memory mapping is especially useful
for accessing small fragments of large files without reading the
entire file into memory.
allow_pickle : bool, optional
Allow loading pickled object arrays stored in npy files. Reasons for
disallowing pickles include security, as loading pickled data can
execute arbitrary code. If pickles are disallowed, loading object
arrays will fail.
Default: True
fix_imports : bool, optional
Only useful when loading Python 2 generated pickled files on Python 3,
which includes npy/npz files containing object arrays. If `fix_imports`
is True, pickle will try to map the old Python 2 names to the new names
used in Python 3.
encoding : str, optional
What encoding to use when reading Python 2 strings. Only useful when
loading Python 2 generated pickled files on Python 3, which includes
npy/npz files containing object arrays. Values other than 'latin1',
'ASCII', and 'bytes' are not allowed, as they can corrupt numerical
data. Default: 'ASCII'
Returns
-------
result : array, tuple, dict, etc.
Data stored in the file. For ``.npz`` files, the returned instance
of NpzFile class must be closed to avoid leaking file descriptors.
Raises
------
IOError
If the input file does not exist or cannot be read.
ValueError
The file contains an object array, but allow_pickle=False given.
See Also
--------
save, savez, savez_compressed, loadtxt
memmap : Create a memory-map to an array stored in a file on disk.
Notes
-----
- If the file contains pickle data, then whatever object is stored
in the pickle is returned.
- If the file is a ``.npy`` file, then a single array is returned.
- If the file is a ``.npz`` file, then a dictionary-like object is
returned, containing ``{filename: array}`` key-value pairs, one for
each file in the archive.
- If the file is a ``.npz`` file, the returned value supports the
context manager protocol in a similar fashion to the open function::
with load('foo.npz') as data:
a = data['a']
The underlying file descriptor is closed when exiting the 'with'
block.
Examples
--------
Store data to disk, and load it again:
>>> np.save('/tmp/123', np.array([[1, 2, 3], [4, 5, 6]]))
>>> np.load('/tmp/123.npy')
array([[1, 2, 3],
[4, 5, 6]])
Store compressed data to disk, and load it again:
>>> a=np.array([[1, 2, 3], [4, 5, 6]])
>>> b=np.array([1, 2])
>>> np.savez('/tmp/123.npz', a=a, b=b)
>>> data = np.load('/tmp/123.npz')
>>> data['a']
array([[1, 2, 3],
[4, 5, 6]])
>>> data['b']
array([1, 2])
>>> data.close()
Mem-map the stored array, and then access the second row
directly from disk:
>>> X = np.load('/tmp/123.npy', mmap_mode='r')
>>> X[1, :]
memmap([4, 5, 6])
"""
import gzip
own_fid = False
if isinstance(file, basestring):
fid = open(file, "rb")
own_fid = True
else:
fid = file
if encoding not in ('ASCII', 'latin1', 'bytes'):
# The 'encoding' value for pickle also affects what encoding
# the serialized binary data of Numpy arrays is loaded
# in. Pickle does not pass on the encoding information to
# Numpy. The unpickling code in numpy.core.multiarray is
# written to assume that unicode data appearing where binary
# should be is in 'latin1'. 'bytes' is also safe, as is 'ASCII'.
#
# Other encoding values can corrupt binary data, and we
# purposefully disallow them. For the same reason, the errors=
# argument is not exposed, as values other than 'strict'
# result can similarly silently corrupt numerical data.
raise ValueError("encoding must be 'ASCII', 'latin1', or 'bytes'")
if sys.version_info[0] >= 3:
pickle_kwargs = dict(encoding=encoding, fix_imports=fix_imports)
else:
# Nothing to do on Python 2
pickle_kwargs = {}
try:
# Code to distinguish from NumPy binary files and pickles.
_ZIP_PREFIX = asbytes('PK\x03\x04')
N = len(format.MAGIC_PREFIX)
magic = fid.read(N)
fid.seek(-N, 1) # back-up
if magic.startswith(_ZIP_PREFIX):
# zip-file (assume .npz)
# Transfer file ownership to NpzFile
tmp = own_fid
own_fid = False
return NpzFile(fid, own_fid=tmp, allow_pickle=allow_pickle,
pickle_kwargs=pickle_kwargs)
elif magic == format.MAGIC_PREFIX:
# .npy file
if mmap_mode:
return format.open_memmap(file, mode=mmap_mode)
else:
return format.read_array(fid, allow_pickle=allow_pickle,
pickle_kwargs=pickle_kwargs)
else:
# Try a pickle
if not allow_pickle:
raise ValueError("allow_pickle=False, but file does not contain "
"non-pickled data")
try:
return pickle.load(fid, **pickle_kwargs)
            except Exception:
raise IOError(
"Failed to interpret file %s as a pickle" % repr(file))
finally:
if own_fid:
fid.close()
def save(file, arr, allow_pickle=True, fix_imports=True):
"""
Save an array to a binary file in NumPy ``.npy`` format.
Parameters
----------
file : file or str
File or filename to which the data is saved. If file is a file-object,
then the filename is unchanged. If file is a string, a ``.npy``
extension will be appended to the file name if it does not already
have one.
allow_pickle : bool, optional
Allow saving object arrays using Python pickles. Reasons for disallowing
pickles include security (loading pickled data can execute arbitrary
code) and portability (pickled objects may not be loadable on different
Python installations, for example if the stored objects require libraries
that are not available, and not all pickled data is compatible between
Python 2 and Python 3).
Default: True
fix_imports : bool, optional
Only useful in forcing objects in object arrays on Python 3 to be
pickled in a Python 2 compatible way. If `fix_imports` is True, pickle
will try to map the new Python 3 names to the old module names used in
Python 2, so that the pickle data stream is readable with Python 2.
arr : array_like
Array data to be saved.
See Also
--------
savez : Save several arrays into a ``.npz`` archive
savetxt, load
Notes
-----
For a description of the ``.npy`` format, see the module docstring
of `numpy.lib.format` or the Numpy Enhancement Proposal
http://docs.scipy.org/doc/numpy/neps/npy-format.html
Examples
--------
>>> from tempfile import TemporaryFile
>>> outfile = TemporaryFile()
>>> x = np.arange(10)
>>> np.save(outfile, x)
>>> outfile.seek(0) # Only needed here to simulate closing & reopening file
>>> np.load(outfile)
array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
"""
own_fid = False
if isinstance(file, basestring):
if not file.endswith('.npy'):
file = file + '.npy'
fid = open(file, "wb")
own_fid = True
else:
fid = file
if sys.version_info[0] >= 3:
pickle_kwargs = dict(fix_imports=fix_imports)
else:
# Nothing to do on Python 2
pickle_kwargs = None
try:
arr = np.asanyarray(arr)
format.write_array(fid, arr, allow_pickle=allow_pickle,
pickle_kwargs=pickle_kwargs)
finally:
if own_fid:
fid.close()
def savez(file, *args, **kwds):
"""
Save several arrays into a single file in uncompressed ``.npz`` format.
If arguments are passed in with no keywords, the corresponding variable
names, in the ``.npz`` file, are 'arr_0', 'arr_1', etc. If keyword
arguments are given, the corresponding variable names, in the ``.npz``
file will match the keyword names.
Parameters
----------
file : str or file
Either the file name (string) or an open file (file-like object)
where the data will be saved. If file is a string, the ``.npz``
extension will be appended to the file name if it is not already there.
args : Arguments, optional
Arrays to save to the file. Since it is not possible for Python to
know the names of the arrays outside `savez`, the arrays will be saved
with names "arr_0", "arr_1", and so on. These arguments can be any
expression.
kwds : Keyword arguments, optional
Arrays to save to the file. Arrays will be saved in the file with the
keyword names.
Returns
-------
None
See Also
--------
save : Save a single array to a binary file in NumPy format.
savetxt : Save an array to a file as plain text.
savez_compressed : Save several arrays into a compressed ``.npz`` archive
Notes
-----
The ``.npz`` file format is a zipped archive of files named after the
variables they contain. The archive is not compressed and each file
in the archive contains one variable in ``.npy`` format. For a
description of the ``.npy`` format, see `numpy.lib.format` or the
Numpy Enhancement Proposal
http://docs.scipy.org/doc/numpy/neps/npy-format.html
When opening the saved ``.npz`` file with `load` a `NpzFile` object is
returned. This is a dictionary-like object which can be queried for
its list of arrays (with the ``.files`` attribute), and for the arrays
themselves.
Examples
--------
>>> from tempfile import TemporaryFile
>>> outfile = TemporaryFile()
>>> x = np.arange(10)
>>> y = np.sin(x)
Using `savez` with \\*args, the arrays are saved with default names.
>>> np.savez(outfile, x, y)
>>> outfile.seek(0) # Only needed here to simulate closing & reopening file
>>> npzfile = np.load(outfile)
>>> npzfile.files
['arr_1', 'arr_0']
>>> npzfile['arr_0']
array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
Using `savez` with \\**kwds, the arrays are saved with the keyword names.
>>> outfile = TemporaryFile()
>>> np.savez(outfile, x=x, y=y)
>>> outfile.seek(0)
>>> npzfile = np.load(outfile)
>>> npzfile.files
['y', 'x']
>>> npzfile['x']
array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
"""
_savez(file, args, kwds, False)
def savez_compressed(file, *args, **kwds):
"""
Save several arrays into a single file in compressed ``.npz`` format.
If keyword arguments are given, then filenames are taken from the keywords.
If arguments are passed in with no keywords, then stored file names are
arr_0, arr_1, etc.
Parameters
----------
file : str
File name of ``.npz`` file.
args : Arguments
Function arguments.
kwds : Keyword arguments
Keywords.
See Also
--------
numpy.savez : Save several arrays into an uncompressed ``.npz`` file format
numpy.load : Load the files created by savez_compressed.
"""
_savez(file, args, kwds, True)
def _savez(file, args, kwds, compress, allow_pickle=True, pickle_kwargs=None):
# Import is postponed to here since zipfile depends on gzip, an optional
# component of the so-called standard library.
import zipfile
# Import deferred for startup time improvement
import tempfile
if isinstance(file, basestring):
if not file.endswith('.npz'):
file = file + '.npz'
namedict = kwds
for i, val in enumerate(args):
key = 'arr_%d' % i
if key in namedict.keys():
raise ValueError(
"Cannot use un-named variables and keyword %s" % key)
namedict[key] = val
if compress:
compression = zipfile.ZIP_DEFLATED
else:
compression = zipfile.ZIP_STORED
zipf = zipfile_factory(file, mode="w", compression=compression)
# Stage arrays in a temporary file on disk, before writing to zip.
fd, tmpfile = tempfile.mkstemp(suffix='-numpy.npy')
os.close(fd)
try:
for key, val in namedict.items():
fname = key + '.npy'
fid = open(tmpfile, 'wb')
try:
format.write_array(fid, np.asanyarray(val),
allow_pickle=allow_pickle,
pickle_kwargs=pickle_kwargs)
fid.close()
fid = None
zipf.write(tmpfile, arcname=fname)
finally:
if fid:
fid.close()
finally:
os.remove(tmpfile)
zipf.close()
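# --- Hedged usage sketch (not part of the original source) ------------------
# Demonstrates the naming rule implemented in _savez above: positional arrays
# are stored as 'arr_0', 'arr_1', ... and clash with an explicit keyword of
# the same name. The temporary path is created just for the example.
def _example_savez_naming():
    import tempfile
    path = os.path.join(tempfile.mkdtemp(), 'example.npz')
    a, b = np.arange(3), np.ones(2)
    savez(path, a, b=b)               # stored as 'arr_0' and 'b'
    with load(path) as data:
        names = sorted(data.files)    # ['arr_0', 'b']
    try:
        savez(path, a, arr_0=a)       # the 'arr_0' slot is requested twice
    except ValueError:
        pass                          # "Cannot use un-named variables and keyword arr_0"
    return names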
def _getconv(dtype):
""" Find the correct dtype converter. Adapted from matplotlib """
def floatconv(x):
        if b'0x' in x.lower():
return float.fromhex(asstr(x))
return float(x)
typ = dtype.type
if issubclass(typ, np.bool_):
return lambda x: bool(int(x))
if issubclass(typ, np.uint64):
return np.uint64
if issubclass(typ, np.int64):
return np.int64
if issubclass(typ, np.integer):
return lambda x: int(float(x))
elif issubclass(typ, np.floating):
return floatconv
elif issubclass(typ, np.complex):
return lambda x: complex(asstr(x))
elif issubclass(typ, np.bytes_):
return bytes
else:
return str
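# --- Hedged usage sketch (not part of the original source) ------------------
# Shows which converters _getconv selects: integer dtypes go through float()
# first so that text like b'3.0' still parses, and float dtypes accept C99
# hex literals via float.fromhex. Purely illustrative.
def _example_getconv():
    int_conv = _getconv(np.dtype(np.int32))
    float_conv = _getconv(np.dtype(np.float64))
    assert int_conv(b'3.0') == 3
    assert float_conv(b'0x1.8p1') == 3.0
    assert float_conv(b'2.5') == 2.5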
def loadtxt(fname, dtype=float, comments='#', delimiter=None,
converters=None, skiprows=0, usecols=None, unpack=False,
ndmin=0):
"""
Load data from a text file.
Each row in the text file must have the same number of values.
Parameters
----------
fname : file or str
File, filename, or generator to read. If the filename extension is
``.gz`` or ``.bz2``, the file is first decompressed. Note that
generators should return byte strings for Python 3k.
dtype : data-type, optional
Data-type of the resulting array; default: float. If this is a
structured data-type, the resulting array will be 1-dimensional, and
each row will be interpreted as an element of the array. In this
case, the number of columns used must match the number of fields in
the data-type.
comments : str or sequence, optional
The characters or list of characters used to indicate the start of a
comment;
default: '#'.
delimiter : str, optional
The string used to separate values. By default, this is any
whitespace.
converters : dict, optional
A dictionary mapping column number to a function that will convert
that column to a float. E.g., if column 0 is a date string:
``converters = {0: datestr2num}``. Converters can also be used to
provide a default value for missing data (but see also `genfromtxt`):
``converters = {3: lambda s: float(s.strip() or 0)}``. Default: None.
skiprows : int, optional
Skip the first `skiprows` lines; default: 0.
usecols : sequence, optional
Which columns to read, with 0 being the first. For example,
``usecols = (1,4,5)`` will extract the 2nd, 5th and 6th columns.
The default, None, results in all columns being read.
unpack : bool, optional
If True, the returned array is transposed, so that arguments may be
unpacked using ``x, y, z = loadtxt(...)``. When used with a structured
data-type, arrays are returned for each field. Default is False.
ndmin : int, optional
The returned array will have at least `ndmin` dimensions.
Otherwise mono-dimensional axes will be squeezed.
Legal values: 0 (default), 1 or 2.
.. versionadded:: 1.6.0
Returns
-------
out : ndarray
Data read from the text file.
See Also
--------
load, fromstring, fromregex
genfromtxt : Load data with missing values handled as specified.
scipy.io.loadmat : reads MATLAB data files
Notes
-----
This function aims to be a fast reader for simply formatted files. The
`genfromtxt` function provides more sophisticated handling of, e.g.,
lines with missing values.
.. versionadded:: 1.10.0
The strings produced by the Python float.hex method can be used as
input for floats.
Examples
--------
>>> from io import StringIO # StringIO behaves like a file object
>>> c = StringIO("0 1\\n2 3")
>>> np.loadtxt(c)
array([[ 0., 1.],
[ 2., 3.]])
>>> d = StringIO("M 21 72\\nF 35 58")
>>> np.loadtxt(d, dtype={'names': ('gender', 'age', 'weight'),
... 'formats': ('S1', 'i4', 'f4')})
array([('M', 21, 72.0), ('F', 35, 58.0)],
dtype=[('gender', '|S1'), ('age', '<i4'), ('weight', '<f4')])
>>> c = StringIO("1,0,2\\n3,0,4")
>>> x, y = np.loadtxt(c, delimiter=',', usecols=(0, 2), unpack=True)
>>> x
array([ 1., 3.])
>>> y
array([ 2., 4.])
"""
# Type conversions for Py3 convenience
if comments is not None:
if isinstance(comments, (basestring, bytes)):
comments = [asbytes(comments)]
else:
comments = [asbytes(comment) for comment in comments]
# Compile regex for comments beforehand
comments = (re.escape(comment) for comment in comments)
regex_comments = re.compile(asbytes('|').join(comments))
user_converters = converters
if delimiter is not None:
delimiter = asbytes(delimiter)
if usecols is not None:
usecols = list(usecols)
fown = False
try:
if _is_string_like(fname):
fown = True
if fname.endswith('.gz'):
import gzip
fh = iter(gzip.GzipFile(fname))
elif fname.endswith('.bz2'):
import bz2
fh = iter(bz2.BZ2File(fname))
elif sys.version_info[0] == 2:
fh = iter(open(fname, 'U'))
else:
fh = iter(open(fname))
else:
fh = iter(fname)
except TypeError:
raise ValueError('fname must be a string, file handle, or generator')
X = []
def flatten_dtype(dt):
"""Unpack a structured data-type, and produce re-packing info."""
if dt.names is None:
# If the dtype is flattened, return.
# If the dtype has a shape, the dtype occurs
# in the list more than once.
shape = dt.shape
if len(shape) == 0:
return ([dt.base], None)
else:
packing = [(shape[-1], list)]
if len(shape) > 1:
for dim in dt.shape[-2::-1]:
packing = [(dim*packing[0][0], packing*dim)]
return ([dt.base] * int(np.prod(dt.shape)), packing)
else:
types = []
packing = []
for field in dt.names:
tp, bytes = dt.fields[field]
flat_dt, flat_packing = flatten_dtype(tp)
types.extend(flat_dt)
# Avoid extra nesting for subarrays
if len(tp.shape) > 0:
packing.extend(flat_packing)
else:
packing.append((len(flat_dt), flat_packing))
return (types, packing)
def pack_items(items, packing):
"""Pack items into nested lists based on re-packing info."""
if packing is None:
return items[0]
elif packing is tuple:
return tuple(items)
elif packing is list:
return list(items)
else:
start = 0
ret = []
for length, subpacking in packing:
ret.append(pack_items(items[start:start+length], subpacking))
start += length
return tuple(ret)
def split_line(line):
"""Chop off comments, strip, and split at delimiter.
Note that although the file is opened as text, this function
returns bytes.
"""
line = asbytes(line)
if comments is not None:
line = regex_comments.split(asbytes(line), maxsplit=1)[0]
line = line.strip(asbytes('\r\n'))
if line:
return line.split(delimiter)
else:
return []
try:
# Make sure we're dealing with a proper dtype
dtype = np.dtype(dtype)
defconv = _getconv(dtype)
# Skip the first `skiprows` lines
for i in range(skiprows):
next(fh)
# Read until we find a line with some values, and use
# it to estimate the number of columns, N.
first_vals = None
try:
while not first_vals:
first_line = next(fh)
first_vals = split_line(first_line)
except StopIteration:
# End of lines reached
first_line = ''
first_vals = []
warnings.warn('loadtxt: Empty input file: "%s"' % fname)
N = len(usecols or first_vals)
dtype_types, packing = flatten_dtype(dtype)
if len(dtype_types) > 1:
# We're dealing with a structured array, each field of
# the dtype matches a column
converters = [_getconv(dt) for dt in dtype_types]
else:
# All fields have the same dtype
converters = [defconv for i in range(N)]
if N > 1:
packing = [(N, tuple)]
# By preference, use the converters specified by the user
for i, conv in (user_converters or {}).items():
if usecols:
try:
i = usecols.index(i)
except ValueError:
# Unused converter specified
continue
converters[i] = conv
# Parse each line, including the first
for i, line in enumerate(itertools.chain([first_line], fh)):
vals = split_line(line)
if len(vals) == 0:
continue
if usecols:
vals = [vals[i] for i in usecols]
if len(vals) != N:
line_num = i + skiprows + 1
raise ValueError("Wrong number of columns at line %d"
% line_num)
# Convert each value according to its column and store
items = [conv(val) for (conv, val) in zip(converters, vals)]
# Then pack it according to the dtype's nesting
items = pack_items(items, packing)
X.append(items)
finally:
if fown:
fh.close()
X = np.array(X, dtype)
# Multicolumn data are returned with shape (1, N, M), i.e.
# (1, 1, M) for a single row - remove the singleton dimension there
if X.ndim == 3 and X.shape[:2] == (1, 1):
X.shape = (1, -1)
# Verify that the array has at least dimensions `ndmin`.
# Check correctness of the values of `ndmin`
if ndmin not in [0, 1, 2]:
raise ValueError('Illegal value of ndmin keyword: %s' % ndmin)
# Tweak the size and shape of the arrays - remove extraneous dimensions
if X.ndim > ndmin:
X = np.squeeze(X)
# and ensure we have the minimum number of dimensions asked for
# - has to be in this order for the odd case ndmin=1, X.squeeze().ndim=0
if X.ndim < ndmin:
if ndmin == 1:
X = np.atleast_1d(X)
elif ndmin == 2:
X = np.atleast_2d(X).T
if unpack:
if len(dtype_types) > 1:
# For structured arrays, return an array for each field.
return [X[field] for field in dtype.names]
else:
return X.T
else:
return X
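# --- Hedged usage sketch (not part of the original source) ------------------
# Small illustration of the usecols/converters machinery implemented above:
# column 1 falls back to 0.0 whenever the field is empty. The input data is
# made up for the example.
def _example_loadtxt_converters():
    from io import BytesIO
    data = BytesIO(b"1,,3\n4,5,6\n")
    return loadtxt(data, delimiter=',', usecols=(0, 1),
                   converters={1: lambda s: float(s.strip() or 0)})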
def savetxt(fname, X, fmt='%.18e', delimiter=' ', newline='\n', header='',
footer='', comments='# '):
"""
Save an array to a text file.
Parameters
----------
fname : filename or file handle
If the filename ends in ``.gz``, the file is automatically saved in
compressed gzip format. `loadtxt` understands gzipped files
transparently.
X : array_like
Data to be saved to a text file.
fmt : str or sequence of strs, optional
A single format (%10.5f), a sequence of formats, or a
multi-format string, e.g. 'Iteration %d -- %10.5f', in which
case `delimiter` is ignored. For complex `X`, the legal options
for `fmt` are:
a) a single specifier, `fmt='%.4e'`, resulting in numbers formatted
like `' (%s+%sj)' % (fmt, fmt)`
b) a full string specifying every real and imaginary part, e.g.
`' %.4e %+.4j %.4e %+.4j %.4e %+.4j'` for 3 columns
c) a list of specifiers, one per column - in this case, the real
and imaginary part must have separate specifiers,
e.g. `['%.3e + %.3ej', '(%.15e%+.15ej)']` for 2 columns
delimiter : str, optional
String or character separating columns.
newline : str, optional
String or character separating lines.
.. versionadded:: 1.5.0
header : str, optional
String that will be written at the beginning of the file.
.. versionadded:: 1.7.0
footer : str, optional
String that will be written at the end of the file.
.. versionadded:: 1.7.0
comments : str, optional
String that will be prepended to the ``header`` and ``footer`` strings,
to mark them as comments. Default: '# ', as expected by e.g.
``numpy.loadtxt``.
.. versionadded:: 1.7.0
See Also
--------
save : Save an array to a binary file in NumPy ``.npy`` format
savez : Save several arrays into an uncompressed ``.npz`` archive
savez_compressed : Save several arrays into a compressed ``.npz`` archive
Notes
-----
Further explanation of the `fmt` parameter
(``%[flag]width[.precision]specifier``):
flags:
``-`` : left justify
``+`` : Forces to precede result with + or -.
``0`` : Left pad the number with zeros instead of space (see width).
width:
Minimum number of characters to be printed. The value is not truncated
if it has more characters.
precision:
- For integer specifiers (eg. ``d,i,o,x``), the minimum number of
digits.
- For ``e, E`` and ``f`` specifiers, the number of digits to print
after the decimal point.
- For ``g`` and ``G``, the maximum number of significant digits.
- For ``s``, the maximum number of characters.
specifiers:
``c`` : character
``d`` or ``i`` : signed decimal integer
``e`` or ``E`` : scientific notation with ``e`` or ``E``.
``f`` : decimal floating point
``g,G`` : use the shorter of ``e,E`` or ``f``
``o`` : signed octal
``s`` : string of characters
``u`` : unsigned decimal integer
``x,X`` : unsigned hexadecimal integer
This explanation of ``fmt`` is not complete, for an exhaustive
specification see [1]_.
References
----------
.. [1] `Format Specification Mini-Language
<http://docs.python.org/library/string.html#
format-specification-mini-language>`_, Python Documentation.
Examples
--------
>>> x = y = z = np.arange(0.0,5.0,1.0)
>>> np.savetxt('test.out', x, delimiter=',') # X is an array
>>> np.savetxt('test.out', (x,y,z)) # x,y,z equal sized 1D arrays
>>> np.savetxt('test.out', x, fmt='%1.4e') # use exponential notation
"""
# Py3 conversions first
if isinstance(fmt, bytes):
fmt = asstr(fmt)
delimiter = asstr(delimiter)
own_fh = False
if _is_string_like(fname):
own_fh = True
if fname.endswith('.gz'):
import gzip
fh = gzip.open(fname, 'wb')
else:
if sys.version_info[0] >= 3:
fh = open(fname, 'wb')
else:
fh = open(fname, 'w')
elif hasattr(fname, 'write'):
fh = fname
else:
raise ValueError('fname must be a string or file handle')
try:
X = np.asarray(X)
# Handle 1-dimensional arrays
if X.ndim == 1:
# Common case -- 1d array of numbers
if X.dtype.names is None:
X = np.atleast_2d(X).T
ncol = 1
# Complex dtype -- each field indicates a separate column
else:
ncol = len(X.dtype.descr)
else:
ncol = X.shape[1]
iscomplex_X = np.iscomplexobj(X)
# `fmt` can be a string with multiple insertion points or a
# list of formats. E.g. '%10.5f\t%10d' or ('%10.5f', '%10d')
if type(fmt) in (list, tuple):
if len(fmt) != ncol:
raise AttributeError('fmt has wrong shape. %s' % str(fmt))
format = asstr(delimiter).join(map(asstr, fmt))
elif isinstance(fmt, str):
n_fmt_chars = fmt.count('%')
error = ValueError('fmt has wrong number of %% formats: %s' % fmt)
if n_fmt_chars == 1:
if iscomplex_X:
fmt = [' (%s+%sj)' % (fmt, fmt), ] * ncol
else:
fmt = [fmt, ] * ncol
format = delimiter.join(fmt)
elif iscomplex_X and n_fmt_chars != (2 * ncol):
raise error
elif ((not iscomplex_X) and n_fmt_chars != ncol):
raise error
else:
format = fmt
else:
raise ValueError('invalid fmt: %r' % (fmt,))
if len(header) > 0:
header = header.replace('\n', '\n' + comments)
fh.write(asbytes(comments + header + newline))
if iscomplex_X:
for row in X:
row2 = []
for number in row:
row2.append(number.real)
row2.append(number.imag)
fh.write(asbytes(format % tuple(row2) + newline))
else:
for row in X:
try:
fh.write(asbytes(format % tuple(row) + newline))
except TypeError:
raise TypeError("Mismatch between array dtype ('%s') and "
"format specifier ('%s')"
% (str(X.dtype), format))
if len(footer) > 0:
footer = footer.replace('\n', '\n' + comments)
fh.write(asbytes(comments + footer + newline))
finally:
if own_fh:
fh.close()
def fromregex(file, regexp, dtype):
"""
Construct an array from a text file, using regular expression parsing.
The returned array is always a structured array, and is constructed from
all matches of the regular expression in the file. Groups in the regular
expression are converted to fields of the structured array.
Parameters
----------
file : str or file
File name or file object to read.
regexp : str or regexp
Regular expression used to parse the file.
Groups in the regular expression correspond to fields in the dtype.
dtype : dtype or list of dtypes
Dtype for the structured array.
Returns
-------
output : ndarray
The output array, containing the part of the content of `file` that
was matched by `regexp`. `output` is always a structured array.
Raises
------
TypeError
When `dtype` is not a valid dtype for a structured array.
See Also
--------
fromstring, loadtxt
Notes
-----
Dtypes for structured arrays can be specified in several forms, but all
forms specify at least the data type and field name. For details see
`doc.structured_arrays`.
Examples
--------
>>> f = open('test.dat', 'w')
>>> f.write("1312 foo\\n1534 bar\\n444 qux")
>>> f.close()
>>> regexp = r"(\\d+)\\s+(...)" # match [digits, whitespace, anything]
>>> output = np.fromregex('test.dat', regexp,
... [('num', np.int64), ('key', 'S3')])
>>> output
array([(1312L, 'foo'), (1534L, 'bar'), (444L, 'qux')],
dtype=[('num', '<i8'), ('key', '|S3')])
>>> output['num']
array([1312, 1534, 444], dtype=int64)
"""
own_fh = False
if not hasattr(file, "read"):
file = open(file, 'rb')
own_fh = True
try:
if not hasattr(regexp, 'match'):
regexp = re.compile(asbytes(regexp))
if not isinstance(dtype, np.dtype):
dtype = np.dtype(dtype)
seq = regexp.findall(file.read())
if seq and not isinstance(seq[0], tuple):
# Only one group is in the regexp.
# Create the new array as a single data-type and then
# re-interpret as a single-field structured array.
newdtype = np.dtype(dtype[dtype.names[0]])
output = np.array(seq, dtype=newdtype)
output.dtype = dtype
else:
output = np.array(seq, dtype=dtype)
return output
finally:
if own_fh:
file.close()
#####--------------------------------------------------------------------------
#---- --- ASCII functions ---
#####--------------------------------------------------------------------------
def genfromtxt(fname, dtype=float, comments='#', delimiter=None,
skip_header=0, skip_footer=0, converters=None,
missing_values=None, filling_values=None, usecols=None,
names=None, excludelist=None, deletechars=None,
replace_space='_', autostrip=False, case_sensitive=True,
defaultfmt="f%i", unpack=None, usemask=False, loose=True,
invalid_raise=True, max_rows=None):
"""
Load data from a text file, with missing values handled as specified.
Each line past the first `skip_header` lines is split at the `delimiter`
character, and characters following the `comments` character are discarded.
Parameters
----------
fname : file or str
File, filename, or generator to read. If the filename extension is
`.gz` or `.bz2`, the file is first decompressed. Note that
generators must return byte strings in Python 3k.
dtype : dtype, optional
Data type of the resulting array.
If None, the dtypes will be determined by the contents of each
column, individually.
comments : str, optional
The character used to indicate the start of a comment.
All the characters occurring on a line after a comment are discarded.
delimiter : str, int, or sequence, optional
The string used to separate values. By default, any consecutive
whitespaces act as delimiter. An integer or sequence of integers
can also be provided as width(s) of each field.
skiprows : int, optional
`skiprows` was removed in numpy 1.10. Please use `skip_header` instead.
skip_header : int, optional
The number of lines to skip at the beginning of the file.
skip_footer : int, optional
The number of lines to skip at the end of the file.
converters : variable, optional
The set of functions that convert the data of a column to a value.
The converters can also be used to provide a default value
for missing data: ``converters = {3: lambda s: float(s or 0)}``.
missing : variable, optional
`missing` was removed in numpy 1.10. Please use `missing_values`
instead.
missing_values : variable, optional
The set of strings corresponding to missing data.
filling_values : variable, optional
The set of values to be used as default when the data are missing.
usecols : sequence, optional
Which columns to read, with 0 being the first. For example,
``usecols = (1, 4, 5)`` will extract the 2nd, 5th and 6th columns.
names : {None, True, str, sequence}, optional
If `names` is True, the field names are read from the first valid line
after the first `skip_header` lines.
If `names` is a sequence or a single string of comma-separated names,
the names will be used to define the field names in a structured dtype.
If `names` is None, the names of the dtype fields will be used, if any.
excludelist : sequence, optional
A list of names to exclude. This list is appended to the default list
['return','file','print']. Excluded names have an underscore appended:
for example, `file` would become `file_`.
deletechars : str, optional
A string combining invalid characters that must be deleted from the
names.
defaultfmt : str, optional
A format used to define default field names, such as "f%i" or "f_%02i".
autostrip : bool, optional
Whether to automatically strip white spaces from the variables.
replace_space : char, optional
Character(s) used in replacement of white spaces in the variables
names. By default, use a '_'.
case_sensitive : {True, False, 'upper', 'lower'}, optional
If True, field names are case sensitive.
If False or 'upper', field names are converted to upper case.
If 'lower', field names are converted to lower case.
unpack : bool, optional
If True, the returned array is transposed, so that arguments may be
unpacked using ``x, y, z = loadtxt(...)``
usemask : bool, optional
If True, return a masked array.
If False, return a regular array.
loose : bool, optional
If True, do not raise errors for invalid values.
invalid_raise : bool, optional
If True, an exception is raised if an inconsistency is detected in the
number of columns.
If False, a warning is emitted and the offending lines are skipped.
max_rows : int, optional
The maximum number of rows to read. Must not be used with skip_footer
at the same time. If given, the value must be at least 1. Default is
to read the entire file.
.. versionadded:: 1.10.0
Returns
-------
out : ndarray
Data read from the text file. If `usemask` is True, this is a
masked array.
See Also
--------
numpy.loadtxt : equivalent function when no data is missing.
Notes
-----
* When spaces are used as delimiters, or when no delimiter has been given
as input, there should not be any missing data between two fields.
* When the variables are named (either by a flexible dtype or with `names`),
there must not be any header in the file (else a ValueError
exception is raised).
* Individual values are not stripped of spaces by default.
When using a custom converter, make sure the function does remove spaces.
References
----------
.. [1] Numpy User Guide, section `I/O with Numpy
<http://docs.scipy.org/doc/numpy/user/basics.io.genfromtxt.html>`_.
Examples
--------
>>> from io import StringIO
>>> import numpy as np
Comma delimited file with mixed dtype
>>> s = StringIO("1,1.3,abcde")
>>> data = np.genfromtxt(s, dtype=[('myint','i8'),('myfloat','f8'),
... ('mystring','S5')], delimiter=",")
>>> data
array((1, 1.3, 'abcde'),
dtype=[('myint', '<i8'), ('myfloat', '<f8'), ('mystring', '|S5')])
Using dtype = None
>>> s.seek(0) # needed for StringIO example only
>>> data = np.genfromtxt(s, dtype=None,
... names = ['myint','myfloat','mystring'], delimiter=",")
>>> data
array((1, 1.3, 'abcde'),
dtype=[('myint', '<i8'), ('myfloat', '<f8'), ('mystring', '|S5')])
Specifying dtype and names
>>> s.seek(0)
>>> data = np.genfromtxt(s, dtype="i8,f8,S5",
... names=['myint','myfloat','mystring'], delimiter=",")
>>> data
array((1, 1.3, 'abcde'),
dtype=[('myint', '<i8'), ('myfloat', '<f8'), ('mystring', '|S5')])
An example with fixed-width columns
>>> s = StringIO("11.3abcde")
>>> data = np.genfromtxt(s, dtype=None, names=['intvar','fltvar','strvar'],
... delimiter=[1,3,5])
>>> data
array((1, 1.3, 'abcde'),
dtype=[('intvar', '<i8'), ('fltvar', '<f8'), ('strvar', '|S5')])
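An illustrative sketch (not part of the original examples) of filling
missing fields with a default value:
>>> s = StringIO("1,,3\\n4,5,6")
>>> np.genfromtxt(s, delimiter=",", filling_values=-1)
array([[ 1., -1.,  3.],
       [ 4.,  5.,  6.]])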
"""
if max_rows is not None:
if skip_footer:
raise ValueError(
"The keywords 'skip_footer' and 'max_rows' can not be "
"specified at the same time.")
if max_rows < 1:
raise ValueError("'max_rows' must be at least 1.")
# Py3 data conversions to bytes, for convenience
if comments is not None:
comments = asbytes(comments)
if isinstance(delimiter, unicode):
delimiter = asbytes(delimiter)
if isinstance(missing_values, (unicode, list, tuple)):
missing_values = asbytes_nested(missing_values)
#
if usemask:
from numpy.ma import MaskedArray, make_mask_descr
# Check the input dictionary of converters
user_converters = converters or {}
if not isinstance(user_converters, dict):
raise TypeError(
"The input argument 'converter' should be a valid dictionary "
"(got '%s' instead)" % type(user_converters))
# Initialize the filehandle, the LineSplitter and the NameValidator
own_fhd = False
try:
if isinstance(fname, basestring):
if sys.version_info[0] == 2:
fhd = iter(np.lib._datasource.open(fname, 'rbU'))
else:
fhd = iter(np.lib._datasource.open(fname, 'rb'))
own_fhd = True
else:
fhd = iter(fname)
except TypeError:
raise TypeError(
"fname must be a string, filehandle, or generator. "
"(got %s instead)" % type(fname))
split_line = LineSplitter(delimiter=delimiter, comments=comments,
autostrip=autostrip)._handyman
validate_names = NameValidator(excludelist=excludelist,
deletechars=deletechars,
case_sensitive=case_sensitive,
replace_space=replace_space)
# Skip the first `skip_header` rows
for i in range(skip_header):
next(fhd)
# Keep on until we find the first valid values
first_values = None
try:
while not first_values:
first_line = next(fhd)
if names is True:
if comments in first_line:
first_line = (
asbytes('').join(first_line.split(comments)[1:]))
first_values = split_line(first_line)
except StopIteration:
# return an empty array if the datafile is empty
first_line = asbytes('')
first_values = []
warnings.warn('genfromtxt: Empty input file: "%s"' % fname)
# Should we take the first values as names ?
if names is True:
fval = first_values[0].strip()
if fval in comments:
del first_values[0]
# Check the columns to use: make sure `usecols` is a list
if usecols is not None:
try:
usecols = [_.strip() for _ in usecols.split(",")]
except AttributeError:
try:
usecols = list(usecols)
except TypeError:
usecols = [usecols, ]
nbcols = len(usecols or first_values)
# Check the names and overwrite the dtype.names if needed
if names is True:
names = validate_names([_bytes_to_name(_.strip())
for _ in first_values])
first_line = asbytes('')
elif _is_string_like(names):
names = validate_names([_.strip() for _ in names.split(',')])
elif names:
names = validate_names(names)
# Get the dtype
if dtype is not None:
dtype = easy_dtype(dtype, defaultfmt=defaultfmt, names=names,
excludelist=excludelist,
deletechars=deletechars,
case_sensitive=case_sensitive,
replace_space=replace_space)
# Make sure the names is a list (for 2.5)
if names is not None:
names = list(names)
if usecols:
for (i, current) in enumerate(usecols):
# if usecols is a list of names, convert to a list of indices
if _is_string_like(current):
usecols[i] = names.index(current)
elif current < 0:
usecols[i] = current + len(first_values)
# If the dtype is not None, make sure we update it
if (dtype is not None) and (len(dtype) > nbcols):
descr = dtype.descr
dtype = np.dtype([descr[_] for _ in usecols])
names = list(dtype.names)
# If `names` is not None, update the names
elif (names is not None) and (len(names) > nbcols):
names = [names[_] for _ in usecols]
elif (names is not None) and (dtype is not None):
names = list(dtype.names)
# Process the missing values ...............................
# Rename missing_values for convenience
user_missing_values = missing_values or ()
# Define the list of missing_values (one column: one list)
missing_values = [list([asbytes('')]) for _ in range(nbcols)]
# We have a dictionary: process it field by field
if isinstance(user_missing_values, dict):
# Loop on the items
for (key, val) in user_missing_values.items():
# Is the key a string ?
if _is_string_like(key):
try:
# Transform it into an integer
key = names.index(key)
except ValueError:
# We couldn't find it: the name must have been dropped
continue
# Redefine the key as needed if it's a column number
if usecols:
try:
key = usecols.index(key)
except ValueError:
pass
# Transform the value as a list of string
if isinstance(val, (list, tuple)):
val = [str(_) for _ in val]
else:
val = [str(val), ]
# Add the value(s) to the current list of missing
if key is None:
# None acts as default
for miss in missing_values:
miss.extend(val)
else:
missing_values[key].extend(val)
# We have a sequence : each item matches a column
elif isinstance(user_missing_values, (list, tuple)):
for (value, entry) in zip(user_missing_values, missing_values):
value = str(value)
if value not in entry:
entry.append(value)
# We have a string : apply it to all entries
elif isinstance(user_missing_values, bytes):
user_value = user_missing_values.split(asbytes(","))
for entry in missing_values:
entry.extend(user_value)
# We have something else: apply it to all entries
else:
for entry in missing_values:
entry.extend([str(user_missing_values)])
# Process the filling_values ...............................
# Rename the input for convenience
user_filling_values = filling_values
if user_filling_values is None:
user_filling_values = []
# Define the default
filling_values = [None] * nbcols
# We have a dictionary : update each entry individually
if isinstance(user_filling_values, dict):
for (key, val) in user_filling_values.items():
if _is_string_like(key):
try:
# Transform it into an integer
key = names.index(key)
except ValueError:
# We couldn't find it: the name must have been dropped,
continue
# Redefine the key if it's a column number and usecols is defined
if usecols:
try:
key = usecols.index(key)
except ValueError:
pass
# Add the value to the list
filling_values[key] = val
# We have a sequence : update on a one-to-one basis
elif isinstance(user_filling_values, (list, tuple)):
n = len(user_filling_values)
if (n <= nbcols):
filling_values[:n] = user_filling_values
else:
filling_values = user_filling_values[:nbcols]
# We have something else : use it for all entries
else:
filling_values = [user_filling_values] * nbcols
# Initialize the converters ................................
if dtype is None:
# Note: we can't use a [...]*nbcols, as we would have 3 times the same
# ... converter, instead of 3 different converters.
converters = [StringConverter(None, missing_values=miss, default=fill)
for (miss, fill) in zip(missing_values, filling_values)]
else:
dtype_flat = flatten_dtype(dtype, flatten_base=True)
# Initialize the converters
if len(dtype_flat) > 1:
# Flexible type : get a converter from each dtype
zipit = zip(dtype_flat, missing_values, filling_values)
converters = [StringConverter(dt, locked=True,
missing_values=miss, default=fill)
for (dt, miss, fill) in zipit]
else:
# Set to a default converter (but w/ different missing values)
zipit = zip(missing_values, filling_values)
converters = [StringConverter(dtype, locked=True,
missing_values=miss, default=fill)
for (miss, fill) in zipit]
# Update the converters to use the user-defined ones
uc_update = []
for (j, conv) in user_converters.items():
# If the converter is specified by column names, use the index instead
if _is_string_like(j):
try:
j = names.index(j)
i = j
except ValueError:
continue
elif usecols:
try:
i = usecols.index(j)
except ValueError:
# Unused converter specified
continue
else:
i = j
# Find the value to test - first_line is not filtered by usecols:
if len(first_line):
testing_value = first_values[j]
else:
testing_value = None
converters[i].update(conv, locked=True,
testing_value=testing_value,
default=filling_values[i],
missing_values=missing_values[i],)
uc_update.append((i, conv))
# Make sure we have the corrected keys in user_converters...
user_converters.update(uc_update)
# Fixme: possible error as following variable never used.
#miss_chars = [_.missing_values for _ in converters]
# Initialize the output lists ...
# ... rows
rows = []
append_to_rows = rows.append
# ... masks
if usemask:
masks = []
append_to_masks = masks.append
# ... invalid
invalid = []
append_to_invalid = invalid.append
# Parse each line
for (i, line) in enumerate(itertools.chain([first_line, ], fhd)):
values = split_line(line)
nbvalues = len(values)
# Skip an empty line
if nbvalues == 0:
continue
if usecols:
# Select only the columns we need
try:
values = [values[_] for _ in usecols]
except IndexError:
append_to_invalid((i + skip_header + 1, nbvalues))
continue
elif nbvalues != nbcols:
append_to_invalid((i + skip_header + 1, nbvalues))
continue
# Store the values
append_to_rows(tuple(values))
if usemask:
append_to_masks(tuple([v.strip() in m
for (v, m) in zip(values,
missing_values)]))
if len(rows) == max_rows:
break
if own_fhd:
fhd.close()
# Upgrade the converters (if needed)
if dtype is None:
for (i, converter) in enumerate(converters):
current_column = [itemgetter(i)(_m) for _m in rows]
try:
converter.iterupgrade(current_column)
except ConverterLockError:
errmsg = "Converter #%i is locked and cannot be upgraded: " % i
current_column = map(itemgetter(i), rows)
for (j, value) in enumerate(current_column):
try:
converter.upgrade(value)
except (ConverterError, ValueError):
errmsg += "(occurred line #%i for value '%s')"
errmsg %= (j + 1 + skip_header, value)
raise ConverterError(errmsg)
# Check that we don't have invalid values
nbinvalid = len(invalid)
if nbinvalid > 0:
nbrows = len(rows) + nbinvalid - skip_footer
# Construct the error message
template = " Line #%%i (got %%i columns instead of %i)" % nbcols
if skip_footer > 0:
nbinvalid_skipped = len([_ for _ in invalid
if _[0] > nbrows + skip_header])
invalid = invalid[:nbinvalid - nbinvalid_skipped]
skip_footer -= nbinvalid_skipped
#
# nbrows -= skip_footer
# errmsg = [template % (i, nb)
# for (i, nb) in invalid if i < nbrows]
# else:
errmsg = [template % (i, nb)
for (i, nb) in invalid]
if len(errmsg):
errmsg.insert(0, "Some errors were detected !")
errmsg = "\n".join(errmsg)
# Raise an exception ?
if invalid_raise:
raise ValueError(errmsg)
# Issue a warning ?
else:
warnings.warn(errmsg, ConversionWarning)
# Strip the last skip_footer data
if skip_footer > 0:
rows = rows[:-skip_footer]
if usemask:
masks = masks[:-skip_footer]
# Convert each value according to the converter:
# We want to modify the list in place to avoid creating a new one...
if loose:
rows = list(
zip(*[[conv._loose_call(_r) for _r in map(itemgetter(i), rows)]
for (i, conv) in enumerate(converters)]))
else:
rows = list(
zip(*[[conv._strict_call(_r) for _r in map(itemgetter(i), rows)]
for (i, conv) in enumerate(converters)]))
# Reset the dtype
data = rows
if dtype is None:
# Get the dtypes from the types of the converters
column_types = [conv.type for conv in converters]
# Find the columns with strings...
strcolidx = [i for (i, v) in enumerate(column_types)
if v in (type('S'), np.string_)]
# ... and take the largest number of chars.
for i in strcolidx:
column_types[i] = "|S%i" % max(len(row[i]) for row in data)
#
if names is None:
# If the dtype is uniform, don't define names, else use ''
base = set([c.type for c in converters if c._checked])
if len(base) == 1:
(ddtype, mdtype) = (list(base)[0], np.bool)
else:
ddtype = [(defaultfmt % i, dt)
for (i, dt) in enumerate(column_types)]
if usemask:
mdtype = [(defaultfmt % i, np.bool)
for (i, dt) in enumerate(column_types)]
else:
ddtype = list(zip(names, column_types))
mdtype = list(zip(names, [np.bool] * len(column_types)))
output = np.array(data, dtype=ddtype)
if usemask:
outputmask = np.array(masks, dtype=mdtype)
else:
# Overwrite the initial dtype names if needed
if names and dtype.names:
dtype.names = names
# Case 1. We have a structured type
if len(dtype_flat) > 1:
# Nested dtype, eg [('a', int), ('b', [('b0', int), ('b1', 'f4')])]
# First, create the array using a flattened dtype:
# [('a', int), ('b1', int), ('b2', float)]
# Then, view the array using the specified dtype.
if 'O' in (_.char for _ in dtype_flat):
if has_nested_fields(dtype):
raise NotImplementedError(
"Nested fields involving objects are not supported...")
else:
output = np.array(data, dtype=dtype)
else:
rows = np.array(data, dtype=[('', _) for _ in dtype_flat])
output = rows.view(dtype)
# Now, process the rowmasks the same way
if usemask:
rowmasks = np.array(
masks, dtype=np.dtype([('', np.bool) for t in dtype_flat]))
# Construct the new dtype
mdtype = make_mask_descr(dtype)
outputmask = rowmasks.view(mdtype)
# Case #2. We have a basic dtype
else:
# We used some user-defined converters
if user_converters:
ishomogeneous = True
descr = []
for i, ttype in enumerate([conv.type for conv in converters]):
# Keep the dtype of the current converter
if i in user_converters:
ishomogeneous &= (ttype == dtype.type)
if ttype == np.string_:
ttype = "|S%i" % max(len(row[i]) for row in data)
descr.append(('', ttype))
else:
descr.append(('', dtype))
# So we changed the dtype ?
if not ishomogeneous:
# We have more than one field
if len(descr) > 1:
dtype = np.dtype(descr)
# We have only one field: drop the name if not needed.
else:
dtype = np.dtype(ttype)
#
output = np.array(data, dtype)
if usemask:
if dtype.names:
mdtype = [(_, np.bool) for _ in dtype.names]
else:
mdtype = np.bool
outputmask = np.array(masks, dtype=mdtype)
# Try to take care of the missing data we missed
names = output.dtype.names
if usemask and names:
for (name, conv) in zip(names or (), converters):
missing_values = [conv(_) for _ in conv.missing_values
if _ != asbytes('')]
for mval in missing_values:
outputmask[name] |= (output[name] == mval)
# Construct the final array
if usemask:
output = output.view(MaskedArray)
output._mask = outputmask
if unpack:
return output.squeeze().T
return output.squeeze()
def ndfromtxt(fname, **kwargs):
"""
Load ASCII data stored in a file and return it as a single array.
Parameters
----------
fname, kwargs : For a description of input parameters, see `genfromtxt`.
See Also
--------
numpy.genfromtxt : generic function.
"""
kwargs['usemask'] = False
return genfromtxt(fname, **kwargs)
def mafromtxt(fname, **kwargs):
"""
Load ASCII data stored in a text file and return a masked array.
Parameters
----------
fname, kwargs : For a description of input parameters, see `genfromtxt`.
See Also
--------
numpy.genfromtxt : generic function to load ASCII data.
"""
kwargs['usemask'] = True
return genfromtxt(fname, **kwargs)
def recfromtxt(fname, **kwargs):
"""
Load ASCII data from a file and return it in a record array.
If ``usemask=False`` a standard `recarray` is returned,
if ``usemask=True`` a MaskedRecords array is returned.
Parameters
----------
fname, kwargs : For a description of input parameters, see `genfromtxt`.
See Also
--------
numpy.genfromtxt : generic function
Notes
-----
By default, `dtype` is None, which means that the data-type of the output
array will be determined from the data.
"""
kwargs.setdefault("dtype", None)
usemask = kwargs.get('usemask', False)
output = genfromtxt(fname, **kwargs)
if usemask:
from numpy.ma.mrecords import MaskedRecords
output = output.view(MaskedRecords)
else:
output = output.view(np.recarray)
return output
def recfromcsv(fname, **kwargs):
"""
Load ASCII data stored in a comma-separated file.
The returned array is a record array (if ``usemask=False``, see
`recarray`) or a masked record array (if ``usemask=True``,
see `ma.mrecords.MaskedRecords`).
Parameters
----------
fname, kwargs : For a description of input parameters, see `genfromtxt`.
See Also
--------
numpy.genfromtxt : generic function to load ASCII data.
Notes
-----
By default, `dtype` is None, which means that the data-type of the output
array will be determined from the data.
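Illustrative usage sketch (``data.csv`` is hypothetical, with a header row
followed by data rows); field names are lower-cased by default:
>>> rec = np.recfromcsv('data.csv')  # doctest: +SKIP
>>> rec.dtype.names                  # doctest: +SKIP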
"""
# Set default kwargs for genfromtxt as relevant to csv import.
kwargs.setdefault("case_sensitive", "lower")
kwargs.setdefault("names", True)
kwargs.setdefault("delimiter", ",")
kwargs.setdefault("dtype", None)
output = genfromtxt(fname, **kwargs)
usemask = kwargs.get("usemask", False)
if usemask:
from numpy.ma.mrecords import MaskedRecords
output = output.view(MaskedRecords)
else:
output = output.view(np.recarray)
return output
|
mit
|
pastebt/you-get
|
src/you_get/extractors/miomio.py
|
19
|
1195
|
#!/usr/bin/env python
__all__ = ['miomio_download']
from ..common import *
from .sina import sina_download_by_xml
from .tudou import tudou_download_by_id
from .youku import youku_download_by_vid
def miomio_download(url, output_dir = '.', merge = True, info_only = False):
html = get_html(url)
title = r1(r'<meta name="description" content="([^"]*)"', html)
flashvars = r1(r'flashvars="(type=[^"]*)"', html)
t = r1(r'type=(\w+)', flashvars)
id = r1(r'vid=([^"]+)', flashvars)
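# Illustrative example (hypothetical values): flashvars of "type=sina&vid=12345"
# would yield t = 'sina' and id = '12345'.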
if t == 'youku':
youku_download_by_vid(id, title=title, output_dir=output_dir, merge=merge, info_only=info_only)
elif t == 'tudou':
tudou_download_by_id(id, title, output_dir=output_dir, merge=merge, info_only=info_only)
elif t == 'sina' or t=='video':
url = "http://www.miomio.tv/mioplayer/mioplayerconfigfiles/sina.php?vid=" + id
xml = get_content(url, headers=fake_headers, decoded=True)
sina_download_by_xml(xml, title, output_dir=output_dir, merge=merge, info_only=info_only)
else:
raise NotImplementedError(flashvars)
site_info = "MioMio.tv"
download = miomio_download
download_playlist = playlist_not_supported('miomio')
|
mit
|
gundalow/ansible
|
test/integration/targets/module_utils_urls/library/test_peercert.py
|
29
|
2346
|
#!/usr/bin/python
# Copyright: (c) 2020, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = r'''
---
module: test_peercert
short_description: Test getting the peer certificate of an HTTP response
description: Test getting the peer certificate of an HTTP response.
options:
url:
description: The endpoint to get the peer cert for
required: true
type: str
author:
- Ansible Project
'''
EXAMPLES = r'''
#
'''
RETURN = r'''
#
'''
import base64
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.common.text.converters import to_text
from ansible.module_utils.urls import getpeercert, Request
def get_x509_shorthand(name, value):
prefix = {
'countryName': 'C',
'stateOrProvinceName': 'ST',
'localityName': 'L',
'organizationName': 'O',
'commonName': 'CN',
'organizationalUnitName': 'OU',
}[name]
return '%s=%s' % (prefix, value)
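# For example (illustrative): get_x509_shorthand('commonName', 'example.com')
# returns 'CN=example.com'.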
def main():
module_args = dict(
url=dict(type='str', required=True),
)
module = AnsibleModule(
argument_spec=module_args,
supports_check_mode=True,
)
result = {
'changed': False,
'cert': None,
'raw_cert': None,
}
req = Request().get(module.params['url'])
try:
cert = getpeercert(req)
b_cert = getpeercert(req, binary_form=True)
finally:
req.close()
if cert:
processed_cert = {
'issuer': '',
'not_after': cert.get('notAfter', None),
'not_before': cert.get('notBefore', None),
'serial_number': cert.get('serialNumber', None),
'subject': '',
'version': cert.get('version', None),
}
for field in ['issuer', 'subject']:
field_values = []
for x509_part in cert.get(field, []):
field_values.append(get_x509_shorthand(x509_part[0][0], x509_part[0][1]))
processed_cert[field] = ",".join(field_values)
result['cert'] = processed_cert
if b_cert:
result['raw_cert'] = to_text(base64.b64encode(b_cert))
module.exit_json(**result)
if __name__ == '__main__':
main()
|
gpl-3.0
|
bholbrook73/thrift
|
contrib/parse_profiling.py
|
57
|
10542
|
#!/usr/bin/env python
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
"""
This script can be used to make the output from
apache::thrift::profile_print_info() more human-readable.
It translates each executable file name and address into the corresponding
source file name, line number, and function name. By default, it also
demangles C++ symbol names.
"""
import optparse
import os
import re
import subprocess
import sys
class AddressInfo(object):
"""
A class to store information about a particular address in an object file.
"""
def __init__(self, obj_file, address):
self.objectFile = obj_file
self.address = address
self.sourceFile = None
self.sourceLine = None
self.function = None
g_addrs_by_filename = {}
def get_address(filename, address):
"""
Retrieve an AddressInfo object for the specified object file and address.
Keeps a global list of AddressInfo objects. Two calls to get_address()
with the same filename and address will always return the same AddressInfo
object.
"""
global g_addrs_by_filename
try:
by_address = g_addrs_by_filename[filename]
except KeyError:
by_address = {}
g_addrs_by_filename[filename] = by_address
try:
addr_info = by_address[address]
except KeyError:
addr_info = AddressInfo(filename, address)
by_address[address] = addr_info
return addr_info
def translate_file_addresses(filename, addresses, options):
"""
Use addr2line to look up information for the specified addresses.
All of the addresses must belong to the same object file.
"""
# Do nothing if we can't find the file
if not os.path.isfile(filename):
return
args = ['addr2line']
if options.printFunctions:
args.append('-f')
args.extend(['-e', filename])
proc = subprocess.Popen(args, stdin=subprocess.PIPE,
stdout=subprocess.PIPE)
for address in addresses:
assert address.objectFile == filename
proc.stdin.write(address.address + '\n')
if options.printFunctions:
function = proc.stdout.readline()
function = function.strip()
if not function:
raise Exception('unexpected EOF from addr2line')
address.function = function
file_and_line = proc.stdout.readline()
file_and_line = file_and_line.strip()
if not file_and_line:
raise Exception('unexpected EOF from addr2line')
idx = file_and_line.rfind(':')
if idx < 0:
msg = 'expected file and line number from addr2line; got %r' % \
(file_and_line,)
msg += '\nfile=%r, address=%r' % (filename, address.address)
raise Exception(msg)
address.sourceFile = file_and_line[:idx]
address.sourceLine = file_and_line[idx+1:]
(remaining_out, cmd_err) = proc.communicate()
retcode = proc.wait()
if retcode != 0:
raise subprocess.CalledProcessError(retcode, args)
def lookup_addresses(options):
"""
Look up source file information for all of the addresses currently stored
in the global list of AddressInfo objects.
"""
global g_addrs_by_filename
for (file, addresses) in g_addrs_by_filename.items():
translate_file_addresses(file, addresses.values(), options)
class Entry(object):
"""
An entry in the thrift profile output.
Contains a header line, and a backtrace.
"""
def __init__(self, header):
self.header = header
self.bt = []
def addFrame(self, filename, address):
# If libc was able to determine the symbols names, the filename
# argument will be of the form <filename>(<function>+<offset>)
# So, strip off anything after the last '('
idx = filename.rfind('(')
if idx >= 0:
filename = filename[:idx]
addr = get_address(filename, address)
self.bt.append(addr)
def write(self, f, options):
f.write(self.header)
f.write('\n')
n = 0
for address in self.bt:
f.write(' #%-2d %s:%s\n' % (n, address.sourceFile,
address.sourceLine))
n += 1
if options.printFunctions:
if address.function:
f.write(' %s\n' % (address.function,))
else:
f.write(' ??\n')
def process_file(in_file, out_file, options):
"""
Read thrift profile output from the specified input file, and print
prettier information on the output file.
"""
#
# A naive approach would be to read the input line by line,
# and each time we come to a filename and address, pass it to addr2line
# and print the resulting information. Unfortunately, addr2line can be
# quite slow, especially with large executables.
#
# This approach is much faster. We read in all of the input, storing
# the addresses in each file that need to be resolved. We then call
# addr2line just once for each file. This is much faster than calling
# addr2line once per address.
#
virt_call_regex = re.compile(r'^\s*T_VIRTUAL_CALL: (\d+) calls on (.*):$')
gen_prot_regex = re.compile(
r'^\s*T_GENERIC_PROTOCOL: (\d+) calls to (.*) with a (.*):$')
bt_regex = re.compile(r'^\s*#(\d+)\s*(.*) \[(0x[0-9A-Za-z]+)\]$')
# Parse all of the input, and store it as Entry objects
entries = []
current_entry = None
while True:
line = in_file.readline()
if not line:
break
if line == '\n' or line.startswith('Thrift virtual call info:'):
continue
virt_call_match = virt_call_regex.match(line)
if virt_call_match:
num_calls = int(virt_call_match.group(1))
type_name = virt_call_match.group(2)
if options.cxxfilt:
# Type names reported by typeid() are internal names.
# By default, c++filt doesn't demangle internal type names.
# (Some versions of c++filt have a "-t" option to enable this.
# Other versions don't have this argument, but demangle type
# names passed as an argument, but not on stdin.)
#
# If the output is being filtered through c++filt, prepend
# "_Z" to the type name to make it look like an external name.
type_name = '_Z' + type_name
header = 'T_VIRTUAL_CALL: %d calls on "%s"' % \
(num_calls, type_name)
if current_entry is not None:
entries.append(current_entry)
current_entry = Entry(header)
continue
gen_prot_match = gen_prot_regex.match(line)
if gen_prot_match:
num_calls = int(gen_prot_match.group(1))
type_name1 = gen_prot_match.group(2)
type_name2 = gen_prot_match.group(3)
if options.cxxfilt:
type_name1 = '_Z' + type_name1
type_name2 = '_Z' + type_name2
header = 'T_GENERIC_PROTOCOL: %d calls to "%s" with a "%s"' % \
(num_calls, type_name1, type_name2)
if current_entry is not None:
entries.append(current_entry)
current_entry = Entry(header)
continue
bt_match = bt_regex.match(line)
if bt_match:
if current_entry is None:
raise Exception('found backtrace frame before entry header')
frame_num = int(bt_match.group(1))
filename = bt_match.group(2)
address = bt_match.group(3)
current_entry.addFrame(filename, address)
continue
raise Exception('unexpected line in input: %r' % (line,))
# Add the last entry we were processing to the list
if current_entry is not None:
entries.append(current_entry)
current_entry = None
# Look up all of the addresses
lookup_addresses(options)
# Print out the entries, now that the information has been translated
for entry in entries:
entry.write(out_file, options)
out_file.write('\n')
def start_cppfilt():
(read_pipe, write_pipe) = os.pipe()
# Fork. Run c++filt in the parent process,
# and then continue normal processing in the child.
pid = os.fork()
if pid == 0:
# child
os.dup2(write_pipe, sys.stdout.fileno())
os.close(read_pipe)
os.close(write_pipe)
return
else:
# parent
os.dup2(read_pipe, sys.stdin.fileno())
os.close(read_pipe)
os.close(write_pipe)
cmd = ['c++filt']
os.execvp(cmd[0], cmd)
def main(argv):
parser = optparse.OptionParser(usage='%prog [options] [<file>]')
parser.add_option('--no-functions', help='Don\'t print function names',
dest='printFunctions', action='store_false',
default=True)
parser.add_option('--no-demangle',
help='Don\'t demangle C++ symbol names',
dest='cxxfilt', action='store_false',
default=True)
(options, args) = parser.parse_args(argv[1:])
num_args = len(args)
if num_args == 0:
in_file = sys.stdin
elif num_args == 1:
in_file = open(argv[1], 'r')
else:
parser.print_usage(sys.stderr)
print >> sys.stderr, 'trailing arguments: %s' % (' '.join(args[1:],))
return 1
if options.cxxfilt:
start_cppfilt()
process_file(in_file, sys.stdout, options)
if __name__ == '__main__':
rc = main(sys.argv)
sys.exit(rc)
|
apache-2.0
|
madjar/aurifere
|
tests/test_pkgbuild.py
|
1
|
1304
|
import os
import unittest
here = os.path.dirname(__file__)
class PkgbuildTest(unittest.TestCase):
def _get_pkgbuild(self):
from aurifere.pkgbuild import PKGBUILD
return PKGBUILD(os.path.join(here, 'fixtures/PKGBUILD'))
def test_attributes(self):
p = self._get_pkgbuild()
self.assertEqual(p['name'], 'pep8')
self.assertEqual(p['version'], '0.6.1')
def test_version(self):
p = self._get_pkgbuild()
self.assertEqual(p.version(), '0.6.1-3')
def test_all_depends(self):
p = self._get_pkgbuild()
self.assertEqual(list(p.all_depends()),
['python2', 'setuptools', 'fakedepend'])
class VersionCompareTest(unittest.TestCase):
def _get_FUT(self):
from aurifere.pkgbuild import version_is_greater
return version_is_greater
def test_classic_dotted_version_equals(self):
self.assertFalse(self._get_FUT()('2.12.4-5', '2.12.4-5'))
def test_classic_dotted_version_greater(self):
self.assertTrue(self._get_FUT()('2.0.2-1', '2.0.1-2'))
def test_classic_dotted_version_lesser(self):
self.assertFalse(self._get_FUT()('2.0.1-2', '2.0.2-1'))
def test_ugly_version_numbers(self):
self.assertTrue(self._get_FUT()('1.0.27.206_r0-1', '1.0.27.206-1'))
|
isc
|
Gitlab11/odoo
|
addons/email_template/tests/__init__.py
|
260
|
1093
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Business Applications
# Copyright (c) 2012-TODAY OpenERP S.A. <http://openerp.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from . import test_mail, test_ir_actions
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
agpl-3.0
|
andres-root/bitcoinxt
|
qa/rpc-tests/mempool_coinbase_spends.py
|
125
|
3785
|
#!/usr/bin/env python2
# Copyright (c) 2014 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Test re-org scenarios with a mempool that contains transactions
# that spend (directly or indirectly) coinbase transactions.
#
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
import os
import shutil
# Create one-input, one-output, no-fee transaction:
class MempoolCoinbaseTest(BitcoinTestFramework):
alert_filename = None # Set by setup_network
def setup_network(self):
args = ["-checkmempool", "-debug=mempool"]
self.nodes = []
self.nodes.append(start_node(0, self.options.tmpdir, args))
self.nodes.append(start_node(1, self.options.tmpdir, args))
connect_nodes(self.nodes[1], 0)
self.is_network_split = False
self.sync_all()
def create_tx(self, from_txid, to_address, amount):
inputs = [{ "txid" : from_txid, "vout" : 0}]
outputs = { to_address : amount }
rawtx = self.nodes[0].createrawtransaction(inputs, outputs)
signresult = self.nodes[0].signrawtransaction(rawtx)
assert_equal(signresult["complete"], True)
return signresult["hex"]
def run_test(self):
start_count = self.nodes[0].getblockcount()
# Mine four blocks. After this, the coinbases of nodes[0] blocks
# 102, 103, and 104 are spendable.
new_blocks = self.nodes[1].generate(4)
self.sync_all()
node0_address = self.nodes[0].getnewaddress()
node1_address = self.nodes[1].getnewaddress()
# Three scenarios for re-orging coinbase spends in the memory pool:
# 1. Direct coinbase spend : spend_101
# 2. Indirect (coinbase spend in chain, child in mempool) : spend_102 and spend_102_1
# 3. Indirect (coinbase and child both in chain) : spend_103 and spend_103_1
# Use invalidateblock to make all of the above coinbase spends invalid (immature coinbase),
# and make sure the mempool code behaves correctly.
b = [ self.nodes[0].getblockhash(n) for n in range(102, 105) ]
coinbase_txids = [ self.nodes[0].getblock(h)['tx'][0] for h in b ]
spend_101_raw = self.create_tx(coinbase_txids[0], node1_address, 50)
spend_102_raw = self.create_tx(coinbase_txids[1], node0_address, 50)
spend_103_raw = self.create_tx(coinbase_txids[2], node0_address, 50)
# Broadcast and mine spend_102 and 103:
spend_102_id = self.nodes[0].sendrawtransaction(spend_102_raw)
spend_103_id = self.nodes[0].sendrawtransaction(spend_103_raw)
self.nodes[0].generate(1)
# Create 102_1 and 103_1:
spend_102_1_raw = self.create_tx(spend_102_id, node1_address, 50)
spend_103_1_raw = self.create_tx(spend_103_id, node1_address, 50)
# Broadcast and mine 103_1:
spend_103_1_id = self.nodes[0].sendrawtransaction(spend_103_1_raw)
self.nodes[0].generate(1)
# ... now put spend_101 and spend_102_1 in memory pools:
spend_101_id = self.nodes[0].sendrawtransaction(spend_101_raw)
spend_102_1_id = self.nodes[0].sendrawtransaction(spend_102_1_raw)
self.sync_all()
assert_equal(set(self.nodes[0].getrawmempool()), set([ spend_101_id, spend_102_1_id ]))
# Use invalidateblock to re-org back and make all those coinbase spends
# immature/invalid:
for node in self.nodes:
node.invalidateblock(new_blocks[0])
self.sync_all()
# mempool should be empty.
assert_equal(set(self.nodes[0].getrawmempool()), set())
if __name__ == '__main__':
MempoolCoinbaseTest().main()
|
mit
|
wechasing/tornado
|
demos/appengine/blog.py
|
114
|
5385
|
#!/usr/bin/env python
#
# Copyright 2009 Facebook
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import functools
import os.path
import re
import tornado.escape
import tornado.web
import tornado.wsgi
import unicodedata
from google.appengine.api import users
from google.appengine.ext import db
class Entry(db.Model):
"""A single blog entry."""
author = db.UserProperty()
title = db.StringProperty(required=True)
slug = db.StringProperty(required=True)
body_source = db.TextProperty(required=True)
html = db.TextProperty(required=True)
published = db.DateTimeProperty(auto_now_add=True)
updated = db.DateTimeProperty(auto_now=True)
def administrator(method):
"""Decorate with this method to restrict to site admins."""
@functools.wraps(method)
def wrapper(self, *args, **kwargs):
if not self.current_user:
if self.request.method == "GET":
self.redirect(self.get_login_url())
return
raise tornado.web.HTTPError(403)
elif not self.current_user.administrator:
if self.request.method == "GET":
self.redirect("/")
return
raise tornado.web.HTTPError(403)
else:
return method(self, *args, **kwargs)
return wrapper
class BaseHandler(tornado.web.RequestHandler):
"""Implements Google Accounts authentication methods."""
def get_current_user(self):
user = users.get_current_user()
if user: user.administrator = users.is_current_user_admin()
return user
def get_login_url(self):
return users.create_login_url(self.request.uri)
def get_template_namespace(self):
# Let the templates access the users module to generate login URLs
ns = super(BaseHandler, self).get_template_namespace()
ns['users'] = users
return ns
class HomeHandler(BaseHandler):
def get(self):
entries = db.Query(Entry).order('-published').fetch(limit=5)
if not entries:
if not self.current_user or self.current_user.administrator:
self.redirect("/compose")
return
self.render("home.html", entries=entries)
class EntryHandler(BaseHandler):
def get(self, slug):
entry = db.Query(Entry).filter("slug =", slug).get()
if not entry: raise tornado.web.HTTPError(404)
self.render("entry.html", entry=entry)
class ArchiveHandler(BaseHandler):
def get(self):
entries = db.Query(Entry).order('-published')
self.render("archive.html", entries=entries)
class FeedHandler(BaseHandler):
def get(self):
entries = db.Query(Entry).order('-published').fetch(limit=10)
self.set_header("Content-Type", "application/atom+xml")
self.render("feed.xml", entries=entries)
class ComposeHandler(BaseHandler):
@administrator
def get(self):
key = self.get_argument("key", None)
entry = Entry.get(key) if key else None
self.render("compose.html", entry=entry)
@administrator
def post(self):
key = self.get_argument("key", None)
if key:
entry = Entry.get(key)
entry.title = self.get_argument("title")
entry.body_source = self.get_argument("body_source")
entry.html = tornado.escape.linkify(
self.get_argument("body_source"))
else:
title = self.get_argument("title")
slug = unicodedata.normalize("NFKD", title).encode(
"ascii", "ignore")
slug = re.sub(r"[^\w]+", " ", slug)
slug = "-".join(slug.lower().strip().split())
if not slug: slug = "entry"
while True:
existing = db.Query(Entry).filter("slug =", slug).get()
if not existing or str(existing.key()) == key:
break
slug += "-2"
entry = Entry(
author=self.current_user,
title=title,
slug=slug,
body_source=self.get_argument("body_source"),
html=tornado.escape.linkify(self.get_argument("body_source")),
)
entry.put()
self.redirect("/entry/" + entry.slug)
class EntryModule(tornado.web.UIModule):
def render(self, entry):
return self.render_string("modules/entry.html", entry=entry)
settings = {
"blog_title": u"Tornado Blog",
"template_path": os.path.join(os.path.dirname(__file__), "templates"),
"ui_modules": {"Entry": EntryModule},
"xsrf_cookies": True,
}
application = tornado.web.Application([
(r"/", HomeHandler),
(r"/archive", ArchiveHandler),
(r"/feed", FeedHandler),
(r"/entry/([^/]+)", EntryHandler),
(r"/compose", ComposeHandler),
], **settings)
application = tornado.wsgi.WSGIAdapter(application)
|
apache-2.0
|
j-coll/opencga
|
opencga-app/app/cloud/azure/arm/scripts/mount.py
|
4
|
7799
|
import sys
import socket
import fcntl
import struct
import random
import os
import shutil
import subprocess
import time
import csv
import ipaddress
# Run `python3 -m unittest discover` in this dir to execute tests
default_mount_options_nfs = "nfs hard,nointr,proto=tcp,mountproto=tcp,retry=30 0 0"
default_mount_options_cifs = "dir_mode=0777,file_mode=0777,serverino,nofail,uid=1001,gid=1001,vers=3.0"
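# Illustrative only (hypothetical host/path): with the NFS options above, a
# primary mount ends up in /etc/fstab roughly as:
#   10.20.0.1:/folder1/nfsfolder2 /media/primarynfs nfs hard,nointr,proto=tcp,mountproto=tcp,retry=30 0 0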
def get_ip_address():
with socket.socket(socket.AF_INET, socket.SOCK_DGRAM) as s:
try:
# doesn't even have to be reachable
s.connect(("10.255.255.255", 1))
return s.getsockname()[0]
except:
return "127.0.0.1"
def ip_as_int(ip):
return int(ipaddress.ip_address(ip))
def remove_lines_containing(file, contains):
with open(file, "r+") as file:
d = file.readlines()
file.seek(0)
for i in d:
if contains not in i and i != "\n":
file.write(i)
file.truncate()
def print_help():
print("For example 'sudo python mount.py nfs '10.20.0.1:/folder1/nfsfolder2,10.20.0.1:/folder1/nfsfolder2'")
print(
"or 'sudo python mount.py azurefiles <storage-account-name>,<share-name>,<storage-account-key>'"
)
def install_apt_package(package):
try:
print("Attempt to install {}".format(package))
subprocess.check_call(["apt", "install", package, "-y"])
print("Install completed successfully")
except subprocess.CalledProcessError as e:
print("Failed install {} error: {}".format(package, e))
raise
# main allows the mount script to be executable
def main():
if len(sys.argv) < 3:
print("Expected arg1: 'mount_type' and arg2 'mount_data'")
print_help()
exit(1)
mount_type = str(sys.argv[1])
mount_data = str(sys.argv[2])
mount_share(mount_type, mount_data)
# mount_share allows it to be invoked from other python scripts
def mount_share(mount_type, mount_data):
if mount_type.lower() != "nfs" and mount_type.lower() != "azurefiles":
print("Expected first arg to be either 'nfs' or 'azurefiles'")
print_help()
exit(1)
if mount_data == "":
print(
"""Expected second arg to be the mounting data. For NFS, this should be a CSV of IPs/FQDNS for the NFS servers with NFSExported dirs.
For example, '10.20.0.1:/folder1/nfsfolder2,10.20.0.1:/folder1/nfsfolder2'
For azure files this should be the azure files connection details."""
)
print_help()
exit(2)
print("Mounting type: {}".format(sys.argv[1]))
print("Mounting data: {}".format(sys.argv[2]))
mount_point_permissions = 0o0777 # Todo: What permissions does this really need?
primary_mount_folder = "/media/primarynfs"
seconday_mount_folder_prefix = "/media/secondarynfs"
fstab_file_path = "/etc/fstab"
try:
# Create folder to mount to
if not os.path.exists(primary_mount_folder):
os.makedirs(primary_mount_folder)
os.chmod(primary_mount_folder, mount_point_permissions)
# Make a backup of the fstab config in case we go wrong
shutil.copy(fstab_file_path, "/etc/fstab-mountscriptbackup")
# Clear existing NFS mount data to make script idempotent
remove_lines_containing(fstab_file_path, primary_mount_folder)
remove_lines_containing(fstab_file_path, seconday_mount_folder_prefix)
if mount_type.lower() == "azurefiles":
mount_azurefiles(fstab_file_path, mount_data, primary_mount_folder)
if mount_type.lower() == "nfs":
mount_nfs(fstab_file_path, mount_data, primary_mount_folder, mount_point_permissions)
except IOError as e:
print("I/O error({0})".format(e))
exit(1)
except:
print("Unexpected error:{0}".format, sys.exc_info())
raise
print("Done editing fstab ... attempting mount")
def mount_all():
subprocess.check_call(["mount", "-a"])
retryFunc("mount shares", mount_all, 100)
def retryFunc(desc, funcToRetry, maxRetries):
# Retry mounting for a while to handle race where VM exists before storage
# or temporary issue with storage
print("Attempting, with retries, to: {}".format(desc))
retryExponentialFactor = 3
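# Illustrative: with a factor of 3 the waits between attempts are
# 3s, 6s, 9s, ... (linear in the attempt number, not truly exponential).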
for i in range(1, maxRetries + 1):
if i == maxRetries:
print("Failed after max retries")
exit(3)
try:
print("Attempt #{}".format(str(i)))
funcToRetry()
except subprocess.CalledProcessError as e:
print("Failed:{0}".format(e))
retry_in = i * retryExponentialFactor
print("retrying in {0}secs".format(retry_in))
time.sleep(retry_in)
continue
else:
print("Succeeded to: {0} after {1} retries".format(desc, i))
break
def mount_nfs(fstab_file_path, mount_data, primary_mount_folder, mount_point_permissions):
# Other apt instances on the machine may be doing an install
# this means ours will fail so we retry to ensure success
def install_nfs():
install_apt_package("nfs-common")
retryFunc("install nfs-common", install_nfs, 20)
ips = mount_data.split(",")
print("Found ips:{}".format(",".join(ips)))
# Deterministically select a primary node from the available
# servers for this vm to use. By using the ip as a seed this ensures
# re-running will get the same node as primary.
# This enables spreading the load across multiple storage servers in a cluster
# like `Avere` or `Gluster` for higher throughput.
current_ip = get_ip_address()
current_ip_int = ip_as_int(current_ip)
print("Using ip as int: {0} for random seed".format((current_ip_int)))
random.seed(current_ip_int)
random_node = random.randint(0, len(ips) - 1)
primary = ips[random_node]
ips.remove(primary)
secondarys = ips
print("Primary node selected: {}".format(primary))
print("Secondary nodes selected: {}".format(",".join(secondarys)))
with open(fstab_file_path, "a") as file:
print("Mounting primary")
file.write(
"\n{} {} {}".format(
primary.strip(), primary_mount_folder, default_mount_options_nfs
)
)
print("Mounting secondarys")
number = 0
for ip in secondarys:
number = number + 1
folder = "/media/secondarynfs" + str(number)
if not os.path.exists(folder):
os.makedirs(folder)
os.chmod(folder, mount_point_permissions)
file.write(
"\n{} {} {}".format(ip.strip(), folder, default_mount_options_nfs)
)
def mount_azurefiles(fstab_file_path, mount_data, primary_mount_folder):
# Other apt instances on the machine may be doing an install
# this means ours will fail so we retry to ensure success
def install_cifs():
install_apt_package("cifs-utils")
retryFunc("install cifs-utils", install_cifs, 20)
params = mount_data.split(",")
if len(params) != 3:
print("Wrong params for azure files mount, expected 3 as CSV")
print_help()
exit(1)
account_name = params[0]
share_name = params[1]
account_key = params[2]
with open(fstab_file_path, "a") as file:
print("Mounting primary")
file.write(
"\n//{0}.file.core.windows.net/{1} {2} cifs username={0},password={3},{4}".format(
account_name,
share_name,
primary_mount_folder,
account_key,
default_mount_options_cifs,
)
)
if __name__ == "__main__":
main()
|
apache-2.0
|
dotCipher/xcrypt-linux-kernel-module
|
tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/Core.py
|
11088
|
3246
|
# Core.py - Python extension for perf script, core functions
#
# Copyright (C) 2010 by Tom Zanussi <[email protected]>
#
# This software may be distributed under the terms of the GNU General
# Public License ("GPL") version 2 as published by the Free Software
# Foundation.
from collections import defaultdict
def autodict():
return defaultdict(autodict)
flag_fields = autodict()
symbolic_fields = autodict()
def define_flag_field(event_name, field_name, delim):
flag_fields[event_name][field_name]['delim'] = delim
def define_flag_value(event_name, field_name, value, field_str):
flag_fields[event_name][field_name]['values'][value] = field_str
def define_symbolic_field(event_name, field_name):
# nothing to do, really
pass
def define_symbolic_value(event_name, field_name, value, field_str):
symbolic_fields[event_name][field_name]['values'][value] = field_str
def flag_str(event_name, field_name, value):
string = ""
if flag_fields[event_name][field_name]:
print_delim = 0
keys = flag_fields[event_name][field_name]['values'].keys()
keys.sort()
for idx in keys:
if not value and not idx:
string += flag_fields[event_name][field_name]['values'][idx]
break
if idx and (value & idx) == idx:
if print_delim and flag_fields[event_name][field_name]['delim']:
string += " " + flag_fields[event_name][field_name]['delim'] + " "
string += flag_fields[event_name][field_name]['values'][idx]
print_delim = 1
value &= ~idx
return string
def symbol_str(event_name, field_name, value):
string = ""
if symbolic_fields[event_name][field_name]:
keys = symbolic_fields[event_name][field_name]['values'].keys()
keys.sort()
for idx in keys:
if not value and not idx:
string = symbolic_fields[event_name][field_name]['values'][idx]
break
if (value == idx):
string = symbolic_fields[event_name][field_name]['values'][idx]
break
return string
trace_flags = { 0x00: "NONE", \
0x01: "IRQS_OFF", \
0x02: "IRQS_NOSUPPORT", \
0x04: "NEED_RESCHED", \
0x08: "HARDIRQ", \
0x10: "SOFTIRQ" }
def trace_flag_str(value):
string = ""
print_delim = 0
keys = trace_flags.keys()
for idx in keys:
if not value and not idx:
string += "NONE"
break
if idx and (value & idx) == idx:
if print_delim:
string += " | ";
string += trace_flags[idx]
print_delim = 1
value &= ~idx
return string
def taskState(state):
states = {
0 : "R",
1 : "S",
2 : "D",
64: "DEAD"
}
if state not in states:
return "Unknown"
return states[state]
class EventHeaders:
def __init__(self, common_cpu, common_secs, common_nsecs,
common_pid, common_comm):
self.cpu = common_cpu
self.secs = common_secs
self.nsecs = common_nsecs
self.pid = common_pid
self.comm = common_comm
def ts(self):
return (self.secs * (10 ** 9)) + self.nsecs
def ts_format(self):
return "%d.%d" % (self.secs, int(self.nsecs / 1000))
|
gpl-2.0
|
etos/django
|
django/conf/locale/ka/formats.py
|
65
|
2113
|
# This file is distributed under the same license as the Django package.
#
# The *_FORMAT strings use the Django date format syntax,
# see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATE_FORMAT = 'l, j F, Y'
TIME_FORMAT = 'h:i a'
DATETIME_FORMAT = 'j F, Y h:i a'
YEAR_MONTH_FORMAT = 'F, Y'
MONTH_DAY_FORMAT = 'j F'
SHORT_DATE_FORMAT = 'j.M.Y'
SHORT_DATETIME_FORMAT = 'j.M.Y H:i'
FIRST_DAY_OF_WEEK = 1 # (Monday)
# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see http://docs.python.org/library/datetime.html#strftime-strptime-behavior
# Kept ISO formats as they are in first position
DATE_INPUT_FORMATS = [
'%Y-%m-%d', '%m/%d/%Y', '%m/%d/%y', # '2006-10-25', '10/25/2006', '10/25/06'
# '%d %b %Y', '%d %b, %Y', '%d %b. %Y', # '25 Oct 2006', '25 Oct, 2006', '25 Oct. 2006'
# '%d %B %Y', '%d %B, %Y', # '25 October 2006', '25 October, 2006'
# '%d.%m.%Y', '%d.%m.%y', # '25.10.2006', '25.10.06'
]
DATETIME_INPUT_FORMATS = [
'%Y-%m-%d %H:%M:%S', # '2006-10-25 14:30:59'
'%Y-%m-%d %H:%M:%S.%f', # '2006-10-25 14:30:59.000200'
'%Y-%m-%d %H:%M', # '2006-10-25 14:30'
'%Y-%m-%d', # '2006-10-25'
'%d.%m.%Y %H:%M:%S', # '25.10.2006 14:30:59'
'%d.%m.%Y %H:%M:%S.%f', # '25.10.2006 14:30:59.000200'
'%d.%m.%Y %H:%M', # '25.10.2006 14:30'
'%d.%m.%Y', # '25.10.2006'
'%d.%m.%y %H:%M:%S', # '25.10.06 14:30:59'
'%d.%m.%y %H:%M:%S.%f', # '25.10.06 14:30:59.000200'
'%d.%m.%y %H:%M', # '25.10.06 14:30'
'%d.%m.%y', # '25.10.06'
'%m/%d/%Y %H:%M:%S', # '10/25/2006 14:30:59'
'%m/%d/%Y %H:%M:%S.%f', # '10/25/2006 14:30:59.000200'
'%m/%d/%Y %H:%M', # '10/25/2006 14:30'
'%m/%d/%Y', # '10/25/2006'
'%m/%d/%y %H:%M:%S', # '10/25/06 14:30:59'
'%m/%d/%y %H:%M:%S.%f', # '10/25/06 14:30:59.000200'
'%m/%d/%y %H:%M', # '10/25/06 14:30'
'%m/%d/%y', # '10/25/06'
]
DECIMAL_SEPARATOR = '.'
THOUSAND_SEPARATOR = " "
NUMBER_GROUPING = 3
|
bsd-3-clause
|
tuxfux-hlp-notes/python-batches
|
archieves/batch-58/modules/sheets/lib/python2.7/site-packages/pip/cmdoptions.py
|
361
|
9507
|
"""
shared options and groups
The principle here is to define options once, but *not* instantiate them globally.
One reason is that options with action='append' can carry state between parses.
pip parses general options twice internally, and shouldn't pass on state.
To be consistent, all options will follow this design.
"""
import copy
from optparse import OptionGroup, SUPPRESS_HELP, Option
from pip.locations import build_prefix, default_log_file
def make_option_group(group, parser):
"""
Return an OptionGroup object
group -- assumed to be dict with 'name' and 'options' keys
parser -- an optparse Parser
"""
option_group = OptionGroup(parser, group['name'])
for option in group['options']:
option_group.add_option(option.make())
return option_group
class OptionMaker(object):
"""Class that stores the args/kwargs that would be used to make an Option,
    for making them later, and uses deepcopy to reset state."""
def __init__(self, *args, **kwargs):
self.args = args
self.kwargs = kwargs
def make(self):
args_copy = copy.deepcopy(self.args)
kwargs_copy = copy.deepcopy(self.kwargs)
return Option(*args_copy, **kwargs_copy)
###########
# options #
###########
help_ = OptionMaker(
'-h', '--help',
dest='help',
action='help',
help='Show help.')
require_virtualenv = OptionMaker(
# Run only if inside a virtualenv, bail if not.
'--require-virtualenv', '--require-venv',
dest='require_venv',
action='store_true',
default=False,
help=SUPPRESS_HELP)
verbose = OptionMaker(
'-v', '--verbose',
dest='verbose',
action='count',
default=0,
help='Give more output. Option is additive, and can be used up to 3 times.')
version = OptionMaker(
'-V', '--version',
dest='version',
action='store_true',
help='Show version and exit.')
quiet = OptionMaker(
'-q', '--quiet',
dest='quiet',
action='count',
default=0,
help='Give less output.')
log = OptionMaker(
'--log',
dest='log',
metavar='path',
help='Path to a verbose appending log. This log is inactive by default.')
log_explicit_levels = OptionMaker(
    # Writes the log levels explicitly to the log
'--log-explicit-levels',
dest='log_explicit_levels',
action='store_true',
default=False,
help=SUPPRESS_HELP)
log_file = OptionMaker(
# The default log file
'--log-file', '--local-log',
dest='log_file',
metavar='path',
default=default_log_file,
help='Path to a verbose non-appending log, that only logs failures. This log is active by default at %default.')
no_input = OptionMaker(
# Don't ask for input
'--no-input',
dest='no_input',
action='store_true',
default=False,
help=SUPPRESS_HELP)
proxy = OptionMaker(
'--proxy',
dest='proxy',
type='str',
default='',
help="Specify a proxy in the form [user:passwd@]proxy.server:port.")
timeout = OptionMaker(
'--timeout', '--default-timeout',
metavar='sec',
dest='timeout',
type='float',
default=15,
help='Set the socket timeout (default %default seconds).')
default_vcs = OptionMaker(
# The default version control system for editables, e.g. 'svn'
'--default-vcs',
dest='default_vcs',
type='str',
default='',
help=SUPPRESS_HELP)
skip_requirements_regex = OptionMaker(
# A regex to be used to skip requirements
'--skip-requirements-regex',
dest='skip_requirements_regex',
type='str',
default='',
help=SUPPRESS_HELP)
exists_action = OptionMaker(
# Option when path already exist
'--exists-action',
dest='exists_action',
type='choice',
choices=['s', 'i', 'w', 'b'],
default=[],
action='append',
metavar='action',
help="Default action when a path already exists: "
"(s)witch, (i)gnore, (w)ipe, (b)ackup.")
cert = OptionMaker(
'--cert',
dest='cert',
type='str',
default='',
metavar='path',
help = "Path to alternate CA bundle.")
index_url = OptionMaker(
'-i', '--index-url', '--pypi-url',
dest='index_url',
metavar='URL',
default='https://pypi.python.org/simple/',
help='Base URL of Python Package Index (default %default).')
extra_index_url = OptionMaker(
'--extra-index-url',
dest='extra_index_urls',
metavar='URL',
action='append',
default=[],
help='Extra URLs of package indexes to use in addition to --index-url.')
no_index = OptionMaker(
'--no-index',
dest='no_index',
action='store_true',
default=False,
help='Ignore package index (only looking at --find-links URLs instead).')
find_links = OptionMaker(
'-f', '--find-links',
dest='find_links',
action='append',
default=[],
metavar='url',
help="If a url or path to an html file, then parse for links to archives. If a local path or file:// url that's a directory, then look for archives in the directory listing.")
# TODO: Remove after 1.6
use_mirrors = OptionMaker(
'-M', '--use-mirrors',
dest='use_mirrors',
action='store_true',
default=False,
help=SUPPRESS_HELP)
# TODO: Remove after 1.6
mirrors = OptionMaker(
'--mirrors',
dest='mirrors',
metavar='URL',
action='append',
default=[],
help=SUPPRESS_HELP)
allow_external = OptionMaker(
"--allow-external",
dest="allow_external",
action="append",
default=[],
metavar="PACKAGE",
help="Allow the installation of externally hosted files",
)
allow_all_external = OptionMaker(
"--allow-all-external",
dest="allow_all_external",
action="store_true",
default=False,
help="Allow the installation of all externally hosted files",
)
# Remove after 1.7
no_allow_external = OptionMaker(
"--no-allow-external",
dest="allow_all_external",
action="store_false",
default=False,
help=SUPPRESS_HELP,
)
# Remove --allow-insecure after 1.7
allow_unsafe = OptionMaker(
"--allow-unverified", "--allow-insecure",
dest="allow_unverified",
action="append",
default=[],
metavar="PACKAGE",
help="Allow the installation of insecure and unverifiable files",
)
# Remove after 1.7
no_allow_unsafe = OptionMaker(
"--no-allow-insecure",
dest="allow_all_insecure",
action="store_false",
default=False,
help=SUPPRESS_HELP
)
# Remove after 1.5
process_dependency_links = OptionMaker(
"--process-dependency-links",
dest="process_dependency_links",
action="store_true",
default=False,
help="Enable the processing of dependency links.",
)
requirements = OptionMaker(
'-r', '--requirement',
dest='requirements',
action='append',
default=[],
metavar='file',
help='Install from the given requirements file. '
'This option can be used multiple times.')
use_wheel = OptionMaker(
'--use-wheel',
dest='use_wheel',
action='store_true',
help=SUPPRESS_HELP,
)
no_use_wheel = OptionMaker(
'--no-use-wheel',
dest='use_wheel',
action='store_false',
default=True,
    help=('Do not find and prefer wheel archives when searching indexes and '
'find-links locations.'),
)
download_cache = OptionMaker(
'--download-cache',
dest='download_cache',
metavar='dir',
default=None,
help='Cache downloaded packages in <dir>.')
no_deps = OptionMaker(
'--no-deps', '--no-dependencies',
dest='ignore_dependencies',
action='store_true',
default=False,
help="Don't install package dependencies.")
build_dir = OptionMaker(
'-b', '--build', '--build-dir', '--build-directory',
dest='build_dir',
metavar='dir',
default=build_prefix,
help='Directory to unpack packages into and build in. '
'The default in a virtualenv is "<venv path>/build". '
'The default for global installs is "<OS temp dir>/pip_build_<username>".')
install_options = OptionMaker(
'--install-option',
dest='install_options',
action='append',
metavar='options',
help="Extra arguments to be supplied to the setup.py install "
"command (use like --install-option=\"--install-scripts=/usr/local/bin\"). "
"Use multiple --install-option options to pass multiple options to setup.py install. "
"If you are using an option with a directory path, be sure to use absolute path.")
global_options = OptionMaker(
'--global-option',
dest='global_options',
action='append',
metavar='options',
help="Extra global options to be supplied to the setup.py "
"call before the install command.")
no_clean = OptionMaker(
'--no-clean',
action='store_true',
default=False,
help="Don't clean up build directories.")
##########
# groups #
##########
general_group = {
'name': 'General Options',
'options': [
help_,
require_virtualenv,
verbose,
version,
quiet,
log_file,
log,
log_explicit_levels,
no_input,
proxy,
timeout,
default_vcs,
skip_requirements_regex,
exists_action,
cert,
]
}
index_group = {
'name': 'Package Index Options',
'options': [
index_url,
extra_index_url,
no_index,
find_links,
use_mirrors,
mirrors,
allow_external,
allow_all_external,
no_allow_external,
allow_unsafe,
no_allow_unsafe,
process_dependency_links,
]
}
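if __name__ == '__main__':
    # Editor's illustrative sketch (not part of pip): the shared OptionMakers are
    # only turned into real optparse Options via .make(), so each parser gets
    # fresh instances and no 'append' state leaks between parses.
    from optparse import OptionParser
    parser = OptionParser()
    parser.add_option(verbose.make())
    parser.add_option_group(make_option_group(index_group, parser))
    opts, args = parser.parse_args(['-v', '--no-index'])
    print("verbose=%s no_index=%s" % (opts.verbose, opts.no_index))  # verbose=1 no_index=True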
|
gpl-3.0
|
rolandmansilla/microblog
|
flask/lib/python2.7/site-packages/coverage/__init__.py
|
43
|
1177
|
# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
# For details: https://bitbucket.org/ned/coveragepy/src/default/NOTICE.txt
"""Code coverage measurement for Python.
Ned Batchelder
http://nedbatchelder.com/code/coverage
"""
from coverage.version import __version__, __url__, version_info
from coverage.control import Coverage, process_startup
from coverage.data import CoverageData
from coverage.misc import CoverageException
from coverage.plugin import CoveragePlugin, FileTracer, FileReporter
# Backward compatibility.
coverage = Coverage
# On Windows, we encode and decode deep enough that something goes wrong and
# the encodings.utf_8 module is loaded and then unloaded, I don't know why.
# Adding a reference here prevents it from being unloaded. Yuk.
import encodings.utf_8
# Because of the "from coverage.control import fooey" lines at the top of the
# file, there's an entry for coverage.coverage in sys.modules, mapped to None.
# This makes some inspection tools (like pydoc) unable to find the class
# coverage.coverage. So remove that entry.
import sys
try:
del sys.modules['coverage.coverage']
except KeyError:
pass
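# Editor's note (illustrative sketch, not part of the original file): typical
# programmatic use of the re-exported Coverage class looks like
#     import coverage
#     cov = coverage.Coverage()
#     cov.start()
#     ...                      # run the code under measurement
#     cov.stop()
#     cov.report()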
|
bsd-3-clause
|
erickpeirson/eratosthenes
|
eratosthenes/eratosthenes/managers.py
|
2
|
3658
|
from django.conf import settings
import requests
from bs4 import BeautifulSoup
import json
class RepositoryManager(object):
__name__ = 'RepositoryManager'
def __init__(self, endpoint, **kwargs):
self.endpoint = endpoint
for k, v in kwargs.iteritems():
setattr(self, k, v)
def __repr__(self):
return self.__name__
def collections(self):
return []
def collection(self, collection_id):
return []
def get(self, uri):
return {}
def browse(self):
return []
def search(self, query):
return []
class JARSManager(RepositoryManager):
__name__ = 'JARS'
getPattern = '{endpoint}/rest/resource/?uri={uri}'
browsePattern = '{endpoint}/rest/resource/'
collectionPattern = '{endpoint}/rest/collection/'
collectionBrowsePattern = '{endpoint}/rest/collection/{collection}/'
contentPattern = '{endpoint}{content_location}'
token = settings.JARS_KEY
def _cast(self, resource):
return {
'title': resource['name'],
'uri': resource['uri'],
}
def _cast_collection(self, collection):
return {
'id': collection['id'],
'uri': collection['uri'],
'title': collection['name'],
}
def collections(self):
remote = self.collectionPattern.format(endpoint=self.endpoint)
response = requests.get(remote, allow_redirects=True)
jdata = json.loads(response.text)
return [self._cast_collection(c) for c in jdata]
def collection(self, collection_id):
remote = self.collectionBrowsePattern.format(
endpoint=self.endpoint,
collection=collection_id
)
response = requests.get(remote, allow_redirects=True)
jdata = json.loads(response.text)['resources']
return [self._cast(r) for r in jdata if r['stored']]
def browse(self):
remote = self.browsePattern.format(endpoint=self.endpoint)
response = requests.get(remote, allow_redirects=True)
jdata = json.loads(response.text)
return [self._cast(r) for r in jdata if r['stored']]
def get(self, uri):
remote = self.getPattern.format(endpoint=self.endpoint, uri=uri)
headers = {
'Authorization': 'Token {token}'.format(token=self.token),
}
response = requests.get(remote, allow_redirects=True)
try:
jdata = json.loads(response.text)[0]
        except Exception:  # malformed or empty response from the repository
return
remoteContent = self.contentPattern.format(
endpoint = self.endpoint,
content_location = jdata['content_location']
)
responseContent = requests.get(remoteContent,
allow_redirects=True,
headers=headers)
if responseContent.status_code != requests.codes.ok:
raise RuntimeError('Error retrieving resource')
textData = {
'title': jdata['name'],
'content': responseContent.text,
'content-type': response.headers['content-type'],
}
return textData
class WebManager(RepositoryManager):
__name__ = 'WWW'
def get(self, uri):
response = requests.get(uri, allow_redirects=True)
soup = BeautifulSoup(response.text, "html.parser")
textData = {
'title': soup.title.string,
'content': response.text,
'content-type': response.headers['content-type'],
}
return textData
repositoryManagers = [
('JARS', JARSManager),
('WWW', WebManager),
]
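# Editor's note (illustrative sketch, not part of the original module): both managers
# share the RepositoryManager interface, e.g.
#     manager = WebManager(endpoint=None)
#     data = manager.get('https://example.com/')
#     # -> {'title': ..., 'content': ..., 'content-type': ...}
# Note that importing this module already requires configured Django settings,
# because JARSManager.token reads settings.JARS_KEY at class-definition time.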
|
gpl-2.0
|
mkaluza/external_chromium_org
|
chrome/common/extensions/docs/server2/test_file_system_test.py
|
23
|
6554
|
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from copy import deepcopy
from file_system import FileNotFoundError, StatInfo
from test_file_system import TestFileSystem, MoveTo
import unittest
_TEST_DATA = {
'404.html': '404.html contents',
'apps': {
'a11y.html': 'a11y.html contents',
'about_apps.html': 'about_apps.html contents',
'fakedir': {
'file.html': 'file.html contents'
}
},
'extensions': {
'activeTab.html': 'activeTab.html contents',
'alarms.html': 'alarms.html contents'
}
}
def _Get(fn):
'''Returns a function which calls Future.Get on the result of |fn|.
'''
return lambda *args: fn(*args).Get()
class TestFileSystemTest(unittest.TestCase):
def testEmptyFileSystem(self):
self._TestMetasyntacticPaths(TestFileSystem({}))
def testNonemptyFileNotFoundErrors(self):
fs = TestFileSystem(deepcopy(_TEST_DATA))
self._TestMetasyntacticPaths(fs)
self.assertRaises(FileNotFoundError, _Get(fs.Read), ['404.html/'])
self.assertRaises(FileNotFoundError, _Get(fs.Read), ['apps/foo/'])
self.assertRaises(FileNotFoundError, _Get(fs.Read), ['apps/foo.html'])
self.assertRaises(FileNotFoundError, _Get(fs.Read), ['apps/foo.html'])
self.assertRaises(FileNotFoundError, _Get(fs.Read), ['apps/foo/',
'apps/foo.html'])
self.assertRaises(FileNotFoundError, _Get(fs.Read), ['apps/foo/',
'apps/a11y.html'])
def _TestMetasyntacticPaths(self, fs):
self.assertRaises(FileNotFoundError, _Get(fs.Read), ['foo'])
self.assertRaises(FileNotFoundError, _Get(fs.Read), ['bar/'])
self.assertRaises(FileNotFoundError, _Get(fs.Read), ['bar/baz'])
self.assertRaises(FileNotFoundError, _Get(fs.Read), ['foo',
'bar/',
'bar/baz'])
self.assertRaises(FileNotFoundError, fs.Stat, 'foo')
self.assertRaises(FileNotFoundError, fs.Stat, 'bar/')
self.assertRaises(FileNotFoundError, fs.Stat, 'bar/baz')
def testNonemptySuccess(self):
fs = TestFileSystem(deepcopy(_TEST_DATA))
self.assertEqual('404.html contents', fs.ReadSingle('404.html').Get())
self.assertEqual('404.html contents', fs.ReadSingle('/404.html').Get())
self.assertEqual('a11y.html contents',
fs.ReadSingle('apps/a11y.html').Get())
self.assertEqual(['404.html', 'apps/', 'extensions/'],
sorted(fs.ReadSingle('/').Get()))
self.assertEqual(['a11y.html', 'about_apps.html', 'fakedir/'],
sorted(fs.ReadSingle('apps/').Get()))
self.assertEqual(['a11y.html', 'about_apps.html', 'fakedir/'],
sorted(fs.ReadSingle('/apps/').Get()))
def testReadFiles(self):
fs = TestFileSystem(deepcopy(_TEST_DATA))
self.assertEqual('404.html contents',
fs.ReadSingle('404.html').Get())
self.assertEqual('404.html contents',
fs.ReadSingle('/404.html').Get())
self.assertEqual('a11y.html contents',
fs.ReadSingle('apps/a11y.html').Get())
self.assertEqual('a11y.html contents',
fs.ReadSingle('/apps/a11y.html').Get())
self.assertEqual('file.html contents',
fs.ReadSingle('apps/fakedir/file.html').Get())
self.assertEqual('file.html contents',
fs.ReadSingle('/apps/fakedir/file.html').Get())
def testReadDirs(self):
fs = TestFileSystem(deepcopy(_TEST_DATA))
self.assertEqual(['404.html', 'apps/', 'extensions/'],
sorted(fs.ReadSingle('/').Get()))
self.assertEqual(['a11y.html', 'about_apps.html', 'fakedir/'],
sorted(fs.ReadSingle('/apps/').Get()))
self.assertEqual(['a11y.html', 'about_apps.html', 'fakedir/'],
sorted(fs.ReadSingle('apps/').Get()))
self.assertEqual(['file.html'], fs.ReadSingle('/apps/fakedir/').Get())
self.assertEqual(['file.html'], fs.ReadSingle('apps/fakedir/').Get())
def testStat(self):
fs = TestFileSystem(deepcopy(_TEST_DATA))
self.assertRaises(FileNotFoundError, fs.Stat, 'foo')
self.assertRaises(FileNotFoundError, fs.Stat, '404.html/')
self.assertEquals(StatInfo('0'), fs.Stat('404.html'))
self.assertEquals(StatInfo('0', child_versions={
'activeTab.html': '0',
'alarms.html': '0',
}), fs.Stat('extensions/'))
fs.IncrementStat()
self.assertEquals(StatInfo('1'), fs.Stat('404.html'))
self.assertEquals(StatInfo('1', child_versions={
'activeTab.html': '1',
'alarms.html': '1',
}), fs.Stat('extensions/'))
fs.IncrementStat(path='404.html')
self.assertEquals(StatInfo('2'), fs.Stat('404.html'))
self.assertEquals(StatInfo('1', child_versions={
'activeTab.html': '1',
'alarms.html': '1',
}), fs.Stat('extensions/'))
fs.IncrementStat()
self.assertEquals(StatInfo('3'), fs.Stat('404.html'))
self.assertEquals(StatInfo('2', child_versions={
'activeTab.html': '2',
'alarms.html': '2',
}), fs.Stat('extensions/'))
fs.IncrementStat(path='extensions/')
self.assertEquals(StatInfo('3'), fs.Stat('404.html'))
self.assertEquals(StatInfo('3', child_versions={
'activeTab.html': '2',
'alarms.html': '2',
}), fs.Stat('extensions/'))
fs.IncrementStat(path='extensions/alarms.html')
self.assertEquals(StatInfo('3'), fs.Stat('404.html'))
self.assertEquals(StatInfo('3', child_versions={
'activeTab.html': '2',
'alarms.html': '3',
}), fs.Stat('extensions/'))
def testMoveTo(self):
self.assertEqual({'foo': {'a': 'b', 'c': 'd'}},
MoveTo('foo', {'a': 'b', 'c': 'd'}))
self.assertEqual({'foo': {'bar': {'a': 'b', 'c': 'd'}}},
MoveTo('foo/bar', {'a': 'b', 'c': 'd'}))
self.assertEqual({'foo': {'bar': {'baz': {'a': 'b'}}}},
MoveTo('foo/bar/baz', {'a': 'b'}))
if __name__ == '__main__':
unittest.main()
|
bsd-3-clause
|
bslatkin/pycon2014
|
lib/asyncio-0.4.1/asyncio/locks.py
|
16
|
14325
|
"""Synchronization primitives."""
__all__ = ['Lock', 'Event', 'Condition', 'Semaphore', 'BoundedSemaphore']
import collections
from . import events
from . import futures
from . import tasks
class _ContextManager:
"""Context manager.
This enables the following idiom for acquiring and releasing a
lock around a block:
with (yield from lock):
<block>
while failing loudly when accidentally using:
with lock:
<block>
"""
def __init__(self, lock):
self._lock = lock
def __enter__(self):
# We have no use for the "as ..." clause in the with
# statement for locks.
return None
def __exit__(self, *args):
try:
self._lock.release()
finally:
self._lock = None # Crudely prevent reuse.
class Lock:
"""Primitive lock objects.
A primitive lock is a synchronization primitive that is not owned
by a particular coroutine when locked. A primitive lock is in one
of two states, 'locked' or 'unlocked'.
It is created in the unlocked state. It has two basic methods,
acquire() and release(). When the state is unlocked, acquire()
changes the state to locked and returns immediately. When the
state is locked, acquire() blocks until a call to release() in
another coroutine changes it to unlocked, then the acquire() call
resets it to locked and returns. The release() method should only
be called in the locked state; it changes the state to unlocked
and returns immediately. If an attempt is made to release an
unlocked lock, a RuntimeError will be raised.
When more than one coroutine is blocked in acquire() waiting for
the state to turn to unlocked, only one coroutine proceeds when a
    release() call resets the state to unlocked; the first coroutine that
    is blocked in acquire() is the one that proceeds.
acquire() is a coroutine and should be called with 'yield from'.
Locks also support the context manager protocol. '(yield from lock)'
should be used as context manager expression.
Usage:
lock = Lock()
...
yield from lock
try:
...
finally:
lock.release()
Context manager usage:
lock = Lock()
...
with (yield from lock):
...
Lock objects can be tested for locking state:
if not lock.locked():
yield from lock
else:
# lock is acquired
...
"""
def __init__(self, *, loop=None):
self._waiters = collections.deque()
self._locked = False
if loop is not None:
self._loop = loop
else:
self._loop = events.get_event_loop()
def __repr__(self):
res = super().__repr__()
extra = 'locked' if self._locked else 'unlocked'
if self._waiters:
extra = '{},waiters:{}'.format(extra, len(self._waiters))
return '<{} [{}]>'.format(res[1:-1], extra)
def locked(self):
"""Return True if lock is acquired."""
return self._locked
@tasks.coroutine
def acquire(self):
"""Acquire a lock.
This method blocks until the lock is unlocked, then sets it to
locked and returns True.
"""
if not self._waiters and not self._locked:
self._locked = True
return True
fut = futures.Future(loop=self._loop)
self._waiters.append(fut)
try:
yield from fut
self._locked = True
return True
finally:
self._waiters.remove(fut)
def release(self):
"""Release a lock.
When the lock is locked, reset it to unlocked, and return.
If any other coroutines are blocked waiting for the lock to become
unlocked, allow exactly one of them to proceed.
When invoked on an unlocked lock, a RuntimeError is raised.
There is no return value.
"""
if self._locked:
self._locked = False
# Wake up the first waiter who isn't cancelled.
for fut in self._waiters:
if not fut.done():
fut.set_result(True)
break
else:
raise RuntimeError('Lock is not acquired.')
def __enter__(self):
raise RuntimeError(
'"yield from" should be used as context manager expression')
def __exit__(self, *args):
# This must exist because __enter__ exists, even though that
# always raises; that's how the with-statement works.
pass
def __iter__(self):
# This is not a coroutine. It is meant to enable the idiom:
#
# with (yield from lock):
# <block>
#
# as an alternative to:
#
# yield from lock.acquire()
# try:
# <block>
# finally:
# lock.release()
yield from self.acquire()
return _ContextManager(self)
class Event:
"""Asynchronous equivalent to threading.Event.
Class implementing event objects. An event manages a flag that can be set
to true with the set() method and reset to false with the clear() method.
The wait() method blocks until the flag is true. The flag is initially
false.
"""
def __init__(self, *, loop=None):
self._waiters = collections.deque()
self._value = False
if loop is not None:
self._loop = loop
else:
self._loop = events.get_event_loop()
def __repr__(self):
res = super().__repr__()
extra = 'set' if self._value else 'unset'
if self._waiters:
extra = '{},waiters:{}'.format(extra, len(self._waiters))
return '<{} [{}]>'.format(res[1:-1], extra)
def is_set(self):
"""Return True if and only if the internal flag is true."""
return self._value
def set(self):
"""Set the internal flag to true. All coroutines waiting for it to
        become true are awakened. Coroutines that call wait() once the flag is
true will not block at all.
"""
if not self._value:
self._value = True
for fut in self._waiters:
if not fut.done():
fut.set_result(True)
def clear(self):
"""Reset the internal flag to false. Subsequently, coroutines calling
wait() will block until set() is called to set the internal flag
to true again."""
self._value = False
@tasks.coroutine
def wait(self):
"""Block until the internal flag is true.
If the internal flag is true on entry, return True
immediately. Otherwise, block until another coroutine calls
set() to set the flag to true, then return True.
"""
if self._value:
return True
fut = futures.Future(loop=self._loop)
self._waiters.append(fut)
try:
yield from fut
return True
finally:
self._waiters.remove(fut)
class Condition:
"""Asynchronous equivalent to threading.Condition.
This class implements condition variable objects. A condition variable
allows one or more coroutines to wait until they are notified by another
coroutine.
A new Lock object is created and used as the underlying lock.
"""
def __init__(self, *, loop=None):
if loop is not None:
self._loop = loop
else:
self._loop = events.get_event_loop()
# Lock as an attribute as in threading.Condition.
lock = Lock(loop=self._loop)
self._lock = lock
# Export the lock's locked(), acquire() and release() methods.
self.locked = lock.locked
self.acquire = lock.acquire
self.release = lock.release
self._waiters = collections.deque()
def __repr__(self):
res = super().__repr__()
extra = 'locked' if self.locked() else 'unlocked'
if self._waiters:
extra = '{},waiters:{}'.format(extra, len(self._waiters))
return '<{} [{}]>'.format(res[1:-1], extra)
@tasks.coroutine
def wait(self):
"""Wait until notified.
If the calling coroutine has not acquired the lock when this
method is called, a RuntimeError is raised.
This method releases the underlying lock, and then blocks
until it is awakened by a notify() or notify_all() call for
the same condition variable in another coroutine. Once
awakened, it re-acquires the lock and returns True.
"""
if not self.locked():
raise RuntimeError('cannot wait on un-acquired lock')
self.release()
try:
fut = futures.Future(loop=self._loop)
self._waiters.append(fut)
try:
yield from fut
return True
finally:
self._waiters.remove(fut)
finally:
yield from self.acquire()
@tasks.coroutine
def wait_for(self, predicate):
"""Wait until a predicate becomes true.
        The predicate should be a callable whose result will be
interpreted as a boolean value. The final predicate value is
the return value.
"""
result = predicate()
while not result:
yield from self.wait()
result = predicate()
return result
def notify(self, n=1):
"""By default, wake up one coroutine waiting on this condition, if any.
If the calling coroutine has not acquired the lock when this method
is called, a RuntimeError is raised.
This method wakes up at most n of the coroutines waiting for the
condition variable; it is a no-op if no coroutines are waiting.
Note: an awakened coroutine does not actually return from its
wait() call until it can reacquire the lock. Since notify() does
not release the lock, its caller should.
"""
if not self.locked():
raise RuntimeError('cannot notify on un-acquired lock')
idx = 0
for fut in self._waiters:
if idx >= n:
break
if not fut.done():
idx += 1
fut.set_result(False)
def notify_all(self):
"""Wake up all threads waiting on this condition. This method acts
like notify(), but wakes up all waiting threads instead of one. If the
calling thread has not acquired the lock when this method is called,
a RuntimeError is raised.
"""
self.notify(len(self._waiters))
def __enter__(self):
raise RuntimeError(
'"yield from" should be used as context manager expression')
def __exit__(self, *args):
pass
def __iter__(self):
# See comment in Lock.__iter__().
yield from self.acquire()
return _ContextManager(self)
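# Editor's note (illustrative sketch, not part of the original module): typical
# Condition usage mirrors threading.Condition, with coroutine-style waiting:
#     cond = Condition()
#     # consumer coroutine
#     with (yield from cond):
#         yield from cond.wait_for(lambda: len(items) > 0)
#         item = items.popleft()
#     # producer coroutine
#     with (yield from cond):
#         items.append(item)
#         cond.notify()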
class Semaphore:
"""A Semaphore implementation.
A semaphore manages an internal counter which is decremented by each
acquire() call and incremented by each release() call. The counter
can never go below zero; when acquire() finds that it is zero, it blocks,
    waiting until some other coroutine calls release().
Semaphores also support the context manager protocol.
The optional argument gives the initial value for the internal
counter; it defaults to 1. If the value given is less than 0,
ValueError is raised.
"""
def __init__(self, value=1, *, loop=None):
if value < 0:
raise ValueError("Semaphore initial value must be >= 0")
self._value = value
self._waiters = collections.deque()
if loop is not None:
self._loop = loop
else:
self._loop = events.get_event_loop()
def __repr__(self):
res = super().__repr__()
extra = 'locked' if self.locked() else 'unlocked,value:{}'.format(
self._value)
if self._waiters:
extra = '{},waiters:{}'.format(extra, len(self._waiters))
return '<{} [{}]>'.format(res[1:-1], extra)
def locked(self):
"""Returns True if semaphore can not be acquired immediately."""
return self._value == 0
@tasks.coroutine
def acquire(self):
"""Acquire a semaphore.
If the internal counter is larger than zero on entry,
decrement it by one and return True immediately. If it is
zero on entry, block, waiting until some other coroutine has
called release() to make it larger than 0, and then return
True.
"""
if not self._waiters and self._value > 0:
self._value -= 1
return True
fut = futures.Future(loop=self._loop)
self._waiters.append(fut)
try:
yield from fut
self._value -= 1
return True
finally:
self._waiters.remove(fut)
def release(self):
"""Release a semaphore, incrementing the internal counter by one.
When it was zero on entry and another coroutine is waiting for it to
become larger than zero again, wake up that coroutine.
"""
self._value += 1
for waiter in self._waiters:
if not waiter.done():
waiter.set_result(True)
break
def __enter__(self):
raise RuntimeError(
'"yield from" should be used as context manager expression')
def __exit__(self, *args):
pass
def __iter__(self):
# See comment in Lock.__iter__().
yield from self.acquire()
return _ContextManager(self)
class BoundedSemaphore(Semaphore):
"""A bounded semaphore implementation.
This raises ValueError in release() if it would increase the value
above the initial value.
"""
def __init__(self, value=1, *, loop=None):
self._bound_value = value
super().__init__(value, loop=loop)
def release(self):
if self._value >= self._bound_value:
raise ValueError('BoundedSemaphore released too many times')
super().release()
|
apache-2.0
|
TathagataChakraborti/resource-conflicts
|
PLANROB-2015/seq-sat-lama/py2.5/lib/python2.5/functools.py
|
91
|
2155
|
"""functools.py - Tools for working with functions and callable objects
"""
# Python module wrapper for _functools C module
# to allow utilities written in Python to be added
# to the functools module.
# Written by Nick Coghlan <ncoghlan at gmail.com>
# Copyright (C) 2006 Python Software Foundation.
# See C source code for _functools credits/copyright
from _functools import partial
# update_wrapper() and wraps() are tools to help write
# wrapper functions that can handle naive introspection
WRAPPER_ASSIGNMENTS = ('__module__', '__name__', '__doc__')
WRAPPER_UPDATES = ('__dict__',)
def update_wrapper(wrapper,
wrapped,
assigned = WRAPPER_ASSIGNMENTS,
updated = WRAPPER_UPDATES):
"""Update a wrapper function to look like the wrapped function
wrapper is the function to be updated
wrapped is the original function
assigned is a tuple naming the attributes assigned directly
from the wrapped function to the wrapper function (defaults to
functools.WRAPPER_ASSIGNMENTS)
       updated is a tuple naming the attributes of the wrapper that
are updated with the corresponding attribute from the wrapped
function (defaults to functools.WRAPPER_UPDATES)
"""
for attr in assigned:
setattr(wrapper, attr, getattr(wrapped, attr))
for attr in updated:
getattr(wrapper, attr).update(getattr(wrapped, attr, {}))
# Return the wrapper so this can be used as a decorator via partial()
return wrapper
def wraps(wrapped,
assigned = WRAPPER_ASSIGNMENTS,
updated = WRAPPER_UPDATES):
"""Decorator factory to apply update_wrapper() to a wrapper function
Returns a decorator that invokes update_wrapper() with the decorated
function as the wrapper argument and the arguments to wraps() as the
remaining arguments. Default arguments are as for update_wrapper().
This is a convenience function to simplify applying partial() to
update_wrapper().
"""
return partial(update_wrapper, wrapped=wrapped,
assigned=assigned, updated=updated)
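if __name__ == '__main__':
    # Editor's illustrative sketch (not part of the original module): wraps()
    # copies the wrapped function's metadata onto the wrapper.
    def noisy(func):
        @wraps(func)
        def wrapper(*args, **kwargs):
            print('calling %s' % func.__name__)
            return func(*args, **kwargs)
        return wrapper
    @noisy
    def add(a, b):
        """Return a + b."""
        return a + b
    print(add(1, 2))        # prints 'calling add', then 3
    print(add.__name__)     # -> add (copied by update_wrapper)
    print(add.__doc__)      # -> Return a + b.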
|
mit
|
aljex/iTerm2
|
tests/esctest/tests/cnl.py
|
31
|
2269
|
import esccmd
from escutil import AssertEQ, GetCursorPosition, GetScreenSize
from esctypes import Point
class CNLTests(object):
def test_CNL_DefaultParam(self):
"""CNL moves the cursor down 1 with no parameter given."""
esccmd.CUP(Point(5, 3))
esccmd.CNL()
position = GetCursorPosition()
AssertEQ(position.x(), 1)
AssertEQ(position.y(), 4)
def test_CNL_ExplicitParam(self):
"""CNL moves the cursor down by the passed-in number of lines."""
esccmd.CUP(Point(6, 3))
esccmd.CNL(2)
position = GetCursorPosition()
AssertEQ(position.x(), 1)
AssertEQ(position.y(), 5)
def test_CNL_StopsAtBottomLine(self):
"""CNL moves the cursor down, stopping at the last line."""
esccmd.CUP(Point(6, 3))
height = GetScreenSize().height()
esccmd.CNL(height)
position = GetCursorPosition()
AssertEQ(position.x(), 1)
AssertEQ(position.y(), height)
def test_CNL_StopsAtBottomLineWhenBegunBelowScrollRegion(self):
"""When the cursor starts below the scroll region, CNL moves it down to the
bottom of the screen."""
# Set a scroll region. This must be done first because DECSTBM moves the cursor to the origin.
esccmd.DECSTBM(4, 5)
esccmd.DECSET(esccmd.DECLRMM)
esccmd.DECSLRM(5, 10)
# Position the cursor below the scroll region
esccmd.CUP(Point(7, 6))
# Move it down by a lot
height = GetScreenSize().height()
esccmd.CNL(height)
# Ensure it stopped at the bottom of the screen
position = GetCursorPosition()
AssertEQ(position.y(), height)
AssertEQ(position.x(), 5)
def test_CNL_StopsAtBottomMarginInScrollRegion(self):
"""When the cursor starts within the scroll region, CNL moves it down to the
bottom margin but no farther."""
# Set a scroll region. This must be done first because DECSTBM moves the cursor to the origin.
esccmd.DECSTBM(2, 4)
esccmd.DECSET(esccmd.DECLRMM)
esccmd.DECSLRM(5, 10)
# Position the cursor within the scroll region
esccmd.CUP(Point(7, 3))
    # Move it down by more than the height of the scroll region
esccmd.CNL(99)
# Ensure it stopped at the bottom of the scroll region.
position = GetCursorPosition()
AssertEQ(position.y(), 4)
AssertEQ(position.x(), 5)
|
gpl-2.0
|
Chilledheart/chromium
|
tools/telemetry/telemetry/value/summary_unittest.py
|
3
|
14006
|
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import unittest
from telemetry import story
from telemetry.internal.results import page_test_results
from telemetry import page as page_module
from telemetry.value import failure
from telemetry.value import histogram
from telemetry.value import improvement_direction
from telemetry.value import list_of_scalar_values
from telemetry.value import scalar
from telemetry.value import summary as summary_module
class TestBase(unittest.TestCase):
def setUp(self):
story_set = story.StorySet(base_dir=os.path.dirname(__file__))
story_set.AddStory(
page_module.Page('http://www.bar.com/', story_set, story_set.base_dir))
story_set.AddStory(
page_module.Page('http://www.baz.com/', story_set, story_set.base_dir))
story_set.AddStory(
page_module.Page('http://www.foo.com/', story_set, story_set.base_dir))
self.story_set = story_set
@property
def pages(self):
return self.story_set.stories
class SummaryTest(TestBase):
def testBasicSummary(self):
page0 = self.pages[0]
page1 = self.pages[1]
results = page_test_results.PageTestResults()
results.WillRunPage(page0)
v0 = scalar.ScalarValue(page0, 'a', 'seconds', 3,
improvement_direction=improvement_direction.UP)
results.AddValue(v0)
results.DidRunPage(page0)
results.WillRunPage(page1)
v1 = scalar.ScalarValue(page1, 'a', 'seconds', 7,
improvement_direction=improvement_direction.UP)
results.AddValue(v1)
results.DidRunPage(page1)
summary = summary_module.Summary(results.all_page_specific_values)
values = summary.interleaved_computed_per_page_values_and_summaries
v0_list = list_of_scalar_values.ListOfScalarValues(
page0, 'a', 'seconds', [3],
improvement_direction=improvement_direction.UP)
v1_list = list_of_scalar_values.ListOfScalarValues(
page1, 'a', 'seconds', [7],
improvement_direction=improvement_direction.UP)
merged_value = list_of_scalar_values.ListOfScalarValues(
None, 'a', 'seconds', [3, 7],
improvement_direction=improvement_direction.UP)
self.assertEquals(3, len(values))
self.assertIn(v0_list, values)
self.assertIn(v1_list, values)
self.assertIn(merged_value, values)
def testBasicSummaryWithOnlyOnePage(self):
page0 = self.pages[0]
results = page_test_results.PageTestResults()
results.WillRunPage(page0)
v0 = scalar.ScalarValue(page0, 'a', 'seconds', 3,
improvement_direction=improvement_direction.UP)
results.AddValue(v0)
results.DidRunPage(page0)
summary = summary_module.Summary(results.all_page_specific_values)
values = summary.interleaved_computed_per_page_values_and_summaries
v0_list = list_of_scalar_values.ListOfScalarValues(
page0, 'a', 'seconds', [3],
improvement_direction=improvement_direction.UP)
merged_list = list_of_scalar_values.ListOfScalarValues(
None, 'a', 'seconds', [3],
improvement_direction=improvement_direction.UP)
self.assertEquals(2, len(values))
self.assertIn(v0_list, values)
self.assertIn(merged_list, values)
def testBasicSummaryNonuniformResults(self):
page0 = self.pages[0]
page1 = self.pages[1]
page2 = self.pages[2]
results = page_test_results.PageTestResults()
results.WillRunPage(page0)
v0 = scalar.ScalarValue(page0, 'a', 'seconds', 3,
improvement_direction=improvement_direction.UP)
results.AddValue(v0)
v1 = scalar.ScalarValue(page0, 'b', 'seconds', 10,
improvement_direction=improvement_direction.UP)
results.AddValue(v1)
results.DidRunPage(page0)
results.WillRunPage(page1)
v2 = scalar.ScalarValue(page1, 'a', 'seconds', 3,
improvement_direction=improvement_direction.UP)
results.AddValue(v2)
v3 = scalar.ScalarValue(page1, 'b', 'seconds', 10,
improvement_direction=improvement_direction.UP)
results.AddValue(v3)
results.DidRunPage(page1)
results.WillRunPage(page2)
v4 = scalar.ScalarValue(page2, 'a', 'seconds', 7,
improvement_direction=improvement_direction.UP)
results.AddValue(v4)
# Note, page[2] does not report a 'b' metric.
results.DidRunPage(page2)
summary = summary_module.Summary(results.all_page_specific_values)
values = summary.interleaved_computed_per_page_values_and_summaries
v0_list = list_of_scalar_values.ListOfScalarValues(
page0, 'a', 'seconds', [3],
improvement_direction=improvement_direction.UP)
v1_list = list_of_scalar_values.ListOfScalarValues(
page0, 'b', 'seconds', [10],
improvement_direction=improvement_direction.UP)
v2_list = list_of_scalar_values.ListOfScalarValues(
page1, 'a', 'seconds', [3],
improvement_direction=improvement_direction.UP)
v3_list = list_of_scalar_values.ListOfScalarValues(
page1, 'b', 'seconds', [10],
improvement_direction=improvement_direction.UP)
v4_list = list_of_scalar_values.ListOfScalarValues(
page2, 'a', 'seconds', [7],
improvement_direction=improvement_direction.UP)
a_summary = list_of_scalar_values.ListOfScalarValues(
None, 'a', 'seconds', [3, 3, 7],
improvement_direction=improvement_direction.UP)
b_summary = list_of_scalar_values.ListOfScalarValues(
None, 'b', 'seconds', [10, 10],
improvement_direction=improvement_direction.UP)
self.assertEquals(7, len(values))
self.assertIn(v0_list, values)
self.assertIn(v1_list, values)
self.assertIn(v2_list, values)
self.assertIn(v3_list, values)
self.assertIn(v4_list, values)
self.assertIn(a_summary, values)
self.assertIn(b_summary, values)
def testBasicSummaryPassAndFailPage(self):
"""If a page failed, only print summary for individual pages."""
page0 = self.pages[0]
page1 = self.pages[1]
results = page_test_results.PageTestResults()
results.WillRunPage(page0)
v0 = scalar.ScalarValue(page0, 'a', 'seconds', 3,
improvement_direction=improvement_direction.UP)
results.AddValue(v0)
v1 = failure.FailureValue.FromMessage(page0, 'message')
results.AddValue(v1)
results.DidRunPage(page0)
results.WillRunPage(page1)
v2 = scalar.ScalarValue(page1, 'a', 'seconds', 7,
improvement_direction=improvement_direction.UP)
results.AddValue(v2)
results.DidRunPage(page1)
summary = summary_module.Summary(results.all_page_specific_values)
values = summary.interleaved_computed_per_page_values_and_summaries
v0_list = list_of_scalar_values.ListOfScalarValues(
page0, 'a', 'seconds', [3],
improvement_direction=improvement_direction.UP)
v2_list = list_of_scalar_values.ListOfScalarValues(
page1, 'a', 'seconds', [7],
improvement_direction=improvement_direction.UP)
self.assertEquals(2, len(values))
self.assertIn(v0_list, values)
self.assertIn(v2_list, values)
def testRepeatedPagesetOneIterationOnePageFails(self):
"""Page fails on one iteration, no averaged results should print."""
page0 = self.pages[0]
page1 = self.pages[1]
results = page_test_results.PageTestResults()
results.WillRunPage(page0)
v0 = scalar.ScalarValue(page0, 'a', 'seconds', 3,
improvement_direction=improvement_direction.UP)
results.AddValue(v0)
results.DidRunPage(page0)
results.WillRunPage(page1)
v1 = scalar.ScalarValue(page1, 'a', 'seconds', 7,
improvement_direction=improvement_direction.UP)
results.AddValue(v1)
v2 = failure.FailureValue.FromMessage(page1, 'message')
results.AddValue(v2)
results.DidRunPage(page1)
results.WillRunPage(page0)
v3 = scalar.ScalarValue(page0, 'a', 'seconds', 4,
improvement_direction=improvement_direction.UP)
results.AddValue(v3)
results.DidRunPage(page0)
results.WillRunPage(page1)
v4 = scalar.ScalarValue(page1, 'a', 'seconds', 8,
improvement_direction=improvement_direction.UP)
results.AddValue(v4)
results.DidRunPage(page1)
summary = summary_module.Summary(results.all_page_specific_values)
values = summary.interleaved_computed_per_page_values_and_summaries
page0_aggregated = list_of_scalar_values.ListOfScalarValues(
page0, 'a', 'seconds', [3, 4],
improvement_direction=improvement_direction.UP)
page1_aggregated = list_of_scalar_values.ListOfScalarValues(
page1, 'a', 'seconds', [7, 8],
improvement_direction=improvement_direction.UP)
self.assertEquals(2, len(values))
self.assertIn(page0_aggregated, values)
self.assertIn(page1_aggregated, values)
def testRepeatedPages(self):
page0 = self.pages[0]
page1 = self.pages[1]
results = page_test_results.PageTestResults()
results.WillRunPage(page0)
v0 = scalar.ScalarValue(page0, 'a', 'seconds', 3,
improvement_direction=improvement_direction.UP)
results.AddValue(v0)
results.DidRunPage(page0)
results.WillRunPage(page0)
v2 = scalar.ScalarValue(page0, 'a', 'seconds', 4,
improvement_direction=improvement_direction.UP)
results.AddValue(v2)
results.DidRunPage(page0)
results.WillRunPage(page1)
v1 = scalar.ScalarValue(page1, 'a', 'seconds', 7,
improvement_direction=improvement_direction.UP)
results.AddValue(v1)
results.DidRunPage(page1)
results.WillRunPage(page1)
v3 = scalar.ScalarValue(page1, 'a', 'seconds', 8,
improvement_direction=improvement_direction.UP)
results.AddValue(v3)
results.DidRunPage(page1)
summary = summary_module.Summary(results.all_page_specific_values)
values = summary.interleaved_computed_per_page_values_and_summaries
page0_aggregated = list_of_scalar_values.ListOfScalarValues(
page0, 'a', 'seconds', [3, 4],
improvement_direction=improvement_direction.UP)
page1_aggregated = list_of_scalar_values.ListOfScalarValues(
page1, 'a', 'seconds', [7, 8],
improvement_direction=improvement_direction.UP)
a_summary = list_of_scalar_values.ListOfScalarValues(
None, 'a', 'seconds', [3, 4, 7, 8],
improvement_direction=improvement_direction.UP)
self.assertEquals(3, len(values))
self.assertIn(page0_aggregated, values)
self.assertIn(page1_aggregated, values)
self.assertIn(a_summary, values)
def testPageRunsTwice(self):
page0 = self.pages[0]
results = page_test_results.PageTestResults()
results.WillRunPage(page0)
v0 = scalar.ScalarValue(page0, 'b', 'seconds', 2,
improvement_direction=improvement_direction.UP)
results.AddValue(v0)
results.DidRunPage(page0)
results.WillRunPage(page0)
v1 = scalar.ScalarValue(page0, 'b', 'seconds', 3,
improvement_direction=improvement_direction.UP)
results.AddValue(v1)
results.DidRunPage(page0)
summary = summary_module.Summary(results.all_page_specific_values)
values = summary.interleaved_computed_per_page_values_and_summaries
page0_aggregated = list_of_scalar_values.ListOfScalarValues(
page0, 'b', 'seconds', [2, 3],
improvement_direction=improvement_direction.UP)
b_summary = list_of_scalar_values.ListOfScalarValues(
None, 'b', 'seconds', [2, 3],
improvement_direction=improvement_direction.UP)
self.assertEquals(2, len(values))
self.assertIn(page0_aggregated, values)
self.assertIn(b_summary, values)
def testListValue(self):
page0 = self.pages[0]
page1 = self.pages[1]
results = page_test_results.PageTestResults()
results.WillRunPage(page0)
v0 = list_of_scalar_values.ListOfScalarValues(
page0, 'b', 'seconds', [2, 2],
improvement_direction=improvement_direction.UP)
results.AddValue(v0)
results.DidRunPage(page0)
results.WillRunPage(page1)
v1 = list_of_scalar_values.ListOfScalarValues(
page1, 'b', 'seconds', [3, 3],
improvement_direction=improvement_direction.UP)
results.AddValue(v1)
results.DidRunPage(page1)
summary = summary_module.Summary(results.all_page_specific_values)
values = summary.interleaved_computed_per_page_values_and_summaries
b_summary = list_of_scalar_values.ListOfScalarValues(
None, 'b', 'seconds', [2, 2, 3, 3],
improvement_direction=improvement_direction.UP)
self.assertEquals(3, len(values))
self.assertIn(v0, values)
self.assertIn(v1, values)
self.assertIn(b_summary, values)
def testHistogram(self):
page0 = self.pages[0]
page1 = self.pages[1]
results = page_test_results.PageTestResults()
results.WillRunPage(page0)
v0 = histogram.HistogramValue(
page0, 'a', 'units',
raw_value_json='{"buckets": [{"low": 1, "high": 2, "count": 1}]}',
important=False, improvement_direction=improvement_direction.UP)
results.AddValue(v0)
results.DidRunPage(page0)
results.WillRunPage(page1)
v1 = histogram.HistogramValue(
page1, 'a', 'units',
raw_value_json='{"buckets": [{"low": 2, "high": 3, "count": 1}]}',
important=False, improvement_direction=improvement_direction.UP)
results.AddValue(v1)
results.DidRunPage(page1)
summary = summary_module.Summary(results.all_page_specific_values)
values = summary.interleaved_computed_per_page_values_and_summaries
self.assertEquals(2, len(values))
self.assertIn(v0, values)
self.assertIn(v1, values)
|
bsd-3-clause
|
ifduyue/django
|
tests/flatpages_tests/test_views.py
|
130
|
6846
|
from django.conf import settings
from django.contrib.auth.models import User
from django.contrib.flatpages.models import FlatPage
from django.contrib.sites.models import Site
from django.test import TestCase, modify_settings, override_settings
from .settings import FLATPAGES_TEMPLATES
class TestDataMixin:
@classmethod
def setUpTestData(cls):
# don't use the manager because we want to ensure the site exists
# with pk=1, regardless of whether or not it already exists.
cls.site1 = Site(pk=1, domain='example.com', name='example.com')
cls.site1.save()
cls.fp1 = FlatPage.objects.create(
url='/flatpage/', title='A Flatpage', content="Isn't it flat!",
enable_comments=False, template_name='', registration_required=False
)
cls.fp2 = FlatPage.objects.create(
url='/location/flatpage/', title='A Nested Flatpage', content="Isn't it flat and deep!",
enable_comments=False, template_name='', registration_required=False
)
cls.fp3 = FlatPage.objects.create(
url='/sekrit/', title='Sekrit Flatpage', content="Isn't it sekrit!",
enable_comments=False, template_name='', registration_required=True
)
cls.fp4 = FlatPage.objects.create(
url='/location/sekrit/', title='Sekrit Nested Flatpage', content="Isn't it sekrit and deep!",
enable_comments=False, template_name='', registration_required=True
)
cls.fp1.sites.add(cls.site1)
cls.fp2.sites.add(cls.site1)
cls.fp3.sites.add(cls.site1)
cls.fp4.sites.add(cls.site1)
@modify_settings(INSTALLED_APPS={'append': 'django.contrib.flatpages'})
@override_settings(
LOGIN_URL='/accounts/login/',
MIDDLEWARE=[
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
# no 'django.contrib.flatpages.middleware.FlatpageFallbackMiddleware'
],
ROOT_URLCONF='flatpages_tests.urls',
TEMPLATES=FLATPAGES_TEMPLATES,
SITE_ID=1,
)
class FlatpageViewTests(TestDataMixin, TestCase):
def test_view_flatpage(self):
"A flatpage can be served through a view"
response = self.client.get('/flatpage_root/flatpage/')
self.assertContains(response, "<p>Isn't it flat!</p>")
def test_view_non_existent_flatpage(self):
"""A nonexistent flatpage raises 404 when served through a view."""
response = self.client.get('/flatpage_root/no_such_flatpage/')
self.assertEqual(response.status_code, 404)
def test_view_authenticated_flatpage(self):
"A flatpage served through a view can require authentication"
response = self.client.get('/flatpage_root/sekrit/')
self.assertRedirects(response, '/accounts/login/?next=/flatpage_root/sekrit/')
user = User.objects.create_user('testuser', '[email protected]', 's3krit')
self.client.force_login(user)
response = self.client.get('/flatpage_root/sekrit/')
self.assertContains(response, "<p>Isn't it sekrit!</p>")
def test_fallback_flatpage(self):
"A fallback flatpage won't be served if the middleware is disabled"
response = self.client.get('/flatpage/')
self.assertEqual(response.status_code, 404)
def test_fallback_non_existent_flatpage(self):
"""
A nonexistent flatpage won't be served if the fallback middleware is
disabled.
"""
response = self.client.get('/no_such_flatpage/')
self.assertEqual(response.status_code, 404)
def test_view_flatpage_special_chars(self):
"A flatpage with special chars in the URL can be served through a view"
fp = FlatPage.objects.create(
url="/some.very_special~chars-here/",
title="A very special page",
content="Isn't it special!",
enable_comments=False,
registration_required=False,
)
fp.sites.add(settings.SITE_ID)
response = self.client.get('/flatpage_root/some.very_special~chars-here/')
self.assertContains(response, "<p>Isn't it special!</p>")
@modify_settings(INSTALLED_APPS={'append': 'django.contrib.flatpages'})
@override_settings(
APPEND_SLASH=True,
LOGIN_URL='/accounts/login/',
MIDDLEWARE=[
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
# no 'django.contrib.flatpages.middleware.FlatpageFallbackMiddleware'
],
ROOT_URLCONF='flatpages_tests.urls',
TEMPLATES=FLATPAGES_TEMPLATES,
SITE_ID=1,
)
class FlatpageViewAppendSlashTests(TestDataMixin, TestCase):
def test_redirect_view_flatpage(self):
"A flatpage can be served through a view and should add a slash"
response = self.client.get('/flatpage_root/flatpage')
self.assertRedirects(response, '/flatpage_root/flatpage/', status_code=301)
def test_redirect_view_non_existent_flatpage(self):
"""
A nonexistent flatpage raises 404 when served through a view and
should not add a slash.
"""
response = self.client.get('/flatpage_root/no_such_flatpage')
self.assertEqual(response.status_code, 404)
def test_redirect_fallback_flatpage(self):
"A fallback flatpage won't be served if the middleware is disabled and should not add a slash"
response = self.client.get('/flatpage')
self.assertEqual(response.status_code, 404)
def test_redirect_fallback_non_existent_flatpage(self):
"""
A nonexistent flatpage won't be served if the fallback middleware is
disabled and should not add a slash.
"""
response = self.client.get('/no_such_flatpage')
self.assertEqual(response.status_code, 404)
def test_redirect_view_flatpage_special_chars(self):
"A flatpage with special chars in the URL can be served through a view and should add a slash"
fp = FlatPage.objects.create(
url="/some.very_special~chars-here/",
title="A very special page",
content="Isn't it special!",
enable_comments=False,
registration_required=False,
)
fp.sites.add(settings.SITE_ID)
response = self.client.get('/flatpage_root/some.very_special~chars-here')
self.assertRedirects(response, '/flatpage_root/some.very_special~chars-here/', status_code=301)
|
bsd-3-clause
|
urrego093/proyecto_mv
|
scripts/extract_pgsql_models.py
|
15
|
10260
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Create web2py model (python code) to represent PostgreSQL tables.
Features:
* Uses ANSI Standard INFORMATION_SCHEMA (might work with other RDBMS)
* Detects legacy "keyed" tables (not having an "id" PK)
* Connects directly to running databases, no need to do a SQL dump
* Handles notnull, unique and referential constraints
* Detects most common datatypes and default values
* Supports PostgreSQL column comments (i.e. for documentation)
Requirements:
* Needs the PostgreSQL psycopg2 Python connector (same as web2py)
* If used against other RDBMS, import and use proper connector (remove pg_ code)
Created by Mariano Reingart, based on a script to "generate schemas from dbs"
(mysql) by Alexandre Andrade
"""
_author__ = "Mariano Reingart <[email protected]>"
HELP = """
USAGE: extract_pgsql_models db host port user passwd
Call with PostgreSQL database connection parameters,
web2py model will be printed on standard output.
EXAMPLE: python extract_pgsql_models.py mydb localhost 5432 reingart saraza
"""
# Config options
DEBUG = False # print debug messages to STDERR
SCHEMA = 'public' # change if not using default PostgreSQL schema
# Constant for Field keyword parameter order (and filter):
KWARGS = ('type', 'length', 'default', 'required', 'ondelete',
'notnull', 'unique', 'label', 'comment')
import sys
def query(conn, sql, *args):
"Execute a SQL query and return rows as a list of dicts"
cur = conn.cursor()
ret = []
try:
if DEBUG:
print >> sys.stderr, "QUERY: ", sql % args
cur.execute(sql, args)
for row in cur:
dic = {}
for i, value in enumerate(row):
field = cur.description[i][0]
dic[field] = value
if DEBUG:
print >> sys.stderr, "RET: ", dic
ret.append(dic)
return ret
finally:
cur.close()
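# Editor's note (illustrative): query(conn, "SELECT 1 AS x, 'a' AS y") would return
# [{'x': 1, 'y': 'a'}] -- one dict per row, keyed by the cursor's column names.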
def get_tables(conn, schema=SCHEMA):
"List table names in a given schema"
rows = query(conn, """SELECT table_name FROM information_schema.tables
WHERE table_schema = %s
ORDER BY table_name""", schema)
return [row['table_name'] for row in rows]
def get_fields(conn, table):
"Retrieve field list for a given table"
if DEBUG:
print >> sys.stderr, "Processing TABLE", table
rows = query(conn, """
SELECT column_name, data_type,
is_nullable,
character_maximum_length,
numeric_precision, numeric_precision_radix, numeric_scale,
column_default
FROM information_schema.columns
WHERE table_name=%s
ORDER BY ordinal_position""", table)
return rows
def define_field(conn, table, field, pks):
"Determine field type, default value, references, etc."
f = {}
ref = references(conn, table, field['column_name'])
if ref:
f.update(ref)
elif field['column_default'] and \
field['column_default'].startswith("nextval") and \
field['column_name'] in pks:
# postgresql sequence (SERIAL) and primary key!
f['type'] = "'id'"
elif field['data_type'].startswith('character'):
f['type'] = "'string'"
if field['character_maximum_length']:
f['length'] = field['character_maximum_length']
elif field['data_type'] in ('text', ):
f['type'] = "'text'"
elif field['data_type'] in ('boolean', 'bit'):
f['type'] = "'boolean'"
elif field['data_type'] in ('integer', 'smallint', 'bigint'):
f['type'] = "'integer'"
elif field['data_type'] in ('double precision', 'real'):
f['type'] = "'double'"
elif field['data_type'] in ('timestamp', 'timestamp without time zone'):
f['type'] = "'datetime'"
elif field['data_type'] in ('date', ):
f['type'] = "'date'"
elif field['data_type'] in ('time', 'time without time zone'):
f['type'] = "'time'"
elif field['data_type'] in ('numeric', 'currency'):
f['precision'] = field['numeric_precision']
f['scale'] = field['numeric_scale'] or 0
f['type'] = "'decimal({},{})'".format(f['precision'],f['scale'])
elif field['data_type'] in ('bytea', ):
f['type'] = "'blob'"
elif field['data_type'] in ('point', 'lseg', 'polygon', 'unknown', 'USER-DEFINED'):
f['type'] = "" # unsupported?
else:
raise RuntimeError("Data Type not supported: %s " % str(field))
try:
if field['column_default']:
if field['column_default'] == "now()":
d = "request.now"
elif field['column_default'] == "true":
d = "True"
elif field['column_default'] == "false":
d = "False"
else:
d = repr(eval(field['column_default']))
f['default'] = str(d)
except (ValueError, SyntaxError):
pass
except Exception, e:
raise RuntimeError(
"Default unsupported '%s'" % field['column_default'])
    if field['is_nullable'] == 'NO':
        f['notnull'] = "True"
comment = get_comment(conn, table, field)
if comment is not None:
f['comment'] = repr(comment)
return f
def is_unique(conn, table, field):
"Find unique columns (incomplete support)"
rows = query(conn, """
SELECT information_schema.constraint_column_usage.column_name
FROM information_schema.table_constraints
NATURAL JOIN information_schema.constraint_column_usage
WHERE information_schema.table_constraints.table_name=%s
AND information_schema.constraint_column_usage.column_name=%s
AND information_schema.table_constraints.constraint_type='UNIQUE'
;""", table, field['column_name'])
return rows and True or False
def get_comment(conn, table, field):
"Find the column comment (postgres specific)"
rows = query(conn, """
SELECT d.description AS comment
FROM pg_class c
JOIN pg_description d ON c.oid=d.objoid
JOIN pg_attribute a ON c.oid = a.attrelid
WHERE c.relname=%s AND a.attname=%s
AND a.attnum = d.objsubid
;""", table, field['column_name'])
return rows and rows[0]['comment'] or None
def primarykeys(conn, table):
"Find primary keys"
rows = query(conn, """
SELECT information_schema.constraint_column_usage.column_name
FROM information_schema.table_constraints
NATURAL JOIN information_schema.constraint_column_usage
WHERE information_schema.table_constraints.table_name=%s
AND information_schema.table_constraints.constraint_type='PRIMARY KEY'
;""", table)
return [row['column_name'] for row in rows]
def references(conn, table, field):
"Find a FK (fails if multiple)"
rows1 = query(conn, """
SELECT table_name, column_name, constraint_name,
update_rule, delete_rule, ordinal_position
FROM information_schema.key_column_usage
NATURAL JOIN information_schema.referential_constraints
NATURAL JOIN information_schema.table_constraints
WHERE information_schema.key_column_usage.table_name=%s
AND information_schema.key_column_usage.column_name=%s
AND information_schema.table_constraints.constraint_type='FOREIGN KEY'
;""", table, field)
if len(rows1) == 1:
rows2 = query(conn, """
SELECT table_name, column_name, *
FROM information_schema.constraint_column_usage
WHERE constraint_name=%s
""", rows1[0]['constraint_name'])
row = None
if len(rows2) > 1:
row = rows2[int(rows1[0]['ordinal_position']) - 1]
keyed = True
if len(rows2) == 1:
row = rows2[0]
keyed = False
if row:
if keyed: # THIS IS BAD, DON'T MIX "id" and primarykey!!!
ref = {'type': "'reference %s.%s'" % (row['table_name'],
row['column_name'])}
else:
ref = {'type': "'reference %s'" % (row['table_name'],)}
if rows1[0]['delete_rule'] != "NO ACTION":
ref['ondelete'] = repr(rows1[0]['delete_rule'])
return ref
elif rows2:
raise RuntimeError("Unsupported foreign key reference: %s" %
str(rows2))
elif rows1:
raise RuntimeError("Unsupported referential constraint: %s" %
str(rows1))
def define_table(conn, table):
"Output single table definition"
fields = get_fields(conn, table)
pks = primarykeys(conn, table)
print "db.define_table('%s'," % (table, )
for field in fields:
fname = field['column_name']
fdef = define_field(conn, table, field, pks)
if fname not in pks and is_unique(conn, table, field):
fdef['unique'] = "True"
if fdef['type'] == "'id'" and fname in pks:
pks.pop(pks.index(fname))
print " Field('%s', %s)," % (fname,
', '.join(["%s=%s" % (k, fdef[k]) for k in KWARGS
if k in fdef and fdef[k]]))
if pks:
print " primarykey=[%s]," % ", ".join(["'%s'" % pk for pk in pks])
print " migrate=migrate)"
print
def define_db(conn, db, host, port, user, passwd):
"Output database definition (model)"
dal = 'db = DAL("postgres://%s:%s@%s:%s/%s", pool_size=10)'
print dal % (user, passwd, host, port, db)
print
print "migrate = False"
print
for table in get_tables(conn):
define_table(conn, table)
if __name__ == "__main__":
if len(sys.argv) < 6:
print HELP
else:
# Parse arguments from command line:
db, host, port, user, passwd = sys.argv[1:6]
# Make the database connection (change driver if required)
import psycopg2
cnn = psycopg2.connect(database=db, host=host, port=port,
user=user, password=passwd,
)
# Start model code generation:
define_db(cnn, db, host, port, user, passwd)
|
gpl-3.0
|
CorundumGames/Invasodado
|
game/ufo.py
|
1
|
3605
|
from math import sin
from random import choice, uniform, expovariate
from pygame import Rect
from core import color
from core import config
from core.particles import ParticleEmitter
from game.block import get_block
from game.gameobject import GameObject
from game import gamedata
### Constants ##################################################################
AVG_WAIT = 9000 #Expected time in frames between UFO appearance
DEATH = config.load_sound('ufo_explosion.wav')
FRAMES = tuple(
Rect(64 * (i % 4), 192 + 32 * (i // 4), 64, 32)
for i in range(10, -1, -1)
)
INVADE = config.load_sound('ufo.wav')
START_POS = (640, 16)
UFO_FRAMES = color.get_colored_objects(FRAMES)
UFO_STATES = ('IDLE', 'APPEARING', 'ACTIVE', 'DYING', 'LEAVING', 'LOWERING', 'GAMEOVER')
################################################################################
class UFO(GameObject):
STATES = config.Enum(*UFO_STATES)
GROUP = None
BLOCK_GROUP = None
def __init__(self):
super().__init__()
self._anim = 0.0
self.column = None
self.current_frame_list = UFO_FRAMES
self.image = config.get_sprite(FRAMES[0])
self.odds = expovariate(AVG_WAIT)
self.position = list(START_POS)
self.rect = Rect(START_POS, self.image.get_size())
self.state = UFO.STATES.IDLE
self.emitter = ParticleEmitter(color.random_color_particles, self.rect)
del self.acceleration
def appear(self):
'''
Appear on-screen, but not for very long!
'''
INVADE.play(-1)
self.position = list(START_POS)
self.rect.topleft = list(START_POS)
self.change_state(UFO.STATES.ACTIVE)
self.velocity[0] = -2.0
def move(self):
'''
Move left on the screen, and oscillate up and down.
'''
position = self.position
rect = self.rect
self._anim += 0.5
self.image = UFO_FRAMES[id(choice(color.LIST)) ] \
[int(self._anim) % len(FRAMES)]
position[0] += self.velocity[0]
position[1] += sin(self._anim/4)
rect.topleft = (position[0] + .5, position[1] + .5)
if rect.right < 0:
#If we've gone past the left edge of the screen...
self.change_state(UFO.STATES.LEAVING)
def die(self):
'''
Vanish and release a special Block that clears lots of other Blocks.
'''
self.emitter.rect = self.rect
self.emitter.burst(30)
DEATH.play()
UFO.BLOCK_GROUP.add(get_block((self.rect.centerx, 0), special=True))
gamedata.score += 90
self.change_state(UFO.STATES.LEAVING)
def leave(self):
INVADE.stop()
self.velocity[0] = 0
self.position = list(START_POS)
self.rect.topleft = START_POS
self.change_state(UFO.STATES.IDLE)
def wait(self):
'''
Wait off-screen, and only come back with a specific probability.
'''
if uniform(0, 1) < self.odds:
#With a certain probability...
self.odds = expovariate(AVG_WAIT)
self.change_state(UFO.STATES.APPEARING)
actions = {
STATES.IDLE : 'wait' ,
STATES.APPEARING: 'appear',
STATES.ACTIVE : 'move' ,
STATES.DYING : 'die' ,
STATES.LEAVING : 'leave' ,
STATES.GAMEOVER : None ,
}
|
gpl-3.0
|
wandec/grr
|
config/data_server.py
|
13
|
1402
|
#!/usr/bin/env python
"""Configuration parameters for the data servers."""
from grr.lib import config_lib
# The Data Store server.
config_lib.DEFINE_integer("Dataserver.stats_frequency", 60,
("Time interval in seconds for data server "
"statistics updates"))
config_lib.DEFINE_list("Dataserver.server_list",
["http://127.0.0.1:7000", "http://127.0.0.1:7001"],
"List of allowed data servers (first is the master).")
config_lib.DEFINE_integer("Dataserver.max_connections", 5,
("Maximum number of connections to the data server "
"per process."))
config_lib.DEFINE_integer("Dataserver.port", 7000,
"Port for a specific data server.")
# Login information for clients of the data servers.
config_lib.DEFINE_list("Dataserver.client_credentials", ["user:pass:rw"],
"List of data server client credentials, given as "
"<username>:<password>:<mode> where mode is r or rw.")
# Login information used by data servers when registering with the master.
config_lib.DEFINE_string("Dataserver.server_username", "server",
"Username for servers.")
config_lib.DEFINE_string("Dataserver.server_password", "server",
"Password for servers.")
|
apache-2.0
|
lamby/python-social-auth
|
social/backends/lastfm.py
|
70
|
1888
|
import hashlib
from social.utils import handle_http_errors
from social.backends.base import BaseAuth
class LastFmAuth(BaseAuth):
"""
Last.Fm authentication backend. Requires two settings:
SOCIAL_AUTH_LASTFM_KEY
SOCIAL_AUTH_LASTFM_SECRET
Don't forget to set the Last.fm callback to something sensible like
http://your.site/lastfm/complete
"""
name = 'lastfm'
AUTH_URL = 'http://www.last.fm/api/auth/?api_key={api_key}'
EXTRA_DATA = [
('key', 'session_key')
]
def auth_url(self):
return self.AUTH_URL.format(api_key=self.setting('KEY'))
@handle_http_errors
def auth_complete(self, *args, **kwargs):
"""Completes login process, must return user instance"""
key, secret = self.get_key_and_secret()
token = self.data['token']
signature = hashlib.md5(''.join(
('api_key', key, 'methodauth.getSession', 'token', token, secret)
).encode()).hexdigest()
response = self.get_json('http://ws.audioscrobbler.com/2.0/', data={
'method': 'auth.getSession',
'api_key': key,
'token': token,
'api_sig': signature,
'format': 'json'
}, method='POST')
kwargs.update({'response': response['session'], 'backend': self})
return self.strategy.authenticate(*args, **kwargs)
def get_user_id(self, details, response):
"""Return a unique ID for the current user, by default from server
response."""
return response.get('name')
def get_user_details(self, response):
fullname, first_name, last_name = self.get_user_names(response['name'])
return {
'username': response['name'],
'email': '',
'fullname': fullname,
'first_name': first_name,
'last_name': last_name
}
|
bsd-3-clause
|
stamen/fieldpapers
|
decoder/tasks.py
|
3
|
1966
|
from celery import Celery
from raven import Client
import compose, decode, forms
celery = Celery('tasks', broker='redis://')
client = Client()
@celery.task
def decodeScan(apibase, password, **msg):
""" Process an uploaded scan.
"""
url = msg['url']
print 'Decoding scan', msg['scan_id']
try:
decode.main(apibase, password, msg['scan_id'], url)
except:
client.captureException()
raise
@celery.task
def composePrint(apibase, password, **msg):
""" Create an atlas.
"""
kwargs = dict(print_id=msg['print_id'],
paper_size=msg['paper_size'],
orientation=msg['orientation'],
layout=msg.get('layout', 'full-page'),
pages=msg['pages'])
if 'form_id' in msg and 'form_url' in msg:
def on_fields(fields):
for page in msg['pages']:
page['text'] = (page.get('text', '').strip() + '\n\n' + forms.fields_as_text(fields['fields'])).strip()
print 'Composing print', msg['print_id'], 'and form', msg['form_id']
try:
compose.main(apibase, password, **kwargs)
forms.main(apibase, password, msg['form_id'], msg['form_url'], on_fields)
except:
client.captureException()
raise
else:
if 'form_fields' in msg:
for page in msg['pages']:
page['text'] = (page.get('text', '').strip() + '\n\n' + forms.fields_as_text(msg['form_fields'])).strip()
print 'Composing print', msg['print_id']
try:
compose.main(apibase, password, **kwargs)
except:
client.captureException()
raise
@celery.task
def parseForm(apibase, password, **msg):
"""
"""
print 'Parsing a form.'
try:
return forms.main(apibase, password, msg['form_id'], msg['url'])
except:
client.captureException()
raise
|
gpl-2.0
|
tmenjo/cinder-2015.1.0
|
cinder/tests/api/v1/stubs.py
|
6
|
4045
|
# Copyright 2010 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
from cinder import exception as exc
FAKE_UUID = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'
FAKE_UUIDS = {}
def stub_volume(id, **kwargs):
volume = {
'id': id,
'user_id': 'fakeuser',
'project_id': 'fakeproject',
'host': 'fakehost',
'size': 1,
'availability_zone': 'fakeaz',
'attached_mode': 'rw',
'status': 'fakestatus',
'migration_status': None,
'attach_status': 'attached',
'bootable': 'false',
'name': 'vol name',
'display_name': 'displayname',
'display_description': 'displaydesc',
'created_at': datetime.datetime(1, 1, 1, 1, 1, 1),
'snapshot_id': None,
'source_volid': None,
'volume_type_id': '3e196c20-3c06-11e2-81c1-0800200c9a66',
'volume_metadata': [],
'volume_type': {'name': 'vol_type_name'},
'volume_attachment': [],
'multiattach': False,
'readonly': 'False'}
volume.update(kwargs)
return volume
def stub_volume_create(self, context, size, name, description, snapshot,
**param):
vol = stub_volume('1')
vol['size'] = size
vol['display_name'] = name
vol['display_description'] = description
vol['source_volid'] = None
try:
vol['snapshot_id'] = snapshot['id']
except (KeyError, TypeError):
vol['snapshot_id'] = None
vol['availability_zone'] = param.get('availability_zone', 'fakeaz')
return vol
def stub_volume_create_from_image(self, context, size, name, description,
snapshot, volume_type, metadata,
availability_zone):
vol = stub_volume('1')
vol['status'] = 'creating'
vol['size'] = size
vol['display_name'] = name
vol['display_description'] = description
vol['availability_zone'] = 'cinder'
return vol
def stub_volume_update(self, context, *args, **param):
pass
def stub_volume_delete(self, context, *args, **param):
pass
def stub_volume_get(self, context, volume_id):
return stub_volume(volume_id)
def stub_volume_get_notfound(self, context, volume_id):
raise exc.NotFound
def stub_volume_get_all(context, search_opts=None):
return [stub_volume(100, project_id='fake'),
stub_volume(101, project_id='superfake'),
stub_volume(102, project_id='superduperfake')]
def stub_volume_get_all_by_project(self, context, search_opts=None):
return [stub_volume_get(self, context, '1')]
def stub_snapshot(id, **kwargs):
snapshot = {'id': id,
'volume_id': 12,
'status': 'available',
'volume_size': 100,
'created_at': None,
'display_name': 'Default name',
'display_description': 'Default description',
'project_id': 'fake'}
snapshot.update(kwargs)
return snapshot
def stub_snapshot_get_all(self):
return [stub_snapshot(100, project_id='fake'),
stub_snapshot(101, project_id='superfake'),
stub_snapshot(102, project_id='superduperfake')]
def stub_snapshot_get_all_by_project(self, context):
return [stub_snapshot(1)]
def stub_snapshot_update(self, context, *args, **param):
pass
def stub_service_get_all_by_topic(context, topic):
return [{'availability_zone': "zone1:host1", "disabled": 0}]
|
apache-2.0
|
mikewiebe-ansible/ansible
|
test/units/modules/network/fortios/test_fortios_web_proxy_forward_server.py
|
21
|
10345
|
# Copyright 2019 Fortinet, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <https://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import json
import pytest
from mock import ANY
from ansible.module_utils.network.fortios.fortios import FortiOSHandler
try:
from ansible.modules.network.fortios import fortios_web_proxy_forward_server
except ImportError:
pytest.skip("Could not load required modules for testing", allow_module_level=True)
@pytest.fixture(autouse=True)
def connection_mock(mocker):
connection_class_mock = mocker.patch('ansible.modules.network.fortios.fortios_web_proxy_forward_server.Connection')
return connection_class_mock
fos_instance = FortiOSHandler(connection_mock)
def test_web_proxy_forward_server_creation(mocker):
schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
set_method_result = {'status': 'success', 'http_method': 'POST', 'http_status': 200}
set_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.set', return_value=set_method_result)
input_data = {
'username': 'admin',
'state': 'present',
'web_proxy_forward_server': {
'addr_type': 'ip',
'comment': 'Comment.',
'fqdn': 'test_value_5',
'healthcheck': 'disable',
'ip': 'test_value_7',
'monitor': 'test_value_8',
'name': 'default_name_9',
'port': '10',
'server_down_option': 'block'
},
'vdom': 'root'}
is_error, changed, response = fortios_web_proxy_forward_server.fortios_web_proxy(input_data, fos_instance)
expected_data = {
'addr-type': 'ip',
'comment': 'Comment.',
'fqdn': 'test_value_5',
'healthcheck': 'disable',
'ip': 'test_value_7',
'monitor': 'test_value_8',
'name': 'default_name_9',
'port': '10',
'server-down-option': 'block'
}
set_method_mock.assert_called_with('web-proxy', 'forward-server', data=expected_data, vdom='root')
schema_method_mock.assert_not_called()
assert not is_error
assert changed
assert response['status'] == 'success'
assert response['http_status'] == 200
def test_web_proxy_forward_server_creation_fails(mocker):
schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
set_method_result = {'status': 'error', 'http_method': 'POST', 'http_status': 500}
set_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.set', return_value=set_method_result)
input_data = {
'username': 'admin',
'state': 'present',
'web_proxy_forward_server': {
'addr_type': 'ip',
'comment': 'Comment.',
'fqdn': 'test_value_5',
'healthcheck': 'disable',
'ip': 'test_value_7',
'monitor': 'test_value_8',
'name': 'default_name_9',
'port': '10',
'server_down_option': 'block'
},
'vdom': 'root'}
is_error, changed, response = fortios_web_proxy_forward_server.fortios_web_proxy(input_data, fos_instance)
expected_data = {
'addr-type': 'ip',
'comment': 'Comment.',
'fqdn': 'test_value_5',
'healthcheck': 'disable',
'ip': 'test_value_7',
'monitor': 'test_value_8',
'name': 'default_name_9',
'port': '10',
'server-down-option': 'block'
}
set_method_mock.assert_called_with('web-proxy', 'forward-server', data=expected_data, vdom='root')
schema_method_mock.assert_not_called()
assert is_error
assert not changed
assert response['status'] == 'error'
assert response['http_status'] == 500
def test_web_proxy_forward_server_removal(mocker):
schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
delete_method_result = {'status': 'success', 'http_method': 'POST', 'http_status': 200}
delete_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.delete', return_value=delete_method_result)
input_data = {
'username': 'admin',
'state': 'absent',
'web_proxy_forward_server': {
'addr_type': 'ip',
'comment': 'Comment.',
'fqdn': 'test_value_5',
'healthcheck': 'disable',
'ip': 'test_value_7',
'monitor': 'test_value_8',
'name': 'default_name_9',
'port': '10',
'server_down_option': 'block'
},
'vdom': 'root'}
is_error, changed, response = fortios_web_proxy_forward_server.fortios_web_proxy(input_data, fos_instance)
delete_method_mock.assert_called_with('web-proxy', 'forward-server', mkey=ANY, vdom='root')
schema_method_mock.assert_not_called()
assert not is_error
assert changed
assert response['status'] == 'success'
assert response['http_status'] == 200
def test_web_proxy_forward_server_deletion_fails(mocker):
schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
delete_method_result = {'status': 'error', 'http_method': 'POST', 'http_status': 500}
delete_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.delete', return_value=delete_method_result)
input_data = {
'username': 'admin',
'state': 'absent',
'web_proxy_forward_server': {
'addr_type': 'ip',
'comment': 'Comment.',
'fqdn': 'test_value_5',
'healthcheck': 'disable',
'ip': 'test_value_7',
'monitor': 'test_value_8',
'name': 'default_name_9',
'port': '10',
'server_down_option': 'block'
},
'vdom': 'root'}
is_error, changed, response = fortios_web_proxy_forward_server.fortios_web_proxy(input_data, fos_instance)
delete_method_mock.assert_called_with('web-proxy', 'forward-server', mkey=ANY, vdom='root')
schema_method_mock.assert_not_called()
assert is_error
assert not changed
assert response['status'] == 'error'
assert response['http_status'] == 500
def test_web_proxy_forward_server_idempotent(mocker):
schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
set_method_result = {'status': 'error', 'http_method': 'DELETE', 'http_status': 404}
set_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.set', return_value=set_method_result)
input_data = {
'username': 'admin',
'state': 'present',
'web_proxy_forward_server': {
'addr_type': 'ip',
'comment': 'Comment.',
'fqdn': 'test_value_5',
'healthcheck': 'disable',
'ip': 'test_value_7',
'monitor': 'test_value_8',
'name': 'default_name_9',
'port': '10',
'server_down_option': 'block'
},
'vdom': 'root'}
is_error, changed, response = fortios_web_proxy_forward_server.fortios_web_proxy(input_data, fos_instance)
expected_data = {
'addr-type': 'ip',
'comment': 'Comment.',
'fqdn': 'test_value_5',
'healthcheck': 'disable',
'ip': 'test_value_7',
'monitor': 'test_value_8',
'name': 'default_name_9',
'port': '10',
'server-down-option': 'block'
}
set_method_mock.assert_called_with('web-proxy', 'forward-server', data=expected_data, vdom='root')
schema_method_mock.assert_not_called()
assert not is_error
assert not changed
assert response['status'] == 'error'
assert response['http_status'] == 404
def test_web_proxy_forward_server_filter_foreign_attributes(mocker):
schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
set_method_result = {'status': 'success', 'http_method': 'POST', 'http_status': 200}
set_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.set', return_value=set_method_result)
input_data = {
'username': 'admin',
'state': 'present',
'web_proxy_forward_server': {
'random_attribute_not_valid': 'tag',
'addr_type': 'ip',
'comment': 'Comment.',
'fqdn': 'test_value_5',
'healthcheck': 'disable',
'ip': 'test_value_7',
'monitor': 'test_value_8',
'name': 'default_name_9',
'port': '10',
'server_down_option': 'block'
},
'vdom': 'root'}
is_error, changed, response = fortios_web_proxy_forward_server.fortios_web_proxy(input_data, fos_instance)
expected_data = {
'addr-type': 'ip',
'comment': 'Comment.',
'fqdn': 'test_value_5',
'healthcheck': 'disable',
'ip': 'test_value_7',
'monitor': 'test_value_8',
'name': 'default_name_9',
'port': '10',
'server-down-option': 'block'
}
set_method_mock.assert_called_with('web-proxy', 'forward-server', data=expected_data, vdom='root')
schema_method_mock.assert_not_called()
assert not is_error
assert changed
assert response['status'] == 'success'
assert response['http_status'] == 200
|
gpl-3.0
|
rdo-infra/ci-config
|
ci-scripts/dlrnapi_promoter/qcow_client.py
|
1
|
10534
|
"""
This file contains classes and functions to interact with qcow image servers
"""
import copy
import logging
import os
import paramiko
from common import PromotionError
class QcowConnectionClient(object):
"""
Proxy class for client connection
"""
_log = logging.getLogger("promoter")
def __init__(self, server_conf):
self._host = server_conf['host']
self._user = server_conf['user']
self._client_type = server_conf['client']
self._keypath = server_conf['keypath']
self._client = os
if self._client_type == "sftp":
client = paramiko.SSHClient()
client.load_system_host_keys()
client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
keypath = os.path.expanduser(self._keypath)
self.key = paramiko.RSAKey.from_private_key_file(filename=keypath)
self.kwargs = {}
if self._user is not None:
self.kwargs['username'] = self._user
else:
self.kwargs['username'] = os.environ.get("USER")
self._log.debug("Connecting to %s as user %s", self._host,
self._user)
self.ssh_client = client
def connect(self):
if hasattr(self, 'ssh_client'):
self.ssh_client.connect(self._host, pkey=self.key, **self.kwargs)
self._client = self.ssh_client.open_sftp()
def __getattr__(self, item):
return getattr(self._client, item)
def close(self):
if self._client_type == "sftp":
self._client.close()
class QcowClient(object):
"""
    This class interacts with qcow image servers
"""
log = logging.getLogger("promoter")
def __init__(self, config):
self.config = config
self.git_root = self.config.git_root
self.promote_script = os.path.join(self.git_root,
'ci-scripts', 'promote-images.sh')
self.distro_name = self.config.distro_name
self.distro_version = self.config.distro_version
self.rollback_links = {}
server_conf = self.config.overcloud_images.get('qcow_servers')
qcow_server = self.config.default_qcow_server
self.user = server_conf[qcow_server]['user']
self.root = server_conf[qcow_server]['root']
self.host = server_conf[qcow_server]['host']
self.client = QcowConnectionClient(server_conf[qcow_server])
self.images_dir = os.path.join(
os.path.join(config.stage_root, self.root),
config.distro, config.release, "rdo_trunk")
def validate_qcows(self, dlrn_hash, name=None, assume_valid=False):
"""
Check we have the images dir in the server
if name is specified, verify that name points to the hash
- maybe qcow ran and failed
Check at which point of qcow promotion we stopped
1) did we create a new symlink ?
2) did we create the previous symlink ?
3) are all the images uploaded correctly ?
:param dlrn_hash: The hash to check
:param name: The promotion name
:param assume_valid: report everything worked unconditionally
:return: A dict with result of the validation
"""
try:
self.client.listdir(self.images_dir)
self.client.chdir(self.images_dir)
except EnvironmentError as ex:
self.log.error("Qcow-client: Image root dir %s does not exist "
"in the server, or is not accessible")
self.log.exception(ex)
raise
results = {
"hash_valid": False,
"promotion_valid": False,
"qcow_valid": False,
"missing_qcows": copy.copy(
self.config.overcloud_images['qcow_images']),
"present_qcows": [],
}
stat = None
images = None
images_path = os.path.join(self.images_dir, dlrn_hash.full_hash)
try:
stat = self.client.stat(images_path)
images = sorted(self.client.listdir(images_path))
except EnvironmentError:
self.log.error("Images path for hash %s not present or "
"accessible", dlrn_hash)
if not images:
self.log.error("No images found")
if stat and images:
results['hash_valid'] = True
results['present_qcows'] = images
results['missing_qcows'] = \
list(set(self.config.overcloud_images[
'qcow_images']).difference(
images))
if images == self.config.overcloud_images['qcow_images']:
results['qcow_valid'] = True
if name is not None:
try:
link = self.client.readlink(name)
if link == dlrn_hash.full_hash:
results['promotion_valid'] = True
except EnvironmentError:
self.log.error("%s was not promoted to %s",
dlrn_hash.full_hash, name)
return results
def rollback(self):
"""
Rolls back the link to the initial status
Rollback is guaranteed to work only for caught exceptions, and it may
not be really useful. We have a rollback only if a remove or a symlink
fails.
- If a remove fails, it means that we don't need to rollback
- If a symlink fails, then it will probably fail on rollback too.
:return: None
"""
for name, target in self.rollback_links.items():
self.client.remove(name)
self.client.symlink(target, name)
self.rollback_links = {}
def promote(self, candidate_hash, target_label, candidate_label=None,
create_previous=True, validation=True):
"""
Effective promotion of the images. This method will handle symbolic
links to the dir containing images from the candidate hash,
optionally saving the current link as previous
:param candidate_hash: The dlrn hash to promote
:param target_label: The name of the link to create
:param candidate_label: Currently unused
:param create_previous: A bool to determine if previous link is created
:param validation: A bool to determine if qcow validation should be done
:return: None
"""
self.client.connect()
if validation:
self.validate_qcows(candidate_hash)
self.client.chdir(self.images_dir)
log_header = "Qcow promote '{}' to {}:".format(candidate_hash,
target_label)
self.log.info("%s Attempting promotion", log_header)
# Check if candidate_hash dir is present
try:
self.client.stat(candidate_hash.full_hash)
except EnvironmentError as ex:
self.log.error("%s images dir for hash %s not present or not "
"accessible", log_header, candidate_hash)
self.log.exception(ex)
self.client.close()
raise PromotionError("{} No images dir for hash {}"
"".format(log_header, candidate_hash))
# Check if the target label exists and points to a hash dir
current_hash = None
try:
current_hash = self.client.readlink(target_label)
except EnvironmentError:
self.log.debug("%s No link named %s exists", log_header,
target_label)
# If this exists Check if we can remove the symlink
if current_hash:
self.rollback_links['target_label'] = current_hash
try:
self.client.remove(target_label)
except EnvironmentError as ex:
self.log.debug("Unable to remove the target_label: %s",
target_label)
self.log.exception(ex)
self.client.close()
raise
# Check if a previous link exists and points to an hash-dir
previous_label = "previous-{}".format(target_label)
previous_hash = None
try:
previous_hash = self.client.readlink(previous_label)
except EnvironmentError:
self.log.debug("%s No previous-link named %s exists",
log_header,
previous_label)
self.log.debug("Previous hash %s", previous_hash)
# If it exists and we are handling it, check if we can remove and
# reassign it
if current_hash and previous_hash and create_previous:
self.rollback_links[previous_label] = previous_hash
try:
self.client.remove(previous_label)
except EnvironmentError as ex:
self.log.debug("Unable to remove the target_label: %s",
target_label)
self.log.exception(ex)
self.client.close()
# Rollback is not tested, we enable it later, when tests are
# easier to add
# self.rollback()
raise
try:
self.client.symlink(current_hash, previous_label)
except EnvironmentError as ex:
self.log.error("%s failed to link %s to %s", log_header,
previous_label, current_hash)
self.log.exception(ex)
# Rollback is not tested, we enable it later, when tests are
# easier to add
# self.rollback()
self.client.close()
raise
# Finally the effective promotion
try:
c_hash = os.path.join(self.images_dir, candidate_hash.full_hash)
self.client.symlink(c_hash, target_label)
self.log.debug("Created link {} -> {}".format(
candidate_hash.full_hash, target_label))
except EnvironmentError as ex:
self.log.error("%s failed to link %s to %s", log_header,
target_label, candidate_hash.full_hash)
self.log.exception(ex)
# Rollback is not tested, we enable it later, when tests are
# easier to add
# self.rollback()
finally:
self.client.close()
self.log.info("%s Successful promotion", log_header)
|
apache-2.0
|
gautam1858/tensorflow
|
tensorflow/contrib/constrained_optimization/python/swap_regret_optimizer_test.py
|
25
|
9262
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for constrained_optimization.python.swap_regret_optimizer."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.constrained_optimization.python import swap_regret_optimizer
from tensorflow.contrib.constrained_optimization.python import test_util
from tensorflow.python.ops import standard_ops
from tensorflow.python.platform import test
from tensorflow.python.training import gradient_descent
class AdditiveSwapRegretOptimizerWrapper(
swap_regret_optimizer.AdditiveSwapRegretOptimizer):
"""Testing wrapper class around AdditiveSwapRegretOptimizer.
This class is identical to AdditiveSwapRegretOptimizer, except that it caches
the internal optimization state when _stochastic_matrix() is called, so that
we can test that the stochastic matrices take on their expected values.
"""
def __init__(self, optimizer, constraint_optimizer=None):
"""Same as AdditiveSwapRegretOptimizer.__init__()."""
super(AdditiveSwapRegretOptimizerWrapper, self).__init__(
optimizer=optimizer, constraint_optimizer=constraint_optimizer)
self._cached_stochastic_matrix = None
@property
def stochastic_matrix(self):
"""Returns the cached stochastic matrix."""
return self._cached_stochastic_matrix
def _stochastic_matrix(self, state):
"""Caches the internal state for testing."""
self._cached_stochastic_matrix = super(AdditiveSwapRegretOptimizerWrapper,
self)._stochastic_matrix(state)
return self._cached_stochastic_matrix
class MultiplicativeSwapRegretOptimizerWrapper(
swap_regret_optimizer.MultiplicativeSwapRegretOptimizer):
"""Testing wrapper class around MultiplicativeSwapRegretOptimizer.
This class is identical to MultiplicativeSwapRegretOptimizer, except that it
caches the internal optimization state when _stochastic_matrix() is called, so
that we can test that the stochastic matrices take on their expected values.
"""
def __init__(self,
optimizer,
constraint_optimizer=None,
minimum_multiplier_radius=None,
initial_multiplier_radius=None):
"""Same as MultiplicativeSwapRegretOptimizer.__init__()."""
super(MultiplicativeSwapRegretOptimizerWrapper, self).__init__(
optimizer=optimizer,
constraint_optimizer=constraint_optimizer,
minimum_multiplier_radius=1e-3,
initial_multiplier_radius=initial_multiplier_radius)
self._cached_stochastic_matrix = None
@property
def stochastic_matrix(self):
"""Returns the cached stochastic matrix."""
return self._cached_stochastic_matrix
def _stochastic_matrix(self, state):
"""Caches the internal state for testing."""
self._cached_stochastic_matrix = super(
MultiplicativeSwapRegretOptimizerWrapper,
self)._stochastic_matrix(state)
return self._cached_stochastic_matrix
class SwapRegretOptimizerTest(test.TestCase):
def test_maximum_eigenvector_power_method(self):
"""Tests power method routine on some known left-stochastic matrices."""
matrix1 = np.matrix([[0.6, 0.1, 0.1], [0.0, 0.6, 0.9], [0.4, 0.3, 0.0]])
matrix2 = np.matrix([[0.4, 0.4, 0.2], [0.2, 0.1, 0.5], [0.4, 0.5, 0.3]])
with self.cached_session() as session:
eigenvector1 = session.run(
swap_regret_optimizer._maximal_eigenvector_power_method(
standard_ops.constant(matrix1)))
eigenvector2 = session.run(
swap_regret_optimizer._maximal_eigenvector_power_method(
standard_ops.constant(matrix2)))
# Check that eigenvector1 and eigenvector2 are eigenvectors of matrix1 and
# matrix2 (respectively) with associated eigenvalue 1.
matrix_eigenvector1 = np.tensordot(matrix1, eigenvector1, axes=1)
matrix_eigenvector2 = np.tensordot(matrix2, eigenvector2, axes=1)
self.assertAllClose(eigenvector1, matrix_eigenvector1, rtol=0, atol=1e-6)
self.assertAllClose(eigenvector2, matrix_eigenvector2, rtol=0, atol=1e-6)
def test_project_stochastic_matrix_wrt_euclidean_norm(self):
"""Tests Euclidean projection routine on some known values."""
matrix = standard_ops.constant([[-0.1, -0.1, 0.4], [-0.8, 0.4, 1.2],
[-0.3, 0.1, 0.2]])
expected_projected_matrix = np.array([[0.6, 0.1, 0.1], [0.0, 0.6, 0.9],
[0.4, 0.3, 0.0]])
with self.cached_session() as session:
projected_matrix = session.run(
swap_regret_optimizer._project_stochastic_matrix_wrt_euclidean_norm(
matrix))
self.assertAllClose(
expected_projected_matrix, projected_matrix, rtol=0, atol=1e-6)
def test_project_log_stochastic_matrix_wrt_kl_divergence(self):
"""Tests KL-divergence projection routine on some known values."""
matrix = standard_ops.constant([[0.2, 0.8, 0.6], [0.1, 0.2, 1.5],
[0.2, 1.0, 0.9]])
expected_projected_matrix = np.array([[0.4, 0.4, 0.2], [0.2, 0.1, 0.5],
[0.4, 0.5, 0.3]])
with self.cached_session() as session:
projected_matrix = session.run(
standard_ops.exp(
swap_regret_optimizer.
_project_log_stochastic_matrix_wrt_kl_divergence(
standard_ops.log(matrix))))
self.assertAllClose(
expected_projected_matrix, projected_matrix, rtol=0, atol=1e-6)
def test_additive_swap_regret_optimizer(self):
"""Tests that the stochastic matrices update as expected."""
minimization_problem = test_util.ConstantMinimizationProblem(
np.array([0.6, -0.1, 0.4]))
optimizer = AdditiveSwapRegretOptimizerWrapper(
gradient_descent.GradientDescentOptimizer(1.0))
train_op = optimizer.minimize_constrained(minimization_problem)
# Calculated using a numpy+python implementation of the algorithm.
expected_matrices = [
np.array([[1.0, 1.0, 1.0, 1.0], [0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0]]),
np.array([[0.66666667, 1.0, 1.0, 1.0], [0.26666667, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0], [0.06666667, 0.0, 0.0, 0.0]]),
np.array([[0.41666667, 0.93333333, 1.0,
0.98333333], [0.46666667, 0.05333333, 0.0,
0.01333333], [0.0, 0.0, 0.0, 0.0],
[0.11666667, 0.01333333, 0.0, 0.00333333]]),
]
matrices = []
with self.cached_session() as session:
session.run(standard_ops.global_variables_initializer())
while len(matrices) < len(expected_matrices):
matrices.append(session.run(optimizer.stochastic_matrix))
session.run(train_op)
for expected, actual in zip(expected_matrices, matrices):
self.assertAllClose(expected, actual, rtol=0, atol=1e-6)
def test_multiplicative_swap_regret_optimizer(self):
"""Tests that the stochastic matrices update as expected."""
minimization_problem = test_util.ConstantMinimizationProblem(
np.array([0.6, -0.1, 0.4]))
optimizer = MultiplicativeSwapRegretOptimizerWrapper(
gradient_descent.GradientDescentOptimizer(1.0),
initial_multiplier_radius=0.8)
train_op = optimizer.minimize_constrained(minimization_problem)
# Calculated using a numpy+python implementation of the algorithm.
expected_matrices = [
np.array([[0.4, 0.4, 0.4, 0.4], [0.2, 0.2, 0.2, 0.2],
[0.2, 0.2, 0.2, 0.2], [0.2, 0.2, 0.2, 0.2]]),
np.array([[0.36999014, 0.38528351, 0.38528351, 0.38528351], [
0.23517483, 0.21720297, 0.21720297, 0.21720297
], [0.17774131, 0.18882719, 0.18882719, 0.18882719],
[0.21709373, 0.20868632, 0.20868632, 0.20868632]]),
np.array([[0.33972109, 0.36811863, 0.37118462, 0.36906575], [
0.27114826, 0.23738228, 0.23376693, 0.23626491
], [0.15712313, 0.17641793, 0.17858959, 0.17708679],
[0.23200752, 0.21808115, 0.21645886, 0.21758255]]),
]
matrices = []
with self.cached_session() as session:
session.run(standard_ops.global_variables_initializer())
while len(matrices) < len(expected_matrices):
matrices.append(session.run(optimizer.stochastic_matrix))
session.run(train_op)
for expected, actual in zip(expected_matrices, matrices):
self.assertAllClose(expected, actual, rtol=0, atol=1e-6)
if __name__ == '__main__':
test.main()
|
apache-2.0
|
zzxuanyuan/root
|
tutorials/pyroot/zdemo.py
|
44
|
8459
|
## \file
## \ingroup tutorial_pyroot
## \notebook
## This macro is an example of graphs in log scales with annotations.
##
## The presented results
## are predictions of invariant cross-section of Direct Photons produced
## at RHIC energies, based on the universality of scaling function H(z).
##
##
## These Figures were published in JINR preprint E2-98-64, Dubna,
## 1998 and submitted to CPC.
##
## \macro_image
## \macro_code
##
## \authors Michael Tokarev, Elena Potrebenikova (JINR Dubna)
import ROOT
from array import array
NMAX = 20
Z = array( 'f', [0.]*NMAX )
HZ = array( 'f', [0.]*NMAX )
PT = array( 'f', [0.]*NMAX )
INVSIG = array( 'f', [0.]*NMAX )
NLOOP = 0
saves = {}
#_______________________________________________________________________________
def hz_calc( ENERG, DENS, TGRAD, PTMIN, PTMAX, DELP ):
from math import sin, cos, sqrt
global NLOOP
global Z, HZ, PT, INVSIG
CSEFT= 1.
GM1 = 0.00001
GM2 = 0.00001
A1 = 1.
A2 = 1.
ALX = 2.
BETA = 1.
KF1 = 8.E-7
KF2 = 5.215
MN = 0.9383
DEGRAD=0.01745329
# print 'ENR= %f DENS= %f PTMIN= %f PTMAX= %f DELP= %f ' % (ENERG,DENS,PTMIN,PTMAX,DELP)
DNDETA= DENS
MB1 = MN*A1
MB2 = MN*A2
EB1 = ENERG/2.*A1
EB2 = ENERG/2.*A2
M1 = GM1
M2 = GM2
THET = TGRAD*DEGRAD
NLOOP = int((PTMAX-PTMIN)/DELP)
for I in range(NLOOP):
PT[I]=PTMIN+I*DELP
PTOT = PT[I]/sin(THET)
ETOT = sqrt(M1*M1 + PTOT*PTOT)
PB1 = sqrt(EB1*EB1 - MB1*MB1)
PB2 = sqrt(EB2*EB2 - MB2*MB2)
P2P3 = EB2*ETOT+PB2*PTOT*cos(THET)
P1P2 = EB2*EB1+PB2*PB1
P1P3 = EB1*ETOT-PB1*PTOT*cos(THET)
X1 = P2P3/P1P2
X2 = P1P3/P1P2
Y1 = X1+sqrt(X1*X2*(1.-X1)/(1.-X2))
Y2 = X2+sqrt(X1*X2*(1.-X2)/(1.-X1))
S = (MB1*MB1)+2.*P1P2+(MB2*MB2)
SMIN = 4.*((MB1*MB1)*(X1*X1) +2.*X1*X2*P1P2+(MB2*MB2)*(X2*X2))
SX1 = 4.*( 2*(MB1*MB1)*X1+2*X2*P1P2)
SX2 = 4.*( 2*(MB2*MB2)*X2+2*X1*P1P2)
SX1X2= 4.*(2*P1P2)
DELM = pow((1.-Y1)*(1.-Y2),ALX)
Z[I] = sqrt(SMIN)/DELM/pow(DNDETA,BETA)
Y1X1 = 1. +X2*(1-2.*X1)/(2.*(Y1-X1)*(1.-X2))
Y1X2 = X1*(1-X1)/(2.*(Y1-X1)*(1.-X2)*(1.-X2))
Y2X1 = X2*(1-X2)/(2.*(Y2-X2)*(1.-X1)*(1.-X1))
Y2X2 = 1. +X1*(1-2.*X2)/(2.*(Y2-X2)*(1.-X1))
Y2X1X2= Y2X1*( (1.-2.*X2)/(X2*(1-X2)) -( Y2X2-1.)/(Y2-X2))
Y1X1X2= Y1X2*( (1.-2.*X1)/(X1*(1-X1)) -( Y1X1-1.)/(Y1-X1))
KX1=-DELM*(Y1X1*ALX/(1.-Y1) + Y2X1*ALX/(1.-Y2))
KX2=-DELM*(Y2X2*ALX/(1.-Y2) + Y1X2*ALX/(1.-Y1))
ZX1=Z[I]*(SX1/(2.*SMIN)-KX1/DELM)
ZX2=Z[I]*(SX2/(2.*SMIN)-KX2/DELM)
H1=ZX1*ZX2
HZ[I]=KF1/pow(Z[I],KF2)
INVSIG[I]=(HZ[I]*H1*16.)/S
#_______________________________________________________________________________
def zdemo():
from array import array
global NLOOP
global Z, HZ, PT, INVSIG
global saves
global hz_calc
# Create a new canvas.
c1 = ROOT.TCanvas( 'zdemo', 'Monte Carlo Study of Z scaling', 10, 40, 800, 600 )
c1.Range( 0, 0, 25, 18 )
c1.SetFillColor( 40 )
    saves[ 'c1' ] = c1 # prevent deletion at end of zdemo
pl = ROOT.TPaveLabel( 1, 16.3, 24, 17.5,
'Z-scaling of Direct Photon Productions in pp Collisions at RHIC Energies', 'br' )
pl.SetFillColor(18)
pl.SetTextFont(32)
pl.SetTextColor(49)
pl.Draw()
saves[ 'pl' ] = pl
t = ROOT.TLatex()
t.SetTextFont(32)
t.SetTextColor(1)
t.SetTextSize(0.03)
t.SetTextAlign(12)
t.DrawLatex( 3.1, 15.5, 'M.Tokarev, E.Potrebenikova ')
t.DrawLatex( 14., 15.5, 'JINR preprint E2-98-64, Dubna, 1998 ')
saves[ 't' ] = t
pad1 = ROOT.TPad( 'pad1', 'This is pad1', 0.02, 0.02, 0.48, 0.83, 33 )
pad2 = ROOT.TPad( 'pad2', 'This is pad2', 0.52, 0.02, 0.98, 0.83, 33 )
pad1.Draw()
pad2.Draw()
saves[ 'pad1' ] = pad1; saves[ 'pad2' ] = pad2
#
    # Cross-section of direct photon production in pp collisions at 63 GeV vs Pt
#
energ = 63
dens = 1.766
tgrad = 90.
ptmin = 4.
ptmax = 24.
delp = 2.
hz_calc( energ, dens, tgrad, ptmin, ptmax, delp )
pad1.cd()
pad1.Range( -0.255174, -19.25, 2.29657, -6.75 )
pad1.SetLogx()
pad1.SetLogy()
# create a 2-d histogram to define the range
pad1.DrawFrame( 1, 1e-18, 110, 1e-8 )
pad1.GetFrame().SetFillColor( 19 )
t = ROOT.TLatex()
t.SetNDC()
t.SetTextFont( 62 )
t.SetTextColor( 36 )
t.SetTextSize( 0.08 )
t.SetTextAlign( 12 )
t.DrawLatex( 0.6, 0.85, 'p - p' )
t.SetTextSize( 0.05 )
t.DrawLatex( 0.6, 0.79, 'Direct #gamma' )
t.DrawLatex( 0.6, 0.75, '#theta = 90^{o}' )
t.DrawLatex( 0.20, 0.45, 'Ed^{3}#sigma/dq^{3}' )
t.DrawLatex( 0.18, 0.40, '(barn/Gev^{2})' )
t.SetTextSize( 0.045 )
t.SetTextColor( ROOT.kBlue )
t.DrawLatex( 0.22, 0.260, '#sqrt{s} = 63(GeV)' )
t.SetTextColor( ROOT.kRed )
t.DrawLatex( 0.22, 0.205,'#sqrt{s} = 200(GeV)' )
t.SetTextColor( 6 )
t.DrawLatex( 0.22, 0.15, '#sqrt{s} = 500(GeV)' )
t.SetTextSize( 0.05 )
t.SetTextColor( 1 )
t.DrawLatex( 0.6, 0.06, 'q_{T} (Gev/c)' )
saves[ 't2' ] = t # note the label that is used!
gr1 = ROOT.TGraph( NLOOP, PT, INVSIG )
gr1.SetLineColor( 38 )
gr1.SetMarkerColor( ROOT.kBlue )
gr1.SetMarkerStyle( 21 )
gr1.SetMarkerSize( 1.1 )
gr1.Draw( 'LP' )
saves[ 'gr1' ] = gr1
#
# Cross-section of direct photon production in pp collisions at 200 GeV vs Pt
#
energ = 200
dens = 2.25
tgrad = 90.
ptmin = 4.
ptmax = 64.
delp = 6.
hz_calc( energ, dens, tgrad, ptmin, ptmax, delp )
gr2 = ROOT.TGraph( NLOOP, PT, INVSIG )
gr2.SetLineColor( 38 )
gr2.SetMarkerColor( ROOT.kRed )
gr2.SetMarkerStyle( 29 )
gr2.SetMarkerSize( 1.5 )
gr2.Draw( 'LP' )
saves[ 'gr2' ] = gr2
#
# Cross-section of direct photon production in pp collisions at 500 GeV vs Pt
#
energ = 500
dens = 2.73
tgrad = 90.
ptmin = 4.
ptmax = 104.
delp = 10.
hz_calc( energ, dens, tgrad, ptmin, ptmax, delp )
gr3 = ROOT.TGraph( NLOOP, PT, INVSIG )
gr3.SetLineColor( 38 )
gr3.SetMarkerColor( 6 )
gr3.SetMarkerStyle( 8 )
gr3.SetMarkerSize( 1.1 )
gr3.Draw( 'LP' )
saves[ 'gr3' ] = gr3
dum = array( 'f', [0.] )
graph = ROOT.TGraph( 1, dum, dum )
graph.SetMarkerColor( ROOT.kBlue )
graph.SetMarkerStyle( 21 )
graph.SetMarkerSize( 1.1 )
graph.SetPoint( 0, 1.7, 1.e-16 )
graph.Draw( 'LP' )
saves[ 'graph' ] = graph
graph = ROOT.TGraph( 1, dum, dum )
graph.SetMarkerColor( ROOT.kRed )
graph.SetMarkerStyle( 29 )
graph.SetMarkerSize( 1.5 )
graph.SetPoint( 0, 1.7, 2.e-17 )
graph.Draw( 'LP' )
saves[ 'graph2' ] = graph # note the label that is used!
graph = ROOT.TGraph( 1, dum, dum )
graph.SetMarkerColor( 6 )
graph.SetMarkerStyle( 8 )
graph.SetMarkerSize( 1.1 )
graph.SetPoint( 0, 1.7, 4.e-18)
graph.Draw( 'LP' )
saves[ 'graph3' ] = graph # note the label that is used!
pad2.cd()
pad2.Range( -0.43642, -23.75, 3.92778, -6.25 )
pad2.SetLogx()
pad2.SetLogy()
pad2.DrawFrame( 1, 1e-22, 3100, 1e-8 )
pad2.GetFrame().SetFillColor( 19 )
gr = ROOT.TGraph( NLOOP, Z, HZ )
gr.SetTitle( 'HZ vs Z' )
gr.SetFillColor( 19 )
gr.SetLineColor( 9 )
gr.SetMarkerColor( 50 )
gr.SetMarkerStyle( 29 )
gr.SetMarkerSize( 1.5 )
gr.Draw( 'LP' )
saves[ 'gr' ] = gr
t = ROOT.TLatex()
t.SetNDC()
t.SetTextFont( 62 )
t.SetTextColor( 36 )
t.SetTextSize( 0.08 )
t.SetTextAlign( 12 )
t.DrawLatex( 0.6, 0.85, 'p - p' )
t.SetTextSize( 0.05 )
t.DrawLatex( 0.6, 0.79, 'Direct #gamma' )
t.DrawLatex( 0.6, 0.75, '#theta = 90^{o}' )
t.DrawLatex( 0.70, 0.55, 'H(z)' )
t.DrawLatex( 0.68, 0.50, '(barn)' )
t.SetTextSize( 0.045 )
t.SetTextColor( 46 )
t.DrawLatex( 0.20, 0.30, '#sqrt{s}, GeV' )
t.DrawLatex( 0.22, 0.26, '63' )
t.DrawLatex( 0.22, 0.22, '200' )
t.DrawLatex( 0.22, 0.18, '500' )
t.SetTextSize( 0.05 )
t.SetTextColor( 1 )
t.DrawLatex( 0.88, 0.06, 'z' )
saves[ 't3' ] = t # note the label that is used!
c1.Modified()
c1.Update()
if __name__ == '__main__': # run if loaded as script
zdemo()
|
lgpl-2.1
|
lekum/ansible-modules-extras
|
monitoring/logicmonitor_facts.py
|
3
|
22050
|
#!/usr/bin/python
"""LogicMonitor Ansible module for managing Collectors, Hosts and Hostgroups
Copyright (C) 2015 LogicMonitor
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software Foundation,
Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA"""
import socket
import types
import urllib
HAS_LIB_JSON = True
try:
import json
# Detect the python-json library which is incompatible
# Look for simplejson if that's the case
try:
if (
not isinstance(json.loads, types.FunctionType) or
not isinstance(json.dumps, types.FunctionType)
):
raise ImportError
except AttributeError:
raise ImportError
except ImportError:
try:
import simplejson as json
except ImportError:
print(
'\n{"msg": "Error: ansible requires the stdlib json or ' +
'simplejson module, neither was found!", "failed": true}'
)
HAS_LIB_JSON = False
except SyntaxError:
print(
'\n{"msg": "SyntaxError: probably due to installed simplejson ' +
'being for a different python version", "failed": true}'
)
HAS_LIB_JSON = False
DOCUMENTATION = '''
---
module: logicmonitor_facts
short_description: Collect facts about LogicMonitor objects
description:
- LogicMonitor is a hosted, full-stack, infrastructure monitoring platform.
    - This module collects facts about hosts and host groups within your LogicMonitor account.
version_added: "2.2"
author: [Ethan Culler-Mayeno (@ethanculler), Jeff Wozniak (@woz5999)]
notes:
- You must have an existing LogicMonitor account for this module to function.
requirements: ["An existing LogicMonitor account", "Linux"]
options:
target:
description:
- The LogicMonitor object you wish to manage.
required: true
default: null
choices: ['host', 'hostgroup']
company:
description:
- The LogicMonitor account company name. If you would log in to your account at "superheroes.logicmonitor.com" you would use "superheroes".
required: true
default: null
user:
description:
- A LogicMonitor user name. The module will authenticate and perform actions on behalf of this user.
required: true
default: null
password:
description:
- The password for the chosen LogicMonitor User.
- If an md5 hash is used, the digest flag must be set to true.
required: true
default: null
collector:
description:
- The fully qualified domain name of a collector in your LogicMonitor account.
- This is optional for querying a LogicMonitor host when a displayname is specified.
- This is required for querying a LogicMonitor host when a displayname is not specified.
required: false
default: null
hostname:
description:
- The hostname of a host in your LogicMonitor account, or the desired hostname of a device to add into monitoring.
- Required for managing hosts (target=host).
required: false
default: 'hostname -f'
displayname:
description:
- The display name of a host in your LogicMonitor account or the desired display name of a device to add into monitoring.
required: false
default: 'hostname -f'
fullpath:
description:
- The fullpath of the hostgroup object you would like to manage.
- Recommend running on a single ansible host.
- Required for management of LogicMonitor host groups (target=hostgroup).
required: false
default: null
...
'''
EXAMPLES = '''
#example of querying a list of hosts
```
---
- hosts: hosts
user: root
vars:
company: 'yourcompany'
user: 'Luigi'
password: 'ImaLuigi,number1!'
tasks:
- name: query a list of hosts
# All tasks should use local_action
local_action:
logicmonitor_facts:
target: host
company: '{{ company }}'
user: '{{ user }}'
password: '{{ password }}'
```
#example of querying a hostgroup
```
---
- hosts: somemachine.superheroes.com
user: root
vars:
company: 'yourcompany'
user: 'mario'
password: 'itsame.Mario!'
tasks:
- name: query a host group
# All tasks should use local_action
local_action:
logicmonitor_facts:
target: hostgroup
fullpath: '/servers/production'
company: '{{ company }}'
user: '{{ user }}'
password: '{{ password }}'
```
'''
RETURN = '''
---
ansible_facts:
description: LogicMonitor properties set for the specified object
returned: success
type: list of dicts containing name/value pairs
example: >
{
"name": "dc",
"value": "1"
},
{
"name": "type",
"value": "prod"
},
{
"name": "system.categories",
"value": ""
},
{
"name": "snmp.community",
"value": "********"
}
...
'''
class LogicMonitor(object):
def __init__(self, module, **params):
self.__version__ = "1.0-python"
self.module = module
self.module.debug("Instantiating LogicMonitor object")
self.check_mode = False
self.company = params["company"]
self.user = params["user"]
self.password = params["password"]
self.fqdn = socket.getfqdn()
self.lm_url = "logicmonitor.com/santaba"
self.__version__ = self.__version__ + "-ansible-module"
def rpc(self, action, params):
"""Make a call to the LogicMonitor RPC library
and return the response"""
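        # Illustrative call (action names follow the callers below; values are
        # placeholders):
        #   raw = self.rpc("getHost", {"displayName": "web01"})
        #   data = json.loads(raw)["data"]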
self.module.debug("Running LogicMonitor.rpc")
param_str = urllib.urlencode(params)
creds = urllib.urlencode(
{"c": self.company,
"u": self.user,
"p": self.password})
if param_str:
param_str = param_str + "&"
param_str = param_str + creds
try:
url = ("https://" + self.company + "." + self.lm_url +
"/rpc/" + action + "?" + param_str)
# Set custom LogicMonitor header with version
headers = {"X-LM-User-Agent": self.__version__}
# Set headers
f = open_url(url, headers=headers)
raw = f.read()
resp = json.loads(raw)
if resp["status"] == 403:
self.module.debug("Authentication failed.")
self.fail(msg="Error: " + resp["errmsg"])
else:
return raw
except IOError:
ioe = get_exception()
self.fail(msg="Error: Exception making RPC call to " +
"https://" + self.company + "." + self.lm_url +
"/rpc/" + action + "\nException" + str(ioe))
def get_collectors(self):
"""Returns a JSON object containing a list of
LogicMonitor collectors"""
self.module.debug("Running LogicMonitor.get_collectors...")
self.module.debug("Making RPC call to 'getAgents'")
resp = self.rpc("getAgents", {})
resp_json = json.loads(resp)
if resp_json["status"] is 200:
self.module.debug("RPC call succeeded")
return resp_json["data"]
else:
self.fail(msg=resp)
def get_host_by_hostname(self, hostname, collector):
"""Returns a host object for the host matching the
specified hostname"""
self.module.debug("Running LogicMonitor.get_host_by_hostname...")
self.module.debug("Looking for hostname " + hostname)
self.module.debug("Making RPC call to 'getHosts'")
hostlist_json = json.loads(self.rpc("getHosts", {"hostGroupId": 1}))
if collector:
if hostlist_json["status"] == 200:
self.module.debug("RPC call succeeded")
hosts = hostlist_json["data"]["hosts"]
self.module.debug(
"Looking for host matching: hostname " + hostname +
" and collector " + str(collector["id"]))
for host in hosts:
if (host["hostName"] == hostname and
host["agentId"] == collector["id"]):
self.module.debug("Host match found")
return host
self.module.debug("No host match found")
return None
else:
self.module.debug("RPC call failed")
self.module.debug(hostlist_json)
else:
self.module.debug("No collector specified")
return None
def get_host_by_displayname(self, displayname):
"""Returns a host object for the host matching the
specified display name"""
self.module.debug("Running LogicMonitor.get_host_by_displayname...")
self.module.debug("Looking for displayname " + displayname)
self.module.debug("Making RPC call to 'getHost'")
host_json = (json.loads(self.rpc("getHost",
{"displayName": displayname})))
if host_json["status"] == 200:
self.module.debug("RPC call succeeded")
return host_json["data"]
else:
self.module.debug("RPC call failed")
self.module.debug(host_json)
return None
def get_collector_by_description(self, description):
"""Returns a JSON collector object for the collector
matching the specified FQDN (description)"""
self.module.debug(
"Running LogicMonitor.get_collector_by_description..."
)
collector_list = self.get_collectors()
if collector_list is not None:
self.module.debug("Looking for collector with description " +
description)
for collector in collector_list:
if collector["description"] == description:
self.module.debug("Collector match found")
return collector
self.module.debug("No collector match found")
return None
def get_group(self, fullpath):
"""Returns a JSON group object for the group matching the
specified path"""
self.module.debug("Running LogicMonitor.get_group...")
self.module.debug("Making RPC call to getHostGroups")
resp = json.loads(self.rpc("getHostGroups", {}))
if resp["status"] == 200:
self.module.debug("RPC called succeeded")
groups = resp["data"]
self.module.debug("Looking for group matching " + fullpath)
for group in groups:
if group["fullPath"] == fullpath.lstrip('/'):
self.module.debug("Group match found")
return group
self.module.debug("No group match found")
return None
else:
self.module.debug("RPC call failed")
self.module.debug(resp)
return None
def create_group(self, fullpath):
"""Recursively create a path of host groups.
Returns the id of the newly created hostgroup"""
self.module.debug("Running LogicMonitor.create_group...")
res = self.get_group(fullpath)
if res:
self.module.debug("Group " + fullpath + " exists.")
return res["id"]
if fullpath == "/":
self.module.debug("Specified group is root. Doing nothing.")
return 1
else:
self.module.debug("Creating group named " + fullpath)
self.module.debug("System changed")
self.change = True
if self.check_mode:
self.exit(changed=True)
parentpath, name = fullpath.rsplit('/', 1)
parentgroup = self.get_group(parentpath)
parentid = 1
if parentpath == "":
parentid = 1
elif parentgroup:
parentid = parentgroup["id"]
else:
parentid = self.create_group(parentpath)
h = None
# Determine if we're creating a group from host or hostgroup class
if hasattr(self, '_build_host_group_hash'):
h = self._build_host_group_hash(
fullpath,
self.description,
self.properties,
self.alertenable)
h["name"] = name
h["parentId"] = parentid
else:
h = {"name": name,
"parentId": parentid,
"alertEnable": True,
"description": ""}
self.module.debug("Making RPC call to 'addHostGroup'")
resp = json.loads(
self.rpc("addHostGroup", h))
if resp["status"] == 200:
self.module.debug("RPC call succeeded")
return resp["data"]["id"]
elif resp["errmsg"] == "The record already exists":
self.module.debug("The hostgroup already exists")
group = self.get_group(fullpath)
return group["id"]
else:
self.module.debug("RPC call failed")
self.fail(
msg="Error: unable to create new hostgroup \"" + name +
"\".\n" + resp["errmsg"])
def fail(self, msg):
self.module.fail_json(msg=msg, changed=self.change)
def exit(self, changed):
self.module.debug("Changed: " + changed)
self.module.exit_json(changed=changed)
def output_info(self, info):
self.module.debug("Registering properties as Ansible facts")
self.module.exit_json(changed=False, ansible_facts=info)
class Host(LogicMonitor):
def __init__(self, params, module=None):
"""Initializor for the LogicMonitor host object"""
self.change = False
self.params = params
self.collector = None
LogicMonitor.__init__(self, module, **self.params)
self.module.debug("Instantiating Host object")
if self.params["hostname"]:
self.module.debug("Hostname is " + self.params["hostname"])
self.hostname = self.params['hostname']
else:
self.module.debug("No hostname specified. Using " + self.fqdn)
self.hostname = self.fqdn
if self.params["displayname"]:
self.module.debug("Display name is " + self.params["displayname"])
self.displayname = self.params['displayname']
else:
self.module.debug("No display name specified. Using " + self.fqdn)
self.displayname = self.fqdn
        # Attempt to find host information via display name or hostname
self.module.debug("Attempting to find host by displayname " +
self.displayname)
info = self.get_host_by_displayname(self.displayname)
if info is not None:
self.module.debug("Host found by displayname")
            # Use the host information to grab the collector description
# if not provided
            if (not self.params.get("collector") and
"agentDescription" in info):
self.module.debug("Setting collector from host response. " +
"Collector " + info["agentDescription"])
self.params["collector"] = info["agentDescription"]
else:
self.module.debug("Host not found by displayname")
# At this point, a valid collector description is required for success
# Check that the description exists or fail
if self.params["collector"]:
self.module.debug("Collector specified is " +
self.params["collector"])
self.collector = (self.get_collector_by_description(
self.params["collector"]))
else:
self.fail(msg="No collector specified.")
# If the host wasn't found via displayname, attempt by hostname
if info is None:
self.module.debug("Attempting to find host by hostname " +
self.hostname)
info = self.get_host_by_hostname(self.hostname, self.collector)
self.info = info
def get_properties(self):
"""Returns a hash of the properties
associated with this LogicMonitor host"""
self.module.debug("Running Host.get_properties...")
if self.info:
self.module.debug("Making RPC call to 'getHostProperties'")
properties_json = (json.loads(self.rpc("getHostProperties",
{'hostId': self.info["id"],
"filterSystemProperties": True})))
if properties_json["status"] == 200:
self.module.debug("RPC call succeeded")
return properties_json["data"]
else:
self.module.debug("Error: there was an issue retrieving the " +
"host properties")
self.module.debug(properties_json["errmsg"])
self.fail(msg=properties_json["status"])
else:
self.module.debug(
"Unable to find LogicMonitor host which matches " +
self.displayname + " (" + self.hostname + ")"
)
return None
def site_facts(self):
"""Output current properties information for the Host"""
self.module.debug("Running Host.site_facts...")
if self.info:
self.module.debug("Host exists")
props = self.get_properties()
self.output_info(props)
else:
self.fail(msg="Error: Host doesn't exit.")
class Hostgroup(LogicMonitor):
def __init__(self, params, module=None):
"""Initializor for the LogicMonitor host object"""
self.change = False
self.params = params
LogicMonitor.__init__(self, module, **self.params)
self.module.debug("Instantiating Hostgroup object")
self.fullpath = self.params["fullpath"]
self.info = self.get_group(self.fullpath)
def get_properties(self, final=False):
"""Returns a hash of the properties
associated with this LogicMonitor host"""
self.module.debug("Running Hostgroup.get_properties...")
if self.info:
self.module.debug("Group found")
self.module.debug("Making RPC call to 'getHostGroupProperties'")
properties_json = json.loads(self.rpc(
"getHostGroupProperties",
{'hostGroupId': self.info["id"],
"finalResult": final}))
if properties_json["status"] == 200:
self.module.debug("RPC call succeeded")
return properties_json["data"]
else:
self.module.debug("RPC call failed")
self.fail(msg=properties_json["status"])
else:
self.module.debug("Group not found")
return None
def site_facts(self):
"""Output current properties information for the Hostgroup"""
self.module.debug("Running Hostgroup.site_facts...")
if self.info:
self.module.debug("Group exists")
props = self.get_properties(True)
self.output_info(props)
else:
self.fail(msg="Error: Group doesn't exit.")
def selector(module):
"""Figure out which object and which actions
to take given the right parameters"""
if module.params["target"] == "host":
target = Host(module.params, module)
target.site_facts()
elif module.params["target"] == "hostgroup":
# Validate target specific required parameters
if module.params["fullpath"] is not None:
target = Hostgroup(module.params, module)
target.site_facts()
else:
module.fail_json(
msg="Parameter 'fullpath' required for target 'hostgroup'")
else:
module.fail_json(
msg="Error: Unexpected target \"" + module.params["target"] +
"\" was specified.")
def main():
TARGETS = [
"host",
"hostgroup"]
module = AnsibleModule(
argument_spec=dict(
target=dict(required=True, default=None, choices=TARGETS),
company=dict(required=True, default=None),
user=dict(required=True, default=None),
password=dict(required=True, default=None, no_log=True),
            collector=dict(required=False, default=None),
hostname=dict(required=False, default=None),
displayname=dict(required=False, default=None),
fullpath=dict(required=False, default=None)
),
supports_check_mode=True
)
if HAS_LIB_JSON is not True:
module.fail_json(msg="Unable to load JSON library")
selector(module)
from ansible.module_utils.basic import *
from ansible.module_utils.urls import *
from ansible.module_utils.urls import open_url
if __name__ == "__main__":
main()
|
gpl-3.0
|
mlihan/msgapi
|
linebot/models/__init__.py
|
3
|
1810
|
# -*- coding: utf-8 -*-
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""linebot.models package."""
from .base import ( # noqa
Base,
)
from .error import ( # noqa
Error,
ErrorDetail,
)
from .events import ( # noqa
Event,
MessageEvent,
FollowEvent,
UnfollowEvent,
JoinEvent,
LeaveEvent,
PostbackEvent,
BeaconEvent,
Postback,
Beacon,
)
from .imagemap import ( # noqa
ImagemapSendMessage,
BaseSize,
ImagemapAction,
URIImagemapAction,
MessageImagemapAction,
ImagemapArea,
)
from .messages import ( # noqa
Message,
TextMessage,
ImageMessage,
VideoMessage,
AudioMessage,
LocationMessage,
StickerMessage,
)
from .responses import ( # noqa
Profile,
)
from .send_messages import ( # noqa
SendMessage,
TextSendMessage,
ImageSendMessage,
VideoSendMessage,
AudioSendMessage,
LocationSendMessage,
StickerSendMessage,
)
from .sources import ( # noqa
Source,
SourceUser,
SourceGroup,
SourceRoom,
)
from .template import ( # noqa
TemplateSendMessage,
Template,
ButtonsTemplate,
ConfirmTemplate,
CarouselTemplate,
CarouselColumn,
TemplateAction,
PostbackTemplateAction,
MessageTemplateAction,
URITemplateAction,
)
|
apache-2.0
|
mark-me/Pi-Jukebox
|
venv/Lib/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/pep517/envbuild.py
|
22
|
5763
|
"""Build wheels/sdists by installing build deps to a temporary environment.
"""
import os
import logging
from pip._vendor import pytoml
import shutil
from subprocess import check_call
import sys
from sysconfig import get_paths
from tempfile import mkdtemp
from .wrappers import Pep517HookCaller
log = logging.getLogger(__name__)
def _load_pyproject(source_dir):
with open(os.path.join(source_dir, 'pyproject.toml')) as f:
pyproject_data = pytoml.load(f)
buildsys = pyproject_data['build-system']
return buildsys['requires'], buildsys['build-backend']
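# For reference, _load_pyproject expects a [build-system] table in
# pyproject.toml roughly like the sketch below (the requirement strings are
# only placeholders; real projects declare their own):
#
#     [build-system]
#     requires = ["setuptools >= 40.8.0", "wheel"]
#     build-backend = "setuptools.build_meta"
#
# For that file it would return
# (["setuptools >= 40.8.0", "wheel"], "setuptools.build_meta").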
class BuildEnvironment(object):
"""Context manager to install build deps in a simple temporary environment
Based on code I wrote for pip, which is MIT licensed.
"""
# Copyright (c) 2008-2016 The pip developers (see AUTHORS.txt file)
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
path = None
def __init__(self, cleanup=True):
self._cleanup = cleanup
def __enter__(self):
self.path = mkdtemp(prefix='pep517-build-env-')
log.info('Temporary build environment: %s', self.path)
self.save_path = os.environ.get('PATH', None)
self.save_pythonpath = os.environ.get('PYTHONPATH', None)
install_scheme = 'nt' if (os.name == 'nt') else 'posix_prefix'
install_dirs = get_paths(install_scheme, vars={
'base': self.path,
'platbase': self.path,
})
scripts = install_dirs['scripts']
if self.save_path:
os.environ['PATH'] = scripts + os.pathsep + self.save_path
else:
os.environ['PATH'] = scripts + os.pathsep + os.defpath
if install_dirs['purelib'] == install_dirs['platlib']:
lib_dirs = install_dirs['purelib']
else:
lib_dirs = install_dirs['purelib'] + os.pathsep + \
install_dirs['platlib']
if self.save_pythonpath:
os.environ['PYTHONPATH'] = lib_dirs + os.pathsep + \
self.save_pythonpath
else:
os.environ['PYTHONPATH'] = lib_dirs
return self
def pip_install(self, reqs):
"""Install dependencies into this env by calling pip in a subprocess"""
if not reqs:
return
log.info('Calling pip to install %s', reqs)
check_call([
sys.executable, '-m', 'pip', 'install', '--ignore-installed',
'--prefix', self.path] + list(reqs))
def __exit__(self, exc_type, exc_val, exc_tb):
needs_cleanup = (
self._cleanup and
self.path is not None and
os.path.isdir(self.path)
)
if needs_cleanup:
shutil.rmtree(self.path)
if self.save_path is None:
os.environ.pop('PATH', None)
else:
os.environ['PATH'] = self.save_path
if self.save_pythonpath is None:
os.environ.pop('PYTHONPATH', None)
else:
os.environ['PYTHONPATH'] = self.save_pythonpath
def build_wheel(source_dir, wheel_dir, config_settings=None):
"""Build a wheel from a source directory using PEP 517 hooks.
:param str source_dir: Source directory containing pyproject.toml
:param str wheel_dir: Target directory to create wheel in
:param dict config_settings: Options to pass to build backend
This is a blocking function which will run pip in a subprocess to install
build requirements.
"""
if config_settings is None:
config_settings = {}
requires, backend = _load_pyproject(source_dir)
hooks = Pep517HookCaller(source_dir, backend)
with BuildEnvironment() as env:
env.pip_install(requires)
reqs = hooks.get_requires_for_build_wheel(config_settings)
env.pip_install(reqs)
return hooks.build_wheel(wheel_dir, config_settings)
def build_sdist(source_dir, sdist_dir, config_settings=None):
"""Build an sdist from a source directory using PEP 517 hooks.
:param str source_dir: Source directory containing pyproject.toml
:param str sdist_dir: Target directory to place sdist in
:param dict config_settings: Options to pass to build backend
This is a blocking function which will run pip in a subprocess to install
build requirements.
"""
if config_settings is None:
config_settings = {}
requires, backend = _load_pyproject(source_dir)
hooks = Pep517HookCaller(source_dir, backend)
with BuildEnvironment() as env:
env.pip_install(requires)
reqs = hooks.get_requires_for_build_sdist(config_settings)
env.pip_install(reqs)
return hooks.build_sdist(sdist_dir, config_settings)
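# A minimal usage sketch of the two builders above, assuming a source tree at
# './pkg' containing a pyproject.toml and the standalone ``pep517`` package
# layout (inside pip this module lives under ``pip._vendor.pep517``); the
# paths are placeholders:
#
#     from pep517.envbuild import build_wheel, build_sdist
#
#     wheel_basename = build_wheel('./pkg', './dist')
#     sdist_basename = build_sdist('./pkg', './dist')
#
# Each call creates a throwaway environment, pip-installs the declared build
# requirements into it, and then asks the backend hooks to produce the
# artifact in the target directory.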
|
agpl-3.0
|
liuqr/edx-xiaodun
|
common/lib/xmodule/xmodule/progress.py
|
127
|
4896
|
'''
Progress class for modules. Represents where a student is in a module.
Useful things to know:
- Use Progress.to_js_status_str() to convert a progress into a simple
status string to pass to js.
- Use Progress.to_js_detail_str() to convert a progress into a more detailed
string to pass to js.
In particular, these functions have a canonical handling of None.
For most subclassing needs, you should only need to reimplement
frac() and __str__().
'''
import numbers
class Progress(object):
'''Represents a progress of a/b (a out of b done)
a and b must be numeric, but not necessarily integer, with
0 <= a <= b and b > 0.
    Progress is only meaningful for modules where tracking completion makes
    sense. Other modules (e.g. html) should return None from get_progress().
TODO: add tag for module type? Would allow for smarter merging.
'''
def __init__(self, a, b):
'''Construct a Progress object. a and b must be numbers, and must have
0 <= a <= b and b > 0
'''
# Want to do all checking at construction time, so explicitly check types
if not (isinstance(a, numbers.Number) and
isinstance(b, numbers.Number)):
raise TypeError('a and b must be numbers. Passed {0}/{1}'.format(a, b))
if a > b:
a = b
if a < 0:
a = 0
if b <= 0:
raise ValueError('fraction a/b = {0}/{1} must have b > 0'.format(a, b))
self._a = a
self._b = b
def frac(self):
''' Return tuple (a,b) representing progress of a/b'''
return (self._a, self._b)
def percent(self):
''' Returns a percentage progress as a float between 0 and 100.
subclassing note: implemented in terms of frac(), assumes sanity
checking is done at construction time.
'''
(a, b) = self.frac()
return 100.0 * a / b
def started(self):
''' Returns True if fractional progress is greater than 0.
subclassing note: implemented in terms of frac(), assumes sanity
checking is done at construction time.
'''
return self.frac()[0] > 0
def inprogress(self):
''' Returns True if fractional progress is strictly between 0 and 1.
subclassing note: implemented in terms of frac(), assumes sanity
checking is done at construction time.
'''
(a, b) = self.frac()
return a > 0 and a < b
def done(self):
''' Return True if this represents done.
subclassing note: implemented in terms of frac(), assumes sanity
checking is done at construction time.
'''
(a, b) = self.frac()
return a == b
def ternary_str(self):
''' Return a string version of this progress: either
"none", "in_progress", or "done".
subclassing note: implemented in terms of frac()
'''
(a, b) = self.frac()
if a == 0:
return "none"
if a < b:
return "in_progress"
return "done"
def __eq__(self, other):
''' Two Progress objects are equal if they have identical values.
Implemented in terms of frac()'''
if not isinstance(other, Progress):
return False
(a, b) = self.frac()
(a2, b2) = other.frac()
return a == a2 and b == b2
def __ne__(self, other):
''' The opposite of equal'''
return not self.__eq__(other)
def __str__(self):
        ''' Return a string representation of this progress.
subclassing note: implemented in terms of frac().
'''
(a, b) = self.frac()
return "{0}/{1}".format(a, b)
@staticmethod
def add_counts(a, b):
'''Add two progress indicators, assuming that each represents items done:
(a / b) + (c / d) = (a + c) / (b + d).
If either is None, returns the other.
'''
if a is None:
return b
if b is None:
return a
# get numerators + denominators
(n, d) = a.frac()
(n2, d2) = b.frac()
return Progress(n + n2, d + d2)
@staticmethod
def to_js_status_str(progress):
'''
Return the "status string" version of the passed Progress
object that should be passed to js. Use this function when
sending Progress objects to js to limit dependencies.
'''
if progress is None:
return "0"
return progress.ternary_str()
@staticmethod
def to_js_detail_str(progress):
'''
Return the "detail string" version of the passed Progress
object that should be passed to js. Use this function when
passing Progress objects to js to limit dependencies.
'''
if progress is None:
return "0"
return str(progress)
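# A small usage sketch of the API above (the numbers are arbitrary):
#
#     p = Progress(3, 8)                      # 3 of 8 items done
#     p.percent()                             # 37.5
#     p.ternary_str()                         # "in_progress"
#     combined = Progress.add_counts(p, Progress(2, 2))
#     str(combined)                           # "5/10"
#     Progress.to_js_status_str(None)         # "0"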
|
agpl-3.0
|
azatoth/scons
|
src/engine/SCons/Tool/intelc.py
|
2
|
21046
|
"""SCons.Tool.icl
Tool-specific initialization for the Intel C/C++ compiler.
Supports Linux and Windows compilers, v7 and up.
There normally shouldn't be any need to import this module directly.
It will usually be imported through the generic SCons.Tool.Tool()
selection method.
"""
#
# __COPYRIGHT__
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
from __future__ import division
__revision__ = "__FILE__ __REVISION__ __DATE__ __DEVELOPER__"
import math, sys, os.path, glob, string, re
is_windows = sys.platform == 'win32'
is_win64 = is_windows and (os.environ['PROCESSOR_ARCHITECTURE'] == 'AMD64' or
('PROCESSOR_ARCHITEW6432' in os.environ and
os.environ['PROCESSOR_ARCHITEW6432'] == 'AMD64'))
is_linux = sys.platform == 'linux2'
is_mac = sys.platform == 'darwin'
if is_windows:
import SCons.Tool.msvc
elif is_linux:
import SCons.Tool.gcc
elif is_mac:
import SCons.Tool.gcc
import SCons.Util
import SCons.Warnings
# Exceptions for this tool
class IntelCError(SCons.Errors.InternalError):
pass
class MissingRegistryError(IntelCError): # missing registry entry
pass
class MissingDirError(IntelCError): # dir not found
pass
class NoRegistryModuleError(IntelCError): # can't read registry at all
pass
def uniquify(s):
"""Return a sequence containing only one copy of each unique element from input sequence s.
Does not preserve order.
Input sequence must be hashable (i.e. must be usable as a dictionary key)."""
u = {}
for x in s:
u[x] = 1
return list(u.keys())
def linux_ver_normalize(vstr):
"""Normalize a Linux compiler version number.
Intel changed from "80" to "9.0" in 2005, so we assume if the number
is greater than 60 it's an old-style number and otherwise new-style.
Always returns an old-style float like 80 or 90 for compatibility with Windows.
Shades of Y2K!"""
# Check for version number like 9.1.026: return 91.026
m = re.match(r'([0-9]+)\.([0-9]+)\.([0-9]+)', vstr)
if m:
vmaj,vmin,build = m.groups()
        return float(vmaj) * 10. + float(vmin) + float(build) / 1000.
else:
f = float(vstr)
if is_windows:
return f
else:
if f < 60: return f * 10.0
else: return f
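# Worked examples: on Linux "9.1.026" -> 91.026, "9.0" -> 90.0 and "80" -> 80.0;
# on Windows a two-part or plain version is returned as-is, so "9.0" -> 9.0.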
def check_abi(abi):
"""Check for valid ABI (application binary interface) name,
and map into canonical one"""
if not abi:
return None
abi = abi.lower()
# valid_abis maps input name to canonical name
if is_windows:
valid_abis = {'ia32' : 'ia32',
'x86' : 'ia32',
'ia64' : 'ia64',
'em64t' : 'em64t',
'amd64' : 'em64t'}
if is_linux:
valid_abis = {'ia32' : 'ia32',
'x86' : 'ia32',
'x86_64' : 'x86_64',
'em64t' : 'x86_64',
'amd64' : 'x86_64'}
if is_mac:
valid_abis = {'ia32' : 'ia32',
'x86' : 'ia32',
'x86_64' : 'x86_64',
'em64t' : 'x86_64'}
try:
abi = valid_abis[abi]
except KeyError:
raise SCons.Errors.UserError("Intel compiler: Invalid ABI %s, valid values are %s"% \
(abi, list(valid_abis.keys())))
return abi
def vercmp(a, b):
"""Compare strings as floats,
but Intel changed Linux naming convention at 9.0"""
return cmp(linux_ver_normalize(b), linux_ver_normalize(a))
def get_version_from_list(v, vlist):
"""See if we can match v (string) in vlist (list of strings)
Linux has to match in a fuzzy way."""
if is_windows:
# Simple case, just find it in the list
if v in vlist: return v
else: return None
else:
# Fuzzy match: normalize version number first, but still return
# original non-normalized form.
fuzz = 0.001
for vi in vlist:
if math.fabs(linux_ver_normalize(vi) - linux_ver_normalize(v)) < fuzz:
return vi
# Not found
return None
def get_intel_registry_value(valuename, version=None, abi=None):
"""
Return a value from the Intel compiler registry tree. (Windows only)
"""
# Open the key:
if is_win64:
K = 'Software\\Wow6432Node\\Intel\\Compilers\\C++\\' + version + '\\'+abi.upper()
else:
K = 'Software\\Intel\\Compilers\\C++\\' + version + '\\'+abi.upper()
try:
k = SCons.Util.RegOpenKeyEx(SCons.Util.HKEY_LOCAL_MACHINE, K)
except SCons.Util.RegError:
raise MissingRegistryError("%s was not found in the registry, for Intel compiler version %s, abi='%s'"%(K, version,abi))
# Get the value:
try:
v = SCons.Util.RegQueryValueEx(k, valuename)[0]
return v # or v.encode('iso-8859-1', 'replace') to remove unicode?
except SCons.Util.RegError:
raise MissingRegistryError("%s\\%s was not found in the registry."%(K, valuename))
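# For example, on 64-bit Windows get_intel_registry_value('ProductDir',
# '11.1', 'em64t') reads the 'ProductDir' value from
# HKEY_LOCAL_MACHINE\Software\Wow6432Node\Intel\Compilers\C++\11.1\EM64T
# (version and ABI here are illustrative).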
def get_all_compiler_versions():
"""Returns a sorted list of strings, like "70" or "80" or "9.0"
with most recent compiler version first.
"""
versions=[]
if is_windows:
if is_win64:
keyname = 'Software\\WoW6432Node\\Intel\\Compilers\\C++'
else:
keyname = 'Software\\Intel\\Compilers\\C++'
try:
k = SCons.Util.RegOpenKeyEx(SCons.Util.HKEY_LOCAL_MACHINE,
keyname)
except WindowsError:
return []
i = 0
versions = []
try:
while i < 100:
subkey = SCons.Util.RegEnumKey(k, i) # raises EnvironmentError
# Check that this refers to an existing dir.
# This is not 100% perfect but should catch common
# installation issues like when the compiler was installed
# and then the install directory deleted or moved (rather
# than uninstalling properly), so the registry values
# are still there.
ok = False
for try_abi in ('IA32', 'IA32e', 'IA64', 'EM64T'):
try:
d = get_intel_registry_value('ProductDir', subkey, try_abi)
except MissingRegistryError:
continue # not found in reg, keep going
if os.path.exists(d): ok = True
if ok:
versions.append(subkey)
else:
try:
# Registry points to nonexistent dir. Ignore this
# version.
value = get_intel_registry_value('ProductDir', subkey, 'IA32')
except MissingRegistryError, e:
# Registry key is left dangling (potentially
# after uninstalling).
print \
"scons: *** Ignoring the registry key for the Intel compiler version %s.\n" \
"scons: *** It seems that the compiler was uninstalled and that the registry\n" \
"scons: *** was not cleaned up properly.\n" % subkey
else:
print "scons: *** Ignoring "+str(value)
i = i + 1
except EnvironmentError:
# no more subkeys
pass
elif is_linux:
for d in glob.glob('/opt/intel_cc_*'):
# Typical dir here is /opt/intel_cc_80.
m = re.search(r'cc_(.*)$', d)
if m:
versions.append(m.group(1))
for d in glob.glob('/opt/intel/cc*/*'):
# Typical dir here is /opt/intel/cc/9.0 for IA32,
# /opt/intel/cce/9.0 for EMT64 (AMD64)
m = re.search(r'([0-9][0-9.]*)$', d)
if m:
versions.append(m.group(1))
for d in glob.glob('/opt/intel/Compiler/*'):
# Typical dir here is /opt/intel/Compiler/11.1
m = re.search(r'([0-9][0-9.]*)$', d)
if m:
versions.append(m.group(1))
elif is_mac:
for d in glob.glob('/opt/intel/cc*/*'):
# Typical dir here is /opt/intel/cc/9.0 for IA32,
# /opt/intel/cce/9.0 for EMT64 (AMD64)
m = re.search(r'([0-9][0-9.]*)$', d)
if m:
versions.append(m.group(1))
def keyfunc(str):
"""Given a dot-separated version string, return a tuple of ints representing it."""
return [int(x) for x in str.split('.')]
# split into ints, sort, then remove dups
return sorted(uniquify(versions), key=keyfunc, reverse=True)
def get_intel_compiler_top(version, abi):
"""
Return the main path to the top-level dir of the Intel compiler,
using the given version.
The compiler will be in <top>/bin/icl.exe (icc on linux),
the include dir is <top>/include, etc.
"""
if is_windows:
if not SCons.Util.can_read_reg:
raise NoRegistryModuleError("No Windows registry module was found")
top = get_intel_registry_value('ProductDir', version, abi)
# pre-11, icl was in Bin. 11 and later, it's in Bin/<abi> apparently.
if not os.path.exists(os.path.join(top, "Bin", "icl.exe")) \
and not os.path.exists(os.path.join(top, "Bin", abi, "icl.exe")):
raise MissingDirError("Can't find Intel compiler in %s"%(top))
elif is_mac or is_linux:
def find_in_2008style_dir(version):
# first dir is new (>=9.0) style, second is old (8.0) style.
dirs=('/opt/intel/cc/%s', '/opt/intel_cc_%s')
if abi == 'x86_64':
dirs=('/opt/intel/cce/%s',) # 'e' stands for 'em64t', aka x86_64 aka amd64
top=None
for d in dirs:
if os.path.exists(os.path.join(d%version, "bin", "icc")):
top = d%version
break
return top
def find_in_2010style_dir(version):
dirs=('/opt/intel/Compiler/%s/*'%version)
# typically /opt/intel/Compiler/11.1/064 (then bin/intel64/icc)
dirs=glob.glob(dirs)
# find highest sub-version number by reverse sorting and picking first existing one.
dirs.sort()
dirs.reverse()
top=None
for d in dirs:
if (os.path.exists(os.path.join(d, "bin", "ia32", "icc")) or
os.path.exists(os.path.join(d, "bin", "intel64", "icc"))):
top = d
break
return top
top = find_in_2010style_dir(version) or find_in_2008style_dir(version)
print "INTELC: top=",top
if not top:
raise MissingDirError("Can't find version %s Intel compiler in %s (abi='%s')"%(version,top, abi))
return top
def generate(env, version=None, abi=None, topdir=None, verbose=0):
"""Add Builders and construction variables for Intel C/C++ compiler
to an Environment.
args:
version: (string) compiler version to use, like "80"
      abi:     (string) application binary interface, e.g. 'ia32', 'em64t', 'x86_64' or 'ia64'
topdir: (string) compiler top dir, like
"c:\Program Files\Intel\Compiler70"
If topdir is used, version and abi are ignored.
verbose: (int) if >0, prints compiler version used.
"""
if not (is_mac or is_linux or is_windows):
# can't handle this platform
return
if is_windows:
SCons.Tool.msvc.generate(env)
elif is_linux:
SCons.Tool.gcc.generate(env)
elif is_mac:
SCons.Tool.gcc.generate(env)
# if version is unspecified, use latest
vlist = get_all_compiler_versions()
if not version:
if vlist:
version = vlist[0]
else:
# User may have specified '90' but we need to get actual dirname '9.0'.
# get_version_from_list does that mapping.
v = get_version_from_list(version, vlist)
if not v:
raise SCons.Errors.UserError("Invalid Intel compiler version %s: "%version + \
"installed versions are %s"%(', '.join(vlist)))
version = v
# if abi is unspecified, use ia32
# alternatives are ia64 for Itanium, or amd64 or em64t or x86_64 (all synonyms here)
abi = check_abi(abi)
if abi is None:
if is_mac or is_linux:
# Check if we are on 64-bit linux, default to 64 then.
uname_m = os.uname()[4]
if uname_m == 'x86_64':
abi = 'x86_64'
else:
abi = 'ia32'
else:
if is_win64:
abi = 'em64t'
else:
abi = 'ia32'
if version and not topdir:
try:
topdir = get_intel_compiler_top(version, abi)
except (SCons.Util.RegError, IntelCError):
topdir = None
if not topdir:
# Normally this is an error, but it might not be if the compiler is
# on $PATH and the user is importing their env.
class ICLTopDirWarning(SCons.Warnings.Warning):
pass
if (is_mac or is_linux) and not env.Detect('icc') or \
is_windows and not env.Detect('icl'):
SCons.Warnings.enableWarningClass(ICLTopDirWarning)
SCons.Warnings.warn(ICLTopDirWarning,
"Failed to find Intel compiler for version='%s', abi='%s'"%
(str(version), str(abi)))
else:
# should be cleaned up to say what this other version is
# since in this case we have some other Intel compiler installed
SCons.Warnings.enableWarningClass(ICLTopDirWarning)
SCons.Warnings.warn(ICLTopDirWarning,
"Can't find Intel compiler top dir for version='%s', abi='%s'"%
(str(version), str(abi)))
if topdir:
archdir={'x86_64': 'intel64',
'amd64' : 'intel64',
'em64t' : 'intel64',
'x86' : 'ia32',
'i386' : 'ia32',
'ia32' : 'ia32'
}[abi] # for v11 and greater
if os.path.exists(os.path.join(topdir, 'bin', archdir)):
bindir="bin/%s"%archdir
libdir="lib/%s"%archdir
else:
bindir="bin"
libdir="lib"
if verbose:
print "Intel C compiler: using version %s (%g), abi %s, in '%s/%s'"%\
(repr(version), linux_ver_normalize(version),abi,topdir,bindir)
if is_linux:
# Show the actual compiler version by running the compiler.
os.system('%s/%s/icc --version'%(topdir,bindir))
if is_mac:
# Show the actual compiler version by running the compiler.
os.system('%s/%s/icc --version'%(topdir,bindir))
env['INTEL_C_COMPILER_TOP'] = topdir
if is_linux:
paths={'INCLUDE' : 'include',
'LIB' : libdir,
'PATH' : bindir,
'LD_LIBRARY_PATH' : libdir}
for p in paths.keys():
env.PrependENVPath(p, os.path.join(topdir, paths[p]))
if is_mac:
paths={'INCLUDE' : 'include',
'LIB' : libdir,
'PATH' : bindir,
'LD_LIBRARY_PATH' : libdir}
for p in paths.keys():
env.PrependENVPath(p, os.path.join(topdir, paths[p]))
if is_windows:
# env key reg valname default subdir of top
paths=(('INCLUDE', 'IncludeDir', 'Include'),
('LIB' , 'LibDir', 'Lib'),
('PATH' , 'BinDir', 'Bin'))
# We are supposed to ignore version if topdir is set, so set
            # it to the empty string if it's not already set.
if version is None:
version = ''
# Each path has a registry entry, use that or default to subdir
for p in paths:
try:
path=get_intel_registry_value(p[1], version, abi)
# These paths may have $(ICInstallDir)
# which needs to be substituted with the topdir.
path=path.replace('$(ICInstallDir)', topdir + os.sep)
except IntelCError:
# Couldn't get it from registry: use default subdir of topdir
env.PrependENVPath(p[0], os.path.join(topdir, p[2]))
else:
env.PrependENVPath(p[0], path.split(os.pathsep))
# print "ICL %s: %s, final=%s"%(p[0], path, str(env['ENV'][p[0]]))
if is_windows:
env['CC'] = 'icl'
env['CXX'] = 'icl'
env['LINK'] = 'xilink'
else:
env['CC'] = 'icc'
env['CXX'] = 'icpc'
# Don't reset LINK here;
# use smart_link which should already be here from link.py.
#env['LINK'] = '$CC'
env['AR'] = 'xiar'
env['LD'] = 'xild' # not used by default
# This is not the exact (detailed) compiler version,
# just the major version as determined above or specified
# by the user. It is a float like 80 or 90, in normalized form for Linux
# (i.e. even for Linux 9.0 compiler, still returns 90 rather than 9.0)
if version:
env['INTEL_C_COMPILER_VERSION']=linux_ver_normalize(version)
if is_windows:
# Look for license file dir
# in system environment, registry, and default location.
envlicdir = os.environ.get("INTEL_LICENSE_FILE", '')
K = ('SOFTWARE\Intel\Licenses')
try:
k = SCons.Util.RegOpenKeyEx(SCons.Util.HKEY_LOCAL_MACHINE, K)
reglicdir = SCons.Util.RegQueryValueEx(k, "w_cpp")[0]
except (AttributeError, SCons.Util.RegError):
reglicdir = ""
defaultlicdir = r'C:\Program Files\Common Files\Intel\Licenses'
licdir = None
for ld in [envlicdir, reglicdir]:
# If the string contains an '@', then assume it's a network
# license (port@system) and good by definition.
if ld and (ld.find('@') != -1 or os.path.exists(ld)):
licdir = ld
break
if not licdir:
licdir = defaultlicdir
if not os.path.exists(licdir):
class ICLLicenseDirWarning(SCons.Warnings.Warning):
pass
SCons.Warnings.enableWarningClass(ICLLicenseDirWarning)
SCons.Warnings.warn(ICLLicenseDirWarning,
"Intel license dir was not found."
" Tried using the INTEL_LICENSE_FILE environment variable (%s), the registry (%s) and the default path (%s)."
" Using the default path as a last resort."
% (envlicdir, reglicdir, defaultlicdir))
env['ENV']['INTEL_LICENSE_FILE'] = licdir
def exists(env):
if not (is_mac or is_linux or is_windows):
# can't handle this platform
return 0
try:
versions = get_all_compiler_versions()
except (SCons.Util.RegError, IntelCError):
versions = None
detected = versions is not None and len(versions) > 0
if not detected:
# try env.Detect, maybe that will work
if is_windows:
return env.Detect('icl')
elif is_linux:
return env.Detect('icc')
elif is_mac:
return env.Detect('icc')
return detected
# end of file
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
|
mit
|
parksandwildlife/wastd
|
occurrence/migrations/0006_auto_20181129_1812.py
|
1
|
1084
|
# Generated by Django 2.0.8 on 2018-11-29 10:12
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('occurrence', '0005_auto_20181025_1720'),
]
operations = [
migrations.AlterField(
model_name='areaencounter',
name='source',
field=models.PositiveIntegerField(choices=[(0, 'Direct entry'), (1, 'Manual entry from paper datasheet'), (2, 'Digital data capture (ODK)'), (10, 'Threatened Fauna'), (11, 'Threatened Flora'), (12, 'Threatened Communities'), (13, 'Threatened Communities Boundaries'), (14, 'Threatened Communities Buffers'), (15, 'Threatened Communities Sites'), (20, 'Turtle Tagging Database WAMTRAM2'), (21, 'Ningaloo Turtle Program'), (22, 'Broome Turtle Program'), (23, 'Pt Hedland Turtle Program'), (24, 'Gnaraloo Turtle Program'), (25, 'Eco Beach Turtle Program'), (30, 'Cetacean Strandings Database'), (31, 'Pinniped Strandings Database')], default=0, help_text='Where was this record captured initially?', verbose_name='Data Source'),
),
]
|
mit
|
ammarkhann/FinalSeniorCode
|
lib/python2.7/site-packages/sympy/plotting/pygletplot/plot_window.py
|
96
|
4467
|
from __future__ import print_function, division
from pyglet.gl import *
from managed_window import ManagedWindow
from plot_camera import PlotCamera
from plot_controller import PlotController
from time import clock
class PlotWindow(ManagedWindow):
def __init__(self, plot, **kwargs):
"""
Named Arguments
===============
antialiasing = True
True OR False
ortho = False
True OR False
invert_mouse_zoom = False
True OR False
"""
self.plot = plot
self.camera = None
self._calculating = False
self.antialiasing = kwargs.pop('antialiasing', True)
self.ortho = kwargs.pop('ortho', False)
self.invert_mouse_zoom = kwargs.pop('invert_mouse_zoom', False)
self.linewidth = kwargs.pop('linewidth', 1.5)
self.title = kwargs.setdefault('caption', "SymPy Plot")
self.last_caption_update = 0
self.caption_update_interval = 0.2
self.drawing_first_object = True
super(PlotWindow, self).__init__(**kwargs)
def setup(self):
self.camera = PlotCamera(self, ortho=self.ortho)
self.controller = PlotController(self,
invert_mouse_zoom=self.invert_mouse_zoom)
self.push_handlers(self.controller)
glClearColor(1.0, 1.0, 1.0, 0.0)
glClearDepth(1.0)
glDepthFunc(GL_LESS)
glEnable(GL_DEPTH_TEST)
glEnable(GL_LINE_SMOOTH)
glShadeModel(GL_SMOOTH)
glLineWidth(self.linewidth)
glEnable(GL_BLEND)
glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA)
if self.antialiasing:
glHint(GL_LINE_SMOOTH_HINT, GL_NICEST)
glHint(GL_POLYGON_SMOOTH_HINT, GL_NICEST)
self.camera.setup_projection()
def on_resize(self, w, h):
super(PlotWindow, self).on_resize(w, h)
if self.camera is not None:
self.camera.setup_projection()
def update(self, dt):
self.controller.update(dt)
def draw(self):
self.plot._render_lock.acquire()
self.camera.apply_transformation()
calc_verts_pos, calc_verts_len = 0, 0
calc_cverts_pos, calc_cverts_len = 0, 0
should_update_caption = (clock() - self.last_caption_update >
self.caption_update_interval)
if len(self.plot._functions.values()) == 0:
self.drawing_first_object = True
for r in self.plot._functions.itervalues():
if self.drawing_first_object:
self.camera.set_rot_preset(r.default_rot_preset)
self.drawing_first_object = False
glPushMatrix()
r._draw()
glPopMatrix()
# might as well do this while we are
# iterating and have the lock rather
# than locking and iterating twice
# per frame:
if should_update_caption:
try:
if r.calculating_verts:
calc_verts_pos += r.calculating_verts_pos
calc_verts_len += r.calculating_verts_len
if r.calculating_cverts:
calc_cverts_pos += r.calculating_cverts_pos
calc_cverts_len += r.calculating_cverts_len
except ValueError:
pass
for r in self.plot._pobjects:
glPushMatrix()
r._draw()
glPopMatrix()
if should_update_caption:
self.update_caption(calc_verts_pos, calc_verts_len,
calc_cverts_pos, calc_cverts_len)
self.last_caption_update = clock()
if self.plot._screenshot:
self.plot._screenshot._execute_saving()
self.plot._render_lock.release()
def update_caption(self, calc_verts_pos, calc_verts_len,
calc_cverts_pos, calc_cverts_len):
caption = self.title
if calc_verts_len or calc_cverts_len:
caption += " (calculating"
if calc_verts_len > 0:
p = (calc_verts_pos / calc_verts_len) * 100
caption += " vertices %i%%" % (p)
if calc_cverts_len > 0:
p = (calc_cverts_pos / calc_cverts_len) * 100
caption += " colors %i%%" % (p)
caption += ")"
if self.caption != caption:
self.set_caption(caption)
|
mit
|
jjmleiro/hue
|
desktop/core/ext-py/django-extensions-1.5.0/django_extensions/management/utils.py
|
35
|
2694
|
from django.conf import settings
import os
import sys
import logging
from django_extensions.management.signals import pre_command, post_command
try:
from importlib import import_module
except ImportError:
try:
from django.utils.importlib import import_module
except ImportError:
def import_module(module):
return __import__(module, {}, {}, [''])
def get_project_root():
""" get the project root directory """
django_settings_module = os.environ.get('DJANGO_SETTINGS_MODULE')
if not django_settings_module:
module_str = settings.SETTINGS_MODULE
else:
module_str = django_settings_module.split(".")[0]
mod = import_module(module_str)
return os.path.dirname(os.path.abspath(mod.__file__))
def _make_writeable(filename):
"""
Make sure that the file is writeable. Useful if our source is
read-only.
"""
import stat
if sys.platform.startswith('java'):
# On Jython there is no os.access()
return
if not os.access(filename, os.W_OK):
st = os.stat(filename)
new_permissions = stat.S_IMODE(st.st_mode) | stat.S_IWUSR
os.chmod(filename, new_permissions)
def setup_logger(logger, stream, filename=None, fmt=None):
"""Sets up a logger (if no handlers exist) for console output,
and file 'tee' output if desired."""
if len(logger.handlers) < 1:
console = logging.StreamHandler(stream)
console.setLevel(logging.DEBUG)
console.setFormatter(logging.Formatter(fmt))
logger.addHandler(console)
logger.setLevel(logging.DEBUG)
if filename:
outfile = logging.FileHandler(filename)
outfile.setLevel(logging.INFO)
outfile.setFormatter(logging.Formatter("%(asctime)s " + (fmt if fmt else '%(message)s')))
logger.addHandler(outfile)
class RedirectHandler(logging.Handler):
"""Redirect logging sent to one logger (name) to another."""
def __init__(self, name, level=logging.DEBUG):
# Contemplate feasibility of copying a destination (allow original handler) and redirecting.
logging.Handler.__init__(self, level)
self.name = name
self.logger = logging.getLogger(name)
def emit(self, record):
self.logger.handle(record)
def signalcommand(func):
"""A decorator for management command handle defs that sends out a pre/post signal."""
def inner(self, *args, **kwargs):
pre_command.send(self.__class__, args=args, kwargs=kwargs)
ret = func(self, *args, **kwargs)
post_command.send(self.__class__, args=args, kwargs=kwargs, outcome=ret)
return ret
return inner
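# A minimal usage sketch for the signalcommand decorator above; "Command" is a
# hypothetical management command, not something defined in this module:
#
#     from django.core.management.base import BaseCommand
#     from django_extensions.management.utils import signalcommand
#
#     class Command(BaseCommand):
#         @signalcommand
#         def handle(self, *args, **options):
#             self.stdout.write("doing work")
#
# pre_command is sent just before handle() runs and post_command just after,
# carrying the command class, the call arguments and the return value.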
|
apache-2.0
|
obulpathi/vaultcoincpp
|
share/qt/make_spinner.py
|
4415
|
1035
|
#!/usr/bin/env python
# W.J. van der Laan, 2011
# Make spinning .mng animation from a .png
# Requires imagemagick 6.7+
from __future__ import division
from os import path
from PIL import Image
from subprocess import Popen
SRC='img/reload_scaled.png'
DST='../../src/qt/res/movies/update_spinner.mng'
TMPDIR='/tmp'
TMPNAME='tmp-%03i.png'
NUMFRAMES=35
FRAMERATE=10.0
CONVERT='convert'
CLOCKWISE=True
DSIZE=(16,16)
im_src = Image.open(SRC)
if CLOCKWISE:
im_src = im_src.transpose(Image.FLIP_LEFT_RIGHT)
def frame_to_filename(frame):
return path.join(TMPDIR, TMPNAME % frame)
frame_files = []
for frame in xrange(NUMFRAMES):
rotation = (frame + 0.5) / NUMFRAMES * 360.0
if CLOCKWISE:
rotation = -rotation
im_new = im_src.rotate(rotation, Image.BICUBIC)
im_new.thumbnail(DSIZE, Image.ANTIALIAS)
outfile = frame_to_filename(frame)
im_new.save(outfile, 'png')
frame_files.append(outfile)
p = Popen([CONVERT, "-delay", str(FRAMERATE), "-dispose", "2"] + frame_files + [DST])
p.communicate()
|
mit
|
chhao91/pysal
|
pysal/spreg/tests/test_error_sp.py
|
3
|
14241
|
import unittest
import scipy
import pysal
import numpy as np
from pysal.spreg import error_sp as SP
class TestBaseGMError(unittest.TestCase):
def setUp(self):
db=pysal.open(pysal.examples.get_path("columbus.dbf"),"r")
y = np.array(db.by_col("HOVAL"))
self.y = np.reshape(y, (49,1))
X = []
X.append(db.by_col("INC"))
X.append(db.by_col("CRIME"))
self.X = np.array(X).T
self.X = np.hstack((np.ones(self.y.shape),self.X))
self.w = pysal.rook_from_shapefile(pysal.examples.get_path("columbus.shp"))
self.w.transform = 'r'
def test_model(self):
reg = SP.BaseGM_Error(self.y, self.X, self.w.sparse)
betas = np.array([[ 47.94371455], [ 0.70598088], [ -0.55571746], [ 0.37230161]])
np.testing.assert_allclose(reg.betas,betas,4)
u = np.array([ 27.4739775])
np.testing.assert_allclose(reg.u[0],u,4)
predy = np.array([ 52.9930255])
np.testing.assert_allclose(reg.predy[0],predy,4)
n = 49
np.testing.assert_allclose(reg.n,n,4)
k = 3
np.testing.assert_allclose(reg.k,k,4)
y = np.array([ 80.467003])
np.testing.assert_allclose(reg.y[0],y,4)
x = np.array([ 1. , 19.531 , 15.72598])
np.testing.assert_allclose(reg.x[0],x,4)
e = np.array([ 31.89620319])
np.testing.assert_allclose(reg.e_filtered[0],e,4)
predy = np.array([ 52.9930255])
np.testing.assert_allclose(reg.predy[0],predy,4)
my = 38.43622446938776
np.testing.assert_allclose(reg.mean_y,my)
sy = 18.466069465206047
np.testing.assert_allclose(reg.std_y,sy)
vm = np.array([[ 1.51884943e+02, -5.37622793e+00, -1.86970286e+00], [ -5.37622793e+00, 2.48972661e-01, 5.26564244e-02], [ -1.86970286e+00, 5.26564244e-02, 3.18930650e-02]])
np.testing.assert_allclose(reg.vm,vm,4)
sig2 = 191.73716465732355
np.testing.assert_allclose(reg.sig2,sig2,4)
class TestGMError(unittest.TestCase):
def setUp(self):
db=pysal.open(pysal.examples.get_path("columbus.dbf"),"r")
y = np.array(db.by_col("HOVAL"))
self.y = np.reshape(y, (49,1))
X = []
X.append(db.by_col("INC"))
X.append(db.by_col("CRIME"))
self.X = np.array(X).T
self.w = pysal.rook_from_shapefile(pysal.examples.get_path("columbus.shp"))
self.w.transform = 'r'
def test_model(self):
reg = SP.GM_Error(self.y, self.X, self.w)
betas = np.array([[ 47.94371455], [ 0.70598088], [ -0.55571746], [ 0.37230161]])
np.testing.assert_allclose(reg.betas,betas,4)
u = np.array([ 27.4739775])
np.testing.assert_allclose(reg.u[0],u,4)
predy = np.array([ 52.9930255])
np.testing.assert_allclose(reg.predy[0],predy,4)
n = 49
np.testing.assert_allclose(reg.n,n,4)
k = 3
np.testing.assert_allclose(reg.k,k,4)
y = np.array([ 80.467003])
np.testing.assert_allclose(reg.y[0],y,4)
x = np.array([ 1. , 19.531 , 15.72598])
np.testing.assert_allclose(reg.x[0],x,4)
e = np.array([ 31.89620319])
np.testing.assert_allclose(reg.e_filtered[0],e,4)
predy = np.array([ 52.9930255])
np.testing.assert_allclose(reg.predy[0],predy,4)
my = 38.43622446938776
np.testing.assert_allclose(reg.mean_y,my)
sy = 18.466069465206047
np.testing.assert_allclose(reg.std_y,sy)
vm = np.array([[ 1.51884943e+02, -5.37622793e+00, -1.86970286e+00], [ -5.37622793e+00, 2.48972661e-01, 5.26564244e-02], [ -1.86970286e+00, 5.26564244e-02, 3.18930650e-02]])
np.testing.assert_allclose(reg.vm,vm,4)
sig2 = 191.73716465732355
np.testing.assert_allclose(reg.sig2,sig2,4)
pr2 = 0.3495097406012179
np.testing.assert_allclose(reg.pr2,pr2)
std_err = np.array([ 12.32416094, 0.4989716 , 0.1785863 ])
np.testing.assert_allclose(reg.std_err,std_err,4)
z_stat = np.array([[ 3.89022140e+00, 1.00152805e-04], [ 1.41487186e+00, 1.57106070e-01], [ -3.11175868e+00, 1.85976455e-03]])
np.testing.assert_allclose(reg.z_stat,z_stat,4)
@unittest.skipIf(int(scipy.__version__.split(".")[1]) < 11,
"Maximum Likelihood requires SciPy version 11 or newer.")
class TestBaseGMEndogError(unittest.TestCase):
def setUp(self):
db=pysal.open(pysal.examples.get_path("columbus.dbf"),"r")
y = np.array(db.by_col("HOVAL"))
self.y = np.reshape(y, (49,1))
X = []
X.append(db.by_col("INC"))
self.X = np.array(X).T
self.X = np.hstack((np.ones(self.y.shape),self.X))
yd = []
yd.append(db.by_col("CRIME"))
self.yd = np.array(yd).T
q = []
q.append(db.by_col("DISCBD"))
self.q = np.array(q).T
self.w = pysal.rook_from_shapefile(pysal.examples.get_path("columbus.shp"))
self.w.transform = 'r'
def test_model(self):
reg = SP.BaseGM_Endog_Error(self.y, self.X, self.yd, self.q, self.w.sparse)
betas = np.array([[ 55.36095292], [ 0.46411479], [ -0.66883535], [ 0.38989939]])
np.testing.assert_allclose(reg.betas,betas,4)
u = np.array([ 26.55951566])
np.testing.assert_allclose(reg.u[0],u,4)
e = np.array([ 31.23925425])
np.testing.assert_allclose(reg.e_filtered[0],e,4)
predy = np.array([ 53.9074875])
np.testing.assert_allclose(reg.predy[0],predy,4)
n = 49
np.testing.assert_allclose(reg.n,n)
k = 3
np.testing.assert_allclose(reg.k,k)
y = np.array([ 80.467003])
np.testing.assert_allclose(reg.y[0],y,4)
x = np.array([ 1. , 19.531])
np.testing.assert_allclose(reg.x[0],x,4)
yend = np.array([ 15.72598])
np.testing.assert_allclose(reg.yend[0],yend,4)
z = np.array([ 1. , 19.531 , 15.72598])
np.testing.assert_allclose(reg.z[0],z,4)
my = 38.43622446938776
np.testing.assert_allclose(reg.mean_y,my)
#std_y
sy = 18.466069465206047
np.testing.assert_allclose(reg.std_y,sy)
#vm
vm = np.array([[ 5.29158422e+02, -1.57833675e+01, -8.38021080e+00],
[ -1.57833675e+01, 5.40235041e-01, 2.31120327e-01],
[ -8.38021080e+00, 2.31120327e-01, 1.44977385e-01]])
np.testing.assert_allclose(reg.vm,vm,4)
sig2 = 192.50022721929574
np.testing.assert_allclose(reg.sig2,sig2,4)
@unittest.skipIf(int(scipy.__version__.split(".")[1]) < 11,
"Maximum Likelihood requires SciPy version 11 or newer.")
class TestGMEndogError(unittest.TestCase):
def setUp(self):
db=pysal.open(pysal.examples.get_path("columbus.dbf"),"r")
y = np.array(db.by_col("HOVAL"))
self.y = np.reshape(y, (49,1))
X = []
X.append(db.by_col("INC"))
self.X = np.array(X).T
yd = []
yd.append(db.by_col("CRIME"))
self.yd = np.array(yd).T
q = []
q.append(db.by_col("DISCBD"))
self.q = np.array(q).T
self.w = pysal.rook_from_shapefile(pysal.examples.get_path("columbus.shp"))
self.w.transform = 'r'
def test_model(self):
reg = SP.GM_Endog_Error(self.y, self.X, self.yd, self.q, self.w)
betas = np.array([[ 55.36095292], [ 0.46411479], [ -0.66883535], [ 0.38989939]])
np.testing.assert_allclose(reg.betas,betas,4)
u = np.array([ 26.55951566])
np.testing.assert_allclose(reg.u[0],u,4)
e = np.array([ 31.23925425])
np.testing.assert_allclose(reg.e_filtered[0],e,4)
predy = np.array([ 53.9074875])
np.testing.assert_allclose(reg.predy[0],predy,4)
n = 49
np.testing.assert_allclose(reg.n,n)
k = 3
np.testing.assert_allclose(reg.k,k)
y = np.array([ 80.467003])
np.testing.assert_allclose(reg.y[0],y,4)
x = np.array([ 1. , 19.531])
np.testing.assert_allclose(reg.x[0],x,4)
yend = np.array([ 15.72598])
np.testing.assert_allclose(reg.yend[0],yend,4)
z = np.array([ 1. , 19.531 , 15.72598])
np.testing.assert_allclose(reg.z[0],z,4)
my = 38.43622446938776
np.testing.assert_allclose(reg.mean_y,my)
sy = 18.466069465206047
np.testing.assert_allclose(reg.std_y,sy)
vm = np.array([[ 5.29158422e+02, -1.57833675e+01, -8.38021080e+00],
[ -1.57833675e+01, 5.40235041e-01, 2.31120327e-01],
[ -8.38021080e+00, 2.31120327e-01, 1.44977385e-01]])
np.testing.assert_allclose(reg.vm,vm,4)
pr2 = 0.346472557570858
np.testing.assert_allclose(reg.pr2,pr2)
sig2 = 192.50022721929574
np.testing.assert_allclose(reg.sig2,sig2,4)
std_err = np.array([ 23.003401 , 0.73500657, 0.38075777])
np.testing.assert_allclose(reg.std_err,std_err,4)
z_stat = np.array([[ 2.40664208, 0.01609994], [ 0.63144305, 0.52775088], [-1.75659016, 0.07898769]])
np.testing.assert_allclose(reg.z_stat,z_stat,4)
@unittest.skipIf(int(scipy.__version__.split(".")[1]) < 11,
"Maximum Likelihood requires SciPy version 11 or newer.")
class TestBaseGMCombo(unittest.TestCase):
def setUp(self):
db=pysal.open(pysal.examples.get_path("columbus.dbf"),"r")
y = np.array(db.by_col("HOVAL"))
self.y = np.reshape(y, (49,1))
X = []
X.append(db.by_col("INC"))
X.append(db.by_col("CRIME"))
self.X = np.array(X).T
self.w = pysal.rook_from_shapefile(pysal.examples.get_path("columbus.shp"))
self.w.transform = 'r'
def test_model(self):
# Only spatial lag
yd2, q2 = pysal.spreg.utils.set_endog(self.y, self.X, self.w, None, None, 1, True)
self.X = np.hstack((np.ones(self.y.shape),self.X))
reg = SP.BaseGM_Combo(self.y, self.X, yend=yd2, q=q2, w=self.w.sparse)
betas = np.array([[ 57.61123461],[ 0.73441314], [ -0.59459416], [ -0.21762921], [ 0.54732051]])
np.testing.assert_allclose(reg.betas,betas,4)
u = np.array([ 25.57932637])
np.testing.assert_allclose(reg.u[0],u,4)
e_filtered = np.array([ 31.65374945])
np.testing.assert_allclose(reg.e_filtered[0],e_filtered,4)
predy = np.array([ 54.88767663])
np.testing.assert_allclose(reg.predy[0],predy,4)
n = 49
np.testing.assert_allclose(reg.n,n)
k = 4
np.testing.assert_allclose(reg.k,k)
y = np.array([ 80.467003])
np.testing.assert_allclose(reg.y[0],y,4)
x = np.array([ 1. , 19.531 , 15.72598])
np.testing.assert_allclose(reg.x[0],x,4)
yend = np.array([ 35.4585005])
np.testing.assert_allclose(reg.yend[0],yend,4)
z = np.array([ 1. , 19.531 , 15.72598 , 35.4585005])
np.testing.assert_allclose(reg.z[0],z,4)
my = 38.43622446938776
np.testing.assert_allclose(reg.mean_y,my)
sy = 18.466069465206047
np.testing.assert_allclose(reg.std_y,sy)
vm = np.array([ 5.22438365e+02, 2.38012873e-01, 3.20924172e-02,
2.15753599e-01])
np.testing.assert_allclose(np.diag(reg.vm),vm,4)
sig2 = 181.78650186468832
np.testing.assert_allclose(reg.sig2,sig2,4)
@unittest.skipIf(int(scipy.__version__.split(".")[1]) < 11,
"Maximum Likelihood requires SciPy version 11 or newer.")
class TestGMCombo(unittest.TestCase):
def setUp(self):
db=pysal.open(pysal.examples.get_path("columbus.dbf"),"r")
y = np.array(db.by_col("HOVAL"))
self.y = np.reshape(y, (49,1))
X = []
X.append(db.by_col("INC"))
X.append(db.by_col("CRIME"))
self.X = np.array(X).T
self.w = pysal.rook_from_shapefile(pysal.examples.get_path("columbus.shp"))
self.w.transform = 'r'
def test_model(self):
# Only spatial lag
reg = SP.GM_Combo(self.y, self.X, w=self.w)
e_reduced = np.array([ 28.18617481])
np.testing.assert_allclose(reg.e_pred[0],e_reduced,4)
predy_e = np.array([ 52.28082782])
np.testing.assert_allclose(reg.predy_e[0],predy_e,4)
betas = np.array([[ 57.61123515],[ 0.73441313], [ -0.59459416], [ -0.21762921], [ 0.54732051]])
np.testing.assert_allclose(reg.betas,betas,4)
u = np.array([ 25.57932637])
np.testing.assert_allclose(reg.u[0],u,4)
e_filtered = np.array([ 31.65374945])
np.testing.assert_allclose(reg.e_filtered[0],e_filtered,4)
predy = np.array([ 54.88767685])
np.testing.assert_allclose(reg.predy[0],predy,4)
n = 49
np.testing.assert_allclose(reg.n,n)
k = 4
np.testing.assert_allclose(reg.k,k)
y = np.array([ 80.467003])
np.testing.assert_allclose(reg.y[0],y,4)
x = np.array([ 1. , 19.531 , 15.72598])
np.testing.assert_allclose(reg.x[0],x,4)
yend = np.array([ 35.4585005])
np.testing.assert_allclose(reg.yend[0],yend,4)
z = np.array([ 1. , 19.531 , 15.72598 , 35.4585005])
np.testing.assert_allclose(reg.z[0],z,4)
my = 38.43622446938776
np.testing.assert_allclose(reg.mean_y,my)
sy = 18.466069465206047
np.testing.assert_allclose(reg.std_y,sy)
vm = np.array([ 5.22438333e+02, 2.38012875e-01, 3.20924173e-02,
2.15753579e-01])
np.testing.assert_allclose(np.diag(reg.vm),vm,4)
sig2 = 181.78650186468832
np.testing.assert_allclose(reg.sig2,sig2,4)
pr2 = 0.3018280166937799
np.testing.assert_allclose(reg.pr2,pr2,4)
pr2_e = 0.3561355586759414
np.testing.assert_allclose(reg.pr2_e,pr2_e,4)
std_err = np.array([ 22.85692222, 0.48786559, 0.17914356, 0.46449318])
np.testing.assert_allclose(reg.std_err,std_err,4)
z_stat = np.array([[ 2.52051597e+00, 1.17182922e-02], [ 1.50535954e+00, 1.32231664e-01], [ -3.31909311e+00, 9.03103123e-04], [ -4.68530506e-01, 6.39405261e-01]])
np.testing.assert_allclose(reg.z_stat,z_stat,4)
if __name__ == '__main__':
unittest.main()
|
bsd-3-clause
|
jnovinger/django
|
django/contrib/contenttypes/fields.py
|
43
|
23567
|
from __future__ import unicode_literals
from collections import defaultdict
from django.contrib.contenttypes.models import ContentType
from django.core import checks
from django.core.exceptions import FieldDoesNotExist, ObjectDoesNotExist
from django.db import DEFAULT_DB_ALIAS, connection, models, router, transaction
from django.db.models import DO_NOTHING, signals
from django.db.models.base import ModelBase
from django.db.models.fields.related import (
ForeignObject, ForeignObjectRel, ForeignRelatedObjectsDescriptor,
)
from django.db.models.query_utils import PathInfo
from django.utils.encoding import python_2_unicode_compatible, smart_text
from django.utils.functional import cached_property
@python_2_unicode_compatible
class GenericForeignKey(object):
"""
Provide a generic many-to-one relation through the ``content_type`` and
``object_id`` fields.
This class also doubles as an accessor to the related object (similar to
ReverseSingleRelatedObjectDescriptor) by adding itself as a model
attribute.
"""
# Field flags
auto_created = False
concrete = False
editable = False
hidden = False
is_relation = True
many_to_many = False
many_to_one = True
one_to_many = False
one_to_one = False
related_model = None
remote_field = None
def __init__(self, ct_field='content_type', fk_field='object_id', for_concrete_model=True):
self.ct_field = ct_field
self.fk_field = fk_field
self.for_concrete_model = for_concrete_model
self.editable = False
self.rel = None
self.column = None
def contribute_to_class(self, cls, name, **kwargs):
self.name = name
self.model = cls
self.cache_attr = "_%s_cache" % name
cls._meta.add_field(self, virtual=True)
# Only run pre-initialization field assignment on non-abstract models
if not cls._meta.abstract:
signals.pre_init.connect(self.instance_pre_init, sender=cls)
setattr(cls, name, self)
def __str__(self):
model = self.model
app = model._meta.app_label
return '%s.%s.%s' % (app, model._meta.object_name, self.name)
def check(self, **kwargs):
errors = []
errors.extend(self._check_field_name())
errors.extend(self._check_object_id_field())
errors.extend(self._check_content_type_field())
return errors
def _check_field_name(self):
if self.name.endswith("_"):
return [
checks.Error(
'Field names must not end with an underscore.',
hint=None,
obj=self,
id='fields.E001',
)
]
else:
return []
def _check_object_id_field(self):
try:
self.model._meta.get_field(self.fk_field)
except FieldDoesNotExist:
return [
checks.Error(
"The GenericForeignKey object ID references the non-existent field '%s'." % self.fk_field,
hint=None,
obj=self,
id='contenttypes.E001',
)
]
else:
return []
def _check_content_type_field(self):
"""
Check if field named `field_name` in model `model` exists and is a
valid content_type field (is a ForeignKey to ContentType).
"""
try:
field = self.model._meta.get_field(self.ct_field)
except FieldDoesNotExist:
return [
checks.Error(
"The GenericForeignKey content type references the non-existent field '%s.%s'." % (
self.model._meta.object_name, self.ct_field
),
hint=None,
obj=self,
id='contenttypes.E002',
)
]
else:
if not isinstance(field, models.ForeignKey):
return [
checks.Error(
"'%s.%s' is not a ForeignKey." % (
self.model._meta.object_name, self.ct_field
),
hint=(
"GenericForeignKeys must use a ForeignKey to "
"'contenttypes.ContentType' as the 'content_type' field."
),
obj=self,
id='contenttypes.E003',
)
]
elif field.remote_field.model != ContentType:
return [
checks.Error(
"'%s.%s' is not a ForeignKey to 'contenttypes.ContentType'." % (
self.model._meta.object_name, self.ct_field
),
hint=(
"GenericForeignKeys must use a ForeignKey to "
"'contenttypes.ContentType' as the 'content_type' field."
),
obj=self,
id='contenttypes.E004',
)
]
else:
return []
def instance_pre_init(self, signal, sender, args, kwargs, **_kwargs):
"""
Handle initializing an object with the generic FK instead of
content_type and object_id fields.
"""
if self.name in kwargs:
value = kwargs.pop(self.name)
if value is not None:
kwargs[self.ct_field] = self.get_content_type(obj=value)
kwargs[self.fk_field] = value._get_pk_val()
else:
kwargs[self.ct_field] = None
kwargs[self.fk_field] = None
def get_content_type(self, obj=None, id=None, using=None):
if obj is not None:
return ContentType.objects.db_manager(obj._state.db).get_for_model(
obj, for_concrete_model=self.for_concrete_model)
elif id is not None:
return ContentType.objects.db_manager(using).get_for_id(id)
else:
# This should never happen. I love comments like this, don't you?
raise Exception("Impossible arguments to GFK.get_content_type!")
def get_prefetch_queryset(self, instances, queryset=None):
if queryset is not None:
raise ValueError("Custom queryset can't be used for this lookup.")
# For efficiency, group the instances by content type and then do one
# query per model
fk_dict = defaultdict(set)
# We need one instance for each group in order to get the right db:
instance_dict = {}
ct_attname = self.model._meta.get_field(self.ct_field).get_attname()
for instance in instances:
# We avoid looking for values if either ct_id or fkey value is None
ct_id = getattr(instance, ct_attname)
if ct_id is not None:
fk_val = getattr(instance, self.fk_field)
if fk_val is not None:
fk_dict[ct_id].add(fk_val)
instance_dict[ct_id] = instance
ret_val = []
for ct_id, fkeys in fk_dict.items():
instance = instance_dict[ct_id]
ct = self.get_content_type(id=ct_id, using=instance._state.db)
ret_val.extend(ct.get_all_objects_for_this_type(pk__in=fkeys))
# For doing the join in Python, we have to match both the FK val and the
# content type, so we use a callable that returns a (fk, class) pair.
def gfk_key(obj):
ct_id = getattr(obj, ct_attname)
if ct_id is None:
return None
else:
model = self.get_content_type(id=ct_id,
using=obj._state.db).model_class()
return (model._meta.pk.get_prep_value(getattr(obj, self.fk_field)),
model)
return (ret_val,
lambda obj: (obj._get_pk_val(), obj.__class__),
gfk_key,
True,
self.cache_attr)
def is_cached(self, instance):
return hasattr(instance, self.cache_attr)
def __get__(self, instance, instance_type=None):
if instance is None:
return self
try:
return getattr(instance, self.cache_attr)
except AttributeError:
rel_obj = None
# Make sure to use ContentType.objects.get_for_id() to ensure that
# lookups are cached (see ticket #5570). This takes more code than
# the naive ``getattr(instance, self.ct_field)``, but has better
# performance when dealing with GFKs in loops and such.
f = self.model._meta.get_field(self.ct_field)
ct_id = getattr(instance, f.get_attname(), None)
if ct_id is not None:
ct = self.get_content_type(id=ct_id, using=instance._state.db)
try:
rel_obj = ct.get_object_for_this_type(pk=getattr(instance, self.fk_field))
except ObjectDoesNotExist:
pass
setattr(instance, self.cache_attr, rel_obj)
return rel_obj
def __set__(self, instance, value):
ct = None
fk = None
if value is not None:
ct = self.get_content_type(obj=value)
fk = value._get_pk_val()
setattr(instance, self.ct_field, ct)
setattr(instance, self.fk_field, fk)
setattr(instance, self.cache_attr, value)
class GenericRel(ForeignObjectRel):
"""
Used by GenericRelation to store information about the relation.
"""
def __init__(self, field, to, related_name=None, related_query_name=None, limit_choices_to=None):
super(GenericRel, self).__init__(
field, to,
related_name=related_query_name or '+',
related_query_name=related_query_name,
limit_choices_to=limit_choices_to,
on_delete=DO_NOTHING,
)
class GenericRelation(ForeignObject):
"""
Provide a reverse to a relation created by a GenericForeignKey.
"""
# Field flags
auto_created = False
many_to_many = False
many_to_one = False
one_to_many = True
one_to_one = False
rel_class = GenericRel
def __init__(self, to, object_id_field='object_id', content_type_field='content_type',
for_concrete_model=True, related_query_name=None, limit_choices_to=None, **kwargs):
kwargs['rel'] = self.rel_class(
self, to,
related_query_name=related_query_name,
limit_choices_to=limit_choices_to,
)
kwargs['blank'] = True
kwargs['on_delete'] = models.CASCADE
kwargs['editable'] = False
kwargs['serialize'] = False
# This construct is somewhat of an abuse of ForeignObject. This field
# represents a relation from pk to object_id field. But, this relation
# isn't direct, the join is generated reverse along foreign key. So,
# the from_field is object_id field, to_field is pk because of the
# reverse join.
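        # Rough shape of the generated join, with illustrative table names only:
        #   ... JOIN taggeditem ON taggeditem.object_id = bookmark.id
        #       AND taggeditem.content_type_id = <Bookmark's ContentType pk>
        # (the content_type condition is added by get_extra_restriction below).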
super(GenericRelation, self).__init__(
to, from_fields=[object_id_field], to_fields=[], **kwargs)
self.object_id_field_name = object_id_field
self.content_type_field_name = content_type_field
self.for_concrete_model = for_concrete_model
def check(self, **kwargs):
errors = super(GenericRelation, self).check(**kwargs)
errors.extend(self._check_generic_foreign_key_existence())
return errors
def _check_generic_foreign_key_existence(self):
target = self.remote_field.model
if isinstance(target, ModelBase):
fields = target._meta.virtual_fields
if any(isinstance(field, GenericForeignKey) and
field.ct_field == self.content_type_field_name and
field.fk_field == self.object_id_field_name
for field in fields):
return []
else:
return [
checks.Error(
("The GenericRelation defines a relation with the model "
"'%s.%s', but that model does not have a GenericForeignKey.") % (
target._meta.app_label, target._meta.object_name
),
hint=None,
obj=self,
id='contenttypes.E004',
)
]
else:
return []
def resolve_related_fields(self):
self.to_fields = [self.model._meta.pk.name]
return [(self.remote_field.model._meta.get_field(self.object_id_field_name), self.model._meta.pk)]
def get_path_info(self):
opts = self.remote_field.model._meta
target = opts.pk
return [PathInfo(self.model._meta, opts, (target,), self.remote_field, True, False)]
def get_reverse_path_info(self):
opts = self.model._meta
from_opts = self.remote_field.model._meta
return [PathInfo(from_opts, opts, (opts.pk,), self, not self.unique, False)]
def get_choices_default(self):
return super(GenericRelation, self).get_choices(include_blank=False)
def value_to_string(self, obj):
qs = getattr(obj, self.name).all()
return smart_text([instance._get_pk_val() for instance in qs])
def contribute_to_class(self, cls, name, **kwargs):
kwargs['virtual_only'] = True
super(GenericRelation, self).contribute_to_class(cls, name, **kwargs)
self.model = cls
setattr(cls, self.name, ReverseGenericRelatedObjectsDescriptor(self.remote_field))
def set_attributes_from_rel(self):
pass
def get_internal_type(self):
return "ManyToManyField"
def get_content_type(self):
"""
Return the content type associated with this field's model.
"""
return ContentType.objects.get_for_model(self.model,
for_concrete_model=self.for_concrete_model)
def get_extra_restriction(self, where_class, alias, remote_alias):
field = self.remote_field.model._meta.get_field(self.content_type_field_name)
contenttype_pk = self.get_content_type().pk
cond = where_class()
lookup = field.get_lookup('exact')(field.get_col(remote_alias), contenttype_pk)
cond.add(lookup, 'AND')
return cond
def bulk_related_objects(self, objs, using=DEFAULT_DB_ALIAS):
"""
Return all objects related to ``objs`` via this ``GenericRelation``.
"""
return self.remote_field.model._base_manager.db_manager(using).filter(**{
"%s__pk" % self.content_type_field_name: ContentType.objects.db_manager(using).get_for_model(
self.model, for_concrete_model=self.for_concrete_model).pk,
"%s__in" % self.object_id_field_name: [obj.pk for obj in objs]
})
class ReverseGenericRelatedObjectsDescriptor(ForeignRelatedObjectsDescriptor):
"""
Accessor to the related objects manager on the one-to-many relation created
by GenericRelation.
In the example::
class Post(Model):
comments = GenericRelation(Comment)
``post.comments`` is a ReverseGenericRelatedObjectsDescriptor instance.
"""
@cached_property
def related_manager_cls(self):
return create_generic_related_manager(
self.rel.model._default_manager.__class__,
self.rel,
)
def create_generic_related_manager(superclass, rel):
"""
Factory function to create a manager that subclasses another manager
(generally the default manager of a given model) and adds behaviors
specific to generic relations.
"""
class GenericRelatedObjectManager(superclass):
def __init__(self, instance=None):
super(GenericRelatedObjectManager, self).__init__()
self.instance = instance
self.model = rel.model
content_type = ContentType.objects.db_manager(instance._state.db).get_for_model(
instance, for_concrete_model=rel.field.for_concrete_model)
self.content_type = content_type
qn = connection.ops.quote_name
join_cols = rel.field.get_joining_columns(reverse_join=True)[0]
self.source_col_name = qn(join_cols[0])
self.target_col_name = qn(join_cols[1])
self.content_type_field_name = rel.field.content_type_field_name
self.object_id_field_name = rel.field.object_id_field_name
self.prefetch_cache_name = rel.field.attname
self.pk_val = instance._get_pk_val()
self.core_filters = {
'%s__pk' % self.content_type_field_name: content_type.id,
self.object_id_field_name: self.pk_val,
}
def __call__(self, **kwargs):
# We use **kwargs rather than a kwarg argument to enforce the
# `manager='manager_name'` syntax.
manager = getattr(self.model, kwargs.pop('manager'))
manager_class = create_generic_related_manager(manager.__class__, rel)
return manager_class(instance=self.instance)
do_not_call_in_templates = True
def __str__(self):
return repr(self)
def get_queryset(self):
try:
return self.instance._prefetched_objects_cache[self.prefetch_cache_name]
except (AttributeError, KeyError):
db = self._db or router.db_for_read(self.model, instance=self.instance)
return super(GenericRelatedObjectManager, self).get_queryset().using(db).filter(**self.core_filters)
def get_prefetch_queryset(self, instances, queryset=None):
if queryset is None:
queryset = super(GenericRelatedObjectManager, self).get_queryset()
queryset._add_hints(instance=instances[0])
queryset = queryset.using(queryset._db or self._db)
query = {
'%s__pk' % self.content_type_field_name: self.content_type.id,
'%s__in' % self.object_id_field_name: set(obj._get_pk_val() for obj in instances)
}
# We (possibly) need to convert object IDs to the type of the
# instances' PK in order to match up instances:
object_id_converter = instances[0]._meta.pk.to_python
return (queryset.filter(**query),
lambda relobj: object_id_converter(getattr(relobj, self.object_id_field_name)),
lambda obj: obj._get_pk_val(),
False,
self.prefetch_cache_name)
def add(self, *objs, **kwargs):
bulk = kwargs.pop('bulk', True)
db = router.db_for_write(self.model, instance=self.instance)
def check_and_update_obj(obj):
if not isinstance(obj, self.model):
raise TypeError("'%s' instance expected, got %r" % (
self.model._meta.object_name, obj
))
setattr(obj, self.content_type_field_name, self.content_type)
setattr(obj, self.object_id_field_name, self.pk_val)
if bulk:
pks = []
for obj in objs:
if obj._state.adding or obj._state.db != db:
raise ValueError(
"%r instance isn't saved. Use bulk=False or save "
"the object first. but must be." % obj
)
check_and_update_obj(obj)
pks.append(obj.pk)
self.model._base_manager.using(db).filter(pk__in=pks).update(**{
self.content_type_field_name: self.content_type,
self.object_id_field_name: self.pk_val,
})
else:
with transaction.atomic(using=db, savepoint=False):
for obj in objs:
check_and_update_obj(obj)
obj.save()
add.alters_data = True
def remove(self, *objs, **kwargs):
if not objs:
return
bulk = kwargs.pop('bulk', True)
self._clear(self.filter(pk__in=[o.pk for o in objs]), bulk)
remove.alters_data = True
def clear(self, **kwargs):
bulk = kwargs.pop('bulk', True)
self._clear(self, bulk)
clear.alters_data = True
def _clear(self, queryset, bulk):
db = router.db_for_write(self.model, instance=self.instance)
queryset = queryset.using(db)
if bulk:
# `QuerySet.delete()` creates its own atomic block which
# contains the `pre_delete` and `post_delete` signal handlers.
queryset.delete()
else:
with transaction.atomic(using=db, savepoint=False):
for obj in queryset:
obj.delete()
_clear.alters_data = True
def set(self, objs, **kwargs):
# Force evaluation of `objs` in case it's a queryset whose value
# could be affected by `manager.clear()`. Refs #19816.
objs = tuple(objs)
bulk = kwargs.pop('bulk', True)
clear = kwargs.pop('clear', False)
db = router.db_for_write(self.model, instance=self.instance)
with transaction.atomic(using=db, savepoint=False):
if clear:
self.clear()
self.add(*objs, bulk=bulk)
else:
old_objs = set(self.using(db).all())
new_objs = []
for obj in objs:
if obj in old_objs:
old_objs.remove(obj)
else:
new_objs.append(obj)
self.remove(*old_objs)
self.add(*new_objs, bulk=bulk)
set.alters_data = True
def create(self, **kwargs):
kwargs[self.content_type_field_name] = self.content_type
kwargs[self.object_id_field_name] = self.pk_val
db = router.db_for_write(self.model, instance=self.instance)
return super(GenericRelatedObjectManager, self).using(db).create(**kwargs)
create.alters_data = True
def get_or_create(self, **kwargs):
kwargs[self.content_type_field_name] = self.content_type
kwargs[self.object_id_field_name] = self.pk_val
db = router.db_for_write(self.model, instance=self.instance)
return super(GenericRelatedObjectManager, self).using(db).get_or_create(**kwargs)
get_or_create.alters_data = True
def update_or_create(self, **kwargs):
kwargs[self.content_type_field_name] = self.content_type
kwargs[self.object_id_field_name] = self.pk_val
db = router.db_for_write(self.model, instance=self.instance)
return super(GenericRelatedObjectManager, self).using(db).update_or_create(**kwargs)
update_or_create.alters_data = True
return GenericRelatedObjectManager
|
bsd-3-clause
|
chouseknecht/ansible
|
test/units/module_utils/basic/test_log.py
|
120
|
7027
|
# -*- coding: utf-8 -*-
# (c) 2012-2014, Michael DeHaan <[email protected]>
# (c) 2017, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
import syslog
from itertools import product
import pytest
import ansible.module_utils.basic
from ansible.module_utils.six import PY3
class TestAnsibleModuleLogSmokeTest:
DATA = [u'Text string', u'Toshio くらとみ non-ascii test']
DATA = DATA + [d.encode('utf-8') for d in DATA]
DATA += [b'non-utf8 :\xff: test']
# pylint bug: https://github.com/PyCQA/pylint/issues/511
@pytest.mark.parametrize('msg, stdin', ((m, {}) for m in DATA), indirect=['stdin']) # pylint: disable=undefined-variable
def test_smoketest_syslog(self, am, mocker, msg):
# These talk to the live daemons on the system. Need to do this to
# show that what we send doesn't cause an issue once it gets to the
# daemon. These are just smoketests to test that we don't fail.
mocker.patch('ansible.module_utils.basic.has_journal', False)
am.log(u'Text string')
am.log(u'Toshio くらとみ non-ascii test')
am.log(b'Byte string')
am.log(u'Toshio くらとみ non-ascii test'.encode('utf-8'))
am.log(b'non-utf8 :\xff: test')
@pytest.mark.skipif(not ansible.module_utils.basic.has_journal, reason='python systemd bindings not installed')
# pylint bug: https://github.com/PyCQA/pylint/issues/511
@pytest.mark.parametrize('msg, stdin', ((m, {}) for m in DATA), indirect=['stdin']) # pylint: disable=undefined-variable
def test_smoketest_journal(self, am, mocker, msg):
# These talk to the live daemons on the system. Need to do this to
# show that what we send doesn't cause an issue once it gets to the
# daemon. These are just smoketests to test that we don't fail.
mocker.patch('ansible.module_utils.basic.has_journal', True)
am.log(u'Text string')
am.log(u'Toshio くらとみ non-ascii test')
am.log(b'Byte string')
am.log(u'Toshio くらとみ non-ascii test'.encode('utf-8'))
am.log(b'non-utf8 :\xff: test')
class TestAnsibleModuleLogSyslog:
"""Test the AnsibleModule Log Method"""
PY2_OUTPUT_DATA = [
(u'Text string', b'Text string'),
(u'Toshio くらとみ non-ascii test', u'Toshio くらとみ non-ascii test'.encode('utf-8')),
(b'Byte string', b'Byte string'),
(u'Toshio くらとみ non-ascii test'.encode('utf-8'), u'Toshio くらとみ non-ascii test'.encode('utf-8')),
(b'non-utf8 :\xff: test', b'non-utf8 :\xff: test'.decode('utf-8', 'replace').encode('utf-8')),
]
PY3_OUTPUT_DATA = [
(u'Text string', u'Text string'),
(u'Toshio くらとみ non-ascii test', u'Toshio くらとみ non-ascii test'),
(b'Byte string', u'Byte string'),
(u'Toshio くらとみ non-ascii test'.encode('utf-8'), u'Toshio くらとみ non-ascii test'),
(b'non-utf8 :\xff: test', b'non-utf8 :\xff: test'.decode('utf-8', 'replace')),
]
OUTPUT_DATA = PY3_OUTPUT_DATA if PY3 else PY2_OUTPUT_DATA
@pytest.mark.parametrize('no_log, stdin', (product((True, False), [{}])), indirect=['stdin'])
def test_no_log(self, am, mocker, no_log):
"""Test that when no_log is set, logging does not occur"""
mock_syslog = mocker.patch('syslog.syslog', autospec=True)
mocker.patch('ansible.module_utils.basic.has_journal', False)
am.no_log = no_log
am.log('unittest no_log')
if no_log:
assert not mock_syslog.called
else:
mock_syslog.assert_called_once_with(syslog.LOG_INFO, 'unittest no_log')
# pylint bug: https://github.com/PyCQA/pylint/issues/511
@pytest.mark.parametrize('msg, param, stdin',
((m, p, {}) for m, p in OUTPUT_DATA), # pylint: disable=undefined-variable
indirect=['stdin'])
def test_output_matches(self, am, mocker, msg, param):
"""Check that log messages are sent correctly"""
mocker.patch('ansible.module_utils.basic.has_journal', False)
mock_syslog = mocker.patch('syslog.syslog', autospec=True)
am.log(msg)
mock_syslog.assert_called_once_with(syslog.LOG_INFO, param)
@pytest.mark.skipif(not ansible.module_utils.basic.has_journal, reason='python systemd bindings not installed')
class TestAnsibleModuleLogJournal:
"""Test the AnsibleModule Log Method"""
OUTPUT_DATA = [
(u'Text string', u'Text string'),
(u'Toshio くらとみ non-ascii test', u'Toshio くらとみ non-ascii test'),
(b'Byte string', u'Byte string'),
(u'Toshio くらとみ non-ascii test'.encode('utf-8'), u'Toshio くらとみ non-ascii test'),
(b'non-utf8 :\xff: test', b'non-utf8 :\xff: test'.decode('utf-8', 'replace')),
]
@pytest.mark.parametrize('no_log, stdin', (product((True, False), [{}])), indirect=['stdin'])
def test_no_log(self, am, mocker, no_log):
journal_send = mocker.patch('systemd.journal.send')
am.no_log = no_log
am.log('unittest no_log')
if no_log:
assert not journal_send.called
else:
assert journal_send.called == 1
# Message
# call_args is a 2-tuple of (arg_list, kwarg_dict)
assert journal_send.call_args[1]['MESSAGE'].endswith('unittest no_log'), 'Message was not sent to log'
# log adds this journal field
assert 'MODULE' in journal_send.call_args[1]
assert 'basic.py' in journal_send.call_args[1]['MODULE']
# pylint bug: https://github.com/PyCQA/pylint/issues/511
@pytest.mark.parametrize('msg, param, stdin',
((m, p, {}) for m, p in OUTPUT_DATA), # pylint: disable=undefined-variable
indirect=['stdin'])
def test_output_matches(self, am, mocker, msg, param):
journal_send = mocker.patch('systemd.journal.send')
am.log(msg)
assert journal_send.call_count == 1, 'journal.send not called exactly once'
assert journal_send.call_args[1]['MESSAGE'].endswith(param)
@pytest.mark.parametrize('stdin', ({},), indirect=['stdin'])
def test_log_args(self, am, mocker):
journal_send = mocker.patch('systemd.journal.send')
am.log('unittest log_args', log_args=dict(TEST='log unittest'))
assert journal_send.called == 1
assert journal_send.call_args[1]['MESSAGE'].endswith('unittest log_args'), 'Message was not sent to log'
# log adds this journal field
assert 'MODULE' in journal_send.call_args[1]
assert 'basic.py' in journal_send.call_args[1]['MODULE']
# We added this journal field
assert 'TEST' in journal_send.call_args[1]
assert 'log unittest' in journal_send.call_args[1]['TEST']
|
gpl-3.0
|
csdms/dakota
|
dakotathon/tests/test_plugin_hydrotrend_run.py
|
1
|
3466
|
#!/usr/bin/env python
#
# Test running the dakota.plugin.hydrotrend module.
#
# Call with:
# $ nosetests -sv
#
# Mark Piper ([email protected])
import os
import shutil
# import filecmp
import glob
from nose.tools import with_setup, assert_true
from dakotathon.dakota import Dakota
from dakotathon.plugins.hydrotrend import is_installed as is_hydrotrend_installed
from dakotathon.utils import is_dakota_installed
from . import start_dir, data_dir
# Global variables -----------------------------------------------------
run_dir = os.getcwd()
config_file = os.path.join(run_dir, "dakota.yaml")
known_config_file = os.path.join(data_dir, "dakota.yaml")
# known_dat_file = os.path.join(data_dir, 'dakota.dat')
# Fixtures -------------------------------------------------------------
def setup_module():
"""Called before any tests are performed."""
print("\n*** " + __name__)
def setup():
"""Called at start of any test using it @with_setup()"""
pass
def teardown():
"""Called at end of any test using it @with_setup()"""
if os.path.exists(config_file):
os.remove(config_file)
if os.path.exists("dakota.in"):
os.remove("dakota.in")
if os.path.exists("run.log"):
os.remove("run.log")
if os.path.exists("stderr.log"):
os.remove("stderr.log")
if is_hydrotrend_installed():
for dname in glob.glob("HYDRO_*"):
shutil.rmtree(dname)
if is_dakota_installed():
for dname in glob.glob("run.*"):
shutil.rmtree(dname)
for fname in ["dakota." + ext for ext in ["dat", "out", "rst"]]:
if os.path.exists(fname):
os.remove(fname)
def teardown_module():
"""Called after all tests have completed."""
pass
# Tests ----------------------------------------------------------------
@with_setup(setup, teardown)
def test_run_by_setting_attributes():
"""Test running a HydroTrend simulation."""
d = Dakota(method="vector_parameter_study", plugin="hydrotrend")
d.template_file = os.path.join(data_dir, "HYDRO.IN.dtmpl")
d.auxiliary_files = os.path.join(data_dir, "HYDRO0.HYPS")
d.variables.descriptors = [
"starting_mean_annual_temperature",
"total_annual_precipitation",
]
d.variables.initial_point = [10.0, 1.5]
d.method.final_point = [20.0, 2.5]
d.method.n_steps = 5
d.responses.response_descriptors = ["Qs_median", "Q_mean"]
d.responses.response_files = ["HYDROASCII.QS", "HYDROASCII.Q"]
d.responses.response_statistics = ["median", "mean"]
d.setup()
assert_true(os.path.exists(d.input_file))
if is_dakota_installed() and is_hydrotrend_installed():
d.run()
assert_true(os.path.exists(d.output_file))
# assert_true(filecmp.cmp(known_dat_file, d.environment.data_file))
@with_setup(setup, teardown)
def test_run_from_config_file():
"""Test running a HydroTrend simulation from a config file."""
d = Dakota.from_file_like(known_config_file)
d.run_directory = run_dir
d.template_file = os.path.join(data_dir, "HYDRO.IN.dtmpl")
d.auxiliary_files = os.path.join(data_dir, "HYDRO0.HYPS")
d.serialize(config_file)
d.write_input_file()
assert_true(os.path.exists(d.input_file))
if is_dakota_installed() and is_hydrotrend_installed():
d.run()
assert_true(os.path.exists(d.output_file))
# assert_true(filecmp.cmp(known_dat_file, d.environment.data_file))
|
mit
|
astorije/ansible-modules-extras
|
system/known_hosts.py
|
60
|
8999
|
#!/usr/bin/python
"""
Ansible module to manage the ssh known_hosts file.
Copyright(c) 2014, Matthew Vernon <[email protected]>
This module is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This module is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this module. If not, see <http://www.gnu.org/licenses/>.
"""
DOCUMENTATION = '''
---
module: known_hosts
short_description: Add or remove a host from the C(known_hosts) file
description:
- The M(known_hosts) module lets you add or remove a host from the C(known_hosts) file.
This is useful if you're going to use the M(git) module over ssh, for example.
If you have a very large number of host keys to manage, you will find the M(template) module more useful.
version_added: "1.9"
options:
name:
aliases: [ 'host' ]
description:
- The host to add or remove (must match a host specified in key)
required: true
default: null
key:
description:
- The SSH public host key, as a string (required if state=present, optional when state=absent, in which case all keys for the host are removed)
required: false
default: null
path:
description:
- The known_hosts file to edit
required: no
default: "(homedir)+/.ssh/known_hosts"
state:
description:
- I(present) to add the host, I(absent) to remove it.
choices: [ "present", "absent" ]
required: no
default: present
requirements: [ ]
author: "Matthew Vernon (@mcv21)"
'''
EXAMPLES = '''
# Example using with_file to set the system known_hosts file
- name: tell the host about our servers it might want to ssh to
known_hosts: path='/etc/ssh/ssh_known_hosts'
name='foo.com.invalid'
key="{{ lookup('file', 'pubkeys/foo.com.invalid') }}"
'''
# Makes sure public host keys are present or absent in the given known_hosts
# file.
#
# Arguments
# =========
# name = hostname whose key should be added (alias: host)
# key = line(s) to add to known_hosts file
# path = the known_hosts file to edit (default: ~/.ssh/known_hosts)
# state = absent|present (default: present)
import os
import os.path
import tempfile
import errno
def enforce_state(module, params):
"""
Add or remove key.
"""
host = params["name"]
key = params.get("key",None)
port = params.get("port",None)
#expand the path parameter; otherwise module.add_path_info
#(called by exit_json) unhelpfully says the unexpanded path is absent.
path = os.path.expanduser(params.get("path"))
state = params.get("state")
#Find the ssh-keygen binary
sshkeygen = module.get_bin_path("ssh-keygen",True)
#trailing newline in files gets lost, so re-add if necessary
if key is not None and key[-1]!='\n':
key+='\n'
if key is None and state != "absent":
module.fail_json(msg="No key specified when adding a host")
sanity_check(module,host,key,sshkeygen)
current,replace=search_for_host_key(module,host,key,path,sshkeygen)
#We will change state if current==True & state!="present"
#or current==False & state=="present"
#i.e (current) XOR (state=="present")
#Alternatively, if replace is true (i.e. key present, and we must change it)
if module.check_mode:
module.exit_json(changed = replace or ((state=="present") != current))
#Now do the work.
#First, remove an extant entry if required
if replace==True or (current==True and state=="absent"):
module.run_command([sshkeygen,'-R',host,'-f',path],
check_rc=True)
params['changed'] = True
#Next, add a new (or replacing) entry
if replace==True or (current==False and state=="present"):
try:
inf=open(path,"r")
except IOError, e:
if e.errno == errno.ENOENT:
inf=None
else:
module.fail_json(msg="Failed to read %s: %s" % \
(path,str(e)))
try:
outf=tempfile.NamedTemporaryFile(dir=os.path.dirname(path))
if inf is not None:
for line in inf:
outf.write(line)
inf.close()
outf.write(key)
outf.flush()
module.atomic_move(outf.name,path)
except (IOError,OSError),e:
module.fail_json(msg="Failed to write to file %s: %s" % \
(path,str(e)))
try:
outf.close()
except:
pass
params['changed'] = True
return params
def sanity_check(module,host,key,sshkeygen):
'''Check supplied key is sensible
host and key are parameters provided by the user; If the host
provided is inconsistent with the key supplied, then this function
quits, providing an error to the user.
sshkeygen is the path to ssh-keygen, found earlier with get_bin_path
'''
#If no key supplied, we're doing a removal, and have nothing to check here.
if key is None:
return
#Rather than parsing the key ourselves, get ssh-keygen to do it
#(this is essential for hashed keys, but otherwise useful, as the
#key question is whether ssh-keygen thinks the key matches the host).
#The approach is to write the key to a temporary file,
#and then attempt to look up the specified host in that file.
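    #Roughly equivalent to this shell sketch (illustrative only):
    #  printf '%s' "$key" > /tmp/keyfile
    #  ssh-keygen -F <host> -f /tmp/keyfile   # empty stdout => host/key mismatch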
try:
outf=tempfile.NamedTemporaryFile()
outf.write(key)
outf.flush()
except IOError,e:
module.fail_json(msg="Failed to write to temporary file %s: %s" % \
(outf.name,str(e)))
rc,stdout,stderr=module.run_command([sshkeygen,'-F',host,
'-f',outf.name],
check_rc=True)
try:
outf.close()
except:
pass
if stdout=='': #host not found
module.fail_json(msg="Host parameter does not match hashed host field in supplied key")
def search_for_host_key(module,host,key,path,sshkeygen):
'''search_for_host_key(module,host,key,path,sshkeygen) -> (current,replace)
Looks up host in the known_hosts file path; if it's there, looks to see
if one of those entries matches key. Returns:
current (Boolean): is host found in path?
replace (Boolean): is the key in path different to that supplied by user?
if current=False, then replace is always False.
sshkeygen is the path to ssh-keygen, found earlier with get_bin_path
'''
replace=False
if os.path.exists(path)==False:
return False, False
#openssh >=6.4 has changed ssh-keygen behaviour such that it returns
#1 if no host is found, whereas previously it returned 0
rc,stdout,stderr=module.run_command([sshkeygen,'-F',host,'-f',path],
check_rc=False)
if stdout=='' and stderr=='' and (rc==0 or rc==1):
return False, False #host not found, no other errors
if rc!=0: #something went wrong
module.fail_json(msg="ssh-keygen failed (rc=%d,stdout='%s',stderr='%s')" % (rc,stdout,stderr))
#If user supplied no key, we don't want to try and replace anything with it
if key is None:
return True, False
lines=stdout.split('\n')
k=key.strip() #trim trailing newline
#ssh-keygen returns only the host we ask about in the host field,
#even if the key entry has multiple hosts. Emulate this behaviour here,
#otherwise we get false negatives.
#Only necessary for unhashed entries.
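    #For example (sketch): a supplied entry "example.com,192.0.2.1 ssh-rsa AAAA..."
    #is rewritten as "<host> ssh-rsa AAAA..." before comparison, since the
    #ssh-keygen -F output above also lists only the queried host.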
if k[0] !='|':
k=k.split()
#The optional "marker" field, used for @cert-authority or @revoked
if k[0][0] == '@':
k[1]=host
else:
k[0]=host
k=' '.join(k)
for l in lines:
if l=='':
continue
if l[0]=='#': #comment
continue
if k==l: #found a match
return True, False #current, not-replace
#No match found, return current and replace
return True, True
def main():
module = AnsibleModule(
argument_spec = dict(
name = dict(required=True, type='str', aliases=['host']),
key = dict(required=False, type='str'),
path = dict(default="~/.ssh/known_hosts", type='str'),
state = dict(default='present', choices=['absent','present']),
),
supports_check_mode = True
)
results = enforce_state(module,module.params)
module.exit_json(**results)
# import module snippets
from ansible.module_utils.basic import *
main()
|
gpl-3.0
|
kysolvik/reservoir-id
|
reservoir-id/classifier_train.py
|
1
|
6974
|
#!/usr/bin/env python
"""
Train XGBoost classifier
Inputs: CSV from build_att_table, small area cutoff
Outputs: Packaged up XGBoost model
@authors: Kylen Solvik
Date Create: 3/17/17
"""
# Load libraries
import pandas as pd
from sklearn import model_selection
from sklearn import preprocessing
from sklearn.metrics import classification_report
from sklearn.metrics import confusion_matrix
from sklearn.metrics import accuracy_score
from sklearn.externals import joblib
from sklearn.cross_validation import train_test_split
from sklearn.grid_search import GridSearchCV
from sklearn.cross_validation import *
import numpy as np
import sys
import argparse
import os
import xgboost as xgb
# Parse arguments
parser = argparse.ArgumentParser(description='Train XGBoost classifier.',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('prop_csv',
help='Path to attribute table (from build_att_table.py).',
type=str)
parser.add_argument('xgb_pkl',
help='Path to save XGBoost model as .pkl.',
type=str)
parser.add_argument('--area_lowbound',
help='Lower area bound. All regions <= in size will be ignored',
default=2,
type=int)
parser.add_argument('--path_prefix',
help='To be placed at beginnings of all other path args',
type=str,default='')
args = parser.parse_args()
def select_training_obs(full_csv_path):
"""Takes full csv and selects only the training observations.
Writes out to csv for further use"""
training_csv_path = full_csv_path.replace('.csv','_trainonly.csv')
if not os.path.isfile(training_csv_path):
dataset = pd.read_csv(full_csv_path,header=0)
training_dataset = dataset.loc[dataset['class'] > 0]
training_dataset.to_csv(training_csv_path,header=True,index=False)
return(training_csv_path)
def main():
# Set any attributes to exclude for this run
exclude_att_patterns = []
# Load dataset
training_csv = select_training_obs(args.path_prefix + args.prop_csv)
dataset = pd.read_csv(training_csv,header=0)
dataset_acut = dataset.loc[dataset['area'] > args.area_lowbound]
# Exclude attributes matching user input patterns, or if they are all nans
exclude_atts = []
for pattern in exclude_att_patterns:
col_list = [col for col in dataset_acut.columns if pattern in col]
exclude_atts.extend(col_list)
for att in dataset.columns[1:]:
if sum(np.isfinite(dataset[att])) == 0:
exclude_atts.append(att)
for att in list(set(exclude_atts)):
del dataset_acut[att]
(ds_y,ds_x) = dataset_acut.shape
print(ds_y,ds_x)
# Convert dataset to array
feature_names = dataset_acut.columns[2:]
array = dataset_acut.values
X = array[:,2:ds_x].astype(float)
Y = array[:,1].astype(int)
Y = Y-1 # Convert from 1s and 2s to 0-1
# Set nans to 0
X = np.nan_to_num(X)
# Separate test data
test_size = 0.2
seed = 5
X_train, X_test, Y_train, Y_test = model_selection.train_test_split(
X, Y, test_size=test_size,
random_state=seed)
# Convert data to xgboost matrices
d_train = xgb.DMatrix(X_train,label=Y_train)
# d_test = xgb.DMatrix(X_test,label=Y_test)
#----------------------------------------------------------------------
# Paramater tuning
# Step 1: Find approximate n_estimators to use
early_stop_rounds = 40
n_folds = 5
xgb_model = xgb.XGBClassifier(
learning_rate =0.1,
n_estimators=1000,
max_depth=5,
min_child_weight=1,
gamma=0,
subsample=0.8,
colsample_bytree=0.8,
objective= 'binary:logistic',
seed=27)
xgb_params = xgb_model.get_xgb_params()
cvresult = xgb.cv(xgb_params, d_train,
num_boost_round=xgb_params['n_estimators'], nfold=n_folds,
metrics='auc', early_stopping_rounds=early_stop_rounds,
)
n_est_best = (cvresult.shape[0] - early_stop_rounds)
print('Best number of rounds = {}'.format(n_est_best))
# Step 2: Tune hyperparameters
xgb_model = xgb.XGBClassifier()
params = {'max_depth': range(5,10,2),
'learning_rate': [0.1],
'gamma':[0,0.5,1],
'silent': [1],
'objective': ['binary:logistic'],
'n_estimators' : [n_est_best],
'subsample' : [0.7, 0.8,1],
'min_child_weight' : range(1,4,2),
'colsample_bytree':[0.7,0.8,1],
}
clf = GridSearchCV(xgb_model,params,n_jobs = 1,
cv = StratifiedKFold(Y_train,
n_folds=5, shuffle=True),
scoring = 'roc_auc',
verbose = 2,
refit = True)
clf.fit(X_train,Y_train)
best_parameters,score,_ = max(clf.grid_scores_,key=lambda x: x[1])
print('Raw AUC score:',score)
for param_name in sorted(best_parameters.keys()):
print("%s: %r" % (param_name, best_parameters[param_name]))
# Step 3: Decrease learning rate and up the # of trees
#xgb_finalcv = XGBClassifier()
tuned_params = clf.best_params_
tuned_params['n_estimators'] = 10000
tuned_params['learning_rate'] = 0.01
cvresult = xgb.cv(tuned_params, d_train,
num_boost_round=tuned_params['n_estimators'], nfold=n_folds,
metrics='auc', early_stopping_rounds=early_stop_rounds,
)
    # Train model with CV results and predict on the test set for test accuracy
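    # Heuristic behind the formula below: each CV fold trains on (1 - 1/n_folds)
    # of the data, so the early-stopped round count is scaled up by
    # n_folds/(n_folds - 1) for the full-data model. Note the scaling assumes
    # true division (Python 3); under Python 2 it would need
    # `from __future__ import division`.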
n_est_final = int((cvresult.shape[0] - early_stop_rounds) / (1 - 1 / n_folds))
tuned_params['n_estimators'] = n_est_final
print(tuned_params)
xgb_train = xgb.XGBClassifier()
xgb_train.set_params(**tuned_params)
xgb_train.fit(X_train,Y_train)
bst_preds = xgb_train.predict(X_test)
print("Xgboost Test acc = " + str(accuracy_score(Y_test, bst_preds)))
print(confusion_matrix(Y_test, bst_preds))
print(classification_report(Y_test, bst_preds))
# Export cv classifier
joblib.dump(cvresult, args.path_prefix + args.xgb_pkl + 'cv')
# Export classifier trained on full data set
xgb_full = xgb.XGBClassifier()
xgb_full.set_params(**tuned_params)
xgb_full.fit(X,Y)
joblib.dump(xgb_full, args.path_prefix + args.xgb_pkl)
if __name__ == '__main__':
main()
|
gpl-3.0
|
DazWorrall/ansible
|
lib/ansible/modules/packaging/language/pip.py
|
8
|
21296
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2012, Matt Wright <[email protected]>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'core'}
DOCUMENTATION = '''
---
module: pip
short_description: Manages Python library dependencies.
description:
- "Manage Python library dependencies. To use this module, one of the following keys is required: C(name)
or C(requirements)."
version_added: "0.7"
options:
name:
description:
- The name of a Python library to install or the url of the remote package.
- As of 2.2 you can supply a list of names.
required: false
default: null
version:
description:
- The version number to install of the Python library specified in the I(name) parameter
required: false
default: null
requirements:
description:
- The path to a pip requirements file, which should be local to the remote system.
File can be specified as a relative path if using the chdir option.
required: false
default: null
virtualenv:
description:
- An optional path to a I(virtualenv) directory to install into.
It cannot be specified together with the 'executable' parameter
(added in 2.1).
If the virtualenv does not exist, it will be created before installing
packages. The optional virtualenv_site_packages, virtualenv_command,
and virtualenv_python options affect the creation of the virtualenv.
required: false
default: null
virtualenv_site_packages:
version_added: "1.0"
description:
- Whether the virtual environment will inherit packages from the
global site-packages directory. Note that if this setting is
changed on an already existing virtual environment it will not
have any effect, the environment must be deleted and newly
created.
required: false
default: "no"
choices: [ "yes", "no" ]
virtualenv_command:
version_added: "1.1"
description:
- The command or a pathname to the command to create the virtual
environment with. For example C(pyvenv), C(virtualenv),
C(virtualenv2), C(~/bin/virtualenv), C(/usr/local/bin/virtualenv).
required: false
default: virtualenv
virtualenv_python:
version_added: "2.0"
description:
- The Python executable used for creating the virtual environment.
For example C(python3.5), C(python2.7). When not specified, the
Python version used to run the ansible module is used. This parameter
should not be used when C(virtualenv_command) is using C(pyvenv) or
the C(-m venv) module.
required: false
default: null
state:
description:
- The state of module
- The 'forcereinstall' option is only available in Ansible 2.1 and above.
required: false
default: present
choices: [ "present", "absent", "latest", "forcereinstall" ]
extra_args:
description:
- Extra arguments passed to pip.
required: false
default: null
version_added: "1.0"
editable:
description:
- Pass the editable flag.
required: false
default: false
version_added: "2.0"
chdir:
description:
- cd into this directory before running the command
version_added: "1.3"
required: false
default: null
executable:
description:
- The explicit executable or a pathname to the executable to be used to
run pip for a specific version of Python installed in the system. For
example C(pip-3.3), if there are both Python 2.7 and 3.3 installations
in the system and you want to run pip for the Python 3.3 installation.
It cannot be specified together with the 'virtualenv' parameter (added in 2.1).
By default, it will take the appropriate version for the python interpreter
used by ansible, e.g. pip3 on python 3, and pip2 or pip on python 2.
version_added: "1.3"
required: false
default: null
umask:
description:
- The system umask to apply before installing the pip package. This is
useful, for example, when installing on systems that have a very
restrictive umask by default (e.g., 0077) and you want to pip install
packages which are to be used by all users. Note that this requires you
to specify desired umask mode in octal, with a leading 0 (e.g., 0077).
version_added: "2.1"
required: false
default: null
notes:
- Please note that virtualenv (U(http://www.virtualenv.org/)) must be
installed on the remote host if the virtualenv parameter is specified and
the virtualenv needs to be created.
- By default, this module will use the appropriate version of pip for the
interpreter used by ansible (e.g. pip3 when using python 3, pip2 otherwise)
requirements: [ "virtualenv", "pip" ]
author: "Matt Wright (@mattupstate)"
'''
EXAMPLES = '''
# Install (Bottle) python package.
- pip:
name: bottle
# Install (Bottle) python package on version 0.11.
- pip:
name: bottle
version: 0.11
# Install (MyApp) using one of the remote protocols (bzr+,hg+,git+,svn+). You do not have to supply '-e' option in extra_args.
- pip:
name: svn+http://myrepo/svn/MyApp#egg=MyApp
# Install MyApp using one of the remote protocols (bzr+,hg+,git+).
- pip:
name: git+http://myrepo/app/MyApp
# Install (MyApp) from local tarball
- pip:
name: file:///path/to/MyApp.tar.gz
# Install (Bottle) into the specified (virtualenv), inheriting none of the globally installed modules
- pip:
name: bottle
virtualenv: /my_app/venv
# Install (Bottle) into the specified (virtualenv), inheriting globally installed modules
- pip:
name: bottle
virtualenv: /my_app/venv
virtualenv_site_packages: yes
# Install (Bottle) into the specified (virtualenv), using Python 2.7
- pip:
name: bottle
virtualenv: /my_app/venv
virtualenv_command: virtualenv-2.7
# Install (Bottle) within a user home directory.
- pip:
name: bottle
extra_args: --user
# Install specified python requirements.
- pip:
requirements: /my_app/requirements.txt
# Install specified python requirements in indicated (virtualenv).
- pip:
requirements: /my_app/requirements.txt
virtualenv: /my_app/venv
# Install specified python requirements and custom Index URL.
- pip:
requirements: /my_app/requirements.txt
extra_args: -i https://example.com/pypi/simple
# Install (Bottle) for Python 3.3 specifically,using the 'pip-3.3' executable.
- pip:
name: bottle
executable: pip-3.3
# Install (Bottle), forcing reinstallation if it's already installed
- pip:
name: bottle
state: forcereinstall
# Install (Bottle) while ensuring the umask is 0022 (to ensure other users can use it)
- pip:
name: bottle
umask: 0022
become: True
'''
import os
import re
import sys
import tempfile
from ansible.module_utils.basic import AnsibleModule, is_executable
from ansible.module_utils._text import to_native
from ansible.module_utils.six import PY3
#: Python one-liners to be run at the command line that will determine the
# installed version for these special libraries. These are libraries that
# don't end up in the output of pip freeze.
_SPECIAL_PACKAGE_CHECKERS = {'setuptools': 'import setuptools; print(setuptools.__version__)',
'pip': 'import pkg_resources; print(pkg_resources.get_distribution("pip").version)'}
def _get_cmd_options(module, cmd):
thiscmd = cmd + " --help"
rc, stdout, stderr = module.run_command(thiscmd)
if rc != 0:
module.fail_json(msg="Could not get output from %s: %s" % (thiscmd, stdout + stderr))
words = stdout.strip().split()
cmd_options = [x for x in words if x.startswith('--')]
return cmd_options
def _get_full_name(name, version=None):
if version is None:
resp = name
else:
resp = name + '==' + version
return resp
def _get_packages(module, pip, chdir):
'''Return results of pip command to get packages.'''
# Try 'pip list' command first.
command = '%s list --format=freeze' % pip
lang_env = dict(LANG='C', LC_ALL='C', LC_MESSAGES='C')
rc, out, err = module.run_command(command, cwd=chdir, environ_update=lang_env)
# If there was an error (pip version too old) then use 'pip freeze'.
if rc != 0:
command = '%s freeze' % pip
rc, out, err = module.run_command(command, cwd=chdir)
if rc != 0:
_fail(module, command, out, err)
return (command, out, err)
def _is_present(name, version, installed_pkgs, pkg_command):
'''Return whether or not package is installed.'''
for pkg in installed_pkgs:
if '==' in pkg:
pkg_name, pkg_version = pkg.split('==')
else:
continue
if pkg_name == name and (version is None or version == pkg_version):
return True
return False
def _get_pip(module, env=None, executable=None):
# Older pip only installed under the "/usr/bin/pip" name. Many Linux
# distros install it there.
# By default, we try to use pip required for the current python
# interpreter, so people can use pip to install modules dependencies
candidate_pip_basenames = ('pip2', 'pip')
if PY3:
# pip under python3 installs the "/usr/bin/pip3" name
candidate_pip_basenames = ('pip3',)
pip = None
if executable is not None:
if os.path.isabs(executable):
pip = executable
else:
# If you define your own executable that executable should be the only candidate.
# As noted in the docs, executable doesn't work with virtualenvs.
candidate_pip_basenames = (executable,)
if pip is None:
if env is None:
opt_dirs = []
for basename in candidate_pip_basenames:
pip = module.get_bin_path(basename, False, opt_dirs)
if pip is not None:
break
else:
# For-else: Means that we did not break out of the loop
# (therefore, that pip was not found)
module.fail_json(msg='Unable to find any of %s to use. pip'
' needs to be installed.' % ', '.join(candidate_pip_basenames))
else:
# If we're using a virtualenv we must use the pip from the
# virtualenv
venv_dir = os.path.join(env, 'bin')
candidate_pip_basenames = (candidate_pip_basenames[0], 'pip')
for basename in candidate_pip_basenames:
candidate = os.path.join(venv_dir, basename)
if os.path.exists(candidate) and is_executable(candidate):
pip = candidate
break
else:
# For-else: Means that we did not break out of the loop
# (therefore, that pip was not found)
module.fail_json(msg='Unable to find pip in the virtualenv,'
' %s, under any of these names: %s. Make sure pip is'
' present in the virtualenv.' % (env,
', '.join(candidate_pip_basenames)))
return pip
def _fail(module, cmd, out, err):
msg = ''
if out:
msg += "stdout: %s" % (out, )
if err:
msg += "\n:stderr: %s" % (err, )
module.fail_json(cmd=cmd, msg=msg)
def _get_package_info(module, package, env=None):
"""This is only needed for special packages which do not show up in pip freeze
pip and setuptools fall into this category.
:returns: a string containing the version number if the package is
installed. None if the package is not installed.
"""
if env:
opt_dirs = ['%s/bin' % env]
else:
opt_dirs = []
python_bin = module.get_bin_path('python', False, opt_dirs)
if python_bin is None:
formatted_dep = None
else:
rc, out, err = module.run_command([python_bin, '-c', _SPECIAL_PACKAGE_CHECKERS[package]])
if rc:
formatted_dep = None
else:
formatted_dep = '%s==%s' % (package, out.strip())
return formatted_dep
def main():
state_map = dict(
present='install',
absent='uninstall -y',
latest='install -U',
forcereinstall='install -U --force-reinstall',
)
module = AnsibleModule(
argument_spec=dict(
state=dict(default='present', choices=state_map.keys()),
name=dict(type='list'),
version=dict(type='str'),
requirements=dict(type='str'),
virtualenv=dict(type='path'),
virtualenv_site_packages=dict(default=False, type='bool'),
virtualenv_command=dict(default='virtualenv', type='path'),
virtualenv_python=dict(type='str'),
use_mirrors=dict(default=True, type='bool'),
extra_args=dict(type='str'),
editable=dict(default=False, type='bool'),
chdir=dict(type='path'),
executable=dict(type='path'),
umask=dict(type='str'),
),
required_one_of=[['name', 'requirements']],
mutually_exclusive=[['name', 'requirements'], ['executable', 'virtualenv']],
supports_check_mode=True
)
state = module.params['state']
name = module.params['name']
version = module.params['version']
requirements = module.params['requirements']
extra_args = module.params['extra_args']
virtualenv_python = module.params['virtualenv_python']
chdir = module.params['chdir']
umask = module.params['umask']
if umask and not isinstance(umask, int):
try:
umask = int(umask, 8)
except Exception:
module.fail_json(msg="umask must be an octal integer",
details=to_native(sys.exc_info()[1]))
old_umask = None
if umask is not None:
old_umask = os.umask(umask)
try:
if state == 'latest' and version is not None:
module.fail_json(msg='version is incompatible with state=latest')
if chdir is None:
# this is done to avoid permissions issues with privilege escalation and virtualenvs
chdir = tempfile.gettempdir()
err = ''
out = ''
env = module.params['virtualenv']
if env:
if not os.path.exists(os.path.join(env, 'bin', 'activate')):
if module.check_mode:
module.exit_json(changed=True)
cmd = module.params['virtualenv_command']
if os.path.basename(cmd) == cmd:
cmd = module.get_bin_path(cmd, True)
if module.params['virtualenv_site_packages']:
cmd += ' --system-site-packages'
else:
cmd_opts = _get_cmd_options(module, cmd)
if '--no-site-packages' in cmd_opts:
cmd += ' --no-site-packages'
# -p is a virtualenv option, not compatible with pyenv or venv
# this if validates if the command being used is not any of them
if not any(ex in module.params['virtualenv_command'] for ex in ('pyvenv', '-m venv')):
if virtualenv_python:
cmd += ' -p%s' % virtualenv_python
elif PY3:
# Ubuntu currently has a patch making virtualenv always
# try to use python2. Since Ubuntu16 works without
# python2 installed, this is a problem. This code mimics
# the upstream behaviour of using the python which invoked
# virtualenv to determine which python is used inside of
# the virtualenv (when none are specified).
cmd += ' -p%s' % sys.executable
# if venv or pyvenv are used and virtualenv_python is defined, then
# virtualenv_python is ignored, this has to be acknowledged
elif module.params['virtualenv_python']:
module.fail_json(
msg='virtualenv_python should not be used when'
' using the venv module or pyvenv as virtualenv_command'
)
cmd = "%s %s" % (cmd, env)
rc, out_venv, err_venv = module.run_command(cmd, cwd=chdir)
out += out_venv
err += err_venv
if rc != 0:
_fail(module, cmd, out, err)
pip = _get_pip(module, env, module.params['executable'])
cmd = '%s %s' % (pip, state_map[state])
# If there's a virtualenv we want things we install to be able to use other
# installations that exist as binaries within this virtualenv. Example: we
# install cython and then gevent -- gevent needs to use the cython binary,
# not just a python package that will be found by calling the right python.
# So if there's a virtualenv, we add that bin/ to the beginning of the PATH
# in run_command by setting path_prefix here.
path_prefix = None
if env:
path_prefix = "/".join(pip.split('/')[:-1])
# Automatically apply -e option to extra_args when source is a VCS url. VCS
# includes those beginning with svn+, git+, hg+ or bzr+
has_vcs = False
if name:
for pkg in name:
if bool(pkg and re.match(r'(svn|git|hg|bzr)\+', pkg)):
has_vcs = True
break
if module.params['editable']:
args_list = [] # used if extra_args is not used at all
if extra_args:
args_list = extra_args.split(' ')
if '-e' not in args_list:
args_list.append('-e')
# Ok, we will reconstruct the option string
extra_args = ' '.join(args_list)
if extra_args:
cmd += ' %s' % extra_args
if name:
for pkg in name:
cmd += ' %s' % _get_full_name(pkg, version)
else:
if requirements:
cmd += ' -r %s' % requirements
if module.check_mode:
if extra_args or requirements or state == 'latest' or not name:
module.exit_json(changed=True)
pkg_cmd, out_pip, err_pip = _get_packages(module, pip, chdir)
out += out_pip
err += err_pip
changed = False
if name:
pkg_list = [p for p in out.split('\n') if not p.startswith('You are using') and not p.startswith('You should consider') and p]
if pkg_cmd.endswith(' freeze') and ('pip' in name or 'setuptools' in name):
# Older versions of pip (pre-1.3) do not have pip list.
# pip freeze does not list setuptools or pip in its output
# So we need to get those via a specialcase
for pkg in ('setuptools', 'pip'):
if pkg in name:
formatted_dep = _get_package_info(module, pkg, env)
if formatted_dep is not None:
pkg_list.append(formatted_dep)
out += '%s\n' % formatted_dep
for pkg in name:
is_present = _is_present(pkg, version, pkg_list, pkg_cmd)
if (state == 'present' and not is_present) or (state == 'absent' and is_present):
changed = True
break
module.exit_json(changed=changed, cmd=pkg_cmd, stdout=out, stderr=err)
out_freeze_before = None
if requirements or has_vcs:
_, out_freeze_before, _ = _get_packages(module, pip, chdir)
rc, out_pip, err_pip = module.run_command(cmd, path_prefix=path_prefix, cwd=chdir)
out += out_pip
err += err_pip
if rc == 1 and state == 'absent' and \
('not installed' in out_pip or 'not installed' in err_pip):
pass # rc is 1 when attempting to uninstall non-installed package
elif rc != 0:
_fail(module, cmd, out, err)
if state == 'absent':
changed = 'Successfully uninstalled' in out_pip
else:
if out_freeze_before is None:
changed = 'Successfully installed' in out_pip
else:
_, out_freeze_after, _ = _get_packages(module, pip, chdir)
changed = out_freeze_before != out_freeze_after
module.exit_json(changed=changed, cmd=cmd, name=name, version=version,
state=state, requirements=requirements, virtualenv=env,
stdout=out, stderr=err)
finally:
if old_umask is not None:
os.umask(old_umask)
if __name__ == '__main__':
main()
|
gpl-3.0
|
shivam1111/odoo
|
addons/l10n_be/wizard/__init__.py
|
438
|
1145
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import l10n_be_partner_vat_listing
import l10n_be_vat_intra
import l10n_be_account_vat_declaration
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
agpl-3.0
|
ychen820/microblog
|
y/google-cloud-sdk/platform/google_appengine/lib/django-1.5/django/template/response.py
|
221
|
6214
|
from django.http import HttpResponse
from django.template import loader, Context, RequestContext
from django.utils import six
class ContentNotRenderedError(Exception):
pass
class SimpleTemplateResponse(HttpResponse):
rendering_attrs = ['template_name', 'context_data', '_post_render_callbacks']
def __init__(self, template, context=None, content_type=None, status=None,
mimetype=None):
# It would seem obvious to call these next two members 'template' and
# 'context', but those names are reserved as part of the test Client
# API. To avoid the name collision, we use different names.
self.template_name = template
self.context_data = context
self._post_render_callbacks = []
# content argument doesn't make sense here because it will be replaced
# with rendered template so we always pass empty string in order to
# prevent errors and provide shorter signature.
super(SimpleTemplateResponse, self).__init__('', content_type, status,
mimetype)
# _is_rendered tracks whether the template and context has been baked
# into a final response.
# Super __init__ doesn't know any better than to set self.content to
# the empty string we just gave it, which wrongly sets _is_rendered
# True, so we initialize it to False after the call to super __init__.
self._is_rendered = False
def __getstate__(self):
"""Pickling support function.
Ensures that the object can't be pickled before it has been
rendered, and that the pickled state only includes rendered
data, not the data used to construct the response.
"""
obj_dict = super(SimpleTemplateResponse, self).__getstate__()
if not self._is_rendered:
raise ContentNotRenderedError('The response content must be '
'rendered before it can be pickled.')
for attr in self.rendering_attrs:
if attr in obj_dict:
del obj_dict[attr]
return obj_dict
def resolve_template(self, template):
"Accepts a template object, path-to-template or list of paths"
if isinstance(template, (list, tuple)):
return loader.select_template(template)
elif isinstance(template, six.string_types):
return loader.get_template(template)
else:
return template
def resolve_context(self, context):
"""Converts context data into a full Context object
(assuming it isn't already a Context object).
"""
if isinstance(context, Context):
return context
else:
return Context(context)
@property
def rendered_content(self):
"""Returns the freshly rendered content for the template and context
described by the TemplateResponse.
This *does not* set the final content of the response. To set the
response content, you must either call render(), or set the
content explicitly using the value of this property.
"""
template = self.resolve_template(self.template_name)
context = self.resolve_context(self.context_data)
content = template.render(context)
return content
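# Illustrative usage (not part of Django itself); assumes a template named
# 'hello.html' can be found by the configured template loaders:
#   response = SimpleTemplateResponse('hello.html', {'name': 'world'})
#   html = response.rendered_content   # renders now, but response.content stays unset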
def add_post_render_callback(self, callback):
"""Adds a new post-rendering callback.
If the response has already been rendered,
invoke the callback immediately.
"""
if self._is_rendered:
callback(self)
else:
self._post_render_callbacks.append(callback)
def render(self):
"""Renders (thereby finalizing) the content of the response.
If the content has already been rendered, this is a no-op.
Returns the baked response instance.
"""
retval = self
if not self._is_rendered:
self.content = self.rendered_content
for post_callback in self._post_render_callbacks:
newretval = post_callback(retval)
if newretval is not None:
retval = newretval
return retval
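# Illustrative sketch (not part of Django) of the render/callback flow,
# again assuming 'hello.html' is a resolvable template:
#   def note_rendered(resp):
#       resp['X-Rendered'] = 'yes'    # callbacks may inspect or modify the response
#   response = SimpleTemplateResponse('hello.html', {'name': 'world'})
#   response.add_post_render_callback(note_rendered)
#   response.render()   # bakes the content and fires the callback
#   response.render()   # second call is a no-op: already rendered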
@property
def is_rendered(self):
return self._is_rendered
def __iter__(self):
if not self._is_rendered:
raise ContentNotRenderedError('The response content must be '
'rendered before it can be iterated over.')
return super(SimpleTemplateResponse, self).__iter__()
@property
def content(self):
if not self._is_rendered:
raise ContentNotRenderedError('The response content must be '
'rendered before it can be accessed.')
return super(SimpleTemplateResponse, self).content
@content.setter
def content(self, value):
"""Sets the content for the response
"""
HttpResponse.content.fset(self, value)
self._is_rendered = True
class TemplateResponse(SimpleTemplateResponse):
rendering_attrs = SimpleTemplateResponse.rendering_attrs + \
['_request', '_current_app']
def __init__(self, request, template, context=None, content_type=None,
status=None, mimetype=None, current_app=None):
# self.request gets over-written by django.test.client.Client - and
# unlike context_data and template_name the _request should not
# be considered part of the public API.
self._request = request
# As a convenience we'll allow callers to provide current_app here so they
# do not need to construct the RequestContext themselves.
self._current_app = current_app
super(TemplateResponse, self).__init__(
template, context, content_type, status, mimetype)
def resolve_context(self, context):
"""Convert context data into a full RequestContext object
(assuming it isn't already a Context object).
"""
if isinstance(context, Context):
return context
return RequestContext(self._request, context, current_app=self._current_app)
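# Illustrative view-level usage of TemplateResponse (not part of this file);
# the view name, template name and context are hypothetical:
#   def home(request):
#       return TemplateResponse(request, 'home.html', {'user': request.user})
# Because rendering is deferred, decorators and template-response middleware
# can still adjust template_name or context_data before render() is called.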
|
bsd-3-clause
|