repo_name (stringlengths 5-100) | path (stringlengths 4-375) | copies (stringclasses, 991 values) | size (stringlengths 4-7) | content (stringlengths 666-1M) | license (stringclasses, 15 values)
---|---|---|---|---|---
andim27/magiccamp | tests/regressiontests/forms/localflavor/ro.py | 30 | 5828 | # -*- coding: utf-8 -*-
# Tests for the contrib/localflavor/ RO form fields.
tests = r"""
>>> from django.contrib.localflavor.ro.forms import *
##ROCIFField ################################################################
>>> f = ROCIFField()
>>> f.clean('21694681')
u'21694681'
>>> f.clean('RO21694681')
u'21694681'
>>> f.clean('21694680')
Traceback (most recent call last):
...
ValidationError: [u'Enter a valid CIF']
>>> f.clean('21694680000')
Traceback (most recent call last):
...
ValidationError: [u'Ensure this value has at most 10 characters (it has 11).']
>>> f.clean('0')
Traceback (most recent call last):
...
ValidationError: [u'Ensure this value has at least 2 characters (it has 1).']
>>> f.clean(None)
Traceback (most recent call last):
...
ValidationError: [u'This field is required.']
>>> f.clean('')
Traceback (most recent call last):
...
ValidationError: [u'This field is required.']
##ROCNPField #################################################################
>>> f = ROCNPField()
>>> f.clean('1981211204489')
u'1981211204489'
>>> f.clean('1981211204487')
Traceback (most recent call last):
...
ValidationError: [u'Enter a valid CNP']
>>> f.clean('1981232204489')
Traceback (most recent call last):
...
ValidationError: [u'Enter a valid CNP']
>>> f.clean('9981211204489')
Traceback (most recent call last):
...
ValidationError: [u'Enter a valid CNP']
>>> f.clean('9981211209')
Traceback (most recent call last):
...
ValidationError: [u'Ensure this value has at least 13 characters (it has 10).']
>>> f.clean('19812112044891')
Traceback (most recent call last):
...
ValidationError: [u'Ensure this value has at most 13 characters (it has 14).']
>>> f.clean('')
Traceback (most recent call last):
...
ValidationError: [u'This field is required.']
##ROCountyField ##############################################################
>>> f = ROCountyField()
>>> f.clean('CJ')
'CJ'
>>> f.clean('cj')
'CJ'
>>> f.clean('Argeş')
'AG'
>>> f.clean('argeş')
'AG'
>>> f.clean('Arges')
Traceback (most recent call last):
...
ValidationError: [u'Enter a Romanian county code or name.']
>>> f.clean('')
Traceback (most recent call last):
...
ValidationError: [u'This field is required.']
##ROCountySelect #############################################################
>>> f = ROCountySelect()
>>> f.render('county','CJ')
u'<select name="county">\n<option value="AB">Alba</option>\n<option value="AR">Arad</option>\n<option value="AG">Arge\u015f</option>\n<option value="BC">Bac\u0103u</option>\n<option value="BH">Bihor</option>\n<option value="BN">Bistri\u0163a-N\u0103s\u0103ud</option>\n<option value="BT">Boto\u015fani</option>\n<option value="BV">Bra\u015fov</option>\n<option value="BR">Br\u0103ila</option>\n<option value="B">Bucure\u015fti</option>\n<option value="BZ">Buz\u0103u</option>\n<option value="CS">Cara\u015f-Severin</option>\n<option value="CL">C\u0103l\u0103ra\u015fi</option>\n<option value="CJ" selected="selected">Cluj</option>\n<option value="CT">Constan\u0163a</option>\n<option value="CV">Covasna</option>\n<option value="DB">D\xe2mbovi\u0163a</option>\n<option value="DJ">Dolj</option>\n<option value="GL">Gala\u0163i</option>\n<option value="GR">Giurgiu</option>\n<option value="GJ">Gorj</option>\n<option value="HR">Harghita</option>\n<option value="HD">Hunedoara</option>\n<option value="IL">Ialomi\u0163a</option>\n<option value="IS">Ia\u015fi</option>\n<option value="IF">Ilfov</option>\n<option value="MM">Maramure\u015f</option>\n<option value="MH">Mehedin\u0163i</option>\n<option value="MS">Mure\u015f</option>\n<option value="NT">Neam\u0163</option>\n<option value="OT">Olt</option>\n<option value="PH">Prahova</option>\n<option value="SM">Satu Mare</option>\n<option value="SJ">S\u0103laj</option>\n<option value="SB">Sibiu</option>\n<option value="SV">Suceava</option>\n<option value="TR">Teleorman</option>\n<option value="TM">Timi\u015f</option>\n<option value="TL">Tulcea</option>\n<option value="VS">Vaslui</option>\n<option value="VL">V\xe2lcea</option>\n<option value="VN">Vrancea</option>\n</select>'
##ROIBANField #################################################################
>>> f = ROIBANField()
>>> f.clean('RO56RZBR0000060003291177')
u'RO56RZBR0000060003291177'
>>> f.clean('RO56RZBR0000060003291176')
Traceback (most recent call last):
...
ValidationError: [u'Enter a valid IBAN in ROXX-XXXX-XXXX-XXXX-XXXX-XXXX format']
>>> f.clean('RO56-RZBR-0000-0600-0329-1177')
u'RO56RZBR0000060003291177'
>>> f.clean('AT61 1904 3002 3457 3201')
Traceback (most recent call last):
...
ValidationError: [u'Enter a valid IBAN in ROXX-XXXX-XXXX-XXXX-XXXX-XXXX format']
>>> f.clean('RO56RZBR000006000329117')
Traceback (most recent call last):
...
ValidationError: [u'Ensure this value has at least 24 characters (it has 23).']
>>> f.clean('')
Traceback (most recent call last):
...
ValidationError: [u'This field is required.']
##ROPhoneNumberField ##########################################################
>>> f = ROPhoneNumberField()
>>> f.clean('0264485936')
u'0264485936'
>>> f.clean('(0264)-485936')
u'0264485936'
>>> f.clean('02644859368')
Traceback (most recent call last):
...
ValidationError: [u'Phone numbers must be in XXXX-XXXXXX format.']
>>> f.clean('026448593')
Traceback (most recent call last):
...
ValidationError: [u'Ensure this value has at least 10 characters (it has 9).']
>>> f.clean(None)
Traceback (most recent call last):
...
ValidationError: [u'This field is required.']
##ROPostalCodeField ###########################################################
>>> f = ROPostalCodeField()
>>> f.clean('400473')
u'400473'
>>> f.clean('40047')
Traceback (most recent call last):
...
ValidationError: [u'Ensure this value has at least 6 characters (it has 5).']
>>> f.clean('4004731')
Traceback (most recent call last):
...
ValidationError: [u'Ensure this value has at most 6 characters (it has 7).']
>>> f.clean('')
Traceback (most recent call last):
...
ValidationError: [u'This field is required.']
"""
| bsd-3-clause |
ClimbsRocks/scikit-learn | examples/cluster/plot_color_quantization.py | 61 | 3444 | # -*- coding: utf-8 -*-
"""
==================================
Color Quantization using K-Means
==================================
Performs a pixel-wise Vector Quantization (VQ) of an image of the summer palace
(China), reducing the number of colors required to show the image from 96,615
unique colors to 64, while preserving the overall appearance quality.
In this example, pixels are represented in a 3D-space and K-means is used to
find 64 color clusters. In the image processing literature, the codebook
obtained from K-means (the cluster centers) is called the color palette. Using
a single byte, up to 256 colors can be addressed, whereas an RGB encoding
requires 3 bytes per pixel. The GIF file format, for example, uses such a
palette.
For comparison, a quantized image using a random codebook (colors picked up
randomly) is also shown.
"""
# Authors: Robert Layton <[email protected]>
# Olivier Grisel <[email protected]>
# Mathieu Blondel <[email protected]>
#
# License: BSD 3 clause
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.cluster import KMeans
from sklearn.metrics import pairwise_distances_argmin
from sklearn.datasets import load_sample_image
from sklearn.utils import shuffle
from time import time
n_colors = 64
# Load the Summer Palace photo
china = load_sample_image("china.jpg")
# Convert to floats instead of the default 8-bit integer coding. Dividing by
# 255 is important so that plt.imshow works well on float data (it needs to
# be in the range [0-1])
china = np.array(china, dtype=np.float64) / 255
# Load Image and transform to a 2D numpy array.
w, h, d = original_shape = tuple(china.shape)
assert d == 3
image_array = np.reshape(china, (w * h, d))
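# Back-of-the-envelope storage note (added for illustration, not part of the
# original example): stored raw, the image needs w * h * 3 bytes (one byte per
# RGB channel). After quantization to n_colors = 64, each pixel needs only a
# single palette index plus a shared 64 x 3 codebook, i.e. roughly
# w * h + 64 * 3 bytes, about a 3x reduction before any entropy coding.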
print("Fitting model on a small sub-sample of the data")
t0 = time()
image_array_sample = shuffle(image_array, random_state=0)[:1000]
kmeans = KMeans(n_clusters=n_colors, random_state=0).fit(image_array_sample)
print("done in %0.3fs." % (time() - t0))
# Get labels for all points
print("Predicting color indices on the full image (k-means)")
t0 = time()
labels = kmeans.predict(image_array)
print("done in %0.3fs." % (time() - t0))
codebook_random = shuffle(image_array, random_state=0)[:n_colors + 1]
print("Predicting color indices on the full image (random)")
t0 = time()
labels_random = pairwise_distances_argmin(codebook_random,
image_array,
axis=0)
print("done in %0.3fs." % (time() - t0))
def recreate_image(codebook, labels, w, h):
"""Recreate the (compressed) image from the code book & labels"""
d = codebook.shape[1]
image = np.zeros((w, h, d))
label_idx = 0
for i in range(w):
for j in range(h):
image[i][j] = codebook[labels[label_idx]]
label_idx += 1
return image
# Display all results, alongside original image
plt.figure(1)
plt.clf()
ax = plt.axes([0, 0, 1, 1])
plt.axis('off')
plt.title('Original image (96,615 colors)')
plt.imshow(china)
plt.figure(2)
plt.clf()
ax = plt.axes([0, 0, 1, 1])
plt.axis('off')
plt.title('Quantized image (64 colors, K-Means)')
plt.imshow(recreate_image(kmeans.cluster_centers_, labels, w, h))
plt.figure(3)
plt.clf()
ax = plt.axes([0, 0, 1, 1])
plt.axis('off')
plt.title('Quantized image (64 colors, Random)')
plt.imshow(recreate_image(codebook_random, labels_random, w, h))
plt.show()
| bsd-3-clause |
GinnyN/towerofdimensions-django | build/lib/django/contrib/flatpages/models.py | 410 | 1134 | from django.db import models
from django.contrib.sites.models import Site
from django.utils.translation import ugettext_lazy as _
class FlatPage(models.Model):
url = models.CharField(_('URL'), max_length=100, db_index=True)
title = models.CharField(_('title'), max_length=200)
content = models.TextField(_('content'), blank=True)
enable_comments = models.BooleanField(_('enable comments'))
template_name = models.CharField(_('template name'), max_length=70, blank=True,
help_text=_("Example: 'flatpages/contact_page.html'. If this isn't provided, the system will use 'flatpages/default.html'."))
registration_required = models.BooleanField(_('registration required'), help_text=_("If this is checked, only logged-in users will be able to view the page."))
sites = models.ManyToManyField(Site)
class Meta:
db_table = 'django_flatpage'
verbose_name = _('flat page')
verbose_name_plural = _('flat pages')
ordering = ('url',)
def __unicode__(self):
return u"%s -- %s" % (self.url, self.title)
def get_absolute_url(self):
return self.url
| bsd-3-clause |
shogun-toolbox/shogun | examples/undocumented/python/distance_manhattanword.py | 2 | 1105 | #!/usr/bin/env python
import shogun as sg
traindna = '../data/fm_train_dna.dat'
testdna = '../data/fm_test_dna.dat'
parameter_list = [[traindna,testdna,3,0,False],[traindna,testdna,4,0,False]]
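# Outline (comment added for clarity): the function below turns DNA strings
# into order-k word features, sorts the words with a SortWordString
# preprocessor fitted on the training data only, and returns two
# ManhattanWordDistance matrices: train-vs-train and train-vs-test.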
def distance_manhattenword (train_fname=traindna,test_fname=testdna,order=3,gap=0,reverse=False):
charfeat=sg.create_string_features(sg.read_csv(train_fname), sg.DNA)
feats_train=sg.create_string_features(charfeat, order-1, order, gap, reverse)
preproc = sg.create_transformer("SortWordString")
preproc.fit(feats_train)
feats_train = preproc.transform(feats_train)
charfeat=sg.create_string_features(sg.read_csv(test_fname), sg.DNA)
feats_test=sg.create_string_features(charfeat, order-1, order, gap, reverse)
feats_test = preproc.transform(feats_test)
distance = sg.create_distance('ManhattanWordDistance')
distance.init(feats_train, feats_train)
dm_train=distance.get_distance_matrix()
distance.init(feats_train, feats_test)
dm_test=distance.get_distance_matrix()
return dm_train,dm_test
if __name__=='__main__':
print('ManhattanWordDistance')
distance_manhattenword(*parameter_list[0])
| bsd-3-clause |
cbertinato/pandas | pandas/_config/localization.py | 1 | 4655 | """
Helpers for configuring locale settings.
Name `localization` is chosen to avoid overlap with builtin `locale` module.
"""
from contextlib import contextmanager
import locale
import re
import subprocess
from pandas._config.config import options
@contextmanager
def set_locale(new_locale, lc_var=locale.LC_ALL):
"""
Context manager for temporarily setting a locale.
Parameters
----------
new_locale : str or tuple
A string of the form <language_country>.<encoding>. For example to set
the current locale to US English with a UTF8 encoding, you would pass
"en_US.UTF-8".
lc_var : int, default `locale.LC_ALL`
The category of the locale being set.
Notes
-----
This is useful when you want to run a particular block of code under a
particular locale, without globally setting the locale. This probably isn't
thread-safe.
"""
current_locale = locale.getlocale()
try:
locale.setlocale(lc_var, new_locale)
normalized_locale = locale.getlocale()
if all(x is not None for x in normalized_locale):
yield '.'.join(normalized_locale)
else:
yield new_locale
finally:
locale.setlocale(lc_var, current_locale)
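# Illustrative usage (not part of pandas itself; assumes a German locale is
# installed on the system):
#
#     with set_locale("de_DE.UTF-8", locale.LC_NUMERIC):
#         locale.format_string("%.2f", 1234.56)   # -> '1234,56'
#     # the previous LC_NUMERIC setting is restored on exit, even on error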
def can_set_locale(lc, lc_var=locale.LC_ALL):
"""
Check to see if we can set a locale, and subsequently get the locale,
without raising an Exception.
Parameters
----------
lc : str
The locale to attempt to set.
lc_var : int, default `locale.LC_ALL`
The category of the locale being set.
Returns
-------
is_valid : bool
Whether the passed locale can be set
"""
try:
with set_locale(lc, lc_var=lc_var):
pass
except (ValueError, locale.Error):
# horrible name for an Exception subclass
return False
else:
return True
def _valid_locales(locales, normalize):
"""
Return a list of normalized locales that do not throw an ``Exception``
when set.
Parameters
----------
locales : str
A string where each locale is separated by a newline.
normalize : bool
Whether to call ``locale.normalize`` on each locale.
Returns
-------
valid_locales : list
A list of valid locales.
"""
if normalize:
normalizer = lambda x: locale.normalize(x.strip())
else:
normalizer = lambda x: x.strip()
return list(filter(can_set_locale, map(normalizer, locales)))
def _default_locale_getter():
try:
raw_locales = subprocess.check_output(['locale -a'], shell=True)
except subprocess.CalledProcessError as e:
raise type(e)("{exception}, the 'locale -a' command cannot be found "
"on your system".format(exception=e))
return raw_locales
def get_locales(prefix=None, normalize=True,
locale_getter=_default_locale_getter):
"""
Get all the locales that are available on the system.
Parameters
----------
prefix : str
If not ``None`` then return only those locales with the prefix
provided. For example to get all English language locales (those that
start with ``"en"``), pass ``prefix="en"``.
normalize : bool
Call ``locale.normalize`` on the resulting list of available locales.
If ``True``, only locales that can be set without throwing an
``Exception`` are returned.
locale_getter : callable
The function to use to retrieve the current locales. This should return
a string with each locale separated by a newline character.
Returns
-------
locales : list of strings
A list of locale strings that can be set with ``locale.setlocale()``.
For example::
locale.setlocale(locale.LC_ALL, locale_string)
On error will return None (no locale available, e.g. Windows)
"""
try:
raw_locales = locale_getter()
except Exception:
return None
try:
# raw_locales is "\n" separated list of locales
# it may contain non-decodable parts, so split
# extract what we can and then rejoin.
raw_locales = raw_locales.split(b'\n')
out_locales = []
for x in raw_locales:
out_locales.append(str(
x, encoding=options.display.encoding))
except TypeError:
pass
if prefix is None:
return _valid_locales(out_locales, normalize)
pattern = re.compile('{prefix}.*'.format(prefix=prefix))
found = pattern.findall('\n'.join(out_locales))
return _valid_locales(found, normalize)
| bsd-3-clause |
Microsoft/hummingbird | tests/test_sklearn_decomposition.py | 1 | 5758 | """
Tests sklearn matrix decomposition converters
"""
import unittest
import warnings
import sys
from distutils.version import LooseVersion
import numpy as np
import torch
import sklearn
from sklearn.decomposition import FastICA, KernelPCA, PCA, TruncatedSVD
from sklearn.model_selection import train_test_split
from sklearn.datasets import load_digits
import hummingbird.ml
class TestSklearnMatrixDecomposition(unittest.TestCase):
def _fit_model_pca(self, model, precompute=False):
data = load_digits()
X_train, X_test, y_train, y_test = train_test_split(data.data, data.target, test_size=0.2, random_state=42)
X_test = X_test.astype("float32")
if precompute:
# For precompute we use a linear kernel
model.fit(np.dot(X_train, X_train.T))
X_test = np.dot(X_test, X_train.T)
else:
model.fit(X_train)
torch_model = hummingbird.ml.convert(model, "torch")
self.assertTrue(torch_model is not None)
np.testing.assert_allclose(model.transform(X_test), torch_model.transform(X_test), rtol=1e-6, atol=2 * 1e-5)
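# (Added note) Every test below follows the same pattern: fit a scikit-learn
# decomposition model, convert it with hummingbird.ml.convert(model, "torch"),
# and assert that transform() outputs of the original and converted models
# agree within the tolerances above.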
# PCA n_components none
def test_pca_converter_none(self):
self._fit_model_pca(PCA(n_components=None))
# PCA n_components two
def test_pca_converter_two(self):
self._fit_model_pca(PCA(n_components=2))
# PCA n_components mle and whiten true
@unittest.skipIf(
LooseVersion(sklearn.__version__) < LooseVersion("0.23.2"),
reason="With Sklearn version < 0.23.2 returns ValueError: math domain error (https://github.com/scikit-learn/scikit-learn/issues/4441)",
)
def test_pca_converter_mle_whiten(self):
self._fit_model_pca(PCA(n_components="mle", whiten=True))
# PCA n_components mle and solver full
@unittest.skipIf(
LooseVersion(sklearn.__version__) < LooseVersion("0.23.2"),
reason="With Sklearn version < 0.23.2 returns ValueError: math domain error (https://github.com/scikit-learn/scikit-learn/issues/4441)",
)
def test_pca_converter_mle_full(self):
self._fit_model_pca(PCA(n_components="mle", svd_solver="full"))
# PCA n_components none and solver arpack
def test_pca_converter_none_arpack(self):
self._fit_model_pca(PCA(n_components=None, svd_solver="arpack"))
# PCA n_components none and solver randomized
def test_pca_converter_none_randomized(self):
self._fit_model_pca(PCA(n_components=None, svd_solver="randomized"))
# KernelPCA linear kernel
def test_kernel_pca_converter_linear(self):
self._fit_model_pca(KernelPCA(n_components=5, kernel="linear"))
# KernelPCA linear kernel with inverse transform
def test_kernel_pca_converter_linear_fit_inverse_transform(self):
self._fit_model_pca(KernelPCA(n_components=5, kernel="linear", fit_inverse_transform=True))
# KernelPCA poly kernel
def test_kernel_pca_converter_poly(self):
self._fit_model_pca(KernelPCA(n_components=5, kernel="poly", degree=2))
# KernelPCA poly kernel coef0
def test_kernel_pca_converter_poly_coef0(self):
self._fit_model_pca(KernelPCA(n_components=10, kernel="poly", degree=3, coef0=10))
# KernelPCA poly kernel with inverse transform
def test_kernel_pca_converter_poly_fit_inverse_transform(self):
self._fit_model_pca(KernelPCA(n_components=5, kernel="poly", degree=3, fit_inverse_transform=True))
# KernelPCA rbf kernel
def test_kernel_pca_converter_rbf(self):
self._fit_model_pca(KernelPCA(n_components=5, kernel="rbf"))
# KernelPCA sigmoid kernel
def test_kernel_pca_converter_sigmoid(self):
self._fit_model_pca(KernelPCA(n_components=5, kernel="sigmoid"))
# KernelPCA cosine kernel
def test_kernel_pca_converter_cosine(self):
self._fit_model_pca(KernelPCA(n_components=5, kernel="cosine"))
# KernelPCA precomputed kernel
def test_kernel_pca_converter_precomputed(self):
self._fit_model_pca(KernelPCA(n_components=5, kernel="precomputed"), precompute=True)
# TODO: Fails on macos-latest Python 3.8 due to a sklearn bug.
# FastICA converter with n_components none
# def test_fast_ica_converter_none(self):
# self._fit_model_pca(FastICA(n_components=None))
# FastICA converter with n_components 3
def test_fast_ica_converter_3(self):
self._fit_model_pca(FastICA(n_components=3))
# FastICA converter with n_components 3 whiten
def test_fast_ica_converter_3_whiten(self):
self._fit_model_pca(FastICA(n_components=3, whiten=True))
# FastICA converter with n_components 3 deflation algorithm
def test_fast_ica_converter_3_deflation(self):
self._fit_model_pca(FastICA(n_components=3, algorithm="deflation"))
# FastICA converter with n_components 3 fun exp
def test_fast_ica_converter_3_exp(self):
self._fit_model_pca(FastICA(n_components=3, fun="exp"))
# FastICA converter with n_components 3 fun cube
def test_fast_ica_converter_3_cube(self):
self._fit_model_pca(FastICA(n_components=3, fun="cube"))
# FastICA converter with n_components 3 fun custom
def test_fast_ica_converter_3_custom(self):
def my_g(x):
return x ** 3, (3 * x ** 2).mean(axis=-1)
self._fit_model_pca(FastICA(n_components=3, fun=my_g))
# TruncatedSVD converter with n_components 3
def test_truncated_svd_converter_3(self):
self._fit_model_pca(TruncatedSVD(n_components=3))
# TruncatedSVD converter with n_components 3 algorithm arpack
def test_truncated_svd_converter_3_arpack(self):
self._fit_model_pca(TruncatedSVD(n_components=3, algorithm="arpack"))
if __name__ == "__main__":
unittest.main()
| mit |
mkrupcale/ansible | lib/ansible/modules/cloud/amazon/ec2_asg.py | 13 | 38185 | #!/usr/bin/python
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'status': ['stableinterface'],
'supported_by': 'committer',
'version': '1.0'}
DOCUMENTATION = """
---
module: ec2_asg
short_description: Create or delete AWS Autoscaling Groups
description:
- Can create or delete AWS Autoscaling Groups
- Works with the ec2_lc module to manage Launch Configurations
version_added: "1.6"
author: "Gareth Rushgrove (@garethr)"
options:
state:
description:
- register or deregister the instance
required: false
choices: ['present', 'absent']
default: present
name:
description:
- Unique name for group to be created or deleted
required: true
load_balancers:
description:
- List of ELB names to use for the group
required: false
availability_zones:
description:
- List of availability zone names in which to create the group. Defaults to all the availability zones in the region if vpc_zone_identifier is not set.
required: false
launch_config_name:
description:
- Name of the Launch configuration to use for the group. See the ec2_lc module for managing these.
required: true
min_size:
description:
- Minimum number of instances in group, if unspecified then the current group value will be used.
required: false
max_size:
description:
- Maximum number of instances in group, if unspecified then the current group value will be used.
required: false
placement_group:
description:
- Physical location of your cluster placement group created in Amazon EC2.
required: false
version_added: "2.3"
default: None
desired_capacity:
description:
- Desired number of instances in group, if unspecified then the current group value will be used.
required: false
replace_all_instances:
description:
- In a rolling fashion, replace all instances with an old launch configuration with one from the current launch configuration.
required: false
version_added: "1.8"
default: False
replace_batch_size:
description:
- Number of instances you'd like to replace at a time. Used with replace_all_instances.
required: false
version_added: "1.8"
default: 1
replace_instances:
description:
- List of instance_ids belonging to the named ASG that you would like to terminate and be replaced with instances matching the current launch configuration.
required: false
version_added: "1.8"
default: None
lc_check:
description:
- Check to make sure instances that are being replaced with replace_instances do not already have the current launch_config.
required: false
version_added: "1.8"
default: True
vpc_zone_identifier:
description:
- List of VPC subnets to use
required: false
default: None
tags:
description:
- A list of tags to add to the Auto Scale Group. Optional key is 'propagate_at_launch', which defaults to true.
required: false
default: None
version_added: "1.7"
health_check_period:
description:
- Length of time in seconds after a new EC2 instance comes into service that Auto Scaling starts checking its health.
required: false
default: 300 seconds
version_added: "1.7"
health_check_type:
description:
- The service you want the health status from, Amazon EC2 or Elastic Load Balancer.
required: false
default: EC2
version_added: "1.7"
choices: ['EC2', 'ELB']
default_cooldown:
description:
- The number of seconds after a scaling activity completes before another can begin.
required: false
default: 300 seconds
version_added: "2.0"
wait_timeout:
description:
- how long to wait for replaced instances to become viable. Used in conjunction with instance_ids option.
default: 300
version_added: "1.8"
wait_for_instances:
description:
- Wait for the ASG instances to be in a ready state before exiting. If instances are behind an ELB, it will wait until the ELB determines all instances have a lifecycle_state of "InService" and a health_status of "Healthy".
version_added: "1.9"
default: yes
required: False
termination_policies:
description:
- An ordered list of criteria used for selecting instances to be removed from the Auto Scaling group when reducing capacity.
- For 'Default', when used to create a new autoscaling group, the "Default" value is used. When used to change an existing autoscaling group, the current termination policies are maintained.
required: false
default: Default
choices: ['OldestInstance', 'NewestInstance', 'OldestLaunchConfiguration', 'ClosestToNextInstanceHour', 'Default']
version_added: "2.0"
notification_topic:
description:
- A SNS topic ARN to send auto scaling notifications to.
default: None
required: false
version_added: "2.2"
notification_types:
description:
- A list of auto scaling events to trigger notifications on.
default: ['autoscaling:EC2_INSTANCE_LAUNCH', 'autoscaling:EC2_INSTANCE_LAUNCH_ERROR', 'autoscaling:EC2_INSTANCE_TERMINATE', 'autoscaling:EC2_INSTANCE_TERMINATE_ERROR']
required: false
version_added: "2.2"
suspend_processes:
description:
- A list of scaling processes to suspend.
required: False
default: []
choices: ['Launch', 'Terminate', 'HealthCheck', 'ReplaceUnhealthy', 'AZRebalance', 'AlarmNotification', 'ScheduledActions', 'AddToLoadBalancer']
version_added: "2.3"
extends_documentation_fragment:
- aws
- ec2
"""
EXAMPLES = '''
# Basic configuration
- ec2_asg:
name: special
load_balancers: [ 'lb1', 'lb2' ]
availability_zones: [ 'eu-west-1a', 'eu-west-1b' ]
launch_config_name: 'lc-1'
min_size: 1
max_size: 10
desired_capacity: 5
vpc_zone_identifier: [ 'subnet-abcd1234', 'subnet-1a2b3c4d' ]
tags:
- environment: production
propagate_at_launch: no
# Rolling ASG Updates
# Below is an example of how to assign a new launch config to an ASG and terminate old instances.
# All instances in "myasg" that do not have the launch configuration named "my_new_lc" will be terminated in
# a rolling fashion with instances using the current launch configuration, "my_new_lc".
# This could also be considered a rolling deploy of a pre-baked AMI.
# If this is a newly created group, the instances will not be replaced since all instances
# will have the current launch configuration.
- name: create launch config
ec2_lc:
name: my_new_lc
image_id: ami-lkajsf
key_name: mykey
region: us-east-1
security_groups: sg-23423
instance_type: m1.small
assign_public_ip: yes
- ec2_asg:
name: myasg
launch_config_name: my_new_lc
health_check_period: 60
health_check_type: ELB
replace_all_instances: yes
min_size: 5
max_size: 5
desired_capacity: 5
region: us-east-1
# To only replace a couple of instances instead of all of them, supply a list
# to "replace_instances":
- ec2_asg:
name: myasg
launch_config_name: my_new_lc
health_check_period: 60
health_check_type: ELB
replace_instances:
- i-b345231
- i-24c2931
min_size: 5
max_size: 5
desired_capacity: 5
region: us-east-1
'''
import time
import logging as log
import traceback
from ansible.module_utils.basic import *
from ansible.module_utils.ec2 import *
log.getLogger('boto').setLevel(log.CRITICAL)
#log.basicConfig(filename='/tmp/ansible_ec2_asg.log',level=log.DEBUG, format='%(asctime)s: %(message)s', datefmt='%m/%d/%Y %I:%M:%S %p')
try:
import boto.ec2.autoscale
from boto.ec2.autoscale import AutoScaleConnection, AutoScalingGroup, Tag
from boto.exception import BotoServerError
HAS_BOTO = True
except ImportError:
HAS_BOTO = False
ASG_ATTRIBUTES = ('availability_zones', 'default_cooldown', 'desired_capacity',
'health_check_period', 'health_check_type', 'launch_config_name',
'load_balancers', 'max_size', 'min_size', 'name', 'placement_group',
'termination_policies', 'vpc_zone_identifier')
INSTANCE_ATTRIBUTES = ('instance_id', 'health_status', 'lifecycle_state', 'launch_config_name')
def enforce_required_arguments(module):
''' As many arguments are not required for autoscale group deletion
they cannot be mandatory arguments for the module, so we enforce
them here '''
missing_args = []
for arg in ('min_size', 'max_size', 'launch_config_name'):
if module.params[arg] is None:
missing_args.append(arg)
if missing_args:
module.fail_json(msg="Missing required arguments for autoscaling group create/update: %s" % ",".join(missing_args))
def get_properties(autoscaling_group):
properties = dict((attr, getattr(autoscaling_group, attr)) for attr in ASG_ATTRIBUTES)
# Ugly hack to make this JSON-serializable. We take a list of boto Tag
# objects and replace them with a dict-representation. Needed because the
# tags are included in ansible's return value (which is jsonified)
if 'tags' in properties and isinstance(properties['tags'], list):
serializable_tags = {}
for tag in properties['tags']:
serializable_tags[tag.key] = [tag.value, tag.propagate_at_launch]
properties['tags'] = serializable_tags
properties['healthy_instances'] = 0
properties['in_service_instances'] = 0
properties['unhealthy_instances'] = 0
properties['pending_instances'] = 0
properties['viable_instances'] = 0
properties['terminating_instances'] = 0
instance_facts = {}
if autoscaling_group.instances:
properties['instances'] = [i.instance_id for i in autoscaling_group.instances]
for i in autoscaling_group.instances:
instance_facts[i.instance_id] = {'health_status': i.health_status,
'lifecycle_state': i.lifecycle_state,
'launch_config_name': i.launch_config_name }
if i.health_status == 'Healthy' and i.lifecycle_state == 'InService':
properties['viable_instances'] += 1
if i.health_status == 'Healthy':
properties['healthy_instances'] += 1
else:
properties['unhealthy_instances'] += 1
if i.lifecycle_state == 'InService':
properties['in_service_instances'] += 1
if i.lifecycle_state == 'Terminating':
properties['terminating_instances'] += 1
if i.lifecycle_state == 'Pending':
properties['pending_instances'] += 1
properties['instance_facts'] = instance_facts
properties['load_balancers'] = autoscaling_group.load_balancers
if getattr(autoscaling_group, "tags", None):
properties['tags'] = dict((t.key, t.value) for t in autoscaling_group.tags)
return properties
def elb_dreg(asg_connection, module, group_name, instance_id):
region, ec2_url, aws_connect_params = get_aws_connection_info(module)
as_group = asg_connection.get_all_groups(names=[group_name])[0]
wait_timeout = module.params.get('wait_timeout')
props = get_properties(as_group)
count = 1
if as_group.load_balancers and as_group.health_check_type == 'ELB':
try:
elb_connection = connect_to_aws(boto.ec2.elb, region, **aws_connect_params)
except boto.exception.NoAuthHandlerFound as e:
module.fail_json(msg=str(e))
else:
return
for lb in as_group.load_balancers:
elb_connection.deregister_instances(lb, instance_id)
log.debug("De-registering {0} from ELB {1}".format(instance_id, lb))
wait_timeout = time.time() + wait_timeout
while wait_timeout > time.time() and count > 0:
count = 0
for lb in as_group.load_balancers:
lb_instances = elb_connection.describe_instance_health(lb)
for i in lb_instances:
if i.instance_id == instance_id and i.state == "InService":
count += 1
log.debug("{0}: {1}, {2}".format(i.instance_id, i.state, i.description))
time.sleep(10)
if wait_timeout <= time.time():
# waiting took too long
module.fail_json(msg = "Waited too long for instance to deregister. {0}".format(time.asctime()))
def elb_healthy(asg_connection, elb_connection, module, group_name):
healthy_instances = set()
as_group = asg_connection.get_all_groups(names=[group_name])[0]
props = get_properties(as_group)
# get healthy, inservice instances from ASG
instances = []
for instance, settings in props['instance_facts'].items():
if settings['lifecycle_state'] == 'InService' and settings['health_status'] == 'Healthy':
instances.append(instance)
log.debug("ASG considers the following instances InService and Healthy: {0}".format(instances))
log.debug("ELB instance status:")
for lb in as_group.load_balancers:
# we catch a race condition that sometimes happens if the instance exists in the ASG
# but has not yet shown up in the ELB
try:
lb_instances = elb_connection.describe_instance_health(lb, instances=instances)
except boto.exception.BotoServerError as e:
if e.error_code == 'InvalidInstance':
return None
module.fail_json(msg=str(e))
for i in lb_instances:
if i.state == "InService":
healthy_instances.add(i.instance_id)
log.debug("{0}: {1}".format(i.instance_id, i.state))
return len(healthy_instances)
def wait_for_elb(asg_connection, module, group_name):
region, ec2_url, aws_connect_params = get_aws_connection_info(module)
wait_timeout = module.params.get('wait_timeout')
# if the health_check_type is ELB, we want to query the ELBs directly for instance
# status as to avoid health_check_grace period that is awarded to ASG instances
as_group = asg_connection.get_all_groups(names=[group_name])[0]
if as_group.load_balancers and as_group.health_check_type == 'ELB':
log.debug("Waiting for ELB to consider instances healthy.")
try:
elb_connection = connect_to_aws(boto.ec2.elb, region, **aws_connect_params)
except boto.exception.NoAuthHandlerFound as e:
module.fail_json(msg=str(e))
wait_timeout = time.time() + wait_timeout
healthy_instances = elb_healthy(asg_connection, elb_connection, module, group_name)
while healthy_instances < as_group.min_size and wait_timeout > time.time():
healthy_instances = elb_healthy(asg_connection, elb_connection, module, group_name)
log.debug("ELB thinks {0} instances are healthy.".format(healthy_instances))
time.sleep(10)
if wait_timeout <= time.time():
# waiting took too long
module.fail_json(msg = "Waited too long for ELB instances to be healthy. %s" % time.asctime())
log.debug("Waiting complete. ELB thinks {0} instances are healthy.".format(healthy_instances))
def suspend_processes(as_group, module):
suspend_processes = set(module.params.get('suspend_processes'))
try:
suspended_processes = set([p.process_name for p in as_group.suspended_processes])
except AttributeError:
# New ASG being created, no suspended_processes defined yet
suspended_processes = set()
if suspend_processes == suspended_processes:
return False
resume_processes = list(suspended_processes - suspend_processes)
if resume_processes:
as_group.resume_processes(resume_processes)
if suspend_processes:
as_group.suspend_processes(list(suspend_processes))
return True
def create_autoscaling_group(connection, module):
group_name = module.params.get('name')
load_balancers = module.params['load_balancers']
availability_zones = module.params['availability_zones']
launch_config_name = module.params.get('launch_config_name')
min_size = module.params['min_size']
max_size = module.params['max_size']
placement_group = module.params.get('placement_group')
desired_capacity = module.params.get('desired_capacity')
vpc_zone_identifier = module.params.get('vpc_zone_identifier')
set_tags = module.params.get('tags')
health_check_period = module.params.get('health_check_period')
health_check_type = module.params.get('health_check_type')
default_cooldown = module.params.get('default_cooldown')
wait_for_instances = module.params.get('wait_for_instances')
as_groups = connection.get_all_groups(names=[group_name])
wait_timeout = module.params.get('wait_timeout')
termination_policies = module.params.get('termination_policies')
notification_topic = module.params.get('notification_topic')
notification_types = module.params.get('notification_types')
if not vpc_zone_identifier and not availability_zones:
region, ec2_url, aws_connect_params = get_aws_connection_info(module)
try:
ec2_connection = connect_to_aws(boto.ec2, region, **aws_connect_params)
except (boto.exception.NoAuthHandlerFound, AnsibleAWSError) as e:
module.fail_json(msg=str(e))
elif vpc_zone_identifier:
vpc_zone_identifier = ','.join(vpc_zone_identifier)
asg_tags = []
for tag in set_tags:
for k,v in tag.iteritems():
if k !='propagate_at_launch':
asg_tags.append(Tag(key=k,
value=v,
propagate_at_launch=bool(tag.get('propagate_at_launch', True)),
resource_id=group_name))
if not as_groups:
if not vpc_zone_identifier and not availability_zones:
availability_zones = module.params['availability_zones'] = [zone.name for zone in ec2_connection.get_all_zones()]
enforce_required_arguments(module)
launch_configs = connection.get_all_launch_configurations(names=[launch_config_name])
if len(launch_configs) == 0:
module.fail_json(msg="No launch config found with name %s" % launch_config_name)
ag = AutoScalingGroup(
group_name=group_name,
load_balancers=load_balancers,
availability_zones=availability_zones,
launch_config=launch_configs[0],
min_size=min_size,
max_size=max_size,
placement_group=placement_group,
desired_capacity=desired_capacity,
vpc_zone_identifier=vpc_zone_identifier,
connection=connection,
tags=asg_tags,
health_check_period=health_check_period,
health_check_type=health_check_type,
default_cooldown=default_cooldown,
termination_policies=termination_policies)
try:
connection.create_auto_scaling_group(ag)
suspend_processes(ag, module)
if wait_for_instances:
wait_for_new_inst(module, connection, group_name, wait_timeout, desired_capacity, 'viable_instances')
wait_for_elb(connection, module, group_name)
if notification_topic:
ag.put_notification_configuration(notification_topic, notification_types)
as_group = connection.get_all_groups(names=[group_name])[0]
asg_properties = get_properties(as_group)
changed = True
return(changed, asg_properties)
except BotoServerError as e:
module.fail_json(msg="Failed to create Autoscaling Group: %s" % str(e), exception=traceback.format_exc(e))
else:
as_group = as_groups[0]
changed = False
if suspend_processes(as_group, module):
changed = True
for attr in ASG_ATTRIBUTES:
if module.params.get(attr, None) is not None:
module_attr = module.params.get(attr)
if attr == 'vpc_zone_identifier':
module_attr = ','.join(module_attr)
group_attr = getattr(as_group, attr)
# we do this because AWS and the module may return the same list
# sorted differently
if attr != 'termination_policies':
try:
module_attr.sort()
except:
pass
try:
group_attr.sort()
except:
pass
if group_attr != module_attr:
changed = True
setattr(as_group, attr, module_attr)
if len(set_tags) > 0:
have_tags = {}
want_tags = {}
for tag in asg_tags:
want_tags[tag.key] = [tag.value, tag.propagate_at_launch]
dead_tags = []
for tag in as_group.tags:
have_tags[tag.key] = [tag.value, tag.propagate_at_launch]
if tag.key not in want_tags:
changed = True
dead_tags.append(tag)
if dead_tags != []:
connection.delete_tags(dead_tags)
if have_tags != want_tags:
changed = True
connection.create_or_update_tags(asg_tags)
# handle loadbalancers separately because None != []
load_balancers = module.params.get('load_balancers') or []
if load_balancers and as_group.load_balancers != load_balancers:
changed = True
as_group.load_balancers = module.params.get('load_balancers')
if changed:
try:
as_group.update()
except BotoServerError as e:
module.fail_json(msg="Failed to update Autoscaling Group: %s" % str(e), exception=traceback.format_exc(e))
if notification_topic:
try:
as_group.put_notification_configuration(notification_topic, notification_types)
except BotoServerError as e:
module.fail_json(msg="Failed to update Autoscaling Group notifications: %s" % str(e), exception=traceback.format_exc(e))
if wait_for_instances:
wait_for_new_inst(module, connection, group_name, wait_timeout, desired_capacity, 'viable_instances')
wait_for_elb(connection, module, group_name)
try:
as_group = connection.get_all_groups(names=[group_name])[0]
asg_properties = get_properties(as_group)
except BotoServerError as e:
module.fail_json(msg="Failed to read existing Autoscaling Groups: %s" % str(e), exception=traceback.format_exc(e))
return(changed, asg_properties)
def delete_autoscaling_group(connection, module):
group_name = module.params.get('name')
notification_topic = module.params.get('notification_topic')
if notification_topic:
connection.delete_notification_configuration(group_name, notification_topic)
groups = connection.get_all_groups(names=[group_name])
if groups:
group = groups[0]
group.max_size = 0
group.min_size = 0
group.desired_capacity = 0
group.update()
instances = True
while instances:
tmp_groups = connection.get_all_groups(names=[group_name])
if tmp_groups:
tmp_group = tmp_groups[0]
if not tmp_group.instances:
instances = False
time.sleep(10)
group.delete()
while len(connection.get_all_groups(names=[group_name])):
time.sleep(5)
changed=True
return changed
else:
changed=False
return changed
def get_chunks(l, n):
for i in xrange(0, len(l), n):
yield l[i:i+n]
def update_size(group, max_size, min_size, dc):
log.debug("setting ASG sizes")
log.debug("minimum size: {0}, desired_capacity: {1}, max size: {2}".format(min_size, dc, max_size ))
group.max_size = max_size
group.min_size = min_size
group.desired_capacity = dc
group.update()
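# Rolling-replace outline (summary comment, not part of the original module):
# 1) temporarily bump min/max/desired capacity by the batch size so instances
#    with the new launch config spin up alongside the old ones,
# 2) terminate old-config instances in chunks of replace_batch_size, waiting
#    for viable_instances and ELB health between batches,
# 3) restore the original group sizes once enough new instances are in service.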
def replace(connection, module):
batch_size = module.params.get('replace_batch_size')
wait_timeout = module.params.get('wait_timeout')
group_name = module.params.get('name')
max_size = module.params.get('max_size')
min_size = module.params.get('min_size')
desired_capacity = module.params.get('desired_capacity')
lc_check = module.params.get('lc_check')
replace_instances = module.params.get('replace_instances')
as_group = connection.get_all_groups(names=[group_name])[0]
wait_for_new_inst(module, connection, group_name, wait_timeout, as_group.min_size, 'viable_instances')
props = get_properties(as_group)
instances = props['instances']
if replace_instances:
instances = replace_instances
#check if min_size/max_size/desired capacity have been specified and if not use ASG values
if min_size is None:
min_size = as_group.min_size
if max_size is None:
max_size = as_group.max_size
if desired_capacity is None:
desired_capacity = as_group.desired_capacity
# check to see if instances are replaceable if checking launch configs
new_instances, old_instances = get_instances_by_lc(props, lc_check, instances)
num_new_inst_needed = desired_capacity - len(new_instances)
if lc_check:
if num_new_inst_needed == 0 and old_instances:
log.debug("No new instances needed, but old instances are present. Removing old instances")
terminate_batch(connection, module, old_instances, instances, True)
as_group = connection.get_all_groups(names=[group_name])[0]
props = get_properties(as_group)
changed = True
return(changed, props)
# we don't want to spin up extra instances if not necessary
if num_new_inst_needed < batch_size:
log.debug("Overriding batch size to {0}".format(num_new_inst_needed))
batch_size = num_new_inst_needed
if not old_instances:
changed = False
return(changed, props)
# set temporary settings and wait for them to be reached
# This should get overwritten if the number of instances left is less than the batch size.
as_group = connection.get_all_groups(names=[group_name])[0]
update_size(as_group, max_size + batch_size, min_size + batch_size, desired_capacity + batch_size)
wait_for_new_inst(module, connection, group_name, wait_timeout, as_group.min_size, 'viable_instances')
wait_for_elb(connection, module, group_name)
as_group = connection.get_all_groups(names=[group_name])[0]
props = get_properties(as_group)
instances = props['instances']
if replace_instances:
instances = replace_instances
log.debug("beginning main loop")
for i in get_chunks(instances, batch_size):
# break out of this loop if we have enough new instances
break_early, desired_size, term_instances = terminate_batch(connection, module, i, instances, False)
wait_for_term_inst(connection, module, term_instances)
wait_for_new_inst(module, connection, group_name, wait_timeout, desired_size, 'viable_instances')
wait_for_elb(connection, module, group_name)
as_group = connection.get_all_groups(names=[group_name])[0]
if break_early:
log.debug("breaking loop")
break
update_size(as_group, max_size, min_size, desired_capacity)
as_group = connection.get_all_groups(names=[group_name])[0]
asg_properties = get_properties(as_group)
log.debug("Rolling update complete.")
changed=True
return(changed, asg_properties)
def get_instances_by_lc(props, lc_check, initial_instances):
new_instances = []
old_instances = []
# old instances are those that have the old launch config
if lc_check:
for i in props['instances']:
if props['instance_facts'][i]['launch_config_name'] == props['launch_config_name']:
new_instances.append(i)
else:
old_instances.append(i)
else:
log.debug("Comparing initial instances with current: {0}".format(initial_instances))
for i in props['instances']:
if i not in initial_instances:
new_instances.append(i)
else:
old_instances.append(i)
log.debug("New instances: {0}, {1}".format(len(new_instances), new_instances))
log.debug("Old instances: {0}, {1}".format(len(old_instances), old_instances))
return new_instances, old_instances
def list_purgeable_instances(props, lc_check, replace_instances, initial_instances):
instances_to_terminate = []
instances = ( inst_id for inst_id in replace_instances if inst_id in props['instances'])
# check to make sure instances given are actually in the given ASG
# and they have a non-current launch config
if lc_check:
for i in instances:
if props['instance_facts'][i]['launch_config_name'] != props['launch_config_name']:
instances_to_terminate.append(i)
else:
for i in instances:
if i in initial_instances:
instances_to_terminate.append(i)
return instances_to_terminate
def terminate_batch(connection, module, replace_instances, initial_instances, leftovers=False):
batch_size = module.params.get('replace_batch_size')
min_size = module.params.get('min_size')
desired_capacity = module.params.get('desired_capacity')
group_name = module.params.get('name')
wait_timeout = int(module.params.get('wait_timeout'))
lc_check = module.params.get('lc_check')
decrement_capacity = False
break_loop = False
as_group = connection.get_all_groups(names=[group_name])[0]
props = get_properties(as_group)
desired_size = as_group.min_size
new_instances, old_instances = get_instances_by_lc(props, lc_check, initial_instances)
num_new_inst_needed = desired_capacity - len(new_instances)
# check to make sure instances given are actually in the given ASG
# and they have a non-current launch config
instances_to_terminate = list_purgeable_instances(props, lc_check, replace_instances, initial_instances)
log.debug("new instances needed: {0}".format(num_new_inst_needed))
log.debug("new instances: {0}".format(new_instances))
log.debug("old instances: {0}".format(old_instances))
log.debug("batch instances: {0}".format(",".join(instances_to_terminate)))
if num_new_inst_needed == 0:
decrement_capacity = True
if as_group.min_size != min_size:
as_group.min_size = min_size
as_group.update()
log.debug("Updating minimum size back to original of {0}".format(min_size))
# if there are some leftover old instances, but we are already at capacity with new ones
# we don't want to decrement capacity
if leftovers:
decrement_capacity = False
break_loop = True
instances_to_terminate = old_instances
desired_size = min_size
log.debug("No new instances needed")
if num_new_inst_needed < batch_size and num_new_inst_needed !=0 :
instances_to_terminate = instances_to_terminate[:num_new_inst_needed]
decrement_capacity = False
break_loop = False
log.debug("{0} new instances needed".format(num_new_inst_needed))
log.debug("decrementing capacity: {0}".format(decrement_capacity))
for instance_id in instances_to_terminate:
elb_dreg(connection, module, group_name, instance_id)
log.debug("terminating instance: {0}".format(instance_id))
connection.terminate_instance(instance_id, decrement_capacity=decrement_capacity)
# we wait to make sure the machines we marked as Unhealthy are
# no longer in the list
return break_loop, desired_size, instances_to_terminate
def wait_for_term_inst(connection, module, term_instances):
batch_size = module.params.get('replace_batch_size')
wait_timeout = module.params.get('wait_timeout')
group_name = module.params.get('name')
lc_check = module.params.get('lc_check')
as_group = connection.get_all_groups(names=[group_name])[0]
props = get_properties(as_group)
count = 1
wait_timeout = time.time() + wait_timeout
while wait_timeout > time.time() and count > 0:
log.debug("waiting for instances to terminate")
count = 0
as_group = connection.get_all_groups(names=[group_name])[0]
props = get_properties(as_group)
instance_facts = props['instance_facts']
instances = ( i for i in instance_facts if i in term_instances)
for i in instances:
lifecycle = instance_facts[i]['lifecycle_state']
health = instance_facts[i]['health_status']
log.debug("Instance {0} has state of {1},{2}".format(i,lifecycle,health ))
if lifecycle == 'Terminating' or health == 'Unhealthy':
count += 1
time.sleep(10)
if wait_timeout <= time.time():
# waiting took too long
module.fail_json(msg = "Waited too long for old instances to terminate. %s" % time.asctime())
def wait_for_new_inst(module, connection, group_name, wait_timeout, desired_size, prop):
# make sure we have the latest stats after that last loop.
as_group = connection.get_all_groups(names=[group_name])[0]
props = get_properties(as_group)
log.debug("Waiting for {0} = {1}, currently {2}".format(prop, desired_size, props[prop]))
# now we make sure that we have enough instances in a viable state
wait_timeout = time.time() + wait_timeout
while wait_timeout > time.time() and desired_size > props[prop]:
log.debug("Waiting for {0} = {1}, currently {2}".format(prop, desired_size, props[prop]))
time.sleep(10)
as_group = connection.get_all_groups(names=[group_name])[0]
props = get_properties(as_group)
if wait_timeout <= time.time():
# waiting took too long
module.fail_json(msg = "Waited too long for new instances to become viable. %s" % time.asctime())
log.debug("Reached {0}: {1}".format(prop, desired_size))
return props
def main():
argument_spec = ec2_argument_spec()
argument_spec.update(
dict(
name=dict(required=True, type='str'),
load_balancers=dict(type='list'),
availability_zones=dict(type='list'),
launch_config_name=dict(type='str'),
min_size=dict(type='int'),
max_size=dict(type='int'),
placement_group=dict(type='str'),
desired_capacity=dict(type='int'),
vpc_zone_identifier=dict(type='list'),
replace_batch_size=dict(type='int', default=1),
replace_all_instances=dict(type='bool', default=False),
replace_instances=dict(type='list', default=[]),
lc_check=dict(type='bool', default=True),
wait_timeout=dict(type='int', default=300),
state=dict(default='present', choices=['present', 'absent']),
tags=dict(type='list', default=[]),
health_check_period=dict(type='int', default=300),
health_check_type=dict(default='EC2', choices=['EC2', 'ELB']),
default_cooldown=dict(type='int', default=300),
wait_for_instances=dict(type='bool', default=True),
termination_policies=dict(type='list', default='Default'),
notification_topic=dict(type='str', default=None),
notification_types=dict(type='list', default=[
'autoscaling:EC2_INSTANCE_LAUNCH',
'autoscaling:EC2_INSTANCE_LAUNCH_ERROR',
'autoscaling:EC2_INSTANCE_TERMINATE',
'autoscaling:EC2_INSTANCE_TERMINATE_ERROR'
]),
suspend_processes=dict(type='list', default=[])
),
)
module = AnsibleModule(
argument_spec=argument_spec,
mutually_exclusive = [['replace_all_instances', 'replace_instances']]
)
if not HAS_BOTO:
module.fail_json(msg='boto required for this module')
state = module.params.get('state')
replace_instances = module.params.get('replace_instances')
replace_all_instances = module.params.get('replace_all_instances')
region, ec2_url, aws_connect_params = get_aws_connection_info(module)
try:
connection = connect_to_aws(boto.ec2.autoscale, region, **aws_connect_params)
if not connection:
module.fail_json(msg="failed to connect to AWS for the given region: %s" % str(region))
except boto.exception.NoAuthHandlerFound as e:
module.fail_json(msg=str(e))
changed = create_changed = replace_changed = False
if state == 'present':
create_changed, asg_properties=create_autoscaling_group(connection, module)
elif state == 'absent':
changed = delete_autoscaling_group(connection, module)
module.exit_json( changed = changed )
if replace_all_instances or replace_instances:
replace_changed, asg_properties=replace(connection, module)
if create_changed or replace_changed:
changed = True
module.exit_json( changed = changed, **asg_properties )
if __name__ == '__main__':
main()
| gpl-3.0 |
bogdanvuk/sydpy | sydpy/types/__init__.py | 1 | 1304 | # This file is part of sydpy.
#
# Copyright (C) 2014-2015 Bogdan Vukobratovic
#
# sydpy is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation, either version 2.1
# of the License, or (at your option) any later version.
#
# sydpy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General
# Public License along with sydpy. If not, see
# <http://www.gnu.org/licenses/>.
from ._type_base import conv, convgen, ConversionError
from .bit import bit, bit8, bit16, bit32, bit64, Bit
from .array import Array, array
from .vector import vector, Vector
from .struct import struct, Struct
from .enum import Enum
__all__ = ["conv",
"convgen",
"bit",
"bit8",
"bit16",
"bit32",
"bit64",
"Bit",
"array",
"Array",
"vector",
"Vector",
"struct",
"Struct",
"enum",
"Enum"
]
| lgpl-2.1 |
kubeflow/kfserving | python/kfserving/test/test_v1beta1_transformer_config.py | 1 | 2113 | # Copyright 2020 kubeflow.org.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# coding: utf-8
"""
KFServing
Python SDK for KFServing # noqa: E501
The version of the OpenAPI document: v0.1
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import unittest
import datetime
import kfserving
from kfserving.models.v1beta1_transformer_config import V1beta1TransformerConfig # noqa: E501
from kfserving.rest import ApiException
class TestV1beta1TransformerConfig(unittest.TestCase):
"""V1beta1TransformerConfig unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def make_instance(self, include_optional):
"""Test V1beta1TransformerConfig
include_optional is a boolean; when False only required
params are included, when True both required and
optional params are included """
# model = kfserving.models.v1beta1_transformer_config.V1beta1TransformerConfig() # noqa: E501
if include_optional :
return V1beta1TransformerConfig(
default_image_version = '0',
image = '0'
)
else :
return V1beta1TransformerConfig(
default_image_version = '0',
image = '0',
)
def testV1beta1TransformerConfig(self):
"""Test V1beta1TransformerConfig"""
inst_req_only = self.make_instance(include_optional=False)
inst_req_and_optional = self.make_instance(include_optional=True)
if __name__ == '__main__':
unittest.main()
| apache-2.0 |
zenodo/invenio | invenio/legacy/registry.py | 18 | 2353 | # -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2014, 2015 CERN.
#
# Invenio is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
import os
import inspect
from flask import current_app
from flask_registry import RegistryProxy, ImportPathRegistry, \
ModuleAutoDiscoveryRegistry
from invenio.ext.registry import ModuleAutoDiscoverySubRegistry
from invenio.utils.datastructures import LazyDict
legacy_modules = RegistryProxy('legacy', ImportPathRegistry,
initial=['invenio.legacy.*'])
webadmin_proxy = RegistryProxy('legacy.webadmin', \
ModuleAutoDiscoverySubRegistry, 'web.admin',
registry_namespace=legacy_modules)
def _admin_handler_name(name):
parts = name.split('.')
return '%s/%s' % (parts[2], parts[5])
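# webadmin lazily maps '<package>/<name>' (derived from module paths of the form
# 'invenio.legacy.<package>.web.admin.<name>') to the discovered admin module.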
webadmin = LazyDict(lambda: dict((_admin_handler_name(module.__name__), module)
for module in webadmin_proxy))
webinterface_proxy = RegistryProxy(
'legacy.webinterface', ModuleAutoDiscoveryRegistry, 'webinterface',
registry_namespace=legacy_modules)
def _webinterface(module):
from invenio.ext.legacy.handler import WebInterfaceDirectory
parts = module.__name__.split('.')
for value in dir(module):
webinterface = getattr(module, value)
if inspect.isclass(webinterface) and \
issubclass(webinterface, WebInterfaceDirectory) and \
webinterface.__module__ == module.__name__:
yield webinterface.__name__, webinterface
def _webinterfaces(modules):
for module in modules:
for value in _webinterface(module):
yield value
webinterfaces = LazyDict(lambda: dict(_webinterfaces(webinterface_proxy)))
| gpl-2.0 |
openmb/openblackhole-enigma2 | lib/python/Plugins/SystemPlugins/SoftwareManager/SoftwareTools.py | 47 | 9344 | # -*- coding: iso-8859-1 -*-
from enigma import eConsoleAppContainer
from Components.Console import Console
from Components.About import about
from Components.PackageInfo import PackageInfoHandler
from Components.Language import language
from Components.Sources.List import List
from Components.Ipkg import IpkgComponent
from Components.Network import iNetwork
from Tools.Directories import pathExists, fileExists, resolveFilename, SCOPE_METADIR
from Tools.HardwareInfo import HardwareInfo
from time import time
class SoftwareTools(PackageInfoHandler):
lastDownloadDate = None
NetworkConnectionAvailable = None
list_updating = False
available_updates = 0
available_updatelist = []
available_packetlist = []
installed_packetlist = {}
def __init__(self):
aboutInfo = about.getImageVersionString()
if aboutInfo.startswith("dev-"):
self.ImageVersion = 'Experimental'
else:
self.ImageVersion = 'Stable'
self.language = language.getLanguage()[:2] # getLanguage returns e.g. "fi_FI" for "language_country"
PackageInfoHandler.__init__(self, self.statusCallback, blocking = False, neededTag = 'ALL_TAGS', neededFlag = self.ImageVersion)
self.directory = resolveFilename(SCOPE_METADIR)
self.list = List([])
self.NotifierCallback = None
self.Console = Console()
self.UpdateConsole = Console()
self.cmdList = []
self.unwanted_extensions = ('-dbg', '-dev', '-doc', '-staticdev', '-src')
self.ipkg = IpkgComponent()
self.ipkg.addCallback(self.ipkgCallback)
def statusCallback(self, status, progress):
pass
def startSoftwareTools(self, callback = None):
if callback is not None:
self.NotifierCallback = callback
iNetwork.checkNetworkState(self.checkNetworkCB)
def checkNetworkCB(self,data):
if data is not None:
if data <= 2:
self.NetworkConnectionAvailable = True
self.getUpdates()
else:
self.NetworkConnectionAvailable = False
self.getUpdates()
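# Update flow (derived from the callbacks below): getUpdates() issues a package-list
# update (IpkgComponent.CMD_UPDATE); ipkgCallback() then chains into
# startIpkgListAvailable() -> startInstallMetaPackage() -> startIpkgListInstalled()
# -> countUpdates(), which finally reports the number of available updates.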
def getUpdates(self, callback = None):
if self.lastDownloadDate is None:
if self.NetworkConnectionAvailable == True:
self.lastDownloadDate = time()
if self.list_updating is False and callback is None:
self.list_updating = True
self.ipkg.startCmd(IpkgComponent.CMD_UPDATE)
elif self.list_updating is False and callback is not None:
self.list_updating = True
self.NotifierCallback = callback
self.ipkg.startCmd(IpkgComponent.CMD_UPDATE)
elif self.list_updating is True and callback is not None:
self.NotifierCallback = callback
else:
self.list_updating = False
if callback is not None:
callback(False)
elif self.NotifierCallback is not None:
self.NotifierCallback(False)
else:
if self.NetworkConnectionAvailable == True:
self.lastDownloadDate = time()
if self.list_updating is False and callback is None:
self.list_updating = True
self.ipkg.startCmd(IpkgComponent.CMD_UPDATE)
elif self.list_updating is False and callback is not None:
self.list_updating = True
self.NotifierCallback = callback
self.ipkg.startCmd(IpkgComponent.CMD_UPDATE)
elif self.list_updating is True and callback is not None:
self.NotifierCallback = callback
else:
if self.list_updating and callback is not None:
self.NotifierCallback = callback
self.startIpkgListAvailable()
else:
self.list_updating = False
if callback is not None:
callback(False)
elif self.NotifierCallback is not None:
self.NotifierCallback(False)
def ipkgCallback(self, event, param):
if event == IpkgComponent.EVENT_ERROR:
self.list_updating = False
if self.NotifierCallback is not None:
self.NotifierCallback(False)
elif event == IpkgComponent.EVENT_DONE:
if self.list_updating:
self.startIpkgListAvailable()
pass
def startIpkgListAvailable(self, callback = None):
if callback is not None:
self.list_updating = True
if self.list_updating:
if not self.UpdateConsole:
self.UpdateConsole = Console()
cmd = self.ipkg.ipkg + " list"
self.UpdateConsole.ePopen(cmd, self.IpkgListAvailableCB, callback)
def IpkgListAvailableCB(self, result, retval, extra_args = None):
(callback) = extra_args
if result:
if self.list_updating:
self.available_packetlist = []
for x in result.splitlines():
tokens = x.split(' - ')
name = tokens[0].strip()
if not any(name.endswith(x) for x in self.unwanted_extensions):
l = len(tokens)
version = l > 1 and tokens[1].strip() or ""
descr = l > 2 and tokens[2].strip() or ""
self.available_packetlist.append([name, version, descr])
if callback is None:
self.startInstallMetaPackage()
else:
if self.UpdateConsole:
if len(self.UpdateConsole.appContainers) == 0:
callback(True)
else:
self.list_updating = False
if self.UpdateConsole:
if len(self.UpdateConsole.appContainers) == 0:
if callback is not None:
callback(False)
def startInstallMetaPackage(self, callback = None):
if callback is not None:
self.list_updating = True
if self.list_updating:
if self.NetworkConnectionAvailable == True:
if not self.UpdateConsole:
self.UpdateConsole = Console()
cmd = self.ipkg.ipkg + " install enigma2-meta enigma2-plugins-meta enigma2-skins-meta"
self.UpdateConsole.ePopen(cmd, self.InstallMetaPackageCB, callback)
else:
self.InstallMetaPackageCB(True)
def InstallMetaPackageCB(self, result, retval = None, extra_args = None):
(callback) = extra_args
if result:
self.fillPackagesIndexList()
if callback is None:
self.startIpkgListInstalled()
else:
if self.UpdateConsole:
if len(self.UpdateConsole.appContainers) == 0:
callback(True)
else:
self.list_updating = False
if self.UpdateConsole:
if len(self.UpdateConsole.appContainers) == 0:
if callback is not None:
callback(False)
def startIpkgListInstalled(self, callback = None):
if callback is not None:
self.list_updating = True
if self.list_updating:
if not self.UpdateConsole:
self.UpdateConsole = Console()
cmd = self.ipkg.ipkg + " list_installed"
self.UpdateConsole.ePopen(cmd, self.IpkgListInstalledCB, callback)
def IpkgListInstalledCB(self, result, retval, extra_args = None):
(callback) = extra_args
if result:
self.installed_packetlist = {}
for x in result.splitlines():
tokens = x.split(' - ')
name = tokens[0].strip()
if not any(name.endswith(x) for x in self.unwanted_extensions):
l = len(tokens)
version = l > 1 and tokens[1].strip() or ""
self.installed_packetlist[name] = version
for package in self.packagesIndexlist[:]:
if not self.verifyPrerequisites(package[0]["prerequisites"]):
self.packagesIndexlist.remove(package)
for package in self.packagesIndexlist[:]:
attributes = package[0]["attributes"]
if attributes.has_key("packagetype"):
if attributes["packagetype"] == "internal":
self.packagesIndexlist.remove(package)
if callback is None:
self.countUpdates()
else:
if self.UpdateConsole:
if len(self.UpdateConsole.appContainers) == 0:
callback(True)
else:
self.list_updating = False
if self.UpdateConsole:
if len(self.UpdateConsole.appContainers) == 0:
if callback is not None:
callback(False)
def countUpdates(self, callback = None):
self.available_updates = 0
self.available_updatelist = []
for package in self.packagesIndexlist[:]:
attributes = package[0]["attributes"]
packagename = attributes["packagename"]
for x in self.available_packetlist:
if x[0] == packagename:
if self.installed_packetlist.has_key(packagename):
if self.installed_packetlist[packagename] != x[1]:
self.available_updates +=1
self.available_updatelist.append([packagename])
self.list_updating = False
if self.UpdateConsole:
if len(self.UpdateConsole.appContainers) == 0:
if callback is not None:
callback(True)
callback = None
elif self.NotifierCallback is not None:
self.NotifierCallback(True)
self.NotifierCallback = None
def startIpkgUpdate(self, callback = None):
if not self.Console:
self.Console = Console()
cmd = self.ipkg.ipkg + " update"
self.Console.ePopen(cmd, self.IpkgUpdateCB, callback)
def IpkgUpdateCB(self, result, retval, extra_args = None):
(callback) = extra_args
if result:
if self.Console:
if len(self.Console.appContainers) == 0:
if callback is not None:
callback(True)
callback = None
def cleanupSoftwareTools(self):
self.list_updating = False
if self.NotifierCallback is not None:
self.NotifierCallback = None
self.ipkg.stop()
if self.Console is not None:
if len(self.Console.appContainers):
for name in self.Console.appContainers.keys():
self.Console.kill(name)
if self.UpdateConsole is not None:
if len(self.UpdateConsole.appContainers):
for name in self.UpdateConsole.appContainers.keys():
self.UpdateConsole.kill(name)
def verifyPrerequisites(self, prerequisites):
if prerequisites.has_key("hardware"):
hardware_found = False
for hardware in prerequisites["hardware"]:
if hardware == HardwareInfo().device_name:
hardware_found = True
if not hardware_found:
return False
return True
iSoftwareTools = SoftwareTools()
| gpl-2.0 |
sassman/ansible-modules-core | cloud/amazon/ec2_snapshot.py | 6 | 5398 | #!/usr/bin/python
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: ec2_snapshot
short_description: creates a snapshot from an existing volume
description:
- creates an EC2 snapshot from an existing EBS volume
version_added: "1.5"
options:
region:
description:
- The AWS region to use. If not specified then the value of the EC2_REGION environment variable, if any, is used.
required: false
aliases: ['aws_region', 'ec2_region']
volume_id:
description:
- volume from which to take the snapshot
required: false
description:
description:
- description to be applied to the snapshot
required: false
instance_id:
description:
- instance that has the required volume to snapshot mounted
required: false
device_name:
description:
- device name of a mounted volume to be snapshotted
required: false
snapshot_tags:
description:
- a hash/dictionary of tags to add to the snapshot
required: false
version_added: "1.6"
wait:
description:
- wait for the snapshot to be ready
choices: ['yes', 'no']
required: false
default: yes
version_added: "1.5.1"
wait_timeout:
description:
- how long before wait gives up, in seconds
- specify 0 to wait forever
required: false
default: 0
version_added: "1.5.1"
author: Will Thames
extends_documentation_fragment: aws
'''
EXAMPLES = '''
# Simple snapshot of volume using volume_id
- local_action:
module: ec2_snapshot
volume_id: vol-abcdef12
description: snapshot of /data from DB123 taken 2013/11/28 12:18:32
# Snapshot of volume mounted on device_name attached to instance_id
- local_action:
module: ec2_snapshot
instance_id: i-12345678
device_name: /dev/sdb1
description: snapshot of /data from DB123 taken 2013/11/28 12:18:32
# Snapshot of volume with tagging
- local_action:
module: ec2_snapshot
instance_id: i-12345678
device_name: /dev/sdb1
snapshot_tags:
frequency: hourly
source: /data
'''
import sys
import time
try:
import boto.ec2
except ImportError:
print "failed=True msg='boto required for this module'"
sys.exit(1)
def main():
argument_spec = ec2_argument_spec()
argument_spec.update(
dict(
volume_id = dict(),
description = dict(),
instance_id = dict(),
device_name = dict(),
wait = dict(type='bool', default='true'),
wait_timeout = dict(default=0),
snapshot_tags = dict(type='dict', default=dict()),
)
)
module = AnsibleModule(argument_spec=argument_spec)
volume_id = module.params.get('volume_id')
description = module.params.get('description')
instance_id = module.params.get('instance_id')
device_name = module.params.get('device_name')
wait = module.params.get('wait')
wait_timeout = module.params.get('wait_timeout')
snapshot_tags = module.params.get('snapshot_tags')
if not volume_id and not instance_id or volume_id and instance_id:
module.fail_json(msg='One and only one of volume_id or instance_id must be specified')
if instance_id and not device_name or device_name and not instance_id:
module.fail_json(msg='Instance ID and device name must both be specified')
ec2 = ec2_connect(module)
if instance_id:
try:
volumes = ec2.get_all_volumes(filters={'attachment.instance-id': instance_id, 'attachment.device': device_name})
if not volumes:
module.fail_json(msg="Could not find volume with name %s attached to instance %s" % (device_name, instance_id))
volume_id = volumes[0].id
except boto.exception.BotoServerError, e:
module.fail_json(msg = "%s: %s" % (e.error_code, e.error_message))
try:
snapshot = ec2.create_snapshot(volume_id, description=description)
time_waited = 0
if wait:
snapshot.update()
while snapshot.status != 'completed':
time.sleep(3)
snapshot.update()
time_waited += 3
if wait_timeout and time_waited > wait_timeout:
module.fail_json(msg='Timed out while creating snapshot.')
for k, v in snapshot_tags.items():
snapshot.add_tag(k, v)
except boto.exception.BotoServerError, e:
module.fail_json(msg = "%s: %s" % (e.error_code, e.error_message))
module.exit_json(changed=True, snapshot_id=snapshot.id, volume_id=snapshot.volume_id,
volume_size=snapshot.volume_size, tags=snapshot.tags.copy())
# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.ec2 import *
main()
| gpl-3.0 |
dcolligan/server | setup.py | 4 | 2617 | # Don't import __future__ packages here; they make setup fail
# First, we try to use setuptools. If it's not available locally,
# we fall back on ez_setup.
try:
from setuptools import setup
except ImportError:
from ez_setup import use_setuptools
use_setuptools()
from setuptools import setup
with open("README.pypi.rst") as readmeFile:
long_description = readmeFile.read()
install_requires = []
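# Collect pinned requirements from requirements.txt: blank lines, comments and
# lines that defer to "-c constraints.txt" are skipped; only the first token
# (the "name==version" spec) of each remaining line is recorded.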
with open("requirements.txt") as requirementsFile:
for line in requirementsFile:
line = line.strip()
if len(line) == 0:
continue
if line[0] == '#':
continue
if line.find('-c constraints.txt') == -1:
pinnedVersion = line.split()[0]
install_requires.append(pinnedVersion)
dependency_links = []
try:
with open("constraints.txt") as constraintsFile:
for line in constraintsFile:
line = line.strip()
if len(line) == 0:
continue
if line[0] == '#':
continue
dependency_links.append(line)
except EnvironmentError:
print('No constraints file found, proceeding without '
'creating dependency links.')
setup(
name="ga4gh-server",
description="A reference implementation of the GA4GH API",
packages=["ga4gh", "ga4gh.server", "ga4gh.server.datamodel",
"ga4gh.server.templates"],
namespace_packages=["ga4gh"],
zip_safe=False,
url="https://github.com/ga4gh/ga4gh-server",
use_scm_version={"write_to": "ga4gh/server/_version.py"},
entry_points={
'console_scripts': [
'ga4gh_configtest=ga4gh.server.cli.configtest:configtest_main',
'ga4gh_server=ga4gh.server.cli.server:server_main',
'ga4gh_repo=ga4gh.server.cli.repomanager:repo_main',
]
},
long_description=long_description,
install_requires=install_requires,
dependency_links=dependency_links,
license='Apache License 2.0',
include_package_data=True,
author="Global Alliance for Genomics and Health",
author_email="[email protected]",
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: Apache Software License',
'Natural Language :: English',
'Programming Language :: Python :: 2.7',
'Topic :: Scientific/Engineering :: Bio-Informatics',
],
keywords=['genomics', 'reference'],
# Use setuptools_scm to set the version number automatically from Git
setup_requires=['setuptools_scm'],
)
| apache-2.0 |
saullocastro/compmech | doc/pyplots/theory/fem/fsdt_donnell_kquad4.py | 3 | 1473 | from matplotlib.pyplot import *
from math import sqrt
m = 1/3.
xs = [+1, +1, -1, -1]
ys = [-1, +1, -1, +1]
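# Figure: the KQUAD4 element in natural (xi, eta) coordinates -- corner nodes
# n1..n4 at (+/-1, +/-1), with the radii r1 and r2 annotated on the left and
# right edges.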
figure(figsize=(4, 4))
ax = gca()
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
ax.spines['left'].set_position(('data', 0))
ax.spines['bottom'].set_position(('data', 0))
ax.xaxis.set_ticks_position('none')
ax.yaxis.set_ticks_position('none')
ax.set_aspect('equal')
ax.set_xlim(-1.4, +1.6)
ax.set_ylim(-1.4, +1.6)
ax.text(1.8, 0., r'$\xi$', transform=ax.transData, va='center')
ax.text(0., 1.8, r'$\eta$', rotation='horizontal', transform=ax.transData,
ha='center')
ax.text(+1.1, +1.1, '$n_1$\n' + r'$(+1, +1)$', ha='center', va='bottom',
fontsize=10)
ax.text(-1.1, +1.1, '$n_2$\n' + r'$(-1, +1)$', ha='center', va='bottom',
fontsize=10)
ax.text(-1.1, -1.1, '$n_3$\n' + r'$(-1, -1)$', ha='center', va='top' ,
fontsize=10)
ax.text(+1.1, -1.1, '$n_4$\n' + r'$(+1, -1)$', ha='center', va='top' ,
fontsize=10)
# radius
ax.annotate('$r_1$', xy=(-1, 0.5), xytext=(-0.5, 0.2),
arrowprops=dict(arrowstyle='->'), va='center', ha='center')
ax.annotate('$r_2$', xy=(+1, 0.5), xytext=(+0.5, 0.2),
arrowprops=dict(arrowstyle='->'), va='center', ha='center')
ax.set_xticks([])
ax.set_yticks([])
#ax.set_xticklabels(['-1', '+1'])
#ax.set_yticklabels(['-1', '+1'])
plot([1, -1, -1, 1, 1], [1, 1, -1, -1, 1], '-k')
plot(xs, ys, 'ok', mfc='k')
tight_layout()
savefig('test.png')
#show()
| bsd-3-clause |
timduru/platform-external-chromium_org | tools/site_compare/site_compare.py | 179 | 6504 | #!/usr/bin/env python
# Copyright (c) 2011 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""SiteCompare component to handle bulk scrapes.
Invokes a list of browsers and sends them to a list of URLs,
saving the rendered results to a specified directory, then
performs comparison operations on the resulting bitmaps and
saves the results
"""
# This line is necessary to work around a QEMU bug
import _imaging
import os # Functions for walking the directory tree
import sys # Exiting with the status returned by main()
import types # Runtime type-checking
import command_line # command-line parsing
import drivers # Functions for driving keyboard/mouse/windows, OS-specific
import operators # Functions that, given two bitmaps as input, produce
# output depending on the performance of an operation
import scrapers # Functions that know how to capture a render from
# particular browsers
import commands.compare2 # compare one page in two versions of same browser
import commands.maskmaker # generate a mask based on repeated scrapes
import commands.measure # measure length of time a page takes to load
import commands.scrape # scrape a URL or series of URLs to a bitmap
# The timeload command is obsolete (too flaky); it may be reinstated
# later but for now it's been superceded by "measure"
# import commands.timeload # measure length of time a page takes to load
def Scrape(browsers, urls, window_size=(1024, 768),
window_pos=(0, 0), timeout=20, save_path=None, **kwargs):
"""Invoke one or more browsers over one or more URLs, scraping renders.
Args:
browsers: browsers to invoke with optional version strings
urls: URLs to visit
window_size: size of the browser window to display
window_pos: location of browser window
timeout: time (in seconds) to wait for page to load
save_path: root of save path, automatically appended with browser and
version
kwargs: miscellaneous keyword args, passed to scraper
Returns:
None
@TODO(jhaas): more parameters, or perhaps an indefinite dictionary
parameter, for things like length of time to wait for timeout, speed
of mouse clicks, etc. Possibly on a per-browser, per-URL, or
per-browser-per-URL basis
"""
if type(browsers) in types.StringTypes: browsers = [browsers]
if save_path is None:
# default save path is "scrapes" off the current root
save_path = os.path.join(os.path.split(__file__)[0], "Scrapes")
for browser in browsers:
# Browsers should be tuples of (browser, version)
if type(browser) in types.StringTypes: browser = (browser, None)
scraper = scrapers.GetScraper(browser)
full_path = os.path.join(save_path, browser[0], scraper.version)
drivers.windowing.PreparePath(full_path)
scraper.Scrape(urls, full_path, window_size, window_pos, timeout, kwargs)
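# Illustrative call (hypothetical browser/version and URL; actual values depend
# on the scrapers available on this machine):
#
#   Scrape([('chrome', '10.0.648.204')], ['http://www.example.com'],
#          window_size=(1024, 768), timeout=30)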
def Compare(base, compare, ops, root_path=None, out_path=None):
"""Compares a series of scrapes using a series of operators.
Args:
base: (browser, version) tuple of version to consider the baseline
compare: (browser, version) tuple of version to compare to
ops: list of operators plus operator arguments
root_path: root of the scrapes
out_path: place to put any output from the operators
Returns:
None
@TODO(jhaas): this method will likely change, to provide a robust and
well-defined way of chaining operators, applying operators conditionally,
and full-featured scripting of the operator chain. There also needs
to be better definition of the output; right now it's to stdout and
a log.txt file, with operator-dependent images saved for error output
"""
if root_path is None:
# default save path is "scrapes" off the current root
root_path = os.path.join(os.path.split(__file__)[0], "Scrapes")
if out_path is None:
out_path = os.path.join(os.path.split(__file__)[0], "Compares")
if type(base) in types.StringTypes: base = (base, None)
if type(compare) in types.StringTypes: compare = (compare, None)
if type(ops) in types.StringTypes: ops = [ops]
base_dir = os.path.join(root_path, base[0])
compare_dir = os.path.join(root_path, compare[0])
if base[1] is None:
# base defaults to earliest capture
base = (base[0], max(os.listdir(base_dir)))
if compare[1] is None:
# compare defaults to latest capture
compare = (compare[0], min(os.listdir(compare_dir)))
out_path = os.path.join(out_path, base[0], base[1], compare[0], compare[1])
drivers.windowing.PreparePath(out_path)
# TODO(jhaas): right now we're just dumping output to a log file
# (and the console), which works as far as it goes but isn't nearly
# robust enough. Change this after deciding exactly what we want to
# change it to.
out_file = open(os.path.join(out_path, "log.txt"), "w")
description_string = ("Comparing %s %s to %s %s" %
(base[0], base[1], compare[0], compare[1]))
out_file.write(description_string)
print description_string
base_dir = os.path.join(base_dir, base[1])
compare_dir = os.path.join(compare_dir, compare[1])
for filename in os.listdir(base_dir):
out_file.write("%s: " % filename)
if not os.path.isfile(os.path.join(compare_dir, filename)):
out_file.write("Does not exist in target directory\n")
print "File %s does not exist in target directory" % filename
continue
base_filename = os.path.join(base_dir, filename)
compare_filename = os.path.join(compare_dir, filename)
for op in ops:
if type(op) in types.StringTypes: op = (op, None)
module = operators.GetOperator(op[0])
ret = module.Compare(base_filename, compare_filename)
if ret is None:
print "%s: OK" % (filename,)
out_file.write("OK\n")
else:
print "%s: %s" % (filename, ret[0])
out_file.write("%s\n" % (ret[0]))
ret[1].save(os.path.join(out_path, filename))
out_file.close()
def main():
"""Main executable. Parse the command line and invoke the command."""
cmdline = command_line.CommandLine()
# The below two commands are currently unstable so have been disabled
# commands.compare2.CreateCommand(cmdline)
# commands.maskmaker.CreateCommand(cmdline)
commands.measure.CreateCommand(cmdline)
commands.scrape.CreateCommand(cmdline)
cmdline.ParseCommandLine()
return 0
if __name__ == "__main__":
sys.exit(main())
| bsd-3-clause |
SummerLW/Perf-Insight-Report | third_party/gsutil/third_party/apitools/apitools/base/py/batch_test.py | 11 | 19527 | """Tests for google3.cloud.bigscience.apitools.base.py.batch."""
import textwrap
import mock
from six.moves import http_client
from six.moves.urllib import parse
import unittest2
from apitools.base.py import batch
from apitools.base.py import exceptions
from apitools.base.py import http_wrapper
class FakeCredentials(object):
def __init__(self):
self.num_refreshes = 0
def refresh(self, _):
self.num_refreshes += 1
class FakeHttp(object):
class FakeRequest(object):
def __init__(self, credentials=None):
if credentials is not None:
self.credentials = credentials
def __init__(self, credentials=None):
self.request = FakeHttp.FakeRequest(credentials=credentials)
class FakeService(object):
"""A service for testing."""
def GetMethodConfig(self, _):
return {}
def GetUploadConfig(self, _):
return {}
# pylint: disable=unused-argument
def PrepareHttpRequest(
self, method_config, request, global_params, upload_config):
return global_params['desired_request']
# pylint: enable=unused-argument
def ProcessHttpResponse(self, _, http_response):
return http_response
class BatchTest(unittest2.TestCase):
def assertUrlEqual(self, expected_url, provided_url):
def parse_components(url):
parsed = parse.urlsplit(url)
query = parse.parse_qs(parsed.query)
return parsed._replace(query=''), query
expected_parse, expected_query = parse_components(expected_url)
provided_parse, provided_query = parse_components(provided_url)
self.assertEqual(expected_parse, provided_parse)
self.assertEqual(expected_query, provided_query)
def __ConfigureMock(self, mock_request, expected_request, response):
if isinstance(response, list):
response = list(response)
def CheckRequest(_, request, **unused_kwds):
self.assertUrlEqual(expected_request.url, request.url)
self.assertEqual(expected_request.http_method, request.http_method)
if isinstance(response, list):
return response.pop(0)
else:
return response
mock_request.side_effect = CheckRequest
def testRequestServiceUnavailable(self):
mock_service = FakeService()
desired_url = 'https://www.example.com'
batch_api_request = batch.BatchApiRequest(batch_url=desired_url,
retryable_codes=[])
# The request to be added. The actual request sent will be somewhat
# larger, as this is added to a batch.
desired_request = http_wrapper.Request(desired_url, 'POST', {
'content-type': 'multipart/mixed; boundary="None"',
'content-length': 80,
}, 'x' * 80)
with mock.patch.object(http_wrapper, 'MakeRequest',
autospec=True) as mock_request:
self.__ConfigureMock(
mock_request,
http_wrapper.Request(desired_url, 'POST', {
'content-type': 'multipart/mixed; boundary="None"',
'content-length': 419,
}, 'x' * 419),
http_wrapper.Response({
'status': '200',
'content-type': 'multipart/mixed; boundary="boundary"',
}, textwrap.dedent("""\
--boundary
content-type: text/plain
content-id: <id+0>
HTTP/1.1 503 SERVICE UNAVAILABLE
nope
--boundary--"""), None))
batch_api_request.Add(
mock_service, 'unused', None,
global_params={'desired_request': desired_request})
api_request_responses = batch_api_request.Execute(
FakeHttp(), sleep_between_polls=0)
self.assertEqual(1, len(api_request_responses))
# Make sure we didn't retry non-retryable code 503.
self.assertEqual(1, mock_request.call_count)
self.assertTrue(api_request_responses[0].is_error)
self.assertIsNone(api_request_responses[0].response)
self.assertIsInstance(api_request_responses[0].exception,
exceptions.HttpError)
def testSingleRequestInBatch(self):
mock_service = FakeService()
desired_url = 'https://www.example.com'
batch_api_request = batch.BatchApiRequest(batch_url=desired_url)
# The request to be added. The actual request sent will be somewhat
# larger, as this is added to a batch.
desired_request = http_wrapper.Request(desired_url, 'POST', {
'content-type': 'multipart/mixed; boundary="None"',
'content-length': 80,
}, 'x' * 80)
with mock.patch.object(http_wrapper, 'MakeRequest',
autospec=True) as mock_request:
self.__ConfigureMock(
mock_request,
http_wrapper.Request(desired_url, 'POST', {
'content-type': 'multipart/mixed; boundary="None"',
'content-length': 419,
}, 'x' * 419),
http_wrapper.Response({
'status': '200',
'content-type': 'multipart/mixed; boundary="boundary"',
}, textwrap.dedent("""\
--boundary
content-type: text/plain
content-id: <id+0>
HTTP/1.1 200 OK
content
--boundary--"""), None))
batch_api_request.Add(mock_service, 'unused', None, {
'desired_request': desired_request,
})
api_request_responses = batch_api_request.Execute(FakeHttp())
self.assertEqual(1, len(api_request_responses))
self.assertEqual(1, mock_request.call_count)
self.assertFalse(api_request_responses[0].is_error)
response = api_request_responses[0].response
self.assertEqual({'status': '200'}, response.info)
self.assertEqual('content', response.content)
self.assertEqual(desired_url, response.request_url)
def testRefreshOnAuthFailure(self):
mock_service = FakeService()
desired_url = 'https://www.example.com'
batch_api_request = batch.BatchApiRequest(batch_url=desired_url)
# The request to be added. The actual request sent will be somewhat
# larger, as this is added to a batch.
desired_request = http_wrapper.Request(desired_url, 'POST', {
'content-type': 'multipart/mixed; boundary="None"',
'content-length': 80,
}, 'x' * 80)
with mock.patch.object(http_wrapper, 'MakeRequest',
autospec=True) as mock_request:
self.__ConfigureMock(
mock_request,
http_wrapper.Request(desired_url, 'POST', {
'content-type': 'multipart/mixed; boundary="None"',
'content-length': 419,
}, 'x' * 419), [
http_wrapper.Response({
'status': '200',
'content-type': 'multipart/mixed; boundary="boundary"',
}, textwrap.dedent("""\
--boundary
content-type: text/plain
content-id: <id+0>
HTTP/1.1 401 UNAUTHORIZED
Invalid grant
--boundary--"""), None),
http_wrapper.Response({
'status': '200',
'content-type': 'multipart/mixed; boundary="boundary"',
}, textwrap.dedent("""\
--boundary
content-type: text/plain
content-id: <id+0>
HTTP/1.1 200 OK
content
--boundary--"""), None)
])
batch_api_request.Add(mock_service, 'unused', None, {
'desired_request': desired_request,
})
credentials = FakeCredentials()
api_request_responses = batch_api_request.Execute(
FakeHttp(credentials=credentials), sleep_between_polls=0)
self.assertEqual(1, len(api_request_responses))
self.assertEqual(2, mock_request.call_count)
self.assertEqual(1, credentials.num_refreshes)
self.assertFalse(api_request_responses[0].is_error)
response = api_request_responses[0].response
self.assertEqual({'status': '200'}, response.info)
self.assertEqual('content', response.content)
self.assertEqual(desired_url, response.request_url)
def testNoAttempts(self):
desired_url = 'https://www.example.com'
batch_api_request = batch.BatchApiRequest(batch_url=desired_url)
batch_api_request.Add(FakeService(), 'unused', None, {
'desired_request': http_wrapper.Request(desired_url, 'POST', {
'content-type': 'multipart/mixed; boundary="None"',
'content-length': 80,
}, 'x' * 80),
})
api_request_responses = batch_api_request.Execute(None, max_retries=0)
self.assertEqual(1, len(api_request_responses))
self.assertIsNone(api_request_responses[0].response)
self.assertIsNone(api_request_responses[0].exception)
def _DoTestConvertIdToHeader(self, test_id, expected_result):
batch_request = batch.BatchHttpRequest('https://www.example.com')
self.assertEqual(
expected_result % batch_request._BatchHttpRequest__base_id,
batch_request._ConvertIdToHeader(test_id))
def testConvertIdSimple(self):
self._DoTestConvertIdToHeader('blah', '<%s+blah>')
def testConvertIdThatNeedsEscaping(self):
self._DoTestConvertIdToHeader('~tilde1', '<%s+%%7Etilde1>')
def _DoTestConvertHeaderToId(self, header, expected_id):
batch_request = batch.BatchHttpRequest('https://www.example.com')
self.assertEqual(expected_id,
batch_request._ConvertHeaderToId(header))
def testConvertHeaderToIdSimple(self):
self._DoTestConvertHeaderToId('<hello+blah>', 'blah')
def testConvertHeaderToIdWithLotsOfPlus(self):
self._DoTestConvertHeaderToId('<a+++++plus>', 'plus')
def _DoTestConvertInvalidHeaderToId(self, invalid_header):
batch_request = batch.BatchHttpRequest('https://www.example.com')
self.assertRaises(exceptions.BatchError,
batch_request._ConvertHeaderToId, invalid_header)
def testHeaderWithoutAngleBrackets(self):
self._DoTestConvertInvalidHeaderToId('1+1')
def testHeaderWithoutPlus(self):
self._DoTestConvertInvalidHeaderToId('<HEADER>')
def testSerializeRequest(self):
request = http_wrapper.Request(body='Hello World', headers={
'content-type': 'protocol/version',
})
expected_serialized_request = '\n'.join([
'GET HTTP/1.1',
'Content-Type: protocol/version',
'MIME-Version: 1.0',
'content-length: 11',
'Host: ',
'',
'Hello World',
])
batch_request = batch.BatchHttpRequest('https://www.example.com')
self.assertEqual(expected_serialized_request,
batch_request._SerializeRequest(request))
def testSerializeRequestPreservesHeaders(self):
# Now confirm that if an additional, arbitrary header is added
# that it is successfully serialized to the request. Merely
# check that it is included, because the order of the headers
# in the request is arbitrary.
request = http_wrapper.Request(body='Hello World', headers={
'content-type': 'protocol/version',
'key': 'value',
})
batch_request = batch.BatchHttpRequest('https://www.example.com')
self.assertTrue(
'key: value\n' in batch_request._SerializeRequest(request))
def testSerializeRequestNoBody(self):
request = http_wrapper.Request(body=None, headers={
'content-type': 'protocol/version',
})
expected_serialized_request = '\n'.join([
'GET HTTP/1.1',
'Content-Type: protocol/version',
'MIME-Version: 1.0',
'Host: ',
'',
'',
])
batch_request = batch.BatchHttpRequest('https://www.example.com')
self.assertEqual(expected_serialized_request,
batch_request._SerializeRequest(request))
def testDeserializeRequest(self):
serialized_payload = '\n'.join([
'GET HTTP/1.1',
'Content-Type: protocol/version',
'MIME-Version: 1.0',
'content-length: 11',
'key: value',
'Host: ',
'',
'Hello World',
])
example_url = 'https://www.example.com'
expected_response = http_wrapper.Response({
'content-length': str(len('Hello World')),
'Content-Type': 'protocol/version',
'key': 'value',
'MIME-Version': '1.0',
'status': '',
'Host': ''
}, 'Hello World', example_url)
batch_request = batch.BatchHttpRequest(example_url)
self.assertEqual(
expected_response,
batch_request._DeserializeResponse(serialized_payload))
def testNewId(self):
batch_request = batch.BatchHttpRequest('https://www.example.com')
for i in range(100):
self.assertEqual(str(i), batch_request._NewId())
def testAdd(self):
batch_request = batch.BatchHttpRequest('https://www.example.com')
for x in range(100):
batch_request.Add(http_wrapper.Request(body=str(x)))
for key in batch_request._BatchHttpRequest__request_response_handlers:
value = batch_request._BatchHttpRequest__request_response_handlers[
key]
self.assertEqual(key, value.request.body)
self.assertFalse(value.request.url)
self.assertEqual('GET', value.request.http_method)
self.assertIsNone(value.response)
self.assertIsNone(value.handler)
def testInternalExecuteWithFailedRequest(self):
with mock.patch.object(http_wrapper, 'MakeRequest',
autospec=True) as mock_request:
self.__ConfigureMock(
mock_request,
http_wrapper.Request('https://www.example.com', 'POST', {
'content-type': 'multipart/mixed; boundary="None"',
'content-length': 80,
}, 'x' * 80),
http_wrapper.Response({'status': '300'}, None, None))
batch_request = batch.BatchHttpRequest('https://www.example.com')
self.assertRaises(
exceptions.HttpError, batch_request._Execute, None)
def testInternalExecuteWithNonMultipartResponse(self):
with mock.patch.object(http_wrapper, 'MakeRequest',
autospec=True) as mock_request:
self.__ConfigureMock(
mock_request,
http_wrapper.Request('https://www.example.com', 'POST', {
'content-type': 'multipart/mixed; boundary="None"',
'content-length': 80,
}, 'x' * 80),
http_wrapper.Response({
'status': '200',
'content-type': 'blah/blah'
}, '', None))
batch_request = batch.BatchHttpRequest('https://www.example.com')
self.assertRaises(
exceptions.BatchError, batch_request._Execute, None)
def testInternalExecute(self):
with mock.patch.object(http_wrapper, 'MakeRequest',
autospec=True) as mock_request:
self.__ConfigureMock(
mock_request,
http_wrapper.Request('https://www.example.com', 'POST', {
'content-type': 'multipart/mixed; boundary="None"',
'content-length': 583,
}, 'x' * 583),
http_wrapper.Response({
'status': '200',
'content-type': 'multipart/mixed; boundary="boundary"',
}, textwrap.dedent("""\
--boundary
content-type: text/plain
content-id: <id+2>
HTTP/1.1 200 OK
Second response
--boundary
content-type: text/plain
content-id: <id+1>
HTTP/1.1 401 UNAUTHORIZED
First response
--boundary--"""), None))
test_requests = {
'1': batch.RequestResponseAndHandler(
http_wrapper.Request(body='first'), None, None),
'2': batch.RequestResponseAndHandler(
http_wrapper.Request(body='second'), None, None),
}
batch_request = batch.BatchHttpRequest('https://www.example.com')
batch_request._BatchHttpRequest__request_response_handlers = (
test_requests)
batch_request._Execute(FakeHttp())
test_responses = (
batch_request._BatchHttpRequest__request_response_handlers)
self.assertEqual(http_client.UNAUTHORIZED,
test_responses['1'].response.status_code)
self.assertEqual(http_client.OK,
test_responses['2'].response.status_code)
self.assertIn(
'First response', test_responses['1'].response.content)
self.assertIn(
'Second response', test_responses['2'].response.content)
def testPublicExecute(self):
def LocalCallback(response, exception):
self.assertEqual({'status': '418'}, response.info)
self.assertEqual('Teapot', response.content)
self.assertIsNone(response.request_url)
self.assertIsInstance(exception, exceptions.HttpError)
global_callback = mock.Mock()
batch_request = batch.BatchHttpRequest(
'https://www.example.com', global_callback)
with mock.patch.object(batch.BatchHttpRequest, '_Execute',
autospec=True) as mock_execute:
mock_execute.return_value = None
test_requests = {
'0': batch.RequestResponseAndHandler(
None,
http_wrapper.Response({'status': '200'}, 'Hello!', None),
None),
'1': batch.RequestResponseAndHandler(
None,
http_wrapper.Response({'status': '418'}, 'Teapot', None),
LocalCallback),
}
batch_request._BatchHttpRequest__request_response_handlers = (
test_requests)
batch_request.Execute(None)
# Global callback was called once per handler.
self.assertEqual(len(test_requests), global_callback.call_count)
| bsd-3-clause |
Cian47/anti_bicycle_theft | python-api/env/lib/python3.5/site-packages/pip/_vendor/html5lib/treebuilders/dom.py | 920 | 8469 | from __future__ import absolute_import, division, unicode_literals
from xml.dom import minidom, Node
import weakref
from . import _base
from .. import constants
from ..constants import namespaces
from ..utils import moduleFactoryFactory
def getDomBuilder(DomImplementation):
Dom = DomImplementation
class AttrList(object):
def __init__(self, element):
self.element = element
def __iter__(self):
return list(self.element.attributes.items()).__iter__()
def __setitem__(self, name, value):
self.element.setAttribute(name, value)
def __len__(self):
return len(list(self.element.attributes.items()))
def items(self):
return [(item[0], item[1]) for item in
list(self.element.attributes.items())]
def keys(self):
return list(self.element.attributes.keys())
def __getitem__(self, name):
return self.element.getAttribute(name)
def __contains__(self, name):
if isinstance(name, tuple):
raise NotImplementedError
else:
return self.element.hasAttribute(name)
class NodeBuilder(_base.Node):
def __init__(self, element):
_base.Node.__init__(self, element.nodeName)
self.element = element
namespace = property(lambda self: hasattr(self.element, "namespaceURI")
and self.element.namespaceURI or None)
def appendChild(self, node):
node.parent = self
self.element.appendChild(node.element)
def insertText(self, data, insertBefore=None):
text = self.element.ownerDocument.createTextNode(data)
if insertBefore:
self.element.insertBefore(text, insertBefore.element)
else:
self.element.appendChild(text)
def insertBefore(self, node, refNode):
self.element.insertBefore(node.element, refNode.element)
node.parent = self
def removeChild(self, node):
if node.element.parentNode == self.element:
self.element.removeChild(node.element)
node.parent = None
def reparentChildren(self, newParent):
while self.element.hasChildNodes():
child = self.element.firstChild
self.element.removeChild(child)
newParent.element.appendChild(child)
self.childNodes = []
def getAttributes(self):
return AttrList(self.element)
def setAttributes(self, attributes):
if attributes:
for name, value in list(attributes.items()):
if isinstance(name, tuple):
if name[0] is not None:
qualifiedName = (name[0] + ":" + name[1])
else:
qualifiedName = name[1]
self.element.setAttributeNS(name[2], qualifiedName,
value)
else:
self.element.setAttribute(
name, value)
attributes = property(getAttributes, setAttributes)
def cloneNode(self):
return NodeBuilder(self.element.cloneNode(False))
def hasContent(self):
return self.element.hasChildNodes()
def getNameTuple(self):
if self.namespace is None:
return namespaces["html"], self.name
else:
return self.namespace, self.name
nameTuple = property(getNameTuple)
class TreeBuilder(_base.TreeBuilder):
def documentClass(self):
self.dom = Dom.getDOMImplementation().createDocument(None, None, None)
return weakref.proxy(self)
def insertDoctype(self, token):
name = token["name"]
publicId = token["publicId"]
systemId = token["systemId"]
domimpl = Dom.getDOMImplementation()
doctype = domimpl.createDocumentType(name, publicId, systemId)
self.document.appendChild(NodeBuilder(doctype))
if Dom == minidom:
doctype.ownerDocument = self.dom
def elementClass(self, name, namespace=None):
if namespace is None and self.defaultNamespace is None:
node = self.dom.createElement(name)
else:
node = self.dom.createElementNS(namespace, name)
return NodeBuilder(node)
def commentClass(self, data):
return NodeBuilder(self.dom.createComment(data))
def fragmentClass(self):
return NodeBuilder(self.dom.createDocumentFragment())
def appendChild(self, node):
self.dom.appendChild(node.element)
def testSerializer(self, element):
return testSerializer(element)
def getDocument(self):
return self.dom
def getFragment(self):
return _base.TreeBuilder.getFragment(self).element
def insertText(self, data, parent=None):
data = data
if parent != self:
_base.TreeBuilder.insertText(self, data, parent)
else:
# HACK: allow text nodes as children of the document node
if hasattr(self.dom, '_child_node_types'):
if Node.TEXT_NODE not in self.dom._child_node_types:
self.dom._child_node_types = list(self.dom._child_node_types)
self.dom._child_node_types.append(Node.TEXT_NODE)
self.dom.appendChild(self.dom.createTextNode(data))
implementation = DomImplementation
name = None
def testSerializer(element):
element.normalize()
rv = []
def serializeElement(element, indent=0):
if element.nodeType == Node.DOCUMENT_TYPE_NODE:
if element.name:
if element.publicId or element.systemId:
publicId = element.publicId or ""
systemId = element.systemId or ""
rv.append("""|%s<!DOCTYPE %s "%s" "%s">""" %
(' ' * indent, element.name, publicId, systemId))
else:
rv.append("|%s<!DOCTYPE %s>" % (' ' * indent, element.name))
else:
rv.append("|%s<!DOCTYPE >" % (' ' * indent,))
elif element.nodeType == Node.DOCUMENT_NODE:
rv.append("#document")
elif element.nodeType == Node.DOCUMENT_FRAGMENT_NODE:
rv.append("#document-fragment")
elif element.nodeType == Node.COMMENT_NODE:
rv.append("|%s<!-- %s -->" % (' ' * indent, element.nodeValue))
elif element.nodeType == Node.TEXT_NODE:
rv.append("|%s\"%s\"" % (' ' * indent, element.nodeValue))
else:
if (hasattr(element, "namespaceURI") and
element.namespaceURI is not None):
name = "%s %s" % (constants.prefixes[element.namespaceURI],
element.nodeName)
else:
name = element.nodeName
rv.append("|%s<%s>" % (' ' * indent, name))
if element.hasAttributes():
attributes = []
for i in range(len(element.attributes)):
attr = element.attributes.item(i)
name = attr.nodeName
value = attr.value
ns = attr.namespaceURI
if ns:
name = "%s %s" % (constants.prefixes[ns], attr.localName)
else:
name = attr.nodeName
attributes.append((name, value))
for name, value in sorted(attributes):
rv.append('|%s%s="%s"' % (' ' * (indent + 2), name, value))
indent += 2
for child in element.childNodes:
serializeElement(child, indent)
serializeElement(element, 0)
return "\n".join(rv)
return locals()
# The actual means to get a module!
getDomModule = moduleFactoryFactory(getDomBuilder)
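# Illustrative sketch (not part of the original module): selecting this
# minidom-backed builder through html5lib's public API. It assumes the
# standalone html5lib package is importable; the markup is an arbitrary example.
if __name__ == '__main__':
    import html5lib
    document = html5lib.parse("<p>Hello <b>world</b></p>", treebuilder="dom")
    print(document.toxml())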
| mit |
mensler/ansible | hacking/cherrypick.py | 62 | 1474 | #!/usr/bin/env python3
import os
import sys
import tempfile
import sh
REPO_PATH = {'extras': '/srv/ansible/stable-2.2/lib/ansible/modules/extras',
'core': '/srv/ansible/stable-2.2/lib/ansible/modules/core'}
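# Expected invocation (illustrative; the commit hash is a placeholder):
#
#   ./cherrypick.py <commit-hash> extras|core
#
# The commit is formatted from the 'devel' branch, its paths are rewritten to
# the split modules repo layout, and the patch is applied there with "git am".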
if __name__ == '__main__':
commit_hash = sys.argv[1]
which_modules = sys.argv[2]
git = sh.git.bake('--no-pager', _tty_out=False)
try:
# Get the change
git('checkout', 'devel')
patch = git('format-patch', '-1', '--stdout', commit_hash).stdout
finally:
git('checkout', '-')
# Transform the change for the new repo
patch = patch.replace(b'lib/ansible/modules/', b'')
new_patch = []
patch_stream = (l for l in patch.split(b'\n'))
for line in patch_stream:
if line.strip() == b'---':
new_patch.append(b'(cherry picked from %s)' % commit_hash.encode('utf-8'))
new_patch.append(line)
break
new_patch.append(line)
new_patch.extend(list(patch_stream))
# Save the patch
try:
fh, patchfilename = tempfile.mkstemp()
os.write(fh, b'\n'.join(new_patch))
os.close(fh)
# Apply the patch
try:
orig_dir = os.getcwd()
os.chdir(REPO_PATH[which_modules])
git('am', patchfilename)
finally:
os.chdir(orig_dir)
except Exception:
print("Problem occurred. Patch saved in: {}".format(patchfilename))
else:
os.remove(patchfilename)
| gpl-3.0 |
erikmcc/kubernetes | cluster/juju/layers/kubernetes-worker/lib/charms/kubernetes/flagmanager.py | 290 | 4961 | #!/usr/bin/env python
# Copyright 2015 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from charmhelpers.core import unitdata
class FlagManager:
'''
FlagManager - A Python class for managing the flags to pass to an
application without remembering what's been set previously.
This is a blind class assuming the operator knows what they are doing.
Each instance of this class should be initialized with the intended
application to manage flags. Flags are then appended to a data-structure
and cached in unitdata for later recall.
The underlying data store is a SQLite database on each unit, provided by
the 'charmhelpers' python package (unitdata), which persists the dictionary.
Summary:
opts = FlagManager('docker')
opts.add('bip', '192.168.22.2')
opts.to_s()
'''
def __init__(self, daemon, opts_path=None):
self.db = unitdata.kv()
self.daemon = daemon
if not self.db.get(daemon):
self.data = {}
else:
self.data = self.db.get(daemon)
def __save(self):
self.db.set(self.daemon, self.data)
def add(self, key, value, strict=False):
'''
Adds data to the map of values for the DockerOpts file.
Supports single values, or "multiopt variables". If you
have a flag only option, like --tlsverify, set the value
to None. To preserve the exact value, pass strict
eg:
opts.add('label', 'foo')
opts.add('label', 'foo, bar, baz')
opts.add('flagonly', None)
opts.add('cluster-store', 'consul://a:4001,b:4001,c:4001/swarm',
strict=True)
'''
if strict:
self.data['{}-strict'.format(key)] = value
self.__save()
return
if value:
values = [x.strip() for x in value.split(',')]
# handle updates
if key in self.data and self.data[key] is not None:
item_data = self.data[key]
for c in values:
c = c.strip()
if c not in item_data:
item_data.append(c)
self.data[key] = item_data
else:
# handle new
self.data[key] = values
else:
# handle flagonly
self.data[key] = None
self.__save()
def remove(self, key, value):
'''
Remove a flag value from the DockerOpts manager
Assuming the data is currently {'foo': ['bar', 'baz']}
d.remove('foo', 'bar')
> {'foo': ['baz']}
:param key:
:param value:
'''
self.data[key].remove(value)
self.__save()
def destroy(self, key, strict=False):
'''
Destructively remove a key and all of its values from the FlagManager
Assuming the data is currently {'foo': ['bar', 'baz']}
d.destroy('foo')
> {}
:param key:
:param strict:
'''
try:
if strict:
self.data.pop('{}-strict'.format(key))
else:
self.data.pop(key)
self.__save()
except KeyError:
pass
def get(self, key, default=None):
"""Return the value for ``key``, or the default if ``key`` doesn't exist.
"""
return self.data.get(key, default)
def destroy_all(self):
'''
Destructively removes all data from the FlagManager.
'''
self.data.clear()
self.__save()
def to_s(self):
'''
Render the flags to a single string, prepared for the Docker
Defaults file. Typically in /etc/default/docker
d.to_s()
> "--foo=bar --foo=baz"
'''
flags = []
for key in self.data:
if self.data[key] is None:
# handle flagonly
flags.append("{}".format(key))
elif '-strict' in key:
# handle strict values, and do it in 2 steps.
# If we rstrip -strict it strips a tailing s
proper_key = key.rstrip('strict').rstrip('-')
flags.append("{}={}".format(proper_key, self.data[key]))
else:
# handle multiopt and typical flags
for item in self.data[key]:
flags.append("{}={}".format(key, item))
return ' '.join(flags)
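# Illustrative sketch (not part of the original charm library): exercising the
# workflow from the class docstring. Flag names/values are arbitrary examples;
# running this requires charmhelpers and creates a local unit-state database.
if __name__ == '__main__':
    opts = FlagManager('docker')
    opts.add('bip', '192.168.22.2')
    opts.add('insecure-registry', '10.1.0.0/16')
    print(opts.to_s())
    opts.destroy_all()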
| apache-2.0 |
JackWoot/E2E-Messenger | Server/server/lib/werkzeug/contrib/wrappers.py | 318 | 10331 | # -*- coding: utf-8 -*-
"""
werkzeug.contrib.wrappers
~~~~~~~~~~~~~~~~~~~~~~~~~
Extra wrappers or mixins contributed by the community. These wrappers can
be mixed in into request objects to add extra functionality.
Example::
from werkzeug.wrappers import Request as RequestBase
from werkzeug.contrib.wrappers import JSONRequestMixin
class Request(RequestBase, JSONRequestMixin):
pass
Afterwards this request object provides the extra functionality of the
:class:`JSONRequestMixin`.
:copyright: (c) 2013 by the Werkzeug Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
import codecs
try:
from simplejson import loads
except ImportError:
from json import loads
from werkzeug.exceptions import BadRequest
from werkzeug.utils import cached_property
from werkzeug.http import dump_options_header, parse_options_header
from werkzeug._compat import wsgi_decoding_dance
def is_known_charset(charset):
"""Checks if the given charset is known to Python."""
try:
codecs.lookup(charset)
except LookupError:
return False
return True
class JSONRequestMixin(object):
"""Add json method to a request object. This will parse the input data
through simplejson if possible.
:exc:`~werkzeug.exceptions.BadRequest` will be raised if the content-type
is not json or if the data itself cannot be parsed as json.
"""
@cached_property
def json(self):
"""Get the result of simplejson.loads if possible."""
if 'json' not in self.environ.get('CONTENT_TYPE', ''):
raise BadRequest('Not a JSON request')
try:
return loads(self.data)
except Exception:
raise BadRequest('Unable to read JSON request')
class ProtobufRequestMixin(object):
"""Add protobuf parsing method to a request object. This will parse the
input data through `protobuf`_ if possible.
:exc:`~werkzeug.exceptions.BadRequest` will be raised if the content-type
is not protobuf or if the data itself cannot be parsed property.
.. _protobuf: http://code.google.com/p/protobuf/
"""
#: by default the :class:`ProtobufRequestMixin` will raise a
#: :exc:`~werkzeug.exceptions.BadRequest` if the object is not
#: initialized. You can bypass that check by setting this
#: attribute to `False`.
protobuf_check_initialization = True
def parse_protobuf(self, proto_type):
"""Parse the data into an instance of proto_type."""
if 'protobuf' not in self.environ.get('CONTENT_TYPE', ''):
raise BadRequest('Not a Protobuf request')
obj = proto_type()
try:
obj.ParseFromString(self.data)
except Exception:
raise BadRequest("Unable to parse Protobuf request")
# Fail if not all required fields are set
if self.protobuf_check_initialization and not obj.IsInitialized():
raise BadRequest("Partial Protobuf request")
return obj
class RoutingArgsRequestMixin(object):
"""This request mixin adds support for the wsgiorg routing args
`specification`_.
.. _specification: http://www.wsgi.org/wsgi/Specifications/routing_args
"""
def _get_routing_args(self):
return self.environ.get('wsgiorg.routing_args', (()))[0]
def _set_routing_args(self, value):
if self.shallow:
raise RuntimeError('A shallow request tried to modify the WSGI '
'environment. If you really want to do that, '
'set `shallow` to False.')
self.environ['wsgiorg.routing_args'] = (value, self.routing_vars)
routing_args = property(_get_routing_args, _set_routing_args, doc='''
The positional URL arguments as `tuple`.''')
del _get_routing_args, _set_routing_args
def _get_routing_vars(self):
rv = self.environ.get('wsgiorg.routing_args')
if rv is not None:
return rv[1]
rv = {}
if not self.shallow:
self.routing_vars = rv
return rv
def _set_routing_vars(self, value):
if self.shallow:
raise RuntimeError('A shallow request tried to modify the WSGI '
'environment. If you really want to do that, '
'set `shallow` to False.')
self.environ['wsgiorg.routing_args'] = (self.routing_args, value)
routing_vars = property(_get_routing_vars, _set_routing_vars, doc='''
The keyword URL arguments as `dict`.''')
del _get_routing_vars, _set_routing_vars
class ReverseSlashBehaviorRequestMixin(object):
"""This mixin reverses the trailing slash behavior of :attr:`script_root`
and :attr:`path`. This makes it possible to use :func:`~urlparse.urljoin`
directly on the paths.
Because it changes the behavior or :class:`Request` this class has to be
mixed in *before* the actual request class::
class MyRequest(ReverseSlashBehaviorRequestMixin, Request):
pass
This example shows the differences (for an application mounted on
`/application` and the request going to `/application/foo/bar`):
+---------------+-------------------+---------------------+
| | normal behavior | reverse behavior |
+===============+===================+=====================+
| `script_root` | ``/application`` | ``/application/`` |
+---------------+-------------------+---------------------+
| `path` | ``/foo/bar`` | ``foo/bar`` |
+---------------+-------------------+---------------------+
"""
@cached_property
def path(self):
"""Requested path as unicode. This works a bit like the regular path
info in the WSGI environment but will not include a leading slash.
"""
path = wsgi_decoding_dance(self.environ.get('PATH_INFO') or '',
self.charset, self.encoding_errors)
return path.lstrip('/')
@cached_property
def script_root(self):
"""The root path of the script includling a trailing slash."""
path = wsgi_decoding_dance(self.environ.get('SCRIPT_NAME') or '',
self.charset, self.encoding_errors)
return path.rstrip('/') + '/'
class DynamicCharsetRequestMixin(object):
""""If this mixin is mixed into a request class it will provide
a dynamic `charset` attribute. This means that if the charset is
transmitted in the content type headers it's used from there.
Because it changes the behavior or :class:`Request` this class has
to be mixed in *before* the actual request class::
class MyRequest(DynamicCharsetRequestMixin, Request):
pass
By default the request object assumes that the URL charset is the
same as the data charset. If the charset varies on each request
based on the transmitted data it's not a good idea to let the URLs
change based on that. Most browsers assume either utf-8 or latin1
    for the URLs if they have trouble figuring it out. It's strongly
recommended to set the URL charset to utf-8::
class MyRequest(DynamicCharsetRequestMixin, Request):
url_charset = 'utf-8'
.. versionadded:: 0.6
"""
#: the default charset that is assumed if the content type header
#: is missing or does not contain a charset parameter. The default
#: is latin1 which is what HTTP specifies as default charset.
#: You may however want to set this to utf-8 to better support
#: browsers that do not transmit a charset for incoming data.
default_charset = 'latin1'
def unknown_charset(self, charset):
"""Called if a charset was provided but is not supported by
        the Python codecs module. By default latin1 is assumed so as
        not to lose any information; you may override this method to
change the behavior.
:param charset: the charset that was not found.
:return: the replacement charset.
"""
return 'latin1'
@cached_property
def charset(self):
"""The charset from the content type."""
header = self.environ.get('CONTENT_TYPE')
if header:
ct, options = parse_options_header(header)
charset = options.get('charset')
if charset:
if is_known_charset(charset):
return charset
return self.unknown_charset(charset)
return self.default_charset
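# --- Illustrative usage sketch (editorial addition). Shows how the charset is
# taken from the Content-Type header and falls back to ``default_charset``;
# the ``DynRequest`` subclass is hypothetical.
def _example_dynamic_request_charset():
    from werkzeug.wrappers import Request

    class DynRequest(DynamicCharsetRequestMixin, Request):
        url_charset = 'utf-8'   # keep URL decoding stable, as recommended above

    with_header = DynRequest({'CONTENT_TYPE': 'text/plain; charset=utf-8'})
    without_header = DynRequest({})
    return with_header.charset, without_header.charset   # ('utf-8', 'latin1')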
class DynamicCharsetResponseMixin(object):
"""If this mixin is mixed into a response class it will provide
    a dynamic `charset` attribute. This means that the charset is
    looked up in and stored in the `Content-Type` header and updates
    itself automatically. This also means a small performance hit but
can be useful if you're working with different charsets on
responses.
    Because the charset attribute is not a property at class-level, the
default value is stored in `default_charset`.
    Because it changes the behavior of :class:`Response` this class has
to be mixed in *before* the actual response class::
class MyResponse(DynamicCharsetResponseMixin, Response):
pass
.. versionadded:: 0.6
"""
#: the default charset.
default_charset = 'utf-8'
def _get_charset(self):
header = self.headers.get('content-type')
if header:
charset = parse_options_header(header)[1].get('charset')
if charset:
return charset
return self.default_charset
def _set_charset(self, charset):
header = self.headers.get('content-type')
ct, options = parse_options_header(header)
if not ct:
raise TypeError('Cannot set charset if Content-Type '
'header is missing.')
options['charset'] = charset
self.headers['Content-Type'] = dump_options_header(ct, options)
charset = property(_get_charset, _set_charset, doc="""
The charset for the response. It's stored inside the
Content-Type header as a parameter.""")
del _get_charset, _set_charset
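# --- Illustrative usage sketch (editorial addition). The charset lives in the
# Content-Type header, so reading and assigning it goes through that header;
# the ``DynResponse`` subclass is hypothetical.
def _example_dynamic_response_charset():
    from werkzeug.wrappers import Response

    class DynResponse(DynamicCharsetResponseMixin, Response):
        pass

    response = DynResponse('hello', mimetype='text/plain')
    before = response.charset                  # parsed from Content-Type, else 'utf-8'
    response.charset = 'iso-8859-15'           # rewrites the Content-Type header
    return before, response.headers['Content-Type']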
| gpl-2.0 |
karthikvadla16/spark-tk | regression-tests/sparktkregtests/testcases/frames/column_method_drop_test.py | 14 | 3803 | # vim: set encoding=utf-8
# Copyright (c) 2016 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Tests methods that access or alter columns"""
import unittest
from sparktkregtests.lib import sparktk_test
dummy_int_val = -77 # placeholder data value for added column
dummy_col_count = 1000 # length of dummy list for column add
# This method is to test different sources of functions
# i.e. global
def global_dummy_val_list(row):
return [dummy_int_val for _ in range(0, dummy_col_count)]
class ColumnMethodTest(sparktk_test.SparkTKTestCase):
# Test class bound methods
@staticmethod
def static_dummy_val_list(row):
return [dummy_int_val for _ in range(0, dummy_col_count)]
def setUp(self):
"""Build test_frame"""
super(ColumnMethodTest, self).setUp()
dataset = self.get_file("int_str_float.csv")
schema = [("int", int), ("str", str), ("float", float)]
self.frame = self.context.frame.import_csv(dataset, schema=schema)
def test_column_names(self):
"""all original columns"""
header = self.frame.column_names
self.assertEqual(header, ['int', 'str', 'float'])
def test_column_names_drop(self):
"""Exercise subsets of 1 and 2 columns"""
self.frame.drop_columns('str')
header = self.frame.column_names
self.assertEqual(header, ['int', 'float'])
def test_column_names_drop_multiple(self):
"""Drop multiple columns"""
self.frame.drop_columns(['str', 'float'])
header = self.frame.column_names
self.assertEqual(header, ['int'])
def test_drop_non_existent_column(self):
"""test dropping non-existent column"""
with self.assertRaisesRegexp(
ValueError, 'Invalid column name non-existent provided'):
self.frame.drop_columns("non-existent")
def test_drop_columns(self):
"""Test drop columns scenarios"""
self.frame.add_columns(
lambda row: dummy_int_val, ('product', int))
col_count = len(self.frame.take(1)[0])
self.frame.drop_columns(['int'])
self.assertNotIn('int', self.frame.column_names)
self.assertEqual(col_count-1, len(self.frame.take(1)[0]))
def test_drop_columns_multiple(self):
"""Test drop columns multiple, repeated"""
self.frame.add_columns(
lambda row: dummy_int_val, ('product', int))
col_count = len(self.frame.take(1)[0])
self.frame.drop_columns(['str', 'product', 'str'])
self.assertNotIn('str', self.frame.column_names)
self.assertNotIn('product', self.frame.column_names)
self.assertEqual(col_count-2, len(self.frame.take(1)[0]))
def test_drop_zero_columns(self):
"""Test dropping no columns"""
self.frame.drop_columns([])
header = self.frame.column_names
self.assertEqual(header, ['int', 'str', 'float'])
def test_drop_nonexistent_column(self):
"""Test drop non-existent column"""
with self.assertRaisesRegexp(ValueError, 'Invalid column name'):
self.frame.drop_columns(['no-such-name'])
if __name__ == "__main__":
unittest.main()
| apache-2.0 |
mikeckennedy/cookiecutter-pyramid-talk-python-starter | {{cookiecutter.project_slug}}/{{cookiecutter.project_slug}}/services/cms_service.py | 1 | 1465 | from {{cookiecutter.project_slug}}.data.cms_page import CmsPage
from {{cookiecutter.project_slug}}.data.dbsession import DbSessionFactory
class CmsService:
@classmethod
def get_page_by_url(cls, url):
if not url:
return None
url = url.lower().strip()
session = DbSessionFactory.create_session()
page = session.query(CmsPage) \
.filter(CmsPage.url == url) \
.first()
session.close()
return page
@classmethod
def add_page(cls, url, html, is_redirect=False, redirect_url=None):
if not url or not url.strip():
raise ValueError('url cannot be empty')
url = url.lower().strip()
session = DbSessionFactory.create_session()
page = CmsPage()
page.url = url
page.html = html
page.is_redirect = is_redirect
page.redirect_url = redirect_url
session.add(page)
session.commit()
return page
@classmethod
def init_test_data(cls):
url = '/landing_pages/a_dynamic_cms_page'
if cls.get_page_by_url(url) is not None:
return
cls.add_page(
url,
'<h1>This is a CMS page</h1>\n' +
'\n' +
'<p>\n' +
'You can create them in the DB and any URL can be mapped.<br>\n' +
'See CmsController / CmsService for more info.\n' +
'</p>\n'
)
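# --- Illustrative usage sketch (editorial addition). Assumes
# ``DbSessionFactory`` has already been initialized by the application (as it
# is at startup); the URLs below are placeholders.
def _example_cms_usage():
    CmsService.add_page('/landing_pages/example', '<h1>Example</h1>')
    CmsService.add_page('/old-url', '', is_redirect=True,
                        redirect_url='/landing_pages/example')
    # Lookups are case-insensitive because URLs are normalized to lowercase.
    return CmsService.get_page_by_url('/Landing_Pages/Example')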
| mit |
jamesblunt/sympy | sympy/physics/sho.py | 71 | 2482 | from __future__ import print_function, division
from sympy.core import S, pi, Rational
from sympy.functions import assoc_laguerre, sqrt, exp, factorial, factorial2
def R_nl(n, l, nu, r):
"""
Returns the radial wavefunction R_{nl} for a 3d isotropic harmonic
oscillator.
``n``
the "nodal" quantum number. Corresponds to the number of nodes in
the wavefunction. n >= 0
``l``
the quantum number for orbital angular momentum
``nu``
        mass-scaled frequency: nu = m*omega/(2*hbar) where `m` is the mass
and `omega` the frequency of the oscillator.
(in atomic units nu == omega/2)
``r``
Radial coordinate
Examples
========
>>> from sympy.physics.sho import R_nl
>>> from sympy import var
>>> var("r nu l")
(r, nu, l)
>>> R_nl(0, 0, 1, r)
2*2**(3/4)*exp(-r**2)/pi**(1/4)
>>> R_nl(1, 0, 1, r)
4*2**(1/4)*sqrt(3)*(-2*r**2 + 3/2)*exp(-r**2)/(3*pi**(1/4))
l, nu and r may be symbolic:
>>> R_nl(0, 0, nu, r)
2*2**(3/4)*sqrt(nu**(3/2))*exp(-nu*r**2)/pi**(1/4)
>>> R_nl(0, l, 1, r)
r**l*sqrt(2**(l + 3/2)*2**(l + 2)/factorial2(2*l + 1))*exp(-r**2)/pi**(1/4)
The normalization of the radial wavefunction is:
>>> from sympy import Integral, oo
>>> Integral(R_nl(0, 0, 1, r)**2 * r**2, (r, 0, oo)).n()
1.00000000000000
>>> Integral(R_nl(1, 0, 1, r)**2 * r**2, (r, 0, oo)).n()
1.00000000000000
>>> Integral(R_nl(1, 1, 1, r)**2 * r**2, (r, 0, oo)).n()
1.00000000000000
"""
n, l, nu, r = map(S, [n, l, nu, r])
# formula uses n >= 1 (instead of nodal n >= 0)
n = n + 1
C = sqrt(
((2*nu)**(l + Rational(3, 2))*2**(n + l + 1)*factorial(n - 1))/
(sqrt(pi)*(factorial2(2*n + 2*l - 1)))
)
return C*r**(l)*exp(-nu*r**2)*assoc_laguerre(n - 1, l + S(1)/2, 2*nu*r**2)
def E_nl(n, l, hw):
"""
Returns the Energy of an isotropic harmonic oscillator
``n``
the "nodal" quantum number
``l``
the orbital angular momentum
``hw``
the harmonic oscillator parameter.
The unit of the returned value matches the unit of hw, since the energy is
calculated as:
E_nl = (2*n + l + 3/2)*hw
Examples
========
>>> from sympy.physics.sho import E_nl
>>> from sympy import symbols
>>> x, y, z = symbols('x, y, z')
>>> E_nl(x, y, z)
z*(2*x + y + 3/2)
"""
return (2*n + l + Rational(3, 2))*hw
| bsd-3-clause |
iceihehe/pipeg | python3/genome2.py | 4 | 5090 | #!/usr/bin/env python3
# Copyright © 2012-13 Qtrac Ltd. All rights reserved.
# This program or module is free software: you can redistribute it
# and/or modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version. It is provided for
# educational purposes and is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
import collections
import json
import os
import re
import subprocess
import sys
UTF8 = "utf-8"
TRANSFORM, SUMMARIZE = ("TRANSFORM", "SUMMARIZE")
Code = collections.namedtuple("Code", "name code kind")
def main():
genome = 3 * GENOME
for i, code in enumerate(CODE):
context = dict(genome=genome, target="G[AC]{2}TT", replace="TCGA")
execute(code, context)
if sys.version_info[:2] > (3, 1):
def execute(code, context):
module, offset = create_module(code.code, context)
with subprocess.Popen([sys.executable, "-"], stdin=subprocess.PIPE,
stdout=subprocess.PIPE, stderr=subprocess.PIPE) as process:
communicate(process, code, module, offset)
else:
def execute(code, context):
module, offset = create_module(code.code, context)
process = subprocess.Popen([sys.executable, "-"],
stdin=subprocess.PIPE, stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
communicate(process, code, module, offset)
def create_module(code, context):
lines = ["import json", "result = error = None"]
for key, value in context.items():
lines.append("{} = {!r}".format(key, value))
offset = len(lines) + 1
outputLine = "\nprint(json.dumps((result, error)))"
return "\n".join(lines) + "\n" + code + outputLine, offset
def communicate(process, code, module, offset):
stdout, stderr = process.communicate(module.encode(UTF8))
if stderr:
stderr = stderr.decode(UTF8).lstrip().replace(", in <module>", ":")
stderr = re.sub(", line (\d+)",
lambda match: str(int(match.group(1)) - offset), stderr)
print(re.sub(r'File."[^"]+?"', "'{}' has an error on line "
.format(code.name), stderr))
return
if stdout:
result, error = json.loads(stdout.decode(UTF8))
handle_result(code, result, error)
return
print("'{}' produced no result\n".format(code.name))
def handle_result(code, result, error):
if error is not None:
print("'{}' error: {}".format(code.name, error))
elif result is None:
print("'{}' produced no result".format(code.name))
elif code.kind == TRANSFORM:
genome = result
try:
print("'{}' produced a genome of length {}".format(code.name,
len(genome)))
except TypeError as err:
print("'{}' error: expected a sequence result: {}".format(
code.name, err))
elif code.kind == SUMMARIZE:
print("'{}' produced a result of {}".format(code.name, result))
print()
CODE = (
Code("Count",
"""
import re
matches = re.findall(target, genome)
if matches:
result = len(matches)
else:
error = "'{}' not found".format(target)
""", SUMMARIZE)
,
Code("Replace",
"""
import re
result, count = re.subn(target, replace, genome)
if not count:
error = "no '{}' replacements made".format(target)
""", TRANSFORM)
,
Code("Exception Test",
"""
result = 0
for i in range(len(genome)):
if genome[i] = "A":
result += 1
""", SUMMARIZE)
,
Code("Error Test",
"""
import re
matches = re.findall(target * 5, genome)
if matches:
result = len(matches)
else:
error = "'{}' not found".format(target)
""", TRANSFORM)
,
Code("No Result Test",
"""
# No result
""", TRANSFORM)
,
Code("Wrong Kind Test",
"""
result = len(genome)
""", TRANSFORM)
,
Code("Termination Test",
"""
import sys
result = "terminating"
sys.exit()
""", SUMMARIZE)
,
Code("Length",
"""
result = len(genome)
""", SUMMARIZE)
)
GENOME = """TGTTAGTCGCTCCTCGGTCTAAGACATCAAAGTCGGTCTGCGCGGCTGCTCCCTTAGCGCTG
CATAAGAGCGGGGCAGAGAGAGATAGGCGTTTTGACCGTGGCGAGCAAGGCGCGTCATAGTGTCGCCGTGACTG
ATCCTACTGGGTTCTTGCTACTGCCCGGGTCGCAATCCAAAATCTCCACGCGCTGCCACCCCGAAGAAGATATA
TGTCACTGAATTGTATTGGTAACATAGTCGAATTGGGTTCAGGTAAGTTAGTCGTTTAGCCGCTGCGACAGTGG
TGGAAGGGCGAATAGTGTAAAATTTCGCCTGTTAGTGAACATTATCAGGCTGCCATCGTTGATCGCCCCTCTTA
AACTCAGTCTTAAATGAGTTCCCGCCTAAGGTCATTCGTGCCTTGATGATTGATAGCTCGATTGGTCCCTTATG
AAACCGGACCAGAAATGTACCCGCTGAACCGGTGTCATAAGTGTCGCCGTCCCTACGATCGACACTTCCTGAGC
ACGAACGATTTGCGACGCTGTAATGCCACGAGGACTGCATTGAAGATTTTTTGTCCTAGGTGTATGTGCTTCTC
AGGAAGATGCACTACGCACTCCCCTTATCACGGGTGTGACCATCAGGTAGCGTAGGAAGATTAAGACCGCGTAA
CTATCCCTTTCCGTCGCACTCCGACGTCTCAGCACATGTGCGGGGGCCCCTAATTGAGAAACAGTCCATGGTTG
TCCGTAAGTTTCGGAAATCAACTTCACTGCTAGATGGTTGGACGCCAAGGCTCAATAGGTTGGACTCTAAGAAG
""".replace("\n", "")
if __name__ == "__main__":
main()
| mit |
cedk/odoo | addons/mrp_operations/report/mrp_code_barcode.py | 381 | 1511 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
from openerp.report import report_sxw
class code_barcode(report_sxw.rml_parse):
def __init__(self, cr, uid, name, context):
super(code_barcode, self).__init__(cr, uid, name, context=context)
self.localcontext.update({
'time': time,
})
report_sxw.report_sxw('report.mrp.code.barcode', 'mrp_operations.operation.code', 'addons/mrp_operations/report/mrp_code_barcode.rml',parser=code_barcode,header=False)
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
sjdines/mezzanine-fluent-pages | mezzanine_fluent_pages/mezzanine_layout_page/admin.py | 1 | 6680 | from django.conf.urls import url
from django.contrib import admin
from fluent_contents.admin import PlaceholderEditorAdmin
from fluent_contents.analyzer import get_template_placeholder_data
from fluent_utils.ajax import JsonResponse
from mezzanine.pages.admin import PageAdmin
from . import models, widgets
class FluentContentsLayoutPageAdmin(PlaceholderEditorAdmin, PageAdmin):
"""
Admin configuration for `FluentContentsLayoutPage`.
"""
# The `change_form_template` is overwritten to include the content type id in the JS which is
# used in the fluent ajax calls.
change_form_template = 'admin/fluent_mezzanine/change_form.html'
class Media:
# This is a custom JS adaption of the `fluent_layouts.js` found in
# `fluent_pages.fluentpage`. The only modification is to change the `app_root` variable
# declaration to a new endpoint. The rest of the code has been used here so `fluent_pages`
# is not a requirement to use this package.
js = ('fluent_mezzanine/fluent_layouts.js',)
def get_placeholder_data(self, request, obj=None):
"""
Provides a list of `fluent_contents.models.PlaceholderData`
classes, that describe the contents of the template.
:param request: Django request object.
:param obj: Object to get place holder data from.
:return: list of `~fluent_contents.models.PlaceholderData`
"""
template = self.get_page_template(obj)
if not template:
return [] # No template means no data!
else:
return get_template_placeholder_data(template)
def get_page_template(self, page):
"""
Return the template that is associated with the page.
If no page is provided then the first available template will
        be used as defined in `PageLayout`. If no `PageLayout` exists
then `None` will be returned.
:param page: Page object to obtain the template from.
:return: Template object or None.
"""
if page is None:
# Add page. start with default template.
try:
return models.PageLayout.objects.all()[0].get_template()
except IndexError:
return None
else:
# Change page, honor template of object.
return page.layout.get_template()
# ---- Layout selector code ----
def formfield_for_foreignkey(self, db_field, request=None, **kwargs):
"""
Overwrite the widget for the `layout` foreign key.
:param db_field: Field on the object.
:param request: Django request object.
:param kwargs: Extra keyword arguments.
:return: Formfield.
"""
if db_field.name == 'layout':
kwargs['widget'] = widgets.LayoutSelector
return super(FluentContentsLayoutPageAdmin, self).formfield_for_foreignkey(
db_field,
request,
**kwargs
)
def get_urls(self):
"""
Add URL pattern for obtaining layout information.
:return: List of URL patterns.
"""
urls = super(FluentContentsLayoutPageAdmin, self).get_urls()
my_urls = [
url(
r'^get_layout/(?P<id>\d+)/$',
self.admin_site.admin_view(self.get_layout_view),
name='get_layout',
)
]
return my_urls + urls
def get_layout_view(self, request, id):
"""
Return the metadata about a layout
:param request: Django request object.
:param id: Id integer value (pk) for the layout referenced.
:return: JsonResponse with layout information or error message.
"""
# Get the layout or if it does not exist return an error message.
try:
layout = models.PageLayout.objects.get(pk=id)
except models.PageLayout.DoesNotExist:
json = {'success': False, 'error': 'Layout not found'}
status = 404
else:
template = layout.get_template()
placeholders = get_template_placeholder_data(template)
status = 200
# Set useful information regarding the layout.
json = {
'id': layout.id,
'key': layout.key,
'title': layout.title,
'placeholders': [p.as_dict() for p in placeholders],
}
return JsonResponse(json, status=status)
# ---- Layout permission hooks ----
def get_readonly_fields(self, request, obj=None):
"""
Allow layout modification on initial creation only if no perms.
If the user does not have the privilege to access the layout
field initially we need to overwrite that as it is a required
field.
After it is set we can return to the default behaviour.
:param request: Django request object.
:param obj: Object instance that uses layout fields.
:return: List of read only fields.
"""
fields = super(FluentContentsLayoutPageAdmin, self).get_readonly_fields(request, obj)
if (
obj is not None and
'layout' not in fields and
not self.has_change_page_layout_permission(request, obj)
):
# Disable on edit page only.
# Add page is allowed, need to be able to choose initial layout
fields = fields + ('layout',)
return fields
def has_change_page_layout_permission(self, request, obj):
"""
Whether the user can change the page layout.
:param request: Django request object.
:param obj: Object instance that uses layout fields.
:return: Boolean (True if user has permission to change
the layout; False if the user does not have permission to
change the layout).
"""
codename = '{0}.change_page_layout'.format(obj._meta.app_label)
return request.user.has_perm(codename, obj=obj)
class PageLayoutAdmin(admin.ModelAdmin):
"""
Admin configuration for `PageLayout` model.
"""
# Config list page:
list_display = ['title', 'key', ]
fieldsets = (
(
None, {
'fields': (
'title',
'key',
'template_path'
),
}
),
)
prepopulated_fields = {
'key': (
'title',
)
}
# Admin registration.
admin.site.register(models.FluentContentsLayoutPage, FluentContentsLayoutPageAdmin)
admin.site.register(models.PageLayout, PageLayoutAdmin)
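# --- Illustrative usage sketch (editorial addition). A minimal way to create
# a ``PageLayout`` record so the layout selector above has an entry to offer;
# the key, title and template path are placeholders and the template must
# exist in the project.
def _example_create_default_layout():
    layout, created = models.PageLayout.objects.get_or_create(
        key='standard',
        defaults={
            'title': 'Standard',
            'template_path': 'layouts/standard.html',
        },
    )
    return layout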
| bsd-2-clause |
ibmsoe/tensorflow | tensorflow/python/framework/meta_graph.py | 34 | 26049 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""MetaGraph and related functions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
import os.path
import re
import six
from google.protobuf.any_pb2 import Any
from google.protobuf import text_format
from tensorflow.core.framework import attr_value_pb2
from tensorflow.core.framework import graph_pb2
from tensorflow.core.framework import op_def_pb2
from tensorflow.core.protobuf import meta_graph_pb2
from tensorflow.core.protobuf import saver_pb2
from tensorflow.python.framework import graph_io
from tensorflow.python.framework import importer
from tensorflow.python.framework import op_def_registry
from tensorflow.python.framework import ops
from tensorflow.python.framework import versions
from tensorflow.python.lib.io import file_io
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util import compat
# Prefix to be added to unbound input names so they are easily identifiable.
_UNBOUND_INPUT_PREFIX = "$unbound_inputs_"
def _node_def(from_node_def, export_scope, unbound_inputs, clear_devices=False):
"""Create a `NodeDef` proto with export_scope stripped.
Args:
from_node_def: A `node_def_pb2.NodeDef` protocol buffer.
export_scope: A `string` representing the name scope to remove.
unbound_inputs: An array of unbound input names if they exist.
clear_devices: Boolean which controls whether to clear device information
from node_def. Default false.
Returns:
A `node_def_pb2.NodeDef` protocol buffer.
"""
node_def = copy.deepcopy(from_node_def)
for i, v in enumerate(node_def.input):
if (export_scope and
not node_def.input[i].lstrip("^").startswith(export_scope)):
# Adds "$unbound_inputs_" prefix to the unbound name so they are easily
# identifiable.
node_def.input[i] = re.sub(r"([\^]|^)(.*)",
r"\1" + _UNBOUND_INPUT_PREFIX + r"\2",
compat.as_str(v))
unbound_inputs.append(node_def.input[i])
else:
node_def.input[i] = ops.strip_name_scope(v, export_scope)
node_def.name = compat.as_bytes(
ops.strip_name_scope(from_node_def.name, export_scope))
for k, v in six.iteritems(from_node_def.attr):
if k == "_class":
new_s = [compat.as_bytes(
ops.strip_name_scope(s, export_scope)) for s in v.list.s
if not export_scope or
compat.as_str(s).split("@")[1].startswith(export_scope)]
node_def.attr[k].CopyFrom(attr_value_pb2.AttrValue(
list=attr_value_pb2.AttrValue.ListValue(s=new_s)))
else:
node_def.attr[k].CopyFrom(v)
if clear_devices:
node_def.device = ""
return node_def
def _read_file(filename):
"""Reads a file containing `GraphDef` and returns the protocol buffer.
Args:
filename: `graph_def` filename including the path.
Returns:
A `GraphDef` protocol buffer.
Raises:
IOError: If the file doesn't exist, or cannot be successfully parsed.
"""
graph_def = graph_pb2.GraphDef()
if not file_io.file_exists(filename):
raise IOError("File %s does not exist." % filename)
# First try to read it as a binary file.
file_content = file_io.FileIO(filename, "rb").read()
try:
graph_def.ParseFromString(file_content)
return graph_def
except Exception: # pylint: disable=broad-except
pass
# Next try to read it as a text file.
try:
text_format.Merge(file_content, graph_def)
except text_format.ParseError as e:
raise IOError("Cannot parse file %s: %s." % (filename, str(e)))
return graph_def
def ops_used_by_graph_def(graph_def):
"""Collect the list of ops used by a graph.
Does not validate that the ops are all registered.
Args:
graph_def: A `GraphDef` proto, as from `graph.as_graph_def()`.
Returns:
A list of strings, each naming an op used by the graph.
"""
# Map function names to definitions
name_to_function = {}
for fun in graph_def.library.function:
name_to_function[fun.signature.name] = fun
# Collect the list of op names. Since functions can reference functions, we
# need a recursive traversal.
used_ops = set() # Includes both primitive ops and functions
functions_to_process = [] # A subset of used_ops
def mark_op_as_used(op):
if op not in used_ops and op in name_to_function:
functions_to_process.append(name_to_function[op])
used_ops.add(op)
for node in graph_def.node:
mark_op_as_used(node.op)
while functions_to_process:
fun = functions_to_process.pop()
for node in fun.node_def:
mark_op_as_used(node.op)
return [op for op in used_ops if op not in name_to_function]
def stripped_op_list_for_graph(graph_def):
"""Collect the stripped OpDefs for ops used by a graph.
This function computes the `stripped_op_list` field of `MetaGraphDef` and
similar protos. The result can be communicated from the producer to the
consumer, which can then use the C++ function
`RemoveNewDefaultAttrsFromGraphDef` to improve forwards compatibility.
Args:
graph_def: A `GraphDef` proto, as from `graph.as_graph_def()`.
Returns:
An `OpList` of ops used by the graph.
Raises:
ValueError: If an unregistered op is used.
"""
# This is the Python equivalent of StrippedOpListForGraph in C++.
# Unfortunately, since the Python op registry can differ from that in C++, we
# can't remove the duplication using swig (at least naively).
# TODO(irving): Support taking graphs directly.
used_ops = ops_used_by_graph_def(graph_def)
# Verify that all used ops are registered.
registered_ops = op_def_registry.get_registered_ops()
# These internal ops used by functions are not registered, so we need to
# whitelist them. # TODO(irving): Do something better here.
op_whitelist = ("_Arg", "_Retval", "_ListToArray", "_ArrayToList")
for op in used_ops:
if op not in registered_ops and op not in op_whitelist:
raise ValueError("Op %s is used by the graph, but is not registered" % op)
# Build the stripped op list in sorted order
return op_def_pb2.OpList(op=[registered_ops[op] for op in sorted(used_ops)
if op in registered_ops])
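# --- Illustrative usage sketch (editorial addition). Builds a one-node
# GraphDef by hand and collects the stripped op list for it; only names
# already imported by this module are used.
def _example_stripped_op_list():
  graph_def = graph_pb2.GraphDef()
  node = graph_def.node.add()
  node.name = "x"
  node.op = "Placeholder"
  # Returns an OpList holding just the registered OpDef for "Placeholder".
  return stripped_op_list_for_graph(graph_def)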
def _get_kind_name(item):
"""Returns the kind name in CollectionDef.
Args:
item: A data item.
Returns:
The string representation of the kind in CollectionDef.
"""
if isinstance(item, (six.string_types, six.binary_type)):
kind = "bytes_list"
elif isinstance(item, six.integer_types):
kind = "int64_list"
elif isinstance(item, float):
kind = "float_list"
elif isinstance(item, Any):
kind = "any_list"
else:
kind = "node_list"
return kind
def _should_include_node(node_or_node_name, export_scope):
"""Returns `True` if a node should be included.
Args:
node_or_node_name: A node or `string` node name.
export_scope: `string`. Name scope under which to extract the subgraph. The
      scope name will be stripped from the node definitions for easy import later
into new name scopes.
Returns:
`True` if the node should be included.
"""
if not isinstance(node_or_node_name, six.string_types):
try:
node_name = node_or_node_name.name
except AttributeError:
# Keep the object that we don't know how to process.
return True
else:
node_name = node_or_node_name
return (node_name.startswith(_UNBOUND_INPUT_PREFIX) or
(not export_scope or node_name.startswith(export_scope)))
def add_collection_def(meta_graph_def, key, graph=None,
export_scope=None):
"""Adds a collection to MetaGraphDef protocol buffer.
Args:
meta_graph_def: MetaGraphDef protocol buffer.
key: One of the GraphKeys or user-defined string.
graph: The `Graph` from which to get collections.
export_scope: Optional `string`. Name scope to remove.
"""
if graph and not isinstance(graph, ops.Graph):
raise TypeError("graph must be of type Graph, not %s", type(graph))
if not isinstance(key, six.string_types) and not isinstance(key, bytes):
logging.warning("Only collections with string type keys will be "
"serialized. This key has %s", type(key))
return
# Sets graph to default graph if it's not passed in.
graph = graph or ops.get_default_graph()
collection_list = graph.get_collection(key)
# Remove nodes that should not be exported from the collection list.
collection_list = [x for x in collection_list if
_should_include_node(x, export_scope)]
if not collection_list:
return
try:
col_def = meta_graph_def.collection_def[key]
to_proto = ops.get_to_proto_function(key)
proto_type = ops.get_collection_proto_type(key)
if to_proto:
kind = "bytes_list"
for x in collection_list:
# Additional type check to make sure the returned proto is indeed
# what we expect.
proto = to_proto(x, export_scope=export_scope)
if proto:
assert isinstance(proto, proto_type)
getattr(col_def, kind).value.append(proto.SerializeToString())
else:
kind = _get_kind_name(collection_list[0])
if kind == "node_list":
for x in collection_list:
if not export_scope or x.name.startswith(export_scope):
getattr(col_def, kind).value.append(
ops.strip_name_scope(x.name, export_scope))
elif kind == "bytes_list":
# NOTE(opensource): This force conversion is to work around the fact
# that Python3 distinguishes between bytes and strings.
getattr(col_def, kind).value.extend(
[compat.as_bytes(x) for x in collection_list])
else:
getattr(col_def, kind).value.extend([x for x in collection_list])
except Exception as e: # pylint: disable=broad-except
logging.warning("Error encountered when serializing %s.\n"
"Type is unsupported, or the types of the items don't "
"match field type in CollectionDef.\n%s", key, str(e))
if key in meta_graph_def.collection_def:
del meta_graph_def.collection_def[key]
return
def create_meta_graph_def(meta_info_def=None,
graph_def=None,
saver_def=None,
collection_list=None,
graph=None,
export_scope=None):
"""Construct and returns a `MetaGraphDef` protocol buffer.
Args:
meta_info_def: `MetaInfoDef` protocol buffer.
graph_def: `GraphDef` protocol buffer.
saver_def: `SaverDef` protocol buffer.
collection_list: List of string keys to collect.
graph: The `Graph` to create `MetaGraphDef` out of.
export_scope: Optional `string`. Name scope to remove.
Returns:
MetaGraphDef protocol buffer.
Raises:
TypeError: If the arguments are not of the correct proto buffer type.
"""
# Type check.
if graph and not isinstance(graph, ops.Graph):
raise TypeError("graph must be of type Graph, not %s", type(graph))
if meta_info_def and not isinstance(meta_info_def,
meta_graph_pb2.MetaGraphDef.MetaInfoDef):
raise TypeError("meta_info_def must be of type MetaInfoDef, not %s",
type(meta_info_def))
if graph_def and not isinstance(graph_def, graph_pb2.GraphDef):
raise TypeError("graph_def must be of type GraphDef, not %s",
type(graph_def))
if saver_def and not isinstance(saver_def, saver_pb2.SaverDef):
raise TypeError("saver_def must be of type SaverDef, not %s",
type(saver_def))
# Sets graph to default graph if it's not passed in.
graph = graph or ops.get_default_graph()
# Creates a MetaGraphDef proto.
meta_graph_def = meta_graph_pb2.MetaGraphDef()
# Adds meta_info_def.
if not meta_info_def:
meta_info_def = meta_graph_pb2.MetaGraphDef.MetaInfoDef()
# Set the tf version strings to the current tf build.
meta_info_def.tensorflow_version = versions.__version__
meta_info_def.tensorflow_git_version = versions.__git_version__
meta_graph_def.meta_info_def.MergeFrom(meta_info_def)
# Adds graph_def or the default.
if not graph_def:
meta_graph_def.graph_def.MergeFrom(graph.as_graph_def(add_shapes=True))
else:
meta_graph_def.graph_def.MergeFrom(graph_def)
# Fills in meta_info_def.stripped_op_list using the ops from graph_def.
# pylint: disable=g-explicit-length-test
if len(meta_graph_def.meta_info_def.stripped_op_list.op) == 0:
meta_graph_def.meta_info_def.stripped_op_list.MergeFrom(
stripped_op_list_for_graph(meta_graph_def.graph_def))
# pylint: enable=g-explicit-length-test
# Adds saver_def.
if saver_def:
meta_graph_def.saver_def.MergeFrom(saver_def)
# Adds collection_list.
if collection_list:
clist = collection_list
else:
clist = graph.get_all_collection_keys()
for ctype in clist:
add_collection_def(meta_graph_def, ctype,
graph=graph,
export_scope=export_scope)
return meta_graph_def
def read_meta_graph_file(filename):
"""Reads a file containing `MetaGraphDef` and returns the protocol buffer.
Args:
filename: `meta_graph_def` filename including the path.
Returns:
A `MetaGraphDef` protocol buffer.
Raises:
IOError: If the file doesn't exist, or cannot be successfully parsed.
"""
meta_graph_def = meta_graph_pb2.MetaGraphDef()
if not file_io.file_exists(filename):
raise IOError("File %s does not exist." % filename)
# First try to read it as a binary file.
file_content = file_io.FileIO(filename, "rb").read()
try:
meta_graph_def.ParseFromString(file_content)
return meta_graph_def
except Exception: # pylint: disable=broad-except
pass
# Next try to read it as a text file.
try:
text_format.Merge(file_content.decode("utf-8"), meta_graph_def)
except text_format.ParseError as e:
raise IOError("Cannot parse file %s: %s." % (filename, str(e)))
return meta_graph_def
def import_scoped_meta_graph(meta_graph_or_file,
clear_devices=False,
graph=None,
import_scope=None,
input_map=None,
unbound_inputs_col_name="unbound_inputs"):
"""Recreates a`Graph` saved in a `MetaGraphDef` proto.
This function takes a `MetaGraphDef` protocol buffer as input. If
  the argument is a file containing a `MetaGraphDef` protocol buffer,
it constructs a protocol buffer from the file content. The function
then adds all the nodes from the `graph_def` field to the
current graph, recreates all the collections, and returns a saver
constructed from the `saver_def` field.
In combination with `export_scoped_meta_graph()`, this function can be used to
* Serialize a graph along with other Python objects such as `QueueRunner`,
`Variable` into a `MetaGraphDef`.
* Restart training from a saved graph and checkpoints.
* Run inference from a saved graph and checkpoints.
Args:
meta_graph_or_file: `MetaGraphDef` protocol buffer or filename (including
the path) containing a `MetaGraphDef`.
clear_devices: Boolean which controls whether to clear device information
from graph_def. Default false.
graph: The `Graph` to import into. If `None`, use the default graph.
import_scope: Optional `string`. Name scope into which to import the
subgraph. If `None`, the graph is imported to the root name scope.
input_map: A dictionary mapping input names (as strings) in `graph_def` to
`Tensor` objects. The values of the named input tensors in the imported
graph will be re-mapped to the respective `Tensor` values.
unbound_inputs_col_name: Collection name for looking up unbound inputs.
Returns:
A dictionary of all the `Variables` imported into the name scope.
Raises:
ValueError: If the graph_def contains unbound inputs.
"""
if isinstance(meta_graph_or_file, meta_graph_pb2.MetaGraphDef):
meta_graph_def = meta_graph_or_file
else:
meta_graph_def = read_meta_graph_file(meta_graph_or_file)
if unbound_inputs_col_name:
for key, col_def in meta_graph_def.collection_def.items():
if key == unbound_inputs_col_name:
kind = col_def.WhichOneof("kind")
field = getattr(col_def, kind)
if field.value and (
not input_map or
sorted([compat.as_str(v) for v in field.value]) !=
sorted(input_map)):
raise ValueError("Graph contains unbound inputs: %s. Must "
"provide these inputs through input_map." %
",".join([compat.as_str(v) for v in field.value
if not input_map or v not in input_map]))
break
# Sets graph to default graph if it's not passed in.
graph = graph or ops.get_default_graph()
# Gathers the list of nodes we are interested in.
with graph.as_default():
producer_op_list = None
if meta_graph_def.meta_info_def.HasField("stripped_op_list"):
producer_op_list = meta_graph_def.meta_info_def.stripped_op_list
input_graph_def = meta_graph_def.graph_def
# Remove all the explicit device specifications for this node. This helps to
# make the graph more portable.
if clear_devices:
for node in input_graph_def.node:
node.device = ""
importer.import_graph_def(
input_graph_def, name=(import_scope or ""), input_map=input_map,
producer_op_list=producer_op_list)
# Restores all the other collections.
for key, col_def in meta_graph_def.collection_def.items():
# Don't add unbound_inputs to the new graph.
if key == unbound_inputs_col_name:
continue
kind = col_def.WhichOneof("kind")
if kind is None:
logging.error("Cannot identify data type for collection %s. Skipping.",
key)
continue
from_proto = ops.get_from_proto_function(key)
if from_proto:
assert kind == "bytes_list"
proto_type = ops.get_collection_proto_type(key)
for value in col_def.bytes_list.value:
proto = proto_type()
proto.ParseFromString(value)
graph.add_to_collection(
key, from_proto(proto, import_scope=import_scope))
else:
field = getattr(col_def, kind)
if kind == "node_list":
for value in field.value:
col_op = graph.as_graph_element(
ops.prepend_name_scope(value, import_scope))
graph.add_to_collection(key, col_op)
elif kind == "int64_list":
# NOTE(opensource): This force conversion is to work around the fact
# that Python2 distinguishes between int and long, while Python3 has
# only int.
for value in field.value:
graph.add_to_collection(key, int(value))
else:
for value in field.value:
graph.add_to_collection(
key, ops.prepend_name_scope(value, import_scope))
var_list = {}
variables = graph.get_collection(ops.GraphKeys.GLOBAL_VARIABLES,
scope=import_scope)
for v in variables:
var_list[ops.strip_name_scope(v.name, import_scope)] = v
return var_list
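# --- Illustrative usage sketch (editorial addition). Imports a previously
# exported meta graph under a fresh name scope; the ``.meta`` path and the
# "imported" scope name are placeholders.
def _example_import_scoped_meta_graph(meta_graph_path="/tmp/model.meta"):
  graph = ops.Graph()
  var_list = import_scoped_meta_graph(meta_graph_path,
                                      clear_devices=True,
                                      graph=graph,
                                      import_scope="imported")
  # var_list maps scope-stripped variable names to the recreated variables.
  return graph, var_list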
def export_scoped_meta_graph(filename=None,
graph_def=None,
graph=None,
export_scope=None,
as_text=False,
unbound_inputs_col_name="unbound_inputs",
clear_devices=False,
**kwargs):
"""Returns `MetaGraphDef` proto. Optionally writes it to filename.
This function exports the graph, saver, and collection objects into
`MetaGraphDef` protocol buffer with the intention of it being imported
at a later time or location to restart training, run inference, or be
a subgraph.
Args:
filename: Optional filename including the path for writing the
generated `MetaGraphDef` protocol buffer.
graph_def: `GraphDef` protocol buffer.
graph: The `Graph` to import into. If `None`, use the default graph.
export_scope: Optional `string`. Name scope under which to extract
the subgraph. The scope name will be striped from the node definitions
for easy import later into new name scopes. If `None`, the whole graph
is exported. graph_def and export_scope cannot both be specified.
as_text: If `True`, writes the `MetaGraphDef` as an ASCII proto.
unbound_inputs_col_name: Optional `string`. If provided, a string collection
with the given name will be added to the returned `MetaGraphDef`,
containing the names of tensors that must be remapped when importing the
`MetaGraphDef`.
clear_devices: Boolean which controls whether to clear device information
before exporting the graph.
**kwargs: Optional keyed arguments, including meta_info_def,
saver_def, collection_list.
Returns:
A `MetaGraphDef` proto and dictionary of `Variables` in the exported
name scope.
Raises:
ValueError: When the `GraphDef` is larger than 2GB.
"""
graph = graph or ops.get_default_graph()
unbound_inputs = []
if export_scope or clear_devices:
if graph_def:
new_graph_def = graph_pb2.GraphDef()
new_graph_def.versions.CopyFrom(graph_def.versions)
for node_def in graph_def.node:
if _should_include_node(node_def.name, export_scope):
new_node_def = _node_def(node_def, export_scope, unbound_inputs,
clear_devices=clear_devices)
new_graph_def.node.extend([new_node_def])
graph_def = new_graph_def
else:
# Only do this complicated work if we want to remove a name scope.
graph_def = graph_pb2.GraphDef()
# pylint: disable=protected-access
graph_def.versions.CopyFrom(graph.graph_def_versions)
bytesize = 0
for key in sorted(graph._nodes_by_id):
if _should_include_node(graph._nodes_by_id[key].name, export_scope):
value = graph._nodes_by_id[key]
# pylint: enable=protected-access
node_def = _node_def(value.node_def, export_scope, unbound_inputs,
clear_devices=clear_devices)
graph_def.node.extend([node_def])
if value.outputs:
assert "_output_shapes" not in graph_def.node[-1].attr
graph_def.node[-1].attr["_output_shapes"].list.shape.extend([
output.get_shape().as_proto() for output in value.outputs])
bytesize += value.node_def.ByteSize()
if bytesize >= (1 << 31) or bytesize < 0:
raise ValueError("GraphDef cannot be larger than 2GB.")
# It's possible that not all the inputs are in the export_scope.
# If we would like such information included in the exported meta_graph,
# add them to a special unbound_inputs collection.
if unbound_inputs_col_name:
# Clears the unbound_inputs collections.
graph.clear_collection(unbound_inputs_col_name)
for k in unbound_inputs:
graph.add_to_collection(unbound_inputs_col_name, k)
var_list = {}
variables = graph.get_collection(ops.GraphKeys.GLOBAL_VARIABLES,
scope=export_scope)
for v in variables:
if _should_include_node(v, export_scope):
var_list[ops.strip_name_scope(v.name, export_scope)] = v
scoped_meta_graph_def = create_meta_graph_def(
graph_def=graph_def,
graph=graph,
export_scope=export_scope,
**kwargs)
if filename:
graph_io.write_graph(
scoped_meta_graph_def,
os.path.dirname(filename),
os.path.basename(filename),
as_text=as_text)
return scoped_meta_graph_def, var_list
def copy_scoped_meta_graph(from_scope, to_scope,
from_graph=None, to_graph=None):
"""Copies a sub-meta_graph from one scope to another.
Args:
from_scope: `String` name scope containing the subgraph to be copied.
to_scope: `String` name scope under which the copied subgraph will reside.
from_graph: Optional `Graph` from which to copy the subgraph. If `None`, the
      default graph is used.
to_graph: Optional `Graph` to which to copy the subgraph. If `None`, the
default graph is used.
Returns:
A dictionary of `Variables` that has been copied into `to_scope`.
Raises:
ValueError: If `from_scope` and `to_scope` are the same while
`from_graph` and `to_graph` are also the same.
"""
from_graph = from_graph or ops.get_default_graph()
to_graph = to_graph or ops.get_default_graph()
if from_graph == to_graph and from_scope == to_scope:
raise ValueError("'from_scope' and 'to_scope' need to be different "
"when performing copy in the same graph.")
orig_meta_graph, var_list = export_scoped_meta_graph(
export_scope=from_scope, graph=from_graph)
var_list = import_scoped_meta_graph(orig_meta_graph,
graph=to_graph,
import_scope=to_scope)
return var_list
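# --- Illustrative usage sketch (editorial addition). Exports everything under
# one name scope and copies it to another scope of the same graph; "model"
# and "model_copy" are made-up scope names.
def _example_copy_scope(graph):
  meta_graph_def, exported_vars = export_scoped_meta_graph(
      export_scope="model", graph=graph)
  copied_vars = copy_scoped_meta_graph(from_scope="model",
                                       to_scope="model_copy",
                                       from_graph=graph, to_graph=graph)
  return meta_graph_def, exported_vars, copied_vars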
| apache-2.0 |
fuzzysteve/yamlloader | tableloader/tableFunctions/groups.py | 1 | 1801 | # -*- coding: utf-8 -*-
from yaml import load, dump
try:
from yaml import CSafeLoader as SafeLoader
print "Using CSafeLoader"
except ImportError:
from yaml import SafeLoader
print "Using Python SafeLoader"
import os
import sys
reload(sys)
sys.setdefaultencoding("utf-8")
from sqlalchemy import Table
def importyaml(connection,metadata,sourcePath,language='en'):
invGroups = Table('invGroups',metadata)
trnTranslations = Table('trnTranslations',metadata)
print "Importing Groups"
print "opening Yaml"
with open(os.path.join(sourcePath,'fsd','groupIDs.yaml'),'r') as yamlstream:
trans = connection.begin()
groupids=load(yamlstream,Loader=SafeLoader)
print "Yaml Processed into memory"
for groupid in groupids:
connection.execute(invGroups.insert(),
groupID=groupid,
categoryID=groupids[groupid].get('categoryID',0),
groupName=groupids[groupid].get('name',{}).get(language,'').decode('utf-8'),
iconID=groupids[groupid].get('iconID'),
useBasePrice=groupids[groupid].get('useBasePrice'),
anchored=groupids[groupid].get('anchored',0),
anchorable=groupids[groupid].get('anchorable',0),
fittableNonSingleton=groupids[groupid].get('fittableNonSingleton',0),
published=groupids[groupid].get('published',0))
if (groupids[groupid].has_key('name')):
for lang in groupids[groupid]['name']:
connection.execute(trnTranslations.insert(),tcID=7,keyID=groupid,languageID=lang,text=groupids[groupid]['name'][lang].decode('utf-8'));
trans.commit()
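# --- Illustrative usage sketch (editorial addition). Shows how this loader is
# typically wired up with SQLAlchemy; the database URL and SDE path are
# placeholders, and the invGroups/trnTranslations tables must already exist
# in the target schema.
def _example_import_groups():
    from sqlalchemy import create_engine, MetaData
    engine = create_engine('sqlite:///sde.sqlite')
    metadata = MetaData()
    metadata.reflect(bind=engine)
    connection = engine.connect()
    importyaml(connection, metadata, '/path/to/sde', language='en')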
| mit |
gonzolino/heat | heat/db/sqlalchemy/migrate_repo/versions/047_stack_nested_depth.py | 13 | 1602 | #
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import sqlalchemy
def upgrade(migrate_engine):
meta = sqlalchemy.MetaData(bind=migrate_engine)
stack = sqlalchemy.Table('stack', meta, autoload=True)
nested_depth = sqlalchemy.Column(
'nested_depth', sqlalchemy.Integer(), default=0)
nested_depth.create(stack)
def get_stacks(owner_id):
stmt = stack.select().where(stack.c.owner_id == owner_id)
return migrate_engine.execute(stmt)
def set_nested_depth(st, nested_depth):
if st.backup:
return
values = {'nested_depth': nested_depth}
update = stack.update().where(
stack.c.id == st.id).values(values)
migrate_engine.execute(update)
# Recurse down the tree
child_stacks = get_stacks(owner_id=st.id)
child_nested_depth = nested_depth + 1
for ch in child_stacks:
set_nested_depth(ch, child_nested_depth)
# Iterate over all top-level non nested stacks
for st in get_stacks(owner_id=None):
set_nested_depth(st, 0)
| apache-2.0 |
realsystem/CloudFerry | cloudferrylib/base/action/action.py | 11 | 1065 | # Copyright (c) 2014 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the License);
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an AS IS BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and#
# limitations under the License.
from cloudferrylib.scheduler import task
class Action(task.Task):
def __init__(self, init, cloud=None):
self.cloud = None
self.src_cloud = None
self.dst_cloud = None
self.cfg = None
self.__dict__.update(init)
self.init = init
if cloud:
self.cloud = init[cloud]
super(Action, self).__init__()
def run(self, **kwargs):
pass
def save(self):
pass
def restore(self):
pass
| apache-2.0 |
oberstet/crossbarexamples | rest/needs_cleanup/python/lib/crossbarconnect/client.py | 9 | 8266 | ###############################################################################
##
## Copyright (C) 2012-2014 Tavendo GmbH
##
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
## You may obtain a copy of the License at
##
## http://www.apache.org/licenses/LICENSE-2.0
##
## Unless required by applicable law or agreed to in writing, software
## distributed under the License is distributed on an "AS IS" BASIS,
## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
## See the License for the specific language governing permissions and
## limitations under the License.
##
###############################################################################
__all__ = ['Client']
try:
import ssl
_HAS_SSL = True
except ImportError:
_HAS_SSL = False
import sys
_HAS_SSL_CLIENT_CONTEXT = sys.version_info >= (2,7,9)
import json
import hmac
import hashlib
import base64
import random
from datetime import datetime
import six
from six.moves.urllib import parse
from six.moves.http_client import HTTPConnection, HTTPSConnection
def _utcnow():
"""
Get current time in UTC as ISO 8601 string.
:returns str -- Current time as string in ISO 8601 format.
"""
now = datetime.utcnow()
return now.strftime("%Y-%m-%dT%H:%M:%S.%f")[:-3] + "Z"
def _parse_url(url):
"""
Parses a Crossbar.io HTTP bridge URL.
"""
parsed = parse.urlparse(url)
if parsed.scheme not in ["http", "https"]:
raise Exception("invalid Push URL scheme '%s'" % parsed.scheme)
if parsed.port is None or parsed.port == "":
if parsed.scheme == "http":
port = 80
elif parsed.scheme == "https":
port = 443
else:
raise Exception("logic error")
else:
port = int(parsed.port)
if parsed.fragment is not None and parsed.fragment != "":
raise Exception("invalid Push URL: non-empty fragment '%s" % parsed.fragment)
if parsed.query is not None and parsed.query != "":
raise Exception("invalid Push URL: non-empty query string '%s" % parsed.query)
if parsed.path is not None and parsed.path != "":
ppath = parsed.path
path = parse.unquote(ppath)
else:
ppath = "/"
path = ppath
return {'secure': parsed.scheme == "https",
'host': parsed.hostname,
'port': port,
'path': path}
class Client:
"""
Crossbar.io HTTP bridge client.
"""
def __init__(self, url, key = None, secret = None, timeout = 5, context = None):
"""
Create a new Crossbar.io push client.
The only mandatory argument is the Push service endpoint of the Crossbar.io
instance to push to.
For signed pushes, provide authentication key and secret. If those are not
given, unsigned pushes are performed.
:param url: URL of the HTTP bridge of Crossbar.io (e.g. http://example.com:8080/push).
:type url: str
:param key: Optional key to use for signing requests.
:type key: str
:param secret: When using signed request, the secret corresponding to key.
:type secret: str
:param timeout: Timeout for requests.
:type timeout: int
:param context: If the HTTP bridge is running on HTTPS (that is securely over TLS),
then the context provides the SSL settings the client should use (e.g. the
certificate chain against which to verify the server certificate). This parameter
is only available on Python 2.7.9+ and Python 3 (otherwise the parameter is silently
ignored!). See: https://docs.python.org/2/library/ssl.html#ssl.SSLContext
:type context: obj or None
"""
if six.PY2:
if type(url) == str:
url = six.u(url)
if type(key) == str:
key = six.u(key)
if type(secret) == str:
secret = six.u(secret)
assert(type(url) == six.text_type)
assert((key and secret) or (not key and not secret))
assert(key is None or type(key) == six.text_type)
assert(secret is None or type(secret) == six.text_type)
assert(type(timeout) == int)
if _HAS_SSL and _HAS_SSL_CLIENT_CONTEXT:
assert(context is None or isinstance(context, ssl.SSLContext))
self._seq = 1
self._key = key
self._secret = secret
self._endpoint = _parse_url(url)
self._endpoint['headers'] = {
"Content-type": "application/json",
"User-agent": "crossbarconnect-python"
}
if self._endpoint['secure']:
if not _HAS_SSL:
raise Exception("Bridge URL is using HTTPS, but Python SSL module is missing")
if _HAS_SSL_CLIENT_CONTEXT:
self._connection = HTTPSConnection(self._endpoint['host'],
self._endpoint['port'], timeout = timeout, context = context)
else:
self._connection = HTTPSConnection(self._endpoint['host'],
self._endpoint['port'], timeout = timeout)
else:
self._connection = HTTPConnection(self._endpoint['host'],
self._endpoint['port'], timeout = timeout)
def publish(self, topic, *args, **kwargs):
"""
Publish an event to subscribers on specified topic via Crossbar.io HTTP bridge.
The event payload (positional and keyword) can be of any type that can be
serialized to JSON.
If `kwargs` contains an `options` attribute, this is expected to
be a dictionary with the following possible parameters:
* `exclude`: A list of WAMP session IDs to exclude from receivers.
* `eligible`: A list of WAMP session IDs eligible as receivers.
:param topic: Topic to push to.
:type topic: str
:param args: Arbitrary application payload for the event (positional arguments).
:type args: list
:param kwargs: Arbitrary application payload for the event (keyword arguments).
:type kwargs: dict
:returns int -- The event publication ID assigned by the broker.
"""
if six.PY2 and type(topic) == str:
topic = six.u(topic)
assert(type(topic) == six.text_type)
## this will get filled and later serialized into HTTP/POST body
##
event = {
'topic': topic
}
if 'options' in kwargs:
event['options'] = kwargs.pop('options')
assert(type(event['options']) == dict)
if args:
event['args'] = args
if kwargs:
event['kwargs'] = kwargs
try:
body = json.dumps(event, separators = (',',':'))
if six.PY3:
body = body.encode('utf8')
except Exception as e:
raise Exception("invalid event payload - not JSON serializable: {0}".format(e))
params = {
'timestamp': _utcnow(),
'seq': self._seq,
}
if self._key:
## if the request is to be signed, create extra fields and signature
params['key'] = self._key
params['nonce'] = random.randint(0, 9007199254740992)
# HMAC[SHA256]_{secret} (key | timestamp | seq | nonce | body) => signature
hm = hmac.new(self._secret.encode('utf8'), None, hashlib.sha256)
hm.update(params['key'].encode('utf8'))
hm.update(params['timestamp'].encode('utf8'))
hm.update(u"{0}".format(params['seq']).encode('utf8'))
hm.update(u"{0}".format(params['nonce']).encode('utf8'))
hm.update(body)
signature = base64.urlsafe_b64encode(hm.digest())
params['signature'] = signature
self._seq += 1
path = "{0}?{1}".format(parse.quote(self._endpoint['path']), parse.urlencode(params))
## now issue the HTTP/POST
##
self._connection.request('POST', path, body, self._endpoint['headers'])
response = self._connection.getresponse()
response_body = response.read()
if response.status != 202:
raise Exception("publication request failed {0} [{1}] - {2}".format(response.status, response.reason, response_body))
try:
res = json.loads(response_body)
except Exception as e:
raise Exception("publication request bogus result - {0}".format(e))
return res['id']
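# --- Illustrative usage sketch (editorial addition). Publishes one event via
# the Crossbar.io HTTP bridge; the URL, key and secret are placeholders for a
# concrete deployment, and the topic/payload are made up.
def _example_publish():
    client = Client("http://127.0.0.1:8080/push",
                    key="myapp", secret="secret")
    event_id = client.publish("com.myapp.topic1", "Hello, world!", count=3,
                              options={'exclude': []})
    return event_id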
| apache-2.0 |
CiscoSystems/nova | nova/tests/api/openstack/compute/contrib/test_snapshots.py | 30 | 8037 | # Copyright 2011 Denali Systems, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from lxml import etree
import webob
from nova.api.openstack.compute.contrib import volumes
from nova import context
from nova.openstack.common import jsonutils
from nova.openstack.common import timeutils
from nova import test
from nova.tests.api.openstack import fakes
from nova.volume import cinder
class SnapshotApiTest(test.NoDBTestCase):
def setUp(self):
super(SnapshotApiTest, self).setUp()
fakes.stub_out_networking(self.stubs)
fakes.stub_out_rate_limiting(self.stubs)
self.stubs.Set(cinder.API, "create_snapshot",
fakes.stub_snapshot_create)
self.stubs.Set(cinder.API, "create_snapshot_force",
fakes.stub_snapshot_create)
self.stubs.Set(cinder.API, "delete_snapshot",
fakes.stub_snapshot_delete)
self.stubs.Set(cinder.API, "get_snapshot", fakes.stub_snapshot_get)
self.stubs.Set(cinder.API, "get_all_snapshots",
fakes.stub_snapshot_get_all)
self.stubs.Set(cinder.API, "get", fakes.stub_volume_get)
self.flags(
osapi_compute_extension=[
'nova.api.openstack.compute.contrib.select_extensions'],
osapi_compute_ext_list=['Volumes'])
self.context = context.get_admin_context()
self.app = fakes.wsgi_app(init_only=('os-snapshots',))
def test_snapshot_create(self):
snapshot = {"volume_id": 12,
"force": False,
"display_name": "Snapshot Test Name",
"display_description": "Snapshot Test Desc"}
body = dict(snapshot=snapshot)
req = webob.Request.blank('/v2/fake/os-snapshots')
req.method = 'POST'
req.body = jsonutils.dumps(body)
req.headers['content-type'] = 'application/json'
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 200)
resp_dict = jsonutils.loads(resp.body)
self.assertIn('snapshot', resp_dict)
self.assertEqual(resp_dict['snapshot']['displayName'],
snapshot['display_name'])
self.assertEqual(resp_dict['snapshot']['displayDescription'],
snapshot['display_description'])
self.assertEqual(resp_dict['snapshot']['volumeId'],
snapshot['volume_id'])
def test_snapshot_create_force(self):
snapshot = {"volume_id": 12,
"force": True,
"display_name": "Snapshot Test Name",
"display_description": "Snapshot Test Desc"}
body = dict(snapshot=snapshot)
req = webob.Request.blank('/v2/fake/os-snapshots')
req.method = 'POST'
req.body = jsonutils.dumps(body)
req.headers['content-type'] = 'application/json'
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 200)
resp_dict = jsonutils.loads(resp.body)
self.assertIn('snapshot', resp_dict)
self.assertEqual(resp_dict['snapshot']['displayName'],
snapshot['display_name'])
self.assertEqual(resp_dict['snapshot']['displayDescription'],
snapshot['display_description'])
self.assertEqual(resp_dict['snapshot']['volumeId'],
snapshot['volume_id'])
# Test invalid force parameter
snapshot = {"volume_id": 12,
"force": '**&&^^%%$$##@@'}
body = dict(snapshot=snapshot)
req = webob.Request.blank('/v2/fake/os-snapshots')
req.method = 'POST'
req.body = jsonutils.dumps(body)
req.headers['content-type'] = 'application/json'
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 400)
def test_snapshot_delete(self):
snapshot_id = 123
req = webob.Request.blank('/v2/fake/os-snapshots/%d' % snapshot_id)
req.method = 'DELETE'
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 202)
def test_snapshot_delete_invalid_id(self):
snapshot_id = -1
req = webob.Request.blank('/v2/fake/os-snapshots/%d' % snapshot_id)
req.method = 'DELETE'
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 404)
def test_snapshot_show(self):
snapshot_id = 123
req = webob.Request.blank('/v2/fake/os-snapshots/%d' % snapshot_id)
req.method = 'GET'
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 200)
resp_dict = jsonutils.loads(resp.body)
self.assertIn('snapshot', resp_dict)
self.assertEqual(resp_dict['snapshot']['id'], str(snapshot_id))
def test_snapshot_show_invalid_id(self):
snapshot_id = -1
req = webob.Request.blank('/v2/fake/os-snapshots/%d' % snapshot_id)
req.method = 'GET'
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 404)
def test_snapshot_detail(self):
req = webob.Request.blank('/v2/fake/os-snapshots/detail')
req.method = 'GET'
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 200)
resp_dict = jsonutils.loads(resp.body)
self.assertIn('snapshots', resp_dict)
resp_snapshots = resp_dict['snapshots']
self.assertEqual(len(resp_snapshots), 3)
resp_snapshot = resp_snapshots.pop()
self.assertEqual(resp_snapshot['id'], 102)
class SnapshotSerializerTest(test.NoDBTestCase):
def _verify_snapshot(self, snap, tree):
self.assertEqual(tree.tag, 'snapshot')
for attr in ('id', 'status', 'size', 'createdAt',
'displayName', 'displayDescription', 'volumeId'):
self.assertEqual(str(snap[attr]), tree.get(attr))
def test_snapshot_show_create_serializer(self):
serializer = volumes.SnapshotTemplate()
raw_snapshot = dict(
id='snap_id',
status='snap_status',
size=1024,
createdAt=timeutils.utcnow(),
displayName='snap_name',
displayDescription='snap_desc',
volumeId='vol_id',
)
text = serializer.serialize(dict(snapshot=raw_snapshot))
tree = etree.fromstring(text)
self._verify_snapshot(raw_snapshot, tree)
def test_snapshot_index_detail_serializer(self):
serializer = volumes.SnapshotsTemplate()
raw_snapshots = [dict(
id='snap1_id',
status='snap1_status',
size=1024,
createdAt=timeutils.utcnow(),
displayName='snap1_name',
displayDescription='snap1_desc',
volumeId='vol1_id',
),
dict(
id='snap2_id',
status='snap2_status',
size=1024,
createdAt=timeutils.utcnow(),
displayName='snap2_name',
displayDescription='snap2_desc',
volumeId='vol2_id',
)]
text = serializer.serialize(dict(snapshots=raw_snapshots))
tree = etree.fromstring(text)
self.assertEqual('snapshots', tree.tag)
self.assertEqual(len(raw_snapshots), len(tree))
for idx, child in enumerate(tree):
self._verify_snapshot(raw_snapshots[idx], child)
| apache-2.0 |
diagramsoftware/odoomrp-utils | product_uom_change_fix/models/product.py | 13 | 1920 |
# -*- encoding: utf-8 -*-
##############################################################################
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see http://www.gnu.org/licenses/.
#
##############################################################################
from openerp import models, api
class ProductTemplate(models.Model):
_inherit = 'product.template'
@api.multi
def write(self, vals):
res = {}
for product_tmpl in self:
write_vals = {}
if 'uom_po_id' in vals:
write_vals['uom_po_id'] = vals.pop("uom_po_id", None)
write_vals['uom_id'] = vals.pop("uom_id", None)
if vals:
res = super(ProductTemplate, self).write(vals)
if write_vals:
product_obj = self.env['product.product']
st_mv_obj = self.env['stock.move']
product_lst = product_obj.search([('product_tmpl_id', '=',
product_tmpl.id)])
if not st_mv_obj.search([('product_id', 'in',
product_lst.ids)]):
models.Model.write(self, write_vals)
else:
res = super(ProductTemplate, self).write(write_vals)
return res
| agpl-3.0 |
noba3/KoTos | addons/script.module.urlresolver/lib/urlresolver/plugins/watchfreeinhd.py | 4 | 2097 | '''
watchfreeinhd urlresolver plugin
Copyright (C) 2013 voinage
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import re
from t0mm0.common.net import Net
from urlresolver.plugnplay.interfaces import UrlResolver
from urlresolver.plugnplay.interfaces import PluginSettings
from urlresolver.plugnplay import Plugin
from urlresolver import common
class WatchFreeResolver(Plugin, UrlResolver, PluginSettings):
implements = [UrlResolver, PluginSettings]
name = "watchfreeinhd"
domains = ["watchfreeinhd.com"]
def __init__(self):
p = self.get_setting('priority') or 100
self.priority = int(p)
self.net = Net()
def get_media_url(self, host, media_id):
web_url = self.get_url(host, media_id)
html = self.net.http_POST(web_url, {'agree': 'Yes, let me watch'}).content
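# the site first shows an agreement page; POSTing the consent form ('agree') returns the page that contains the direct player link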
link = re.findall('<a href="(.+?)" id="player" name="player">', html)
if link:
return link[0]
else:
raise UrlResolver.ResolverError('File Not Found or removed')
def get_url(self, host, media_id):
return 'http://www.%s.com/%s' % (host, media_id)
def get_host_and_id(self, url):
r = re.match(r'http://www.(watchfreeinhd).com/([0-9A-Za-z]+)', url)
if r:
return r.groups()
else:
return False
def valid_url(self, url, host):
if self.get_setting('enabled') == 'false': return False
return (re.match(r'http://www.(watchfreeinhd).com/([0-9A-Za-z]+)', url) or 'watchfree' in host)
| gpl-2.0 |
OldHuntsman/DefilerWings | game/pythoncode/girls.py | 1 | 16984 | # coding=utf-8
import random
import data
import renpy.exports as renpy
import renpy.store as store
import girls_data
from treasures import gen_treas
from utils import call
from characters import Girl
from data import achieve_target
class GirlsList(object):
def __init__(self, game_ref, base_character):
self.game = game_ref
self.character = base_character
self.prisoners = [] # list of imprisoned girls
self.free_list = [] # list of girls currently at large
self.spawn = [] # list of spawn that arrive after awakening
self.active = 0 # index of the current girl
self.offspring = [] # offspring types collected for quest completion
def new_girl(self, girl_type='peasant'):
"""
Generate a new girl of the given type.
"""
self.game.girl = Girl(game_ref=self.game, girl_type=girl_type)
self.game.girl.treasure = self.gen_tres()
return self.description('new')
def gen_tres(self):
"""
Build the list of personal treasures for the current girl
"""
g_type = self.game.girl.type # shorthand for the girl's type
girl_info = girls_data.girls_info[g_type] # shorthand for this girl type's data
count = random.randint(girl_info['t_count_min'], girl_info['t_count_max'])
t_list = girl_info['t_list']
alignment = girl_info['t_alignment']
min_cost = girl_info['t_price_min']
max_cost = girl_info['t_price_max']
obtained = u"Принадлежало красавице по имени %s" % self.game.girl.name
return gen_treas(count, t_list, alignment, min_cost, max_cost, obtained)
def impregnate(self):
"""
Impregnate the woman.
"""
# self.description('prelude', True)
# self.description('sex', True)
# self.description('impregnate', True)
self.game.girl.virgin = False
if self.game.girl.quality < self.game.dragon.magic or \
'impregnator' in self.game.dragon.modifiers():
self.game.girl.pregnant = 2
else:
self.game.girl.pregnant = 1
self.game.dragon.lust -= 1
achieve_target(self.game.girl.type, "impregnate")
return self.description('shout')
def free_girl(self):
"""
Set the current girl free.
"""
# the girl is only tracked if she is pregnant
if self.game.girl.pregnant:
self.free_list.append(self.game.girl)
if self.game.girl.jailed:
return self.description('free_prison')
else:
return self.description('free')
def free_all_girls(self):
"""
Set all the girls free.
"""
for girl_i in reversed(xrange(self.prisoners_count)):
self.game.girl = self.prisoners[girl_i]
if self.game.girl.pregnant:
self.free_list.append(self.game.girl)
self.prisoners = []
def steal_girl(self):
return self.description('steal')
def jail_girl(self):
"""
Put the current girl behind bars.
"""
if self.game.girl.jailed:
text = self.description('jailed')
self.prisoners.insert(self.active, self.game.girl)
else:
text = self.description('jail')
self.game.girl.jailed = True
self.prisoners.append(self.game.girl)
return text
def set_active(self, index):
"""
Take the girl with number index out of the dungeon
"""
self.game.girl = self.prisoners[index]
self.active = index
del self.prisoners[index]
def eat_girl(self):
"""
Eat the girl.
"""
self.game.dragon.hunger -= 1
if self.game.dragon.lust < 3:
self.game.dragon.lust += 1
self.game.dragon.bloodiness = 0
return self.description('eat')
def rob_girl(self):
"""
Rob the girl.
"""
self.game.lair.treasury.receive_treasures(self.game.girl.treasure)
return self.description('rob')
def prisoners_list(self):
"""
Return the list of captured girls.
"""
jail_list = []
for girl_i in xrange(len(self.prisoners)):
jail_list.append(self.prisoners[girl_i].name)
return jail_list
@property
def prisoners_count(self):
"""
Return the number of captured girls.
"""
return len(self.prisoners)
def description(self, status, say=False):
"""
Generate a description of the situation for the current girl (self.game.girl).
status - code name of the situation
say - if True, the description is printed to the screen immediately
Returns the description text, or None if no matching text is found
"""
format_dict = {
'dragon_name': self.game.dragon.name,
'dragon_name_full': self.game.dragon.fullname,
'dragon_type': self.game.dragon.kind,
'girl_name': self.game.girl.name,
'girl_title': girls_data.girls_info[self.game.girl.type]['description'],
}
girl_type = self.game.girl.type
if girl_type not in girls_data.girls_texts or status not in girls_data.girls_texts[girl_type]:
girl_type = 'girl'
if status in girls_data.girls_texts[girl_type]:
text = random.choice(girls_data.girls_texts[girl_type][status])
if self.spawn:
# If the spawn list is not empty, take the last one's name for possible substitution
format_dict['spawn_name'] = girls_data.spawn_info[self.spawn[-1]]['born'].capitalize()
if status == 'rob':
treas_description = self.game.lair.treasury.treasures_description(self.game.girl.treasure)
treas_description = '\n'.join(treas_description) + u'.'
self.game.girl.treasure = []
format_dict['rob_list'] = treas_description
text = text % format_dict
else:
text = None
if say and text:
self.game.girl.third(text) # output the message
store.nvl_list = [] # Python equivalent of nvl clear
else:
return text
@staticmethod
def event(event_type, *args, **kwargs):
if event_type in girls_data.girl_events:
if girls_data.girl_events[event_type] is not None:
call(girls_data.girl_events[event_type], *args, **kwargs)
else:
raise Exception("Unknown event: %s" % event_type)
return
def next_year(self):
"""
All the actions involving girls over the year.
"""
# imprisoned girls
for girl_i in reversed(xrange(self.prisoners_count)):
self.game.girl = self.prisoners[girl_i]
# escape attempt
if (random.randint(1, 2) == 1) and self.game.lair.reachable([]) and \
'regular_guards' not in self.game.lair.upgrades and \
'elite_guards' not in self.game.lair.upgrades and \
'smuggler_guards' not in self.game.lair.upgrades:
# The girl has escaped from her cell
del self.prisoners[girl_i]
self.event('escape') # "escape from captivity" event
if self.game.girl.pregnant:
self.free_list.append(self.game.girl)
else:
# the girl did not escape
if ('servant' in self.game.lair.upgrades) or ('gremlin_servant' in self.game.lair.upgrades):
if self.game.girl.pregnant:
girl_type = girls_data.girls_info[self.game.girl.type]
if self.game.girl.pregnant == 1:
spawn_class = 'regular_spawn'
else:
spawn_class = 'advanced_spawn'
if 'educated_spawn' not in self.offspring:
self.offspring.append('educated_spawn')
if girl_type['giantess']:
girl_size = 'giantess'
else:
girl_size = 'common_size'
if girl_size not in self.offspring:
self.offspring.append(girl_size)
self.spawn.append(girl_type[spawn_class])
self.event('spawn', girl_type[spawn_class]) # "spawn born" event
self.game.girl.pregnant = 0
else:
self.event('hunger_death') # "girl starved to death" event
del self.prisoners[girl_i]
# girls at large, including those who have just escaped; only pregnant ones are tracked
for girl_i in xrange(len(self.free_list)):
self.game.girl = self.free_list[girl_i]
if (random.randint(1, 3) == 1) and not girls_data.girls_info[self.game.girl.type]['giantess']:
self.event('kill') # "pregnant girl killed while at large" event
else:
girl_type = girls_data.girls_info[self.game.girl.type]
if self.game.girl.pregnant == 1:
spawn_class = 'regular_spawn'
else:
spawn_class = 'advanced_spawn'
if 'free_spawn' not in self.offspring:
self.offspring.append('free_spawn')
if girl_type['giantess']:
girl_size = 'giantess'
else:
girl_size = 'common_size'
if girl_size not in self.offspring:
self.offspring.append(girl_size)
spawn_type = girls_data.girls_info[self.game.girl.type][spawn_class]
spawn = girls_data.spawn_info[spawn_type]
self.event('free_spawn', spawn_type) # "spawn born in the wild" event
self.free_spawn(spawn['power'])
self.free_list = [] # clear the list - she either gave birth or was killed, no point in tracking her further
def before_sleep(self):
"""
Everything that happens before sleep - death from anguish; maybe something else later?
"""
for girl_i in reversed(xrange(self.prisoners_count)):
self.game.girl = self.prisoners[girl_i]
if (not self.game.girl.virgin) and (not self.game.girl.pregnant):
self.description('anguish', True) # dies of anguish
del self.prisoners[girl_i]
# noinspection PyTypeChecker
def after_awakening(self):
"""
Everything that happens after awakening - deal with the spawn that have been raised.
"""
for spawn_i in xrange(len(self.spawn)):
spawn_type = self.spawn[spawn_i] # shorthand for the spawn type
spawn = girls_data.spawn_info[spawn_type] # shorthand for the spawn data
renpy.show("meow", what=store.Image("img/scene/spawn/%s.jpg" % spawn_type))
spawn_mod = spawn['modifier'] # shorthand for the spawn's list of modifiers
# Check: True if the spawn is not marine, or if it is marine and the lair is underwater
# TODO: maybe the servant should die if it is not marine but the lair is a marine one.
marine_check = ('marine' not in spawn_mod) or \
(self.game.lair.type.require and 'swimming' in self.game.lair.type.require)
spawn_menu = [(u"К Вам приходит %s и просит назначения" % spawn['name'], None)] # spawn menu
# Possible menu entries
if ('poisonous' in spawn_mod) and ('poison_guards' not in self.game.lair.upgrades) and marine_check:
spawn_menu.append((u"Выпустить в логово", u'poison_guards'))
if ('servant' in spawn_mod) and ('servant' not in self.game.lair.upgrades) and marine_check:
spawn_menu.append((u"Сделать слугой", 'servant'))
if ('warrior' in spawn_mod) and ('regular_guards' not in self.game.lair.upgrades) and marine_check:
spawn_menu.append((u"Сделать охранником", 'regular_guards'))
if ('elite' in spawn_mod) and ('elite_guards' not in self.game.lair.upgrades) and marine_check:
spawn_menu.append((u"Сделать элитным охранником", 'elite_guards'))
spawn_menu.append((u"Выпустить в королевство", 'free'))
if (('servant' in spawn_mod) or
('warrior' in spawn_mod) or
('elite' in spawn_mod)) and \
('marine' not in spawn_mod):
spawn_menu.append((u"Отправить в армию тьмы", 'army_of_darkness'))
menu_action = renpy.display_menu(spawn_menu)
if menu_action == 'free':
renpy.say(self.game.narrator, u"%s отправляется бесчинствовать в королевстве." % spawn['name'])
self.free_spawn(spawn['power'])
elif menu_action == 'army_of_darkness':
renpy.say(self.game.narrator, u"%s отправляется в армию тьмы." % spawn['name'])
self.army_of_darkness(spawn_type)
else:
# announce that the spawn starts its duties
renpy.say(self.game.narrator, u"%s приступает к выполнению обязанностей." % spawn['name'])
# announce that the replaced upgrade is no longer needed, if applicable
if 'replaces' in data.lair_upgrades[menu_action].keys():
replace = data.lair_upgrades[menu_action]['replaces']
renpy.say(self.game.narrator,
u"%s больше не требуются и уходят." % data.lair_upgrades[replace]['name'])
# add the upgrade to the lair
self.game.lair.add_upgrade(menu_action)
renpy.hide("meow")
self.spawn = []
def free_spawn(self, power):
"""
Actions of spawn that are at large
"""
# Devastation grows. Should check that this fires only once per spawn type.
self.game.poverty.value += 1
pass
def army_of_darkness(self, warrior_type):
"""
Send the spawn to the army of darkness
"""
self.game.army.add_warrior(warrior_type)
@property
def is_mating_possible(self):
"""
Return whether mating is possible - True or False.
# TODO: check for transformation into a human
"""
assert self.game.girl, "Girl not found"
mating_possible = self.game.girl.virgin and self.game.dragon.lust > 0
if girls_data.girls_info[self.game.girl.type]['giantess']:
mating_possible = self.game.dragon.size > 3 and mating_possible
return mating_possible | bsd-3-clause |
Reddine/dzlibs | tweeza/users/views.py | 9 | 2145 | from flask import (Blueprint, render_template, flash, request, redirect,
url_for)
from flask.ext.login import login_required, current_user
from users.models import User
from items.models import Item
from users.forms import EditProfileForm
from flask.ext.babel import gettext as _
users = Blueprint('users', __name__, url_prefix='/profile')
@users.route('/')
@login_required
def index():
items_count = Item.objects(submitter=current_user.id).count()
return render_template('users/user_profile.html', user=current_user,
items_count=items_count)
@users.route('/edit/', methods=['GET', 'POST'])
@login_required
def edit():
form = EditProfileForm()
if request.method == 'POST':
if form.validate_on_submit():
user = User.objects.get(id=current_user.id)
user.name = form.name.data.strip()
user.email = form.email.data.strip()
user.website = form.website.data.strip()
user.twitter_username = form.twitter.data.strip('@')
facebook = form.facebook.data.strip().strip('/').split('/')[-1]
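# keep only the last path segment, so a full profile URL collapses to the bare username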
user.facebook_username = facebook
user.location = form.location.data.strip()
user.hireable = bool(form.hireable.data)
user.bio = form.bio.data.strip()
user.save()
flash(_('Profile updated successfully'), category='success')
return redirect(url_for('users.index'))
else:
flash(_('Error happened, see below'), category='alert')
return render_template('users/edit_profile.html', form=form)
else:
form.hireable.default = int(bool(current_user.hireable))
form.bio.default = current_user.bio or ''
form.process()
return render_template('users/edit_profile.html', form=form)
@users.route('/user/<int:id>')
def user_profile(id):
user = User.objects.get_or_404(user_id=id)
items_count = Item.objects(submitter=user).count()
return render_template('users/user_profile.html',
user=user,
items_count=items_count)
| mpl-2.0 |
hujiajie/pa-chromium | tools/git/for-all-touched-files.py | 130 | 3879 | #!/usr/bin/env python
# Copyright (c) 2011 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Invokes the specified (quoted) command for all files modified
between the current git branch and the specified branch or commit.
The special token [[FILENAME]] (or whatever you choose using the -t
flag) is replaced with each of the filenames of new or modified files.
Deleted files are not included. Neither are untracked files.
Synopsis:
%prog [-b BRANCH] [-d] [-x EXTENSIONS|-c] [-t TOKEN] QUOTED_COMMAND
Examples:
%prog -x gyp,gypi "tools/format_xml.py [[FILENAME]]"
%prog -c "tools/sort-headers.py [[FILENAME]]"
%prog -t "~~BINGO~~" "echo I modified ~~BINGO~~"
"""
import optparse
import os
import subprocess
import sys
# List of C++-like source file extensions.
_CPP_EXTENSIONS = ('h', 'hh', 'hpp', 'c', 'cc', 'cpp', 'cxx', 'mm',)
def GitShell(args, ignore_return=False):
"""A shell invocation suitable for communicating with git. Returns
output as list of lines, raises exception on error.
"""
job = subprocess.Popen(args,
shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
(out, err) = job.communicate()
if job.returncode != 0 and not ignore_return:
print out
raise Exception("Error %d running command %s" % (
job.returncode, args))
return out.split('\n')
def FilenamesFromGit(branch_name, extensions):
"""Provides a list of all new and modified files listed by [git diff
branch_name] where branch_name can be blank to get a diff of the
workspace.
Excludes deleted files.
If extensions is not an empty list, include only files with one of
the extensions on the list.
"""
lines = GitShell('git diff --stat=600,500 %s' % branch_name)
filenames = []
for line in lines:
line = line.lstrip()
# Avoid summary line, and files that have been deleted (no plus).
if line.find('|') != -1 and line.find('+') != -1:
filename = line.split()[0]
if filename:
filename = filename.rstrip()
ext = filename.rsplit('.')[-1]
if not extensions or ext in extensions:
filenames.append(filename)
return filenames
def ForAllTouchedFiles(branch_name, extensions, token, command):
"""For each new or modified file output by [git diff branch_name],
run command with token replaced with the filename. If extensions is
not empty, do this only for files with one of the extensions in that
list.
"""
filenames = FilenamesFromGit(branch_name, extensions)
for filename in filenames:
os.system(command.replace(token, filename))
def main():
parser = optparse.OptionParser(usage=__doc__)
parser.add_option('-x', '--extensions', default='', dest='extensions',
help='Limits to files with given extensions '
'(comma-separated).')
parser.add_option('-c', '--cpp', default=False, action='store_true',
dest='cpp_only',
help='Runs your command only on C++-like source files.')
parser.add_option('-t', '--token', default='[[FILENAME]]', dest='token',
help='Sets the token to be replaced for each file '
'in your command (default [[FILENAME]]).')
parser.add_option('-b', '--branch', default='origin/master', dest='branch',
help='Sets what to diff to (default origin/master). Set '
'to empty to diff workspace against HEAD.')
opts, args = parser.parse_args()
if not args:
parser.print_help()
sys.exit(1)
extensions = opts.extensions
if opts.cpp_only:
extensions = _CPP_EXTENSIONS
ForAllTouchedFiles(opts.branch, extensions, opts.token, args[0])
if __name__ == '__main__':
main()
| bsd-3-clause |
asampat3090/readthedocs.org | readthedocs/restapi/urls.py | 4 | 1407 | from django.conf.urls import url, patterns, include
from rest_framework import routers
from .views.model_views import BuildViewSet, ProjectViewSet, NotificationViewSet, VersionViewSet
from readthedocs.comments.views import CommentViewSet
router = routers.DefaultRouter()
router.register(r'build', BuildViewSet)
router.register(r'version', VersionViewSet)
router.register(r'project', ProjectViewSet)
router.register(r'notification', NotificationViewSet)
router.register(r'comments', CommentViewSet, base_name="comments")
urlpatterns = patterns(
'',
url(r'^', include(router.urls)),
url(r'embed/', 'readthedocs.restapi.views.core_views.embed', name='embed'),
url(r'docurl/', 'readthedocs.restapi.views.core_views.docurl', name='docurl'),
url(r'cname/', 'readthedocs.restapi.views.core_views.cname', name='cname'),
url(r'footer_html/', 'readthedocs.restapi.views.footer_views.footer_html', name='footer_html'),
url(r'index_search/',
'readthedocs.restapi.views.search_views.index_search',
name='index_search'),
url(r'search/$', 'readthedocs.restapi.views.search_views.search', name='api_search'),
url(r'search/project/$',
'readthedocs.restapi.views.search_views.project_search',
name='api_project_search'),
url(r'search/section/$',
'readthedocs.restapi.views.search_views.section_search',
name='api_section_search'),
)
| mit |
kustodian/ansible | test/units/module_utils/network/ftd/test_fdm_swagger_parser.py | 37 | 17322 | # Copyright (c) 2018 Cisco and/or its affiliates.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
import copy
import os
import unittest
from ansible.module_utils.network.ftd.common import HTTPMethod
from ansible.module_utils.network.ftd.fdm_swagger_client import FdmSwaggerParser
DIR_PATH = os.path.dirname(os.path.realpath(__file__))
TEST_DATA_FOLDER = os.path.join(DIR_PATH, 'test_data')
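# Minimal Swagger 2.0 fragment: the NetworkObject model and its CRUD paths, used as parser input in the tests below.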
base = {
'basePath': "/api/fdm/v2",
'definitions': {"NetworkObject": {"type": "object",
"properties": {"version": {"type": "string"}, "name": {"type": "string"},
"description": {"type": "string"},
"subType": {"type": "object",
"$ref": "#/definitions/NetworkObjectType"},
"value": {"type": "string"},
"isSystemDefined": {"type": "boolean"},
"dnsResolution": {"type": "object",
"$ref": "#/definitions/FQDNDNSResolution"},
"id": {"type": "string"},
"type": {"type": "string", "default": "networkobject"}},
"required": ["subType", "type", "value", "name"]},
"NetworkObjectWrapper": {
"allOf": [{"$ref": "#/definitions/NetworkObject"}, {"$ref": "#/definitions/LinksWrapper"}]}
},
'paths': {
"/object/networks": {
"get": {"tags": ["NetworkObject"],
"operationId": "getNetworkObjectList",
"responses": {
"200": {
"description": "",
"schema": {"type": "object",
"title": "NetworkObjectList",
"properties": {
"items": {
"type": "array",
"items": {"$ref": "#/definitions/NetworkObjectWrapper"}},
"paging": {
"$ref": "#/definitions/Paging"}},
"required": ["items", "paging"]}}},
"parameters": [
{"name": "offset", "in": "query", "required": False, "type": "integer"},
{"name": "limit", "in": "query", "required": False, "type": "integer"},
{"name": "sort", "in": "query", "required": False, "type": "string"},
{"name": "filter", "in": "query", "required": False, "type": "string"}]},
"post": {"tags": ["NetworkObject"], "operationId": "addNetworkObject",
"responses": {
"200": {"description": "",
"schema": {"type": "object",
"$ref": "#/definitions/NetworkObjectWrapper"}},
"422": {"description": "",
"schema": {"type": "object", "$ref": "#/definitions/ErrorWrapper"}}},
"parameters": [{"in": "body", "name": "body",
"required": True,
"schema": {"$ref": "#/definitions/NetworkObject"}}]}
},
"/object/networks/{objId}": {
"get": {"tags": ["NetworkObject"], "operationId": "getNetworkObject",
"responses": {"200": {"description": "",
"schema": {"type": "object",
"$ref": "#/definitions/NetworkObjectWrapper"}},
"404": {"description": "",
"schema": {"type": "object",
"$ref": "#/definitions/ErrorWrapper"}}},
"parameters": [{"name": "objId", "in": "path", "required": True,
"type": "string"}]},
"put": {"tags": ["NetworkObject"], "operationId": "editNetworkObject",
"responses": {"200": {"description": "",
"schema": {"type": "object",
"$ref": "#/definitions/NetworkObjectWrapper"}},
"422": {"description": "",
"schema": {"type": "object",
"$ref": "#/definitions/ErrorWrapper"}}},
"parameters": [{"name": "objId", "in": "path", "required": True,
"type": "string"},
{"in": "body", "name": "body", "required": True,
"schema": {"$ref": "#/definitions/NetworkObject"}}]},
"delete": {"tags": ["NetworkObject"], "operationId": "deleteNetworkObject",
"responses": {"204": {"description": ""},
"422": {"description": "",
"schema": {"type": "object",
"$ref": "#/definitions/ErrorWrapper"}}},
"parameters": [{"name": "objId", "in": "path", "required": True,
"type": "string"}]}}}
}
def _get_objects(base_object, key_names):
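# Helper: build a sub-dict of base_object restricted to the given keys.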
return dict((_key, base_object[_key]) for _key in key_names)
class TestFdmSwaggerParser(unittest.TestCase):
def test_simple_object(self):
self._data = copy.deepcopy(base)
self.fdm_data = FdmSwaggerParser().parse_spec(self._data)
expected_operations = {
'getNetworkObjectList': {
'method': HTTPMethod.GET,
'url': '/api/fdm/v2/object/networks',
'modelName': 'NetworkObject',
'parameters': {
'path': {},
'query': {
'offset': {
'required': False,
'type': 'integer'
},
'limit': {
'required': False,
'type': 'integer'
},
'sort': {
'required': False,
'type': 'string'
},
'filter': {
'required': False,
'type': 'string'
}
}
},
'returnMultipleItems': True,
"tags": ["NetworkObject"]
},
'addNetworkObject': {
'method': HTTPMethod.POST,
'url': '/api/fdm/v2/object/networks',
'modelName': 'NetworkObject',
'parameters': {'path': {},
'query': {}},
'returnMultipleItems': False,
"tags": ["NetworkObject"]
},
'getNetworkObject': {
'method': HTTPMethod.GET,
'url': '/api/fdm/v2/object/networks/{objId}',
'modelName': 'NetworkObject',
'parameters': {
'path': {
'objId': {
'required': True,
'type': "string"
}
},
'query': {}
},
'returnMultipleItems': False,
"tags": ["NetworkObject"]
},
'editNetworkObject': {
'method': HTTPMethod.PUT,
'url': '/api/fdm/v2/object/networks/{objId}',
'modelName': 'NetworkObject',
'parameters': {
'path': {
'objId': {
'required': True,
'type': "string"
}
},
'query': {}
},
'returnMultipleItems': False,
"tags": ["NetworkObject"]
},
'deleteNetworkObject': {
'method': HTTPMethod.DELETE,
'url': '/api/fdm/v2/object/networks/{objId}',
'modelName': 'NetworkObject',
'parameters': {
'path': {
'objId': {
'required': True,
'type': "string"
}
},
'query': {}
},
'returnMultipleItems': False,
"tags": ["NetworkObject"]
}
}
assert sorted(['NetworkObject', 'NetworkObjectWrapper']) == sorted(self.fdm_data['models'].keys())
assert expected_operations == self.fdm_data['operations']
assert {'NetworkObject': expected_operations} == self.fdm_data['model_operations']
def test_simple_object_with_documentation(self):
api_spec = copy.deepcopy(base)
docs = {
'definitions': {
'NetworkObject': {
'description': 'Description for Network Object',
'properties': {'name': 'Description for name field'}
}
},
'paths': {
'/object/networks': {
'get': {
'description': 'Description for getNetworkObjectList operation',
'parameters': [{'name': 'offset', 'description': 'Description for offset field'}]
},
'post': {'description': 'Description for addNetworkObject operation'}
}
}
}
self.fdm_data = FdmSwaggerParser().parse_spec(api_spec, docs)
assert 'Description for Network Object' == self.fdm_data['models']['NetworkObject']['description']
assert '' == self.fdm_data['models']['NetworkObjectWrapper']['description']
network_properties = self.fdm_data['models']['NetworkObject']['properties']
assert '' == network_properties['id']['description']
assert not network_properties['id']['required']
assert 'Description for name field' == network_properties['name']['description']
assert network_properties['name']['required']
ops = self.fdm_data['operations']
assert 'Description for getNetworkObjectList operation' == ops['getNetworkObjectList']['description']
assert 'Description for addNetworkObject operation' == ops['addNetworkObject']['description']
assert '' == ops['deleteNetworkObject']['description']
get_op_params = ops['getNetworkObjectList']['parameters']
assert 'Description for offset field' == get_op_params['query']['offset']['description']
assert '' == get_op_params['query']['limit']['description']
def test_model_operations_should_contain_all_operations(self):
data = {
'basePath': '/v2/',
'definitions': {
'Model1': {"type": "object"},
'Model2': {"type": "object"},
'Model3': {"type": "object"}
},
'paths': {
'path1': {
'get': {
'operationId': 'getSomeModelList',
"responses": {
"200": {"description": "",
"schema": {"type": "object",
"title": "NetworkObjectList",
"properties": {
"items": {
"type": "array",
"items": {
"$ref": "#/definitions/Model1"
}
}
}}
}
}
},
"post": {
"operationId": "addSomeModel",
"parameters": [{"in": "body",
"name": "body",
"schema": {"$ref": "#/definitions/Model2"}
}]}
},
'path2/{id}': {
"get": {"operationId": "getSomeModel",
"responses": {"200": {"description": "",
"schema": {"type": "object",
"$ref": "#/definitions/Model3"}},
}
},
"put": {"operationId": "editSomeModel",
"parameters": [{"in": "body",
"name": "body",
"schema": {"$ref": "#/definitions/Model1"}}
]},
"delete": {
"operationId": "deleteModel3",
}},
'path3': {
"delete": {
"operationId": "deleteNoneModel",
}
}
}
}
expected_operations = {
'getSomeModelList': {
'method': HTTPMethod.GET,
'url': '/v2/path1',
'modelName': 'Model1',
'returnMultipleItems': True,
'tags': []
},
'addSomeModel': {
'method': HTTPMethod.POST,
'url': '/v2/path1',
'modelName': 'Model2',
'parameters': {
'path': {},
'query': {}
},
'returnMultipleItems': False,
'tags': []
},
'getSomeModel': {
'method': HTTPMethod.GET,
'url': '/v2/path2/{id}',
'modelName': 'Model3',
'returnMultipleItems': False,
'tags': []
},
'editSomeModel': {
'method': HTTPMethod.PUT,
'url': '/v2/path2/{id}',
'modelName': 'Model1',
'parameters': {
'path': {},
'query': {}
},
'returnMultipleItems': False,
'tags': []
},
'deleteModel3': {
'method': HTTPMethod.DELETE,
'url': '/v2/path2/{id}',
'modelName': 'Model3',
'returnMultipleItems': False,
'tags': []
},
'deleteNoneModel': {
'method': HTTPMethod.DELETE,
'url': '/v2/path3',
'modelName': None,
'returnMultipleItems': False,
'tags': []
}
}
fdm_data = FdmSwaggerParser().parse_spec(data)
assert sorted(['Model1', 'Model2', 'Model3']) == sorted(fdm_data['models'].keys())
assert expected_operations == fdm_data['operations']
assert {
'Model1': {
'getSomeModelList': expected_operations['getSomeModelList'],
'editSomeModel': expected_operations['editSomeModel'],
},
'Model2': {
'addSomeModel': expected_operations['addSomeModel']
},
'Model3': {
'getSomeModel': expected_operations['getSomeModel'],
'deleteModel3': expected_operations['deleteModel3']
},
None: {
'deleteNoneModel': expected_operations['deleteNoneModel']
}
} == fdm_data['model_operations']
| gpl-3.0 |
ebukoz/thrive | erpnext/maintenance/doctype/maintenance_visit/maintenance_visit.py | 11 | 3120 | # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
from frappe import _
from erpnext.utilities.transaction_base import TransactionBase
class MaintenanceVisit(TransactionBase):
def get_feed(self):
return _("To {0}").format(self.customer_name)
def validate_serial_no(self):
for d in self.get('purposes'):
if d.serial_no and not frappe.db.exists("Serial No", d.serial_no):
frappe.throw(_("Serial No {0} does not exist").format(d.serial_no))
def validate(self):
self.validate_serial_no()
def update_customer_issue(self, flag):
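# flag == 1: the visit is being submitted, so push this visit's date, person and work details onto the linked Warranty Claim; otherwise the visit is being cancelled and the claim is restored from the latest remaining visit, or reset to Open.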
for d in self.get('purposes'):
if d.prevdoc_docname and d.prevdoc_doctype == 'Warranty Claim' :
if flag==1:
mntc_date = self.mntc_date
service_person = d.service_person
work_done = d.work_done
status = "Open"
if self.completion_status == 'Fully Completed':
status = 'Closed'
elif self.completion_status == 'Partially Completed':
status = 'Work In Progress'
else:
nm = frappe.db.sql("select t1.name, t1.mntc_date, t2.service_person, t2.work_done from `tabMaintenance Visit` t1, `tabMaintenance Visit Purpose` t2 where t2.parent = t1.name and t1.completion_status = 'Partially Completed' and t2.prevdoc_docname = %s and t1.name!=%s and t1.docstatus = 1 order by t1.name desc limit 1", (d.prevdoc_docname, self.name))
if nm:
status = 'Work In Progress'
mntc_date = nm and nm[0][1] or ''
service_person = nm and nm[0][2] or ''
work_done = nm and nm[0][3] or ''
else:
status = 'Open'
mntc_date = None
service_person = None
work_done = None
wc_doc = frappe.get_doc('Warranty Claim', d.prevdoc_docname)
wc_doc.update({
'resolution_date': mntc_date,
'resolved_by': service_person,
'resolution_details': work_done,
'status': status
})
wc_doc.db_update()
def check_if_last_visit(self):
"""check if last maintenance visit against same sales order/ Warranty Claim"""
check_for_docname = None
for d in self.get('purposes'):
if d.prevdoc_docname:
check_for_docname = d.prevdoc_docname
#check_for_doctype = d.prevdoc_doctype
if check_for_docname:
check = frappe.db.sql("select t1.name from `tabMaintenance Visit` t1, `tabMaintenance Visit Purpose` t2 where t2.parent = t1.name and t1.name!=%s and t2.prevdoc_docname=%s and t1.docstatus = 1 and (t1.mntc_date > %s or (t1.mntc_date = %s and t1.mntc_time > %s))", (self.name, check_for_docname, self.mntc_date, self.mntc_date, self.mntc_time))
if check:
check_lst = [x[0] for x in check]
check_lst =','.join(check_lst)
frappe.throw(_("Cancel Material Visits {0} before cancelling this Maintenance Visit").format(check_lst))
raise Exception
else:
self.update_customer_issue(0)
def on_submit(self):
self.update_customer_issue(1)
frappe.db.set(self, 'status', 'Submitted')
def on_cancel(self):
self.check_if_last_visit()
frappe.db.set(self, 'status', 'Cancelled')
def on_update(self):
pass
| gpl-3.0 |
joker946/nova | nova/tests/functional/v3/test_multiple_create.py | 30 | 2240 | # Copyright 2012 Nebula, Inc.
# Copyright 2014 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
from nova.tests.functional.v3 import test_servers
from nova.tests.unit.image import fake
CONF = cfg.CONF
CONF.import_opt('osapi_compute_extension',
'nova.api.openstack.compute.extensions')
class MultipleCreateJsonTest(test_servers.ServersSampleBase):
extension_name = "os-multiple-create"
_api_version = 'v2'
def _get_flags(self):
f = super(MultipleCreateJsonTest, self)._get_flags()
f['osapi_compute_extension'] = CONF.osapi_compute_extension[:]
f['osapi_compute_extension'].append(
'nova.api.openstack.compute.contrib.multiple_create.'
'Multiple_create')
return f
def test_multiple_create(self):
subs = {
'image_id': fake.get_valid_image_id(),
'host': self._get_host(),
'min_count': "2",
'max_count': "3"
}
response = self._do_post('servers', 'multiple-create-post-req', subs)
subs.update(self._get_regexes())
self._verify_response('multiple-create-post-resp', subs, response, 202)
def test_multiple_create_without_reservation_id(self):
subs = {
'image_id': fake.get_valid_image_id(),
'host': self._get_host(),
'min_count': "2",
'max_count': "3"
}
response = self._do_post('servers', 'multiple-create-no-resv-post-req',
subs)
subs.update(self._get_regexes())
self._verify_response('multiple-create-no-resv-post-resp', subs,
response, 202)
| apache-2.0 |
owattenmaker/PythonFighter | fighterV02.py | 1 | 14644 | '''
Worst fighting game ever
By: Owen Wattenmaker, Max Lambek
'''
#TODO
############################################################################################
### -add in frames for attacking, looks too choppy, need more pictures ###
### -fix yellow hit marker ###
### -fix framerate issues when loading background ###
### -add knockback ###
### - ###
### -walking animation, possibly this: http://www.pygame.org/project-GIFImage-1039-.html ###
############################################################################################
import pygame, sys, time, random, os
from pygame.locals import *
#from GIFImage import GIFImage
#change this to false to disable the background and vastly improve performance
draw_background = True
def spriteMask(sprite, player, state):
sprite.mask = pygame.mask.from_surface(player[state])
return sprite
def dplayer1():
if os.name == 'nt':
#right stationary
StationaryRight = pygame.transform.scale(pygame.image.load('character\player1\player1_right_stationary.png'), (350, 350))
#left stationary
StationaryLeft = pygame.transform.scale(pygame.image.load('character\player1\player1_left_stationary.png'), (350, 350))
#right punch
PunchRight = pygame.transform.scale(pygame.image.load('character\player1\player1_right_punch.png'), (350, 350))
#left punch
PunchLeft = pygame.transform.scale(pygame.image.load('character\player1\player1_left_punch.png'), (350, 350))
#right kick
KickRight = pygame.transform.scale(pygame.image.load('character\player1\player1_right_kick.png'), (350, 350))
#left kick
KickLeft = pygame.transform.scale(pygame.image.load('character\player1\player1_left_kick.png'), (350, 350))
else:
#right stationary
StationaryRight = pygame.transform.scale(pygame.image.load('character/player1/player1_right_stationary.png'), (350, 350))
#left stationary
StationaryLeft = pygame.transform.scale(pygame.image.load('character/player1/player1_left_stationary.png'), (350, 350))
#right punch
PunchRight = pygame.transform.scale(pygame.image.load('character/player1/player1_right_punch.png'), (350, 350))
#left punch
PunchLeft = pygame.transform.scale(pygame.image.load('character/player1/player1_left_punch.png'), (350, 350))
#right kick
KickRight = pygame.transform.scale(pygame.image.load('character/player1/player1_right_kick.png'), (350, 350))
#left kick
KickLeft = pygame.transform.scale(pygame.image.load('character/player1/player1_left_kick.png'), (350, 350))
player1 = {'right_stationary':StationaryRight, 'left_stationary':StationaryLeft, 'right_punch':PunchRight, 'left_punch':PunchLeft, 'right_kick':KickRight, 'left_kick':KickLeft}
return player1
def dplayer2():
if os.name == 'nt':
#right stationary
StationaryRight = pygame.transform.scale(pygame.image.load('character\player2\player2_right_stationary.png'), (350, 350))
#left stationary
StationaryLeft = pygame.transform.scale(pygame.image.load('character\player2\player2_left_stationary.png'), (350, 350))
#right punch
PunchRight = pygame.transform.scale(pygame.image.load('character\player2\player2_right_punch.png'), (350, 350))
#left punch
PunchLeft = pygame.transform.scale(pygame.image.load('character\player2\player2_left_punch.png'), (350, 350))
#right kick
KickRight = pygame.transform.scale(pygame.image.load('character\player2\player2_right_kick.png'), (350, 350))
#left kick
KickLeft = pygame.transform.scale(pygame.image.load('character\player2\player2_left_kick.png'), (350, 350))
else:
StationaryRight = pygame.transform.scale(pygame.image.load('character/player2/player2_right_stationary.png'), (350, 350))
#left stationary
StationaryLeft = pygame.transform.scale(pygame.image.load('character/player2/player2_left_stationary.png'), (350, 350))
#right punch
PunchRight = pygame.transform.scale(pygame.image.load('character/player2/player2_right_punch.png'), (350, 350))
#left punch
PunchLeft = pygame.transform.scale(pygame.image.load('character/player2/player2_left_punch.png'), (350, 350))
#right kick
KickRight = pygame.transform.scale(pygame.image.load('character/player2/player2_right_kick.png'), (350, 350))
#left kick
KickLeft = pygame.transform.scale(pygame.image.load('character/player2/player2_left_kick.png'), (350, 350))
player2 = {'right_stationary':StationaryRight, 'left_stationary':StationaryLeft, 'right_punch':PunchRight, 'left_punch':PunchLeft, 'right_kick':KickRight, 'left_kick':KickLeft}
return player2
def collision(sprite1, sprite2):
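# Pixel-perfect check: returns the first point of overlap between the two sprite masks, or None if they do not touch.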
a = pygame.sprite.collide_mask(sprite1, sprite2)
return a
def movement(moveLeft, moveRight, player_left, player_right, MOVESPEED, WINDOWWIDTH):
# move the player
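# Only player_right is shifted; the caller assigns the returned pair back to rect.left and rect.right, and the final rect.right assignment is what actually moves the pygame Rect.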
if moveLeft and player_left > -100:
player_right -= MOVESPEED
if moveRight and player_right < WINDOWWIDTH - 300:
player_right += MOVESPEED
return player_left, player_right
def jumping1(player_top, airborn, verticalVelocity):
if airborn:
verticalVelocity += .7
player_top += verticalVelocity
if player_top >= 360:
airborn = False
return player_top, airborn, verticalVelocity
def jumping2(player_top, airborn, verticalVelocity):
if airborn:
verticalVelocity += .7
player_top += verticalVelocity
if player_top >= 360:
airborn = False
return player_top, airborn, verticalVelocity
def score(hpplayer1, hpplayer2, punch1, kick1, punch2, kick2, hit):
if punch1:
hpplayer2 -= random.randint(23, 33)
hit = True
if kick1:
hpplayer2 -= random.randint(38, 45)
hit = True
if punch2:
hpplayer1 -= random.randint(23, 33)
hit = True
if kick2:
hpplayer1 -= random.randint(38, 45)
hit = True
return hpplayer1, hpplayer2, hit
def main():
i = 0
# set up pygame
pygame.init()
font = pygame.font.SysFont("monospace", 72)
mainClock = pygame.time.Clock()
background = pygame.transform.scale(pygame.image.load('background.jpg'), (1300, 1300))
hit_background = pygame.transform.scale(pygame.image.load('flash_back.png'), (1300, 1300))
# set up the window
WINDOWWIDTH = 1280
WINDOWHEIGHT = 760
r=0
windowSurface = pygame.display.set_mode((WINDOWWIDTH, WINDOWHEIGHT), 0, 32)
pygame.display.set_caption('Terrible Fighting Game')
rectplayer1 = pygame.Rect(1, 360, 5, 5)
rectplayer2 = pygame.Rect(600, 360, 5, 5)
splayer1 = pygame.sprite.Sprite()
splayer1.image = pygame.transform.scale(pygame.image.load('character/player1/player1_right_stationary.png'), (350, 350))
splayer1.rect = splayer1.image.get_rect()
splayer1.rect.topleft = [0, 350]
splayer2 = pygame.sprite.Sprite()
splayer2.image = pygame.transform.scale(pygame.image.load('character/player1/player1_right_stationary.png'), (350, 350))
splayer2.rect = splayer2.image.get_rect()
splayer2.rect.topleft = [450, 350]
#hit_effect = pygame.transform.scale(pygame.image.load('hit_effect.png'), (100, 100))
hit = False
collide = False
airborn1 = False
airborn2 = False
moveLeft1 = False
moveLeft2 = False
moveRight1 = False
moveRight2 = False
MOVESPEED = 6
orientation1 = 'right'
orientation2 = 'left'
state1 = 'right_stationary'
state2 = 'left_stationary'
verticalVelocity1 = 0
verticalVelocity2 = 0
# initialise the attack flags up front so score() never sees them unbound if a collision happens on the very first frame
punch1 = punch2 = kick1 = kick2 = False
hpplayer1 = 500
hpplayer2 = 500
player1 = dplayer1()
player2 = dplayer2()
gameStart = False
while not gameStart:
windowSurface.blit(background,(0,-450))
windowSurface.blit(hit_background, [0,0])
pressenter = font.render('<Press enter to start>', 1, (255, 255, 0))
windowSurface.blit(pressenter, [150, 400])
for event in pygame.event.get():
if event.type == QUIT:
pygame.quit()
sys.exit()
pressed_keys = pygame.key.get_pressed()
if pressed_keys[K_KP_ENTER] or pressed_keys[K_RETURN]:
gameStart = True
pygame.display.update()
while True:
while hpplayer1 > 0 and hpplayer2 > 0:
for event in pygame.event.get():
if event.type == QUIT:
pygame.quit()
sys.exit()
pressed_keys = pygame.key.get_pressed()
#player1
if pressed_keys[K_a]:
moveLeft1 = True
moveRight1 = False
orientation1 = 'left'
state1 = 'left_stationary'
if pressed_keys[K_d]:
moveLeft1 = False
moveRight1 = True
orientation1 = 'right'
state1 = 'right_stationary'
if pressed_keys[K_w]:
if not airborn1:
airborn1 = True
verticalVelocity1 = -20
#player2
if pressed_keys[K_LEFT]:
moveLeft2 = True
moveRight2 = False
orientation2 = 'left'
state2 = 'left_stationary'
if pressed_keys[K_RIGHT]:
moveLeft2 = False
moveRight2 = True
orientation2 = 'right'
state2 = 'right_stationary'
if pressed_keys[K_UP]:
if not airborn2:
airborn2 = True
verticalVelocity2 = -20
#player1
if not pressed_keys[K_a]:
moveLeft1 = False
if not pressed_keys[K_d]:
moveRight1 = False
#player2
if not pressed_keys[K_LEFT]:
moveLeft2 = False
if not pressed_keys[K_RIGHT]:
moveRight2 = False
if event.type == KEYDOWN:
# change the keyboard variables
#player1
if event.key == ord('t'):
kick1 = True
if not airborn1:
moveLeft1 = False
moveRight1 = False
if orientation1 == 'right':
state1 = 'right_kick'
if orientation1 == 'left':
state1 = 'left_kick'
if airborn1:
if orientation1 == 'right':
state1 = 'right_kick'
if orientation1 == 'left':
state1 = 'left_kick'
if event.key == ord('y'):
punch1 = True
if not airborn1:
moveLeft1 = False
moveRight1 = False
if orientation1 == 'right':
state1 = 'right_punch'
if orientation1 == 'left':
state1 = 'left_punch'
if airborn1:
if orientation1 == 'right':
state1 = 'right_punch'
if orientation1 == 'left':
state1 = 'left_punch'
#player2
if event.key == ord('.'):
kick2 = True
if not airborn2:
moveLeft2 = False
moveRight2 = False
if orientation2 == 'right':
state2 = 'right_kick'
if orientation2 == 'left':
state2 = 'left_kick'
if airborn2:
if orientation2 == 'right':
state2 = 'right_kick'
if orientation2 == 'left':
state2 = 'left_kick'
if event.key == ord('/'):
punch2 = True
if not airborn2:
moveLeft2 = False
moveRight2 = False
if orientation2 == 'right':
state2 = 'right_punch'
if orientation2 == 'left':
state2 = 'left_punch'
if airborn2:
if orientation2 == 'right':
state2 = 'right_punch'
if orientation2 == 'left':
state2 = 'left_punch'
if event.type == KEYUP:
if event.key == K_ESCAPE:
pygame.quit()
sys.exit()
#player1
if event.key == ord('t') and orientation1 == 'right':
state1 = 'right_stationary'
if event.key == ord('t') and orientation1 == 'left':
state1 = 'left_stationary'
if event.key == ord('y') and orientation1 == 'right':
state1 = 'right_stationary'
if event.key == ord('y') and orientation1 == 'left':
state1 = 'left_stationary'
#player2
if event.key == ord('.') and orientation2 == 'right':
state2 = 'right_stationary'
if event.key == ord('.') and orientation2 == 'left':
state2 = 'left_stationary'
if event.key == ord('/') and orientation2 == 'right':
state2 = 'right_stationary'
if event.key == ord('/') and orientation2 == 'left':
state2 = 'left_stationary'
#sprite.mask = pygame.mask.from_surface(sprite.image)
#moveplayer
rectplayer1.left, rectplayer1.right = movement(moveLeft1, moveRight1, rectplayer1.left, rectplayer1.right, MOVESPEED, WINDOWWIDTH)
rectplayer2.left, rectplayer2.right = movement(moveLeft2, moveRight2, rectplayer2.left, rectplayer2.right, MOVESPEED, WINDOWWIDTH)
#jump player
rectplayer1.top, airborn1, verticalVelocity1 = jumping1(rectplayer1.top, airborn1, verticalVelocity1)
rectplayer2.top, airborn2, verticalVelocity2 = jumping2(rectplayer2.top, airborn2, verticalVelocity2)
if draw_background:
windowSurface.blit(background,(0,-450))
else:
windowSurface.fill((50,50,50))
#assign the image state to the sprite
splayer1.image = player1[state1]
splayer2.image = player2[state2]
#do the mask, do the monster mask, it was a 2 player smash
splayer1.mask = pygame.mask.from_surface(splayer1.image)
splayer2.mask = pygame.mask.from_surface(splayer2.image)
#assign the player rectangle to the sprite
splayer1.rect.topleft = [rectplayer1.left, rectplayer1.top]
splayer2.rect.topleft = [rectplayer2.left, rectplayer2.top]
hitcoordinates = collision(splayer1, splayer2)
#hitcoordinates = pygame.sprite.collide_mask(splayer1, splayer2)
if hitcoordinates != None:
hpplayer1, hpplayer2, hit = score(hpplayer1, hpplayer2, punch1, kick1, punch2, kick2, hit)
if hit:
windowSurface.blit(hit_background, [0,0])
pygame.draw.rect(windowSurface, (216,0,0), (620,30,-500,30), 0)
pygame.draw.rect(windowSurface, (216,0,0), (660,30, 500,30), 0)
if hpplayer1 > 0:
pygame.draw.rect(windowSurface, (19,193,0), (620,30,-hpplayer1,30), 0)
if hpplayer2 > 0:
pygame.draw.rect(windowSurface, (19,193,0), (660,30, hpplayer2,30), 0)
#draw players
windowSurface.blit(splayer1.image, splayer1.rect)
windowSurface.blit(splayer2.image, splayer2.rect)
#if hit:
# windowSurface.blit(hit_effect, [hitcoordinates[0] - 40 , hitcoordinates[1] + 324])
#draw the window onto the screen
pygame.display.update()
#pause for dramatic effect
if hit:
pygame.time.delay(350)
hit = False
mainClock.tick(60)
punch1 = False
punch2 = False
kick1 = False
kick2 = False
if hpplayer1 > 0:
print 'Player 1 wins!'
return 0
if hpplayer2 > 0:
print 'Player 2 wins!'
return 0
else:
print "rip both players"
return 0
main()
| mit |
FluidityProject/fluidity | python/elementtree/SimpleXMLWriter.py | 103 | 8616 | #
# SimpleXMLWriter
# $Id: SimpleXMLWriter.py 2312 2005-03-02 18:13:39Z fredrik $
#
# a simple XML writer
#
# history:
# 2001-12-28 fl created
# 2002-11-25 fl fixed attribute encoding
# 2002-12-02 fl minor fixes for 1.5.2
# 2004-06-17 fl added pythondoc markup
# 2004-07-23 fl added flush method (from Jay Graves)
# 2004-10-03 fl added declaration method
#
# Copyright (c) 2001-2004 by Fredrik Lundh
#
# [email protected]
# http://www.pythonware.com
#
# --------------------------------------------------------------------
# The SimpleXMLWriter module is
#
# Copyright (c) 2001-2004 by Fredrik Lundh
#
# By obtaining, using, and/or copying this software and/or its
# associated documentation, you agree that you have read, understood,
# and will comply with the following terms and conditions:
#
# Permission to use, copy, modify, and distribute this software and
# its associated documentation for any purpose and without fee is
# hereby granted, provided that the above copyright notice appears in
# all copies, and that both that copyright notice and this permission
# notice appear in supporting documentation, and that the name of
# Secret Labs AB or the author not be used in advertising or publicity
# pertaining to distribution of the software without specific, written
# prior permission.
#
# SECRET LABS AB AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH REGARD
# TO THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANT-
# ABILITY AND FITNESS. IN NO EVENT SHALL SECRET LABS AB OR THE AUTHOR
# BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY
# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
# OF THIS SOFTWARE.
# --------------------------------------------------------------------
##
# Tools to write XML files, without having to deal with encoding
# issues, well-formedness, etc.
# <p>
# The current version does not provide built-in support for
# namespaces. To create files using namespaces, you have to provide
# "xmlns" attributes and explicitly add prefixes to tags and
# attributes.
#
# <h3>Patterns</h3>
#
# The following example generates a small XHTML document.
# <pre>
#
# from elementtree.SimpleXMLWriter import XMLWriter
# import sys
#
# w = XMLWriter(sys.stdout)
#
# html = w.start("html")
#
# w.start("head")
# w.element("title", "my document")
# w.element("meta", name="generator", value="my application 1.0")
# w.end()
#
# w.start("body")
# w.element("h1", "this is a heading")
# w.element("p", "this is a paragraph")
#
# w.start("p")
# w.data("this is ")
# w.element("b", "bold")
# w.data(" and ")
# w.element("i", "italic")
# w.data(".")
# w.end("p")
#
# w.close(html)
# </pre>
##
import re, sys, string
try:
unicode("")
except NameError:
def encode(s, encoding):
# 1.5.2: application must use the right encoding
return s
_escape = re.compile(r"[&<>\"\x80-\xff]+") # 1.5.2
else:
def encode(s, encoding):
return s.encode(encoding)
_escape = re.compile(eval(r'u"[&<>\"\u0080-\uffff]+"'))
def encode_entity(text, pattern=_escape):
# map reserved and non-ascii characters to numerical entities
def escape_entities(m):
out = []
for char in m.group():
out.append("&#%d;" % ord(char))
return string.join(out, "")
return encode(pattern.sub(escape_entities, text), "ascii")
del _escape
#
# the following functions assume an ascii-compatible encoding
# (or "utf-16")
def escape_cdata(s, encoding=None, replace=string.replace):
    s = replace(s, "&", "&amp;")
    s = replace(s, "<", "&lt;")
    s = replace(s, ">", "&gt;")
if encoding:
try:
return encode(s, encoding)
except UnicodeError:
return encode_entity(s)
return s
def escape_attrib(s, encoding=None, replace=string.replace):
    s = replace(s, "&", "&amp;")
    s = replace(s, "'", "&apos;")
    s = replace(s, "\"", "&quot;")
    s = replace(s, "<", "&lt;")
    s = replace(s, ">", "&gt;")
if encoding:
try:
return encode(s, encoding)
except UnicodeError:
return encode_entity(s)
return s
##
# XML writer class.
#
# @param file A file or file-like object. This object must implement
# a <b>write</b> method that takes an 8-bit string.
# @param encoding Optional encoding.
class XMLWriter:
def __init__(self, file, encoding="us-ascii"):
if not hasattr(file, "write"):
file = open(file, "w")
self.__write = file.write
if hasattr(file, "flush"):
self.flush = file.flush
self.__open = 0 # true if start tag is open
self.__tags = []
self.__data = []
self.__encoding = encoding
def __flush(self):
# flush internal buffers
if self.__open:
self.__write(">")
self.__open = 0
if self.__data:
data = string.join(self.__data, "")
self.__write(escape_cdata(data, self.__encoding))
self.__data = []
##
# Writes an XML declaration.
def declaration(self):
encoding = self.__encoding
if encoding == "us-ascii" or encoding == "utf-8":
self.__write("<?xml version='1.0'?>\n")
else:
self.__write("<?xml version='1.0' encoding='%s'?>\n" % encoding)
##
# Opens a new element. Attributes can be given as keyword
# arguments, or as a string/string dictionary. You can pass in
# 8-bit strings or Unicode strings; the former are assumed to use
# the encoding passed to the constructor. The method returns an
# opaque identifier that can be passed to the <b>close</b> method,
# to close all open elements up to and including this one.
#
# @param tag Element tag.
# @param attrib Attribute dictionary. Alternatively, attributes
# can be given as keyword arguments.
# @return An element identifier.
def start(self, tag, attrib={}, **extra):
self.__flush()
tag = escape_cdata(tag, self.__encoding)
self.__data = []
self.__tags.append(tag)
self.__write("<%s" % tag)
if attrib or extra:
attrib = attrib.copy()
attrib.update(extra)
attrib = attrib.items()
attrib.sort()
for k, v in attrib:
k = escape_cdata(k, self.__encoding)
v = escape_attrib(v, self.__encoding)
self.__write(" %s=\"%s\"" % (k, v))
self.__open = 1
return len(self.__tags)-1
##
# Adds a comment to the output stream.
#
# @param comment Comment text, as an 8-bit string or Unicode string.
def comment(self, comment):
self.__flush()
self.__write("<!-- %s -->\n" % escape_cdata(comment, self.__encoding))
##
# Adds character data to the output stream.
#
# @param text Character data, as an 8-bit string or Unicode string.
def data(self, text):
self.__data.append(text)
##
# Closes the current element (opened by the most recent call to
# <b>start</b>).
#
# @param tag Element tag. If given, the tag must match the start
# tag. If omitted, the current element is closed.
def end(self, tag=None):
if tag:
assert self.__tags, "unbalanced end(%s)" % tag
assert escape_cdata(tag, self.__encoding) == self.__tags[-1],\
"expected end(%s), got %s" % (self.__tags[-1], tag)
else:
assert self.__tags, "unbalanced end()"
tag = self.__tags.pop()
if self.__data:
self.__flush()
elif self.__open:
self.__open = 0
self.__write(" />")
return
self.__write("</%s>" % tag)
##
# Closes open elements, up to (and including) the element identified
# by the given identifier.
#
# @param id Element identifier, as returned by the <b>start</b> method.
def close(self, id):
while len(self.__tags) > id:
self.end()
##
# Adds an entire element. This is the same as calling <b>start</b>,
# <b>data</b>, and <b>end</b> in sequence. The <b>text</b> argument
# can be omitted.
def element(self, tag, text=None, attrib={}, **extra):
apply(self.start, (tag, attrib), extra)
if text:
self.data(text)
self.end()
##
# Flushes the output stream.
def flush(self):
pass # replaced by the constructor
| lgpl-2.1 |
alivecor/tensorflow | tensorflow/contrib/learn/python/learn/datasets/base_test.py | 136 | 3072 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.learn.python.learn.datasets import base
from tensorflow.python.platform import test
mock = test.mock
_TIMEOUT = IOError(110, "timeout")
class BaseTest(test.TestCase):
"""Test load csv functions."""
def testUrlretrieveRetriesOnIOError(self):
with mock.patch.object(base, "time") as mock_time:
with mock.patch.object(base, "urllib") as mock_urllib:
mock_urllib.request.urlretrieve.side_effect = [
_TIMEOUT, _TIMEOUT, _TIMEOUT, _TIMEOUT, _TIMEOUT, None
]
base.urlretrieve_with_retry("http://dummy.com", "/tmp/dummy")
# Assert full backoff was tried
actual_list = [arg[0][0] for arg in mock_time.sleep.call_args_list]
expected_list = [1, 2, 4, 8, 16]
for actual, expected in zip(actual_list, expected_list):
self.assertLessEqual(abs(actual - expected), 0.25 * expected)
self.assertEquals(len(actual_list), len(expected_list))
def testUrlretrieveRaisesAfterRetriesAreExhausted(self):
with mock.patch.object(base, "time") as mock_time:
with mock.patch.object(base, "urllib") as mock_urllib:
mock_urllib.request.urlretrieve.side_effect = [
_TIMEOUT,
_TIMEOUT,
_TIMEOUT,
_TIMEOUT,
_TIMEOUT,
_TIMEOUT,
]
with self.assertRaises(IOError):
base.urlretrieve_with_retry("http://dummy.com", "/tmp/dummy")
# Assert full backoff was tried
actual_list = [arg[0][0] for arg in mock_time.sleep.call_args_list]
expected_list = [1, 2, 4, 8, 16]
for actual, expected in zip(actual_list, expected_list):
self.assertLessEqual(abs(actual - expected), 0.25 * expected)
self.assertEquals(len(actual_list), len(expected_list))
def testUrlretrieveRaisesOnNonRetriableErrorWithoutRetry(self):
with mock.patch.object(base, "time") as mock_time:
with mock.patch.object(base, "urllib") as mock_urllib:
mock_urllib.request.urlretrieve.side_effect = [
IOError(2, "No such file or directory"),
]
with self.assertRaises(IOError):
base.urlretrieve_with_retry("http://dummy.com", "/tmp/dummy")
# Assert no retries
self.assertFalse(mock_time.called)
if __name__ == "__main__":
test.main()
| apache-2.0 |
greguu/linux-4.2.3-c3x00 | tools/perf/scripts/python/event_analyzing_sample.py | 4719 | 7393 | # event_analyzing_sample.py: general event handler in python
#
# Current perf report is already very powerful with the annotation integrated,
# and this script is not trying to be as powerful as perf report, but
# providing end user/developer a flexible way to analyze the events other
# than trace points.
#
# The 2 database related functions in this script just show how to gather
# the basic information, and users can modify and write their own functions
# according to their specific requirement.
#
# The first function "show_general_events" just does a basic grouping for all
# generic events with the help of sqlite, and the 2nd one "show_pebs_ll" is
# for an x86 HW PMU event: PEBS with load latency data.
#
import os
import sys
import math
import struct
import sqlite3
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from EventClass import *
#
# If the perf.data file has a large number of samples, the insert operation
# will be very time consuming (about 10+ minutes for 10000 samples) if the
# .db database is on disk. Move the .db file to a RAM-based FS to speed up
# the handling, which will cut the time down to several seconds.
#
con = sqlite3.connect("/dev/shm/perf.db")
con.isolation_level = None
def trace_begin():
print "In trace_begin:\n"
#
# Will create several tables at the start, pebs_ll is for PEBS data with
# load latency info, while gen_events is for general event.
#
con.execute("""
create table if not exists gen_events (
name text,
symbol text,
comm text,
dso text
);""")
con.execute("""
create table if not exists pebs_ll (
name text,
symbol text,
comm text,
dso text,
flags integer,
ip integer,
status integer,
dse integer,
dla integer,
lat integer
);""")
#
# Create and insert event object to a database so that user could
# do more analysis with simple database commands.
#
def process_event(param_dict):
event_attr = param_dict["attr"]
sample = param_dict["sample"]
raw_buf = param_dict["raw_buf"]
comm = param_dict["comm"]
name = param_dict["ev_name"]
# Symbol and dso info are not always resolved
if (param_dict.has_key("dso")):
dso = param_dict["dso"]
else:
dso = "Unknown_dso"
if (param_dict.has_key("symbol")):
symbol = param_dict["symbol"]
else:
symbol = "Unknown_symbol"
# Create the event object and insert it to the right table in database
event = create_event(name, comm, dso, symbol, raw_buf)
insert_db(event)
def insert_db(event):
if event.ev_type == EVTYPE_GENERIC:
con.execute("insert into gen_events values(?, ?, ?, ?)",
(event.name, event.symbol, event.comm, event.dso))
elif event.ev_type == EVTYPE_PEBS_LL:
event.ip &= 0x7fffffffffffffff
event.dla &= 0x7fffffffffffffff
con.execute("insert into pebs_ll values (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)",
(event.name, event.symbol, event.comm, event.dso, event.flags,
event.ip, event.status, event.dse, event.dla, event.lat))
def trace_end():
print "In trace_end:\n"
# We show the basic info for the 2 type of event classes
show_general_events()
show_pebs_ll()
con.close()
#
# As the event number may be very big, we can't use a linear way
# to show the histogram in real numbers, so we use a log2 algorithm instead.
#
def num2sym(num):
# Each number will have at least one '#'
snum = '#' * (int)(math.log(num, 2) + 1)
return snum
def show_general_events():
# Check the total record number in the table
count = con.execute("select count(*) from gen_events")
for t in count:
print "There is %d records in gen_events table" % t[0]
if t[0] == 0:
return
print "Statistics about the general events grouped by thread/symbol/dso: \n"
# Group by thread
commq = con.execute("select comm, count(comm) from gen_events group by comm order by -count(comm)")
print "\n%16s %8s %16s\n%s" % ("comm", "number", "histogram", "="*42)
for row in commq:
print "%16s %8d %s" % (row[0], row[1], num2sym(row[1]))
# Group by symbol
print "\n%32s %8s %16s\n%s" % ("symbol", "number", "histogram", "="*58)
symbolq = con.execute("select symbol, count(symbol) from gen_events group by symbol order by -count(symbol)")
for row in symbolq:
print "%32s %8d %s" % (row[0], row[1], num2sym(row[1]))
# Group by dso
print "\n%40s %8s %16s\n%s" % ("dso", "number", "histogram", "="*74)
dsoq = con.execute("select dso, count(dso) from gen_events group by dso order by -count(dso)")
for row in dsoq:
print "%40s %8d %s" % (row[0], row[1], num2sym(row[1]))
#
# This function just shows the basic info, and we could do more with the
# data in the tables, like checking the function parameters when some
# big latency events happen.
#
def show_pebs_ll():
count = con.execute("select count(*) from pebs_ll")
for t in count:
print "There is %d records in pebs_ll table" % t[0]
if t[0] == 0:
return
print "Statistics about the PEBS Load Latency events grouped by thread/symbol/dse/latency: \n"
# Group by thread
commq = con.execute("select comm, count(comm) from pebs_ll group by comm order by -count(comm)")
print "\n%16s %8s %16s\n%s" % ("comm", "number", "histogram", "="*42)
for row in commq:
print "%16s %8d %s" % (row[0], row[1], num2sym(row[1]))
# Group by symbol
print "\n%32s %8s %16s\n%s" % ("symbol", "number", "histogram", "="*58)
symbolq = con.execute("select symbol, count(symbol) from pebs_ll group by symbol order by -count(symbol)")
for row in symbolq:
print "%32s %8d %s" % (row[0], row[1], num2sym(row[1]))
# Group by dse
dseq = con.execute("select dse, count(dse) from pebs_ll group by dse order by -count(dse)")
print "\n%32s %8s %16s\n%s" % ("dse", "number", "histogram", "="*58)
for row in dseq:
print "%32s %8d %s" % (row[0], row[1], num2sym(row[1]))
# Group by latency
latq = con.execute("select lat, count(lat) from pebs_ll group by lat order by lat")
print "\n%32s %8s %16s\n%s" % ("latency", "number", "histogram", "="*58)
for row in latq:
print "%32s %8d %s" % (row[0], row[1], num2sym(row[1]))
def trace_unhandled(event_name, context, event_fields_dict):
print ' '.join(['%s=%s'%(k,str(v))for k,v in sorted(event_fields_dict.items())])
| gpl-2.0 |
MER-GROUP/intellij-community | python/helpers/py3only/docutils/languages/de.py | 200 | 1722 | # $Id: de.py 4564 2006-05-21 20:44:42Z wiemann $
# Author: Gunnar Schwant <[email protected]>
# Copyright: This module has been placed in the public domain.
# New language mappings are welcome. Before doing a new translation, please
# read <http://docutils.sf.net/docs/howto/i18n.html>. Two files must be
# translated for each language: one in docutils/languages, the other in
# docutils/parsers/rst/languages.
"""
German language mappings for language-dependent features of Docutils.
"""
__docformat__ = 'reStructuredText'
labels = {
'author': 'Autor',
'authors': 'Autoren',
'organization': 'Organisation',
'address': 'Adresse',
'contact': 'Kontakt',
'version': 'Version',
'revision': 'Revision',
'status': 'Status',
'date': 'Datum',
'dedication': 'Widmung',
'copyright': 'Copyright',
'abstract': 'Zusammenfassung',
'attention': 'Achtung!',
'caution': 'Vorsicht!',
'danger': '!GEFAHR!',
'error': 'Fehler',
'hint': 'Hinweis',
'important': 'Wichtig',
'note': 'Bemerkung',
'tip': 'Tipp',
'warning': 'Warnung',
'contents': 'Inhalt'}
"""Mapping of node class name to label text."""
bibliographic_fields = {
'autor': 'author',
'autoren': 'authors',
'organisation': 'organization',
'adresse': 'address',
'kontakt': 'contact',
'version': 'version',
'revision': 'revision',
'status': 'status',
'datum': 'date',
'copyright': 'copyright',
'widmung': 'dedication',
'zusammenfassung': 'abstract'}
"""German (lowcased) to canonical name mapping for bibliographic fields."""
author_separators = [';', ',']
"""List of separator strings for the 'Authors' bibliographic field. Tried in
order."""
| apache-2.0 |
caphrim007/ansible | lib/ansible/module_utils/facts/system/apparmor.py | 232 | 1311 | # Collect facts related to apparmor
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
from ansible.module_utils.facts.collector import BaseFactCollector
class ApparmorFactCollector(BaseFactCollector):
name = 'apparmor'
_fact_ids = set()
def collect(self, module=None, collected_facts=None):
facts_dict = {}
apparmor_facts = {}
if os.path.exists('/sys/kernel/security/apparmor'):
apparmor_facts['status'] = 'enabled'
else:
apparmor_facts['status'] = 'disabled'
facts_dict['apparmor'] = apparmor_facts
return facts_dict
| gpl-3.0 |
KanoComputing/kano-toolset | tests/fixtures/keyboard.py | 1 | 1887 | #
# keyboard.py
#
# Copyright (C) 2018 Kano Computing Ltd.
# License: http://www.gnu.org/licenses/gpl-2.0.txt GNU GPL v2
#
# Fixtures for fake keyboards
#
import imp
import os
import pytest
KEYBOARD_LSUSB_DIR = os.path.join(
os.path.dirname(os.path.realpath(__file__)), 'keyboard'
)
KEYBOARD_LSUSB_OUTPUTS = [
('no_keyboard', None),
('other_keyboard', None),
('en_keyboard', 'en'),
('es_keyboard', 'es'),
]
@pytest.fixture(scope='function', params=KEYBOARD_LSUSB_OUTPUTS)
def keyboard(request, fs, monkeypatch):
'''
Simulates different keyboards, mainly by their outputs from terminal
commands.
Note: This fixture auto-reimports the `kano.utils.hardware` module, which we
expect to be the one which requires the patch, however depending on
the module, it may be required to re-import the module being tested
as this fixture patches `kano.utils.run_cmd` and if the tested module
depends on this directly and has already been loaded then the updated
version will not propagate. To re-import use:
import imp
import module.to.be.tested
imp.reload(module.to.be.tested)
'''
kb_file, version = request.param
lsusb_output_path = os.path.join(
KEYBOARD_LSUSB_DIR,
'{}.dump'.format(kb_file)
)
fs.add_real_file(lsusb_output_path)
with open(lsusb_output_path, 'r') as lsusb_output_f:
lsusb_output = lsusb_output_f.read()
def fake_lsusb_out(cmd):
if cmd.startswith('lsusb'):
return lsusb_output, None, None
else:
raise NotImplementedError(
'Command run is not lsusb: {}'.format(cmd)
)
import kano.utils.shell
monkeypatch.setattr(kano.utils.shell, 'run_cmd', fake_lsusb_out)
imp.reload(kano.utils.hardware)
return version
| gpl-2.0 |
SaintEmbers/muse-hack | node_modules/socket.io/node_modules/engine.io/node_modules/engine.io-parser/node_modules/utf8/tests/generate-test-data.py | 2214 | 1347 | #!/usr/bin/env python
import re
import json
# http://mathiasbynens.be/notes/javascript-encoding#surrogate-formulae
# http://stackoverflow.com/a/13436167/96656
def unisymbol(codePoint):
if codePoint >= 0x0000 and codePoint <= 0xFFFF:
return unichr(codePoint)
elif codePoint >= 0x010000 and codePoint <= 0x10FFFF:
highSurrogate = int((codePoint - 0x10000) / 0x400) + 0xD800
lowSurrogate = int((codePoint - 0x10000) % 0x400) + 0xDC00
return unichr(highSurrogate) + unichr(lowSurrogate)
else:
return 'Error'
def hexify(codePoint):
return 'U+' + hex(codePoint)[2:].upper().zfill(6)
def writeFile(filename, contents):
print filename
with open(filename, 'w') as f:
f.write(contents.strip() + '\n')
data = []
for codePoint in range(0x000000, 0x10FFFF + 1):
symbol = unisymbol(codePoint)
# http://stackoverflow.com/a/17199950/96656
bytes = symbol.encode('utf8').decode('latin1')
data.append({
'codePoint': codePoint,
'decoded': symbol,
'encoded': bytes
});
jsonData = json.dumps(data, sort_keys=False, indent=2, separators=(',', ': '))
# Use tabs instead of double spaces for indentation
jsonData = jsonData.replace(' ', '\t')
# Escape hexadecimal digits in escape sequences
jsonData = re.sub(
r'\\u([a-fA-F0-9]{4})',
lambda match: r'\u{}'.format(match.group(1).upper()),
jsonData
)
writeFile('data.json', jsonData)
| mit |
samthor/intellij-community | python/lib/Lib/site-packages/django/contrib/auth/management/__init__.py | 126 | 2854 | """
Creates permissions for all installed apps that need permissions.
"""
from django.contrib.auth import models as auth_app
from django.db.models import get_models, signals
def _get_permission_codename(action, opts):
return u'%s_%s' % (action, opts.object_name.lower())
def _get_all_permissions(opts):
"Returns (codename, name) for all permissions in the given opts."
perms = []
for action in ('add', 'change', 'delete'):
perms.append((_get_permission_codename(action, opts), u'Can %s %s' % (action, opts.verbose_name_raw)))
return perms + list(opts.permissions)
def create_permissions(app, created_models, verbosity, **kwargs):
from django.contrib.contenttypes.models import ContentType
app_models = get_models(app)
# This will hold the permissions we're looking for as
# (content_type, (codename, name))
searched_perms = list()
# The codenames and ctypes that should exist.
ctypes = set()
for klass in app_models:
ctype = ContentType.objects.get_for_model(klass)
ctypes.add(ctype)
for perm in _get_all_permissions(klass._meta):
searched_perms.append((ctype, perm))
# Find all the Permissions that have a context_type for a model we're
# looking for. We don't need to check for codenames since we already have
# a list of the ones we're going to create.
all_perms = set(auth_app.Permission.objects.filter(
content_type__in=ctypes,
).values_list(
"content_type", "codename"
))
for ctype, (codename, name) in searched_perms:
# If the permissions exists, move on.
if (ctype.pk, codename) in all_perms:
continue
p = auth_app.Permission.objects.create(
codename=codename,
name=name,
content_type=ctype
)
if verbosity >= 2:
print "Adding permission '%s'" % p
def create_superuser(app, created_models, verbosity, **kwargs):
from django.core.management import call_command
if auth_app.User in created_models and kwargs.get('interactive', True):
msg = ("\nYou just installed Django's auth system, which means you "
"don't have any superusers defined.\nWould you like to create one "
"now? (yes/no): ")
confirm = raw_input(msg)
while 1:
if confirm not in ('yes', 'no'):
confirm = raw_input('Please enter either "yes" or "no": ')
continue
if confirm == 'yes':
call_command("createsuperuser", interactive=True)
break
signals.post_syncdb.connect(create_permissions,
dispatch_uid = "django.contrib.auth.management.create_permissions")
signals.post_syncdb.connect(create_superuser,
sender=auth_app, dispatch_uid = "django.contrib.auth.management.create_superuser")
| apache-2.0 |
sudheesh001/oh-mainline | vendor/packages/PyYaml/lib/yaml/composer.py | 534 | 4921 |
__all__ = ['Composer', 'ComposerError']
from error import MarkedYAMLError
from events import *
from nodes import *
class ComposerError(MarkedYAMLError):
pass
class Composer(object):
def __init__(self):
self.anchors = {}
def check_node(self):
# Drop the STREAM-START event.
if self.check_event(StreamStartEvent):
self.get_event()
# If there are more documents available?
return not self.check_event(StreamEndEvent)
def get_node(self):
# Get the root node of the next document.
if not self.check_event(StreamEndEvent):
return self.compose_document()
def get_single_node(self):
# Drop the STREAM-START event.
self.get_event()
# Compose a document if the stream is not empty.
document = None
if not self.check_event(StreamEndEvent):
document = self.compose_document()
# Ensure that the stream contains no more documents.
if not self.check_event(StreamEndEvent):
event = self.get_event()
raise ComposerError("expected a single document in the stream",
document.start_mark, "but found another document",
event.start_mark)
# Drop the STREAM-END event.
self.get_event()
return document
def compose_document(self):
# Drop the DOCUMENT-START event.
self.get_event()
# Compose the root node.
node = self.compose_node(None, None)
# Drop the DOCUMENT-END event.
self.get_event()
self.anchors = {}
return node
def compose_node(self, parent, index):
if self.check_event(AliasEvent):
event = self.get_event()
anchor = event.anchor
if anchor not in self.anchors:
raise ComposerError(None, None, "found undefined alias %r"
% anchor.encode('utf-8'), event.start_mark)
return self.anchors[anchor]
event = self.peek_event()
anchor = event.anchor
if anchor is not None:
if anchor in self.anchors:
raise ComposerError("found duplicate anchor %r; first occurence"
% anchor.encode('utf-8'), self.anchors[anchor].start_mark,
"second occurence", event.start_mark)
self.descend_resolver(parent, index)
if self.check_event(ScalarEvent):
node = self.compose_scalar_node(anchor)
elif self.check_event(SequenceStartEvent):
node = self.compose_sequence_node(anchor)
elif self.check_event(MappingStartEvent):
node = self.compose_mapping_node(anchor)
self.ascend_resolver()
return node
def compose_scalar_node(self, anchor):
event = self.get_event()
tag = event.tag
if tag is None or tag == u'!':
tag = self.resolve(ScalarNode, event.value, event.implicit)
node = ScalarNode(tag, event.value,
event.start_mark, event.end_mark, style=event.style)
if anchor is not None:
self.anchors[anchor] = node
return node
def compose_sequence_node(self, anchor):
start_event = self.get_event()
tag = start_event.tag
if tag is None or tag == u'!':
tag = self.resolve(SequenceNode, None, start_event.implicit)
node = SequenceNode(tag, [],
start_event.start_mark, None,
flow_style=start_event.flow_style)
if anchor is not None:
self.anchors[anchor] = node
index = 0
while not self.check_event(SequenceEndEvent):
node.value.append(self.compose_node(node, index))
index += 1
end_event = self.get_event()
node.end_mark = end_event.end_mark
return node
def compose_mapping_node(self, anchor):
start_event = self.get_event()
tag = start_event.tag
if tag is None or tag == u'!':
tag = self.resolve(MappingNode, None, start_event.implicit)
node = MappingNode(tag, [],
start_event.start_mark, None,
flow_style=start_event.flow_style)
if anchor is not None:
self.anchors[anchor] = node
while not self.check_event(MappingEndEvent):
#key_event = self.peek_event()
item_key = self.compose_node(node, None)
#if item_key in node.value:
# raise ComposerError("while composing a mapping", start_event.start_mark,
# "found duplicate key", key_event.start_mark)
item_value = self.compose_node(node, item_key)
#node.value[item_key] = item_value
node.value.append((item_key, item_value))
end_event = self.get_event()
node.end_mark = end_event.end_mark
return node
| agpl-3.0 |
NeCTAR-RC/horizon | openstack_dashboard/templatetags/context_selection.py | 1 | 4352 | # Copyright 2014 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import
from django.conf import settings
from django import template
from openstack_dashboard.api import keystone
register = template.Library()
def is_multi_region_configured(request):
return False
def is_multidomain_supported():
return (keystone.VERSIONS.active >= 3 and
getattr(settings,
'OPENSTACK_KEYSTONE_MULTIDOMAIN_SUPPORT',
False))
@register.simple_tag(takes_context=True)
def is_multi_region(context):
if 'request' not in context:
return False
return is_multi_region_configured(context['request'])
@register.simple_tag
def is_multidomain():
return is_multidomain_supported()
@register.inclusion_tag('context_selection/_overview.html',
takes_context=True)
def show_overview(context):
if 'request' not in context:
return {}
request = context['request']
project_name = get_project_name(request.user.project_id,
context['authorized_tenants'])
context = {'domain_supported': is_multidomain_supported(),
'domain_name': request.user.user_domain_name,
'project_name': project_name or request.user.project_name,
'multi_region': is_multi_region_configured(request),
'region_name': request.user.services_region,
'request': request}
return context
@register.inclusion_tag('context_selection/_domain_list.html',
takes_context=True)
def show_domain_list(context):
    # TODO(Thai): once domain switching is supported, need to revisit
if 'request' not in context:
return {}
request = context['request']
context = {'domain_name': request.user.user_domain_name,
'request': request}
return context
@register.inclusion_tag('context_selection/_project_list.html',
takes_context=True)
def show_project_list(context):
max_proj = getattr(settings, 'DROPDOWN_MAX_ITEMS', 30)
if 'request' not in context:
return {}
request = context['request']
projects = sorted(context['authorized_tenants'],
key=lambda project: project.name.lower())
panel = request.horizon.get('panel')
context = {'projects': projects[:max_proj],
'project_id': request.user.project_id,
'page_url': panel.get_absolute_url() if panel else None}
return context
@register.inclusion_tag('context_selection/_region_list.html',
takes_context=True)
def show_region_list(context):
if 'request' not in context:
return {}
request = context['request']
panel = request.horizon.get('panel')
context = {'region_name': request.user.services_region,
'regions': sorted(request.user.available_services_regions,
key=lambda x: (x or '').lower()),
'page_url': panel.get_absolute_url() if panel else None}
return context
@register.inclusion_tag('context_selection/_anti_clickjack.html',
takes_context=True)
def iframe_embed_settings(context):
disallow_iframe_embed = getattr(settings,
'DISALLOW_IFRAME_EMBED',
True)
context = {'disallow_iframe_embed': disallow_iframe_embed}
return context
def get_project_name(project_id, projects):
"""Retrieves project name for given project id
Args:
projects: List of projects
project_id: project id
Returns: Project name or None if there is no match
"""
for project in projects:
if project_id == project.id:
return project.name
| apache-2.0 |
bj7/pwndbg | ida_script.py | 2 | 1687 | import idaapi
import idautils
import idc
import functools
import datetime
import threading
import xmlrpclib
from SimpleXMLRPCServer import SimpleXMLRPCServer
import idaapi
import idautils
import idc
# Save the database so nothing gets lost.
idc.SaveBase(idc.GetIdbPath() + '.' + datetime.datetime.now().isoformat())
xmlrpclib.Marshaller.dispatch[type(0L)] = lambda _, v, w: w("<value><i8>%d</i8></value>" % v)
xmlrpclib.Marshaller.dispatch[type(0)] = lambda _, v, w: w("<value><i8>%d</i8></value>" % v)
port = 8888
orig_LineA = idc.LineA
def LineA(*a,**kw):
v = orig_LineA(*a,**kw)
if v and v.startswith('\x01\x04; '):
v = v[4:]
return v
idc.LineA = LineA
mutex = threading.Condition()
def wrap(f):
def wrapper(*a, **kw):
try:
rv = []
def work(): rv.append(f(*a,**kw))
with mutex:
flags = idaapi.MFF_WRITE
if f == idc.SetColor:
flags |= idaapi.MFF_NOWAIT
rv.append(None)
idaapi.execute_sync(work, flags)
return rv[0]
except:
import traceback
traceback.print_exc()
raise
return wrapper
def register_module(module):
for name, function in module.__dict__.items():
if hasattr(function, '__call__'):
server.register_function(wrap(function), name)
server = SimpleXMLRPCServer(('127.0.0.1', port), logRequests=True, allow_none=True)
register_module(idc)
register_module(idautils)
register_module(idaapi)
server.register_introspection_functions()
thread = threading.Thread(target=server.serve_forever)
thread.daemon = True
thread.start()
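# Example client usage (an illustrative note added to this copy, not part of
# the original script): once IDA has executed this file, the idc/idautils/
# idaapi functions registered above can be called from another Python 2
# process over XML-RPC, for instance:
#
#   import xmlrpclib
#   ida = xmlrpclib.ServerProxy('http://127.0.0.1:8888', allow_none=True)
#   print ida.GetIdbPath()   # any function registered above can be called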
| mit |
7digital/troposphere | examples/ApiGateway.py | 4 | 4609 | from troposphere import Ref, Template, Output
from troposphere.apigateway import RestApi, Method
from troposphere.apigateway import Resource, MethodResponse
from troposphere.apigateway import Integration, IntegrationResponse
from troposphere.apigateway import Deployment, Stage, ApiStage
from troposphere.apigateway import UsagePlan, QuotaSettings, ThrottleSettings
from troposphere.apigateway import ApiKey, StageKey, UsagePlanKey
from troposphere.iam import Role, Policy
from troposphere.awslambda import Function, Code
from troposphere import GetAtt, Join
t = Template()
# Create the Api Gateway
rest_api = t.add_resource(RestApi(
"ExampleApi",
Name="ExampleApi"
))
# Create a Lambda function that will be mapped
code = [
"var response = require('cfn-response');",
"exports.handler = function(event, context) {",
" context.succeed('foobar!');",
" return 'foobar!';",
"};",
]
# Create a role for the lambda function
t.add_resource(Role(
"LambdaExecutionRole",
Path="/",
Policies=[Policy(
PolicyName="root",
PolicyDocument={
"Version": "2012-10-17",
"Statement": [{
"Action": ["logs:*"],
"Resource": "arn:aws:logs:*:*:*",
"Effect": "Allow"
}, {
"Action": ["lambda:*"],
"Resource": "*",
"Effect": "Allow"
}]
})],
AssumeRolePolicyDocument={"Version": "2012-10-17", "Statement": [
{
"Action": ["sts:AssumeRole"],
"Effect": "Allow",
"Principal": {
"Service": [
"lambda.amazonaws.com",
"apigateway.amazonaws.com"
]
}
}
]},
))
# Create the Lambda function
foobar_function = t.add_resource(Function(
"FoobarFunction",
Code=Code(
ZipFile=Join("", code)
),
Handler="index.handler",
Role=GetAtt("LambdaExecutionRole", "Arn"),
Runtime="nodejs4.3",
))
# Create a resource to map the lambda function to
resource = t.add_resource(Resource(
"FoobarResource",
RestApiId=Ref(rest_api),
PathPart="foobar",
ParentId=GetAtt("ExampleApi", "RootResourceId"),
))
# Create a Lambda API method for the Lambda resource
method = t.add_resource(Method(
"LambdaMethod",
DependsOn='FoobarFunction',
RestApiId=Ref(rest_api),
AuthorizationType="NONE",
ResourceId=Ref(resource),
HttpMethod="GET",
Integration=Integration(
Credentials=GetAtt("LambdaExecutionRole", "Arn"),
Type="AWS",
IntegrationHttpMethod='POST',
IntegrationResponses=[
IntegrationResponse(
StatusCode='200'
)
],
Uri=Join("", [
"arn:aws:apigateway:eu-west-1:lambda:path/2015-03-31/functions/",
GetAtt("FoobarFunction", "Arn"),
"/invocations"
])
),
MethodResponses=[
MethodResponse(
"CatResponse",
StatusCode='200'
)
]
))
# Create a deployment
stage_name = 'v1'
deployment = t.add_resource(Deployment(
"%sDeployment" % stage_name,
DependsOn="LambdaMethod",
RestApiId=Ref(rest_api),
))
stage = t.add_resource(Stage(
'%sStage' % stage_name,
StageName=stage_name,
RestApiId=Ref(rest_api),
DeploymentId=Ref(deployment)
))
key = t.add_resource(ApiKey(
"ApiKey",
StageKeys=[StageKey(
RestApiId=Ref(rest_api),
StageName=Ref(stage)
)]
))
# Create an API usage plan
usagePlan = t.add_resource(UsagePlan(
"ExampleUsagePlan",
UsagePlanName="ExampleUsagePlan",
Description="Example usage plan",
Quota=QuotaSettings(
Limit=50000,
Period="MONTH"
),
Throttle=ThrottleSettings(
BurstLimit=500,
RateLimit=5000
),
ApiStages=[
ApiStage(
ApiId=Ref(rest_api),
Stage=Ref(stage)
)]
))
# tie the usage plan and key together
usagePlanKey = t.add_resource(UsagePlanKey(
"ExampleUsagePlanKey",
KeyId=Ref(key),
KeyType="API_KEY",
UsagePlanId=Ref(usagePlan)
))
# Add the deployment endpoint as an output
t.add_output([
Output(
"ApiEndpoint",
Value=Join("", [
"https://",
Ref(rest_api),
".execute-api.eu-west-1.amazonaws.com/",
stage_name
]),
Description="Endpoint for this stage of the api"
),
Output(
"ApiKey",
Value=Ref(key),
Description="API key"
),
])
print(t.to_json())
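# Usage note (an illustrative addition, not part of the original example): the
# JSON printed above is a complete CloudFormation template, so one way to try
# it is to redirect the output to a file and create a stack from it, e.g.:
#
#   python ApiGateway.py > apigateway.template
#   aws cloudformation create-stack --stack-name example-api \
#       --template-body file://apigateway.template \
#       --capabilities CAPABILITY_IAM
#
# CAPABILITY_IAM is needed because the template creates an IAM role; the stack
# and file names above are arbitrary examples.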
| bsd-2-clause |
andreparrish/python-for-android | python-build/python-libs/gdata/build/lib/gdata/blogger/__init__.py | 140 | 6426 | #!/usr/bin/python
#
# Copyright (C) 2007, 2008 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains extensions to Atom objects used with Blogger."""
__author__ = 'api.jscudder (Jeffrey Scudder)'
import atom
import gdata
import re
LABEL_SCHEME = 'http://www.blogger.com/atom/ns#'
THR_NAMESPACE = 'http://purl.org/syndication/thread/1.0'
class BloggerEntry(gdata.GDataEntry):
"""Adds convenience methods inherited by all Blogger entries."""
blog_name_pattern = re.compile('(http://)(\w*)')
blog_id_pattern = re.compile('(tag:blogger.com,1999:blog-)(\w*)')
blog_id2_pattern = re.compile('tag:blogger.com,1999:user-(\d+)\.blog-(\d+)')
def GetBlogId(self):
"""Extracts the Blogger id of this blog.
    This method is useful when constructing URLs by hand. The blog id is
often used in blogger operation URLs. This should not be confused with
the id member of a BloggerBlog. The id element is the Atom id XML element.
The blog id which this method returns is a part of the Atom id.
Returns:
The blog's unique id as a string.
"""
if self.id.text:
match = self.blog_id_pattern.match(self.id.text)
if match:
return match.group(2)
else:
return self.blog_id2_pattern.match(self.id.text).group(2)
return None
def GetBlogName(self):
"""Finds the name of this blog as used in the 'alternate' URL.
An alternate URL is in the form 'http://blogName.blogspot.com/'. For an
entry representing the above example, this method would return 'blogName'.
Returns:
The blog's URL name component as a string.
"""
for link in self.link:
if link.rel == 'alternate':
return self.blog_name_pattern.match(link.href).group(2)
return None
class BlogEntry(BloggerEntry):
"""Describes a blog entry in the feed listing a user's blogs."""
def BlogEntryFromString(xml_string):
return atom.CreateClassFromXMLString(BlogEntry, xml_string)
class BlogFeed(gdata.GDataFeed):
"""Describes a feed of a user's blogs."""
_children = gdata.GDataFeed._children.copy()
_children['{%s}entry' % atom.ATOM_NAMESPACE] = ('entry', [BlogEntry])
def BlogFeedFromString(xml_string):
return atom.CreateClassFromXMLString(BlogFeed, xml_string)
class BlogPostEntry(BloggerEntry):
"""Describes a blog post entry in the feed of a blog's posts."""
post_id_pattern = re.compile('(tag:blogger.com,1999:blog-)(\w*)(.post-)(\w*)')
def AddLabel(self, label):
"""Adds a label to the blog post.
The label is represented by an Atom category element, so this method
is shorthand for appending a new atom.Category object.
Args:
label: str
"""
self.category.append(atom.Category(scheme=LABEL_SCHEME, term=label))
def GetPostId(self):
"""Extracts the postID string from the entry's Atom id.
Returns: A string of digits which identify this post within the blog.
"""
if self.id.text:
return self.post_id_pattern.match(self.id.text).group(4)
return None
def BlogPostEntryFromString(xml_string):
return atom.CreateClassFromXMLString(BlogPostEntry, xml_string)
class BlogPostFeed(gdata.GDataFeed):
"""Describes a feed of a blog's posts."""
_children = gdata.GDataFeed._children.copy()
_children['{%s}entry' % atom.ATOM_NAMESPACE] = ('entry', [BlogPostEntry])
def BlogPostFeedFromString(xml_string):
return atom.CreateClassFromXMLString(BlogPostFeed, xml_string)
class InReplyTo(atom.AtomBase):
_tag = 'in-reply-to'
_namespace = THR_NAMESPACE
_attributes = atom.AtomBase._attributes.copy()
_attributes['href'] = 'href'
_attributes['ref'] = 'ref'
_attributes['source'] = 'source'
_attributes['type'] = 'type'
def __init__(self, href=None, ref=None, source=None, type=None,
extension_elements=None, extension_attributes=None, text=None):
self.href = href
self.ref = ref
self.source = source
self.type = type
self.extension_elements = extension_elements or []
self.extension_attributes = extension_attributes or {}
self.text = text
def InReplyToFromString(xml_string):
return atom.CreateClassFromXMLString(InReplyTo, xml_string)
class CommentEntry(BloggerEntry):
"""Describes a blog post comment entry in the feed of a blog post's
comments."""
_children = BloggerEntry._children.copy()
_children['{%s}in-reply-to' % THR_NAMESPACE] = ('in_reply_to', InReplyTo)
comment_id_pattern = re.compile('.*-(\w*)$')
def __init__(self, author=None, category=None, content=None,
contributor=None, atom_id=None, link=None, published=None, rights=None,
source=None, summary=None, control=None, title=None, updated=None,
in_reply_to=None, extension_elements=None, extension_attributes=None,
text=None):
BloggerEntry.__init__(self, author=author, category=category,
content=content, contributor=contributor, atom_id=atom_id, link=link,
published=published, rights=rights, source=source, summary=summary,
control=control, title=title, updated=updated,
extension_elements=extension_elements,
extension_attributes=extension_attributes, text=text)
self.in_reply_to = in_reply_to
def GetCommentId(self):
"""Extracts the commentID string from the entry's Atom id.
Returns: A string of digits which identify this post within the blog.
"""
if self.id.text:
return self.comment_id_pattern.match(self.id.text).group(1)
return None
def CommentEntryFromString(xml_string):
return atom.CreateClassFromXMLString(CommentEntry, xml_string)
class CommentFeed(gdata.GDataFeed):
"""Describes a feed of a blog post's comments."""
_children = gdata.GDataFeed._children.copy()
_children['{%s}entry' % atom.ATOM_NAMESPACE] = ('entry', [CommentEntry])
def CommentFeedFromString(xml_string):
return atom.CreateClassFromXMLString(CommentFeed, xml_string)
| apache-2.0 |
kobotoolbox/kobocat | onadata/libs/constants.py | 1 | 1349 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals, absolute_import
# Userprofile Permissions
CAN_ADD_USERPROFILE = 'add_userprofile'
CAN_CHANGE_USERPROFILE = 'change_userprofile'
CAN_DELETE_USERPROFILE = 'delete_userprofile'
CAN_ADD_XFORM_TO_PROFILE = 'can_add_xform'
CAN_VIEW_PROFILE = 'view_profile'
# Organization Permissions
CAN_VIEW_ORGANIZATION_PROFILE = 'view_organizationprofile'
CAN_ADD_ORGANIZATION_PROFILE = 'add_organizationprofile'
CAN_ADD_ORGANIZATION_XFORM = 'can_add_xform'
CAN_CHANGE_ORGANIZATION_PROFILE = 'change_organizationprofile'
CAN_DELETE_ORGANIZATION_PROFILE = 'delete_organizationprofile'
IS_ORGANIZATION_OWNER = 'is_org_owner'
# Xform Permissions
CAN_CHANGE_XFORM = 'change_xform'
CAN_VALIDATE_XFORM = 'validate_xform'
CAN_DELETE_DATA_XFORM = 'delete_data_xform'
CAN_ADD_XFORM = 'add_xform'
CAN_DELETE_XFORM = 'delete_xform'
CAN_VIEW_XFORM = 'view_xform'
CAN_ADD_SUBMISSIONS = 'report_xform'
CAN_TRANSFER_OWNERSHIP = 'transfer_xform'
CAN_MOVE_TO_FOLDER = 'move_xform'
# Project Permissions
CAN_VIEW_PROJECT = 'view_project'
CAN_CHANGE_PROJECT = 'change_project'
CAN_TRANSFER_PROJECT_OWNERSHIP = 'transfer_project'
CAN_DELETE_PROJECT = 'delete_project'
CAN_ADD_DATADICTIONARY = 'add_datadictionary'
CAN_CHANGE_DATADICTIONARY = 'change_datadictionary'
CAN_DELETE_DATADICTIONARY = 'delete_datadictionary'
| bsd-2-clause |
ChanduERP/odoo | addons/purchase/company.py | 383 | 1576 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import osv,fields
class company(osv.osv):
_inherit = 'res.company'
_columns = {
'po_lead': fields.float(
'Purchase Lead Time', required=True,
help="Margin of error for supplier lead times. When the system"\
"generates Purchase Orders for procuring products,"\
"they will be scheduled that many days earlier "\
"to cope with unexpected supplier delays."),
}
_defaults = {
'po_lead': lambda *a: 1.0,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
alexteodor/odoo | addons/pos_restaurant/__init__.py | 332 | 1074 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import restaurant
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
GeoNode/geonode | geonode/geoserver/tests/test_helpers.py | 2 | 7983 | # -*- coding: utf-8 -*-
#########################################################################
#
# Copyright (C) 2019 OSGeo
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#########################################################################
from django.contrib.auth import get_user_model
from geonode.tests.base import GeoNodeBaseTestSupport
import os
import re
import gisdata
from urllib.parse import urljoin
from django.conf import settings
from geonode import geoserver
from geonode.decorators import on_ogc_backend
from geonode.layers.models import Layer
from geonode.layers.utils import file_upload
from geonode.layers.populate_layers_data import create_layer_data
from geonode.geoserver.views import _response_callback
import logging
logger = logging.getLogger(__name__)
class HelperTest(GeoNodeBaseTestSupport):
type = 'layer'
def setUp(self):
super(HelperTest, self).setUp()
self.user = 'admin'
self.passwd = 'admin'
create_layer_data()
@on_ogc_backend(geoserver.BACKEND_PACKAGE)
def test_replace_layer(self):
"""
Ensures the layer_style_manage route returns a 200.
"""
admin = get_user_model().objects.get(username="admin")
layer = Layer.objects.all()[0]
logger.debug(Layer.objects.all())
self.assertIsNotNone(layer)
logger.debug("Attempting to replace a vector layer with a raster.")
        filename = os.path.join(
gisdata.GOOD_DATA,
'vector/san_andres_y_providencia_administrative.shp')
vector_layer = file_upload(filename, user=admin)
self.assertTrue(vector_layer.is_vector())
filename = os.path.join(gisdata.GOOD_DATA, 'raster/test_grid.tif')
with self.assertRaisesRegex(Exception, "You are attempting to replace a vector layer with a raster."):
file_upload(filename, layer=vector_layer, overwrite=True)
logger.debug("Attempting to replace a raster layer with a vector.")
raster_layer = file_upload(filename, user=admin)
self.assertFalse(raster_layer.is_vector())
        filename = os.path.join(
gisdata.GOOD_DATA,
'vector/san_andres_y_providencia_administrative.shp')
with self.assertRaisesRegex(Exception, "You are attempting to replace a raster layer with a vector."):
file_upload(filename, layer=raster_layer, overwrite=True)
logger.debug("Attempting to replace a vector layer.")
replaced = file_upload(filename, layer=vector_layer, overwrite=True, gtype='LineString')
self.assertIsNotNone(replaced)
self.assertTrue(replaced.is_vector())
@on_ogc_backend(geoserver.BACKEND_PACKAGE)
def test_replace_callback(self):
content = f"""<Layer>
<Title>GeoNode Local GeoServer</Title>
<Abstract>This is a description of your Web Map Server.</Abstract>
<!--Limited list of EPSG projections:-->
<CRS>EPSG:4326</CRS>
<CRS>EPSG:3785</CRS>
<CRS>EPSG:3857</CRS>
<CRS>EPSG:900913</CRS>
<CRS>EPSG:32647</CRS>
<CRS>EPSG:32736</CRS>
<CRS>CRS:84</CRS>
<EX_GeographicBoundingBox>
<westBoundLongitude>-124.731422</westBoundLongitude>
<eastBoundLongitude>12.512771464573753</eastBoundLongitude>
<southBoundLatitude>12.4801497</southBoundLatitude>
<northBoundLatitude>49.371735</northBoundLatitude>
</EX_GeographicBoundingBox>
<BoundingBox CRS="CRS:84" ..../>
<BoundingBox CRS="EPSG:4326" ..../>
<BoundingBox CRS="EPSG:3785" ..../>
<BoundingBox CRS="EPSG:3857" ..../>
<BoundingBox CRS="EPSG:900913" ..../>
<BoundingBox CRS="EPSG:32647" ..../>
<BoundingBox CRS="EPSG:32736" ..../>
<Layer queryable="1" opaque="0">
<Name>geonode:DE_USNG_UTM18</Name>
<Title>DE_USNG_UTM18</Title>
<Abstract>No abstract provided</Abstract>
<KeywordList>
<Keyword>DE_USNG_UTM18</Keyword>
<Keyword>features</Keyword>
</KeywordList>
<CRS>EPSG:26918</CRS>
<CRS>CRS:84</CRS>
<EX_GeographicBoundingBox>
<westBoundLongitude>-75.93570725669369</westBoundLongitude>
<eastBoundLongitude>-75.00000000000001</eastBoundLongitude>
<southBoundLatitude>38.3856300861002</southBoundLatitude>
<northBoundLatitude>39.89406880610797</northBoundLatitude>
</EX_GeographicBoundingBox>
<BoundingBox CRS="CRS:84" .01" maxy="39.89406880610797"/>
<BoundingBox CRS="EPSG:26918" ..../>
<BoundingBox CRS="EPSG:4326" ..../>
<BoundingBox CRS="EPSG:3785" ..../>
<BoundingBox CRS="EPSG:3857" ..../>
<BoundingBox CRS="EPSG:900913" ..../>
<BoundingBox CRS="EPSG:32647" ..../>
<BoundingBox CRS="EPSG:32736" ..../>
<MetadataURL type="other">
<Format>other</Format>
<OnlineResource xlink:type="simple"
xlink:href="{settings.GEOSERVER_LOCATION}catalogue/csw?outputschema=...."/>
</MetadataURL>
<MetadataURL type="other">
<Format>other</Format>
<OnlineResource xlink:type="simple"
xlink:href="{settings.GEOSERVER_LOCATION}catalogue/csw?outputschema=...."/>
</MetadataURL>
<MetadataURL type="other">
<Format>other</Format>
<OnlineResource xlink:type="simple"
xlink:href="{settings.GEOSERVER_LOCATION}catalogue/csw?outputschema=...."/>
</MetadataURL>
<MetadataURL type="other">
<Format>other</Format>
<OnlineResource xlink:type="simple"
xlink:href="{settings.GEOSERVER_LOCATION}catalogue/csw?outputschema=...."/>
</MetadataURL>
<MetadataURL type="FGDC">
<Format>text/xml</Format>
<OnlineResource xlink:type="simple"
xlink:href="{settings.GEOSERVER_LOCATION}catalogue/csw?outputschema=...."/>
</MetadataURL>
<MetadataURL type="other">
<Format>other</Format>
<OnlineResource xlink:type="simple"
xlink:href="{settings.GEOSERVER_LOCATION}catalogue/csw?outputschema=...."/>
</MetadataURL>
<MetadataURL type="other">
<Format>other</Format>
<OnlineResource xlink:type="simple"
xlink:href="{settings.GEOSERVER_LOCATION}showmetadata/xsl/584"/>
</MetadataURL>
<Style>
<Name>geonode:DE_USNG_UTM18</Name>
<Title>Default Polygon</Title>
<Abstract>A sample style that draws a polygon</Abstract>
<LegendURL width="20" height="20">
<Format>image/png</Format>
<OnlineResource
xmlns:xlink="http://www.w3.org/1999/xlink" xlink:type="simple"
xlink:href="{settings.GEOSERVER_LOCATION}ows?service=WMS&request=GetLegendGraphic&...."/>
</LegendURL>
</Style>
</Layer>"""
kwargs = {
'content': content,
'status': 200,
'content_type': 'application/xml'
}
_content = _response_callback(**kwargs).content
self.assertTrue(re.findall(f'{urljoin(settings.SITEURL, "/gs/")}ows', str(_content)))
kwargs = {
'content': content,
'status': 200,
'content_type': 'text/xml; charset=UTF-8'
}
_content = _response_callback(**kwargs).content
self.assertTrue(re.findall(f'{urljoin(settings.SITEURL, "/gs/")}ows', str(_content)))
| gpl-3.0 |
sajuptpm/neutron-ipam | neutron/plugins/ml2/drivers/mech_linuxbridge.py | 26 | 2340 | # Copyright (c) 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from neutron.common import constants
from neutron.extensions import portbindings
from neutron.openstack.common import log
from neutron.plugins.ml2 import driver_api as api
from neutron.plugins.ml2.drivers import mech_agent
LOG = log.getLogger(__name__)
class LinuxbridgeMechanismDriver(mech_agent.SimpleAgentMechanismDriverBase):
"""Attach to networks using linuxbridge L2 agent.
The LinuxbridgeMechanismDriver integrates the ml2 plugin with the
linuxbridge L2 agent. Port binding with this driver requires the
linuxbridge agent to be running on the port's host, and that agent
to have connectivity to at least one segment of the port's
network.
"""
def __init__(self):
super(LinuxbridgeMechanismDriver, self).__init__(
constants.AGENT_TYPE_LINUXBRIDGE,
portbindings.VIF_TYPE_BRIDGE,
{portbindings.CAP_PORT_FILTER: True})
def check_segment_for_agent(self, segment, agent):
mappings = agent['configurations'].get('interface_mappings', {})
tunnel_types = agent['configurations'].get('tunnel_types', [])
LOG.debug(_("Checking segment: %(segment)s "
"for mappings: %(mappings)s "
"with tunnel_types: %(tunnel_types)s"),
{'segment': segment, 'mappings': mappings,
'tunnel_types': tunnel_types})
network_type = segment[api.NETWORK_TYPE]
if network_type == 'local':
return True
elif network_type in tunnel_types:
return True
elif network_type in ['flat', 'vlan']:
return segment[api.PHYSICAL_NETWORK] in mappings
else:
return False
| apache-2.0 |
nerdvegas/rez | src/rez/data/tests/builds/packages/foo/1.1.0/build.py | 1 | 1617 | from __future__ import print_function
from build_util import build_directory_recurse, check_visible
import os.path
def build(source_path, build_path, install_path, targets):
# build requirement 'floob' should be visible
check_visible("foo", "floob")
import floob
print(floob.hello())
# do the build
if "install" not in (targets or []):
install_path = None
build_directory_recurse(src_dir="foo",
dest_dir=os.path.join("python", "foo"),
source_path=source_path,
build_path=build_path,
install_path=install_path)
if __name__ == '__main__':
import os, sys
build(
source_path=os.environ['REZ_BUILD_SOURCE_PATH'],
build_path=os.environ['REZ_BUILD_PATH'],
install_path=os.environ['REZ_BUILD_INSTALL_PATH'],
targets=sys.argv[1:]
)
# Copyright 2013-2016 Allan Johns.
#
# This library is free software: you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation, either
# version 3 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library. If not, see <http://www.gnu.org/licenses/>.
| lgpl-3.0 |
cfg2015/EPT-2015-2 | addons/marketing_campaign/report/__init__.py | 441 | 1071 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import campaign_analysis
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
SheepDogInc/ssheepdog | ssheepdog/migrations/0007_auto__add_loginlog__chg_field_applicationkey_public_key__chg_field_use.py | 1 | 8799 | # encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'LoginLog'
db.create_table('ssheepdog_loginlog', (
('stdout', self.gf('django.db.models.fields.TextField')(default='')),
('actor', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User'], null=True)),
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('stderr', self.gf('django.db.models.fields.TextField')(default='')),
('date', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)),
('login', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['ssheepdog.Login'], null=True)),
('message', self.gf('django.db.models.fields.TextField')()),
))
db.send_create_signal('ssheepdog', ['LoginLog'])
# Changing field 'ApplicationKey.public_key'
db.alter_column('ssheepdog_applicationkey', 'public_key', self.gf('ssheepdog.fields.PublicKeyField')())
# Changing field 'UserProfile.ssh_key'
db.alter_column('ssheepdog_userprofile', 'ssh_key', self.gf('ssheepdog.fields.PublicKeyField')())
def backwards(self, orm):
# Deleting model 'LoginLog'
db.delete_table('ssheepdog_loginlog')
# Changing field 'ApplicationKey.public_key'
db.alter_column('ssheepdog_applicationkey', 'public_key', self.gf('django.db.models.fields.TextField')())
# Changing field 'UserProfile.ssh_key'
db.alter_column('ssheepdog_userprofile', 'ssh_key', self.gf('django.db.models.fields.TextField')())
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'ssheepdog.applicationkey': {
'Meta': {'object_name': 'ApplicationKey'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'private_key': ('django.db.models.fields.TextField', [], {}),
'public_key': ('ssheepdog.fields.PublicKeyField', [], {})
},
'ssheepdog.client': {
'Meta': {'object_name': 'Client'},
'description': ('django.db.models.fields.TextField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'nickname': ('django.db.models.fields.CharField', [], {'max_length': '256'})
},
'ssheepdog.login': {
'Meta': {'object_name': 'Login'},
'application_key': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['ssheepdog.ApplicationKey']", 'null': 'True'}),
'client': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['ssheepdog.Client']", 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'is_dirty': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'machine': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['ssheepdog.Machine']"}),
'username': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
'users': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.User']", 'symmetrical': 'False', 'blank': 'True'})
},
'ssheepdog.loginlog': {
'Meta': {'object_name': 'LoginLog'},
'actor': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True'}),
'date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'login': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['ssheepdog.Login']", 'null': 'True'}),
'message': ('django.db.models.fields.TextField', [], {}),
'stderr': ('django.db.models.fields.TextField', [], {'default': "''"}),
'stdout': ('django.db.models.fields.TextField', [], {'default': "''"})
},
'ssheepdog.machine': {
'Meta': {'object_name': 'Machine'},
'client': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['ssheepdog.Client']", 'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {}),
'hostname': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ip': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'is_down': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'nickname': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
'port': ('django.db.models.fields.IntegerField', [], {'default': '22'})
},
'ssheepdog.userprofile': {
'Meta': {'object_name': 'UserProfile'},
'nickname': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
'ssh_key': ('ssheepdog.fields.PublicKeyField', [], {}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'_profile_cache'", 'unique': 'True', 'primary_key': 'True', 'to': "orm['auth.User']"})
}
}
complete_apps = ['ssheepdog']
| bsd-3-clause |
flyher/pymo | symbian/PythonForS60_1.9.6/module-repo/standard-modules/encodings/gb2312.py | 816 | 1027 | #
# gb2312.py: Python Unicode Codec for GB2312
#
# Written by Hye-Shik Chang <[email protected]>
#
import _codecs_cn, codecs
import _multibytecodec as mbc
codec = _codecs_cn.getcodec('gb2312')
class Codec(codecs.Codec):
encode = codec.encode
decode = codec.decode
class IncrementalEncoder(mbc.MultibyteIncrementalEncoder,
codecs.IncrementalEncoder):
codec = codec
class IncrementalDecoder(mbc.MultibyteIncrementalDecoder,
codecs.IncrementalDecoder):
codec = codec
class StreamReader(Codec, mbc.MultibyteStreamReader, codecs.StreamReader):
codec = codec
class StreamWriter(Codec, mbc.MultibyteStreamWriter, codecs.StreamWriter):
codec = codec
def getregentry():
return codecs.CodecInfo(
name='gb2312',
encode=Codec().encode,
decode=Codec().decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamreader=StreamReader,
streamwriter=StreamWriter,
)
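if __name__ == '__main__':
    # A small sanity-check sketch, not part of the original codec module:
    # round-trip two characters ("ni hao") through the codec registered above.
    info = getregentry()
    encoded, consumed = info.encode(u'\u4f60\u597d')
    decoded, _ = info.decode(encoded)
    assert decoded == u'\u4f60\u597d'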
| mit |
Edraak/edx-platform | lms/djangoapps/shoppingcart/context_processor.py | 173 | 1679 | """
This is the shoppingcart context_processor module.
Currently the only context_processor detects whether request.user has a cart that should be displayed in the
navigation. We want to do this in the context_processor to
1) keep database accesses out of templates (this led to a transaction bug with user email changes)
2) because navigation.html is "called" by being included in other templates, there's no "views.py" to put this in.
"""
from .models import Order, PaidCourseRegistration, CourseRegCodeItem
from .utils import is_shopping_cart_enabled
def user_has_cart_context_processor(request):
"""
Checks if request has an authenticated user. If so, checks if request.user has a cart that should
be displayed. Anonymous users don't.
Adds `display_shopping_cart` to the context
"""
def should_display_shopping_cart():
"""
        Returns a boolean indicating whether the user has items in a cart such that the shopping
        cart should be displayed to the logged-in user
"""
return (
# user is logged in and
request.user.is_authenticated() and
# do we have the feature turned on
is_shopping_cart_enabled() and
# does the user actually have a cart (optimized query to prevent creation of a cart when not needed)
Order.does_user_have_cart(request.user) and
# user's cart has PaidCourseRegistrations or CourseRegCodeItem
Order.user_cart_has_items(
request.user,
[PaidCourseRegistration, CourseRegCodeItem]
)
)
return {'should_display_shopping_cart_func': should_display_shopping_cart}
| agpl-3.0 |
AdaptiveApplications/carnegie | tarc_bus_locator_client/quantities-0.10.1/build/lib/quantities/units/radiation.py | 4 | 1072 | """
"""
from __future__ import absolute_import
from ..unitquantity import UnitQuantity
from .time import s
from .mass import kg
from .energy import J
from .electromagnetism import coulomb
Bq = becquerel = UnitQuantity(
'becquerel',
1/s,
symbol='Bq',
aliases=['becquerels']
)
Ci = curie = UnitQuantity(
'curie',
3.7e10*becquerel,
symbol='Ci',
aliases=['curies']
)
rd = rutherford = UnitQuantity(
'rutherford',
1e6*Bq,
symbol='Rd',
aliases=['rutherfords'],
doc='this unit is obsolete, in favor of 1e6 Bq'
)
Gy = gray = Sv = sievert = UnitQuantity(
'gray',
J/kg,
symbol='Gy',
aliases=['grays', 'Sv', 'sievert', 'sieverts']
)
rem = UnitQuantity(
'rem',
1e-2*sievert,
aliases=['rems']
)
rads = UnitQuantity(
'rads',
1e-2*gray,
doc='''
rad is commonly used symbol for radian.
rads unit of radiation is deprecated.
'''
)
R = roentgen = UnitQuantity(
'roentgen',
2.58e-4*coulomb/kg,
symbol='R',
aliases=['roentgens']
)
del UnitQuantity, s, kg, J, coulomb
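if __name__ == '__main__':
    # A minimal sanity-check sketch (not part of the original module); run with
    # ``python -m quantities.units.radiation`` so the relative imports above resolve.
    print((1 * Ci).rescale(Bq))    # expected: 3.7e10 Bq, per the definition above
    print((1 * rem).rescale(Gy))   # expected: 0.01 Gy, since rem = 1e-2 sievert and Gy is Sv here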
| mit |
rossburton/yocto-autobuilder | lib/python2.7/site-packages/SQLAlchemy-0.7.0-py2.7-linux-x86_64.egg/sqlalchemy/pool.py | 8 | 33408 | # sqlalchemy/pool.py
# Copyright (C) 2005-2011 the SQLAlchemy authors and contributors <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Connection pooling for DB-API connections.
Provides a number of connection pool implementations for a variety of
usage scenarios and thread behavior requirements imposed by the
application, DB-API or database itself.
Also provides a DB-API 2.0 connection proxying mechanism allowing
regular DB-API connect() methods to be transparently managed by a
SQLAlchemy connection pool.
"""
import weakref, time, traceback
from sqlalchemy import exc, log, event, events, interfaces, util
from sqlalchemy.util import queue as sqla_queue
from sqlalchemy.util import threading, memoized_property, \
chop_traceback
proxies = {}
def manage(module, **params):
"""Return a proxy for a DB-API module that automatically
pools connections.
Given a DB-API 2.0 module and pool management parameters, returns
a proxy for the module that will automatically pool connections,
creating new connection pools for each distinct set of connection
arguments sent to the decorated module's connect() function.
:param module: a DB-API 2.0 database module
:param poolclass: the class used by the pool module to provide
pooling. Defaults to :class:`.QueuePool`.
:param \*\*params: will be passed through to *poolclass*
"""
try:
return proxies[module]
except KeyError:
return proxies.setdefault(module, _DBProxy(module, **params))
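# A minimal usage sketch for the proxying helper above (the sqlite3 module is
# just an example; any DB-API 2.0 module works the same way):
#
#     import sqlite3
#     from sqlalchemy import pool
#     sqlite3 = pool.manage(sqlite3, pool_size=5)
#     conn = sqlite3.connect(':memory:')   # checked out from a QueuePool
#     conn.close()                         # returned to the pool, not really closed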
def clear_managers():
"""Remove all current DB-API 2.0 managers.
All pools and connections are disposed.
"""
for manager in proxies.itervalues():
manager.close()
proxies.clear()
class Pool(log.Identified):
"""Abstract base class for connection pools."""
def __init__(self,
creator, recycle=-1, echo=None,
use_threadlocal=False,
logging_name=None,
reset_on_return=True,
listeners=None,
events=None,
_dispatch=None):
"""
Construct a Pool.
:param creator: a callable function that returns a DB-API
connection object. The function will be called with
parameters.
:param recycle: If set to non -1, number of seconds between
connection recycling, which means upon checkout, if this
timeout is surpassed the connection will be closed and
replaced with a newly opened connection. Defaults to -1.
:param logging_name: String identifier which will be used within
the "name" field of logging records generated within the
"sqlalchemy.pool" logger. Defaults to a hexstring of the object's
id.
:param echo: If True, connections being pulled and retrieved
from the pool will be logged to the standard output, as well
as pool sizing information. Echoing can also be achieved by
enabling logging for the "sqlalchemy.pool"
namespace. Defaults to False.
:param use_threadlocal: If set to True, repeated calls to
:meth:`connect` within the same application thread will be
guaranteed to return the same connection object, if one has
already been retrieved from the pool and has not been
returned yet. Offers a slight performance advantage at the
cost of individual transactions by default. The
:meth:`unique_connection` method is provided to bypass the
threadlocal behavior installed into :meth:`connect`.
:param reset_on_return: If true, reset the database state of
connections returned to the pool. This is typically a
ROLLBACK to release locks and transaction resources.
Disable at your own peril. Defaults to True.
:param events: a list of 2-tuples, each of the form
``(callable, target)`` which will be passed to event.listen()
upon construction. Provided here so that event listeners
can be assigned via ``create_engine`` before dialect-level
listeners are applied.
:param listeners: Deprecated. A list of
:class:`~sqlalchemy.interfaces.PoolListener`-like objects or
dictionaries of callables that receive events when DB-API
connections are created, checked out and checked in to the
pool. This has been superseded by
:func:`~sqlalchemy.event.listen`.
"""
if logging_name:
self.logging_name = self._orig_logging_name = logging_name
else:
self._orig_logging_name = None
log.instance_logger(self, echoflag=echo)
self._threadconns = threading.local()
self._creator = creator
self._recycle = recycle
self._use_threadlocal = use_threadlocal
self._reset_on_return = reset_on_return
self.echo = echo
if _dispatch:
self.dispatch._update(_dispatch, only_propagate=False)
if events:
for fn, target in events:
event.listen(self, target, fn)
if listeners:
util.warn_deprecated(
"The 'listeners' argument to Pool (and "
"create_engine()) is deprecated. Use event.listen().")
for l in listeners:
self.add_listener(l)
dispatch = event.dispatcher(events.PoolEvents)
@util.deprecated(2.7, "Pool.add_listener is deprecated. Use event.listen()")
def add_listener(self, listener):
"""Add a :class:`.PoolListener`-like object to this pool.
``listener`` may be an object that implements some or all of
PoolListener, or a dictionary of callables containing implementations
of some or all of the named methods in PoolListener.
"""
interfaces.PoolListener._adapt_listener(self, listener)
def unique_connection(self):
"""Produce a DBAPI connection that is not referenced by any
thread-local context.
This method is different from :meth:`.Pool.connect` only if the
``use_threadlocal`` flag has been set to ``True``.
"""
return _ConnectionFairy(self).checkout()
def _create_connection(self):
"""Called by subclasses to create a new ConnectionRecord."""
return _ConnectionRecord(self)
def recreate(self):
"""Return a new :class:`.Pool`, of the same class as this one
and configured with identical creation arguments.
        This method is used in conjunction with :meth:`dispose`
to close out an entire :class:`.Pool` and create a new one in
its place.
"""
raise NotImplementedError()
def dispose(self):
"""Dispose of this pool.
This method leaves the possibility of checked-out connections
        remaining open. It is advised to not reuse the pool once dispose()
is called, and to instead use a new pool constructed by the
recreate() method.
"""
raise NotImplementedError()
def connect(self):
"""Return a DBAPI connection from the pool.
The connection is instrumented such that when its
``close()`` method is called, the connection will be returned to
the pool.
"""
if not self._use_threadlocal:
return _ConnectionFairy(self).checkout()
try:
rec = self._threadconns.current()
if rec:
return rec.checkout()
except AttributeError:
pass
agent = _ConnectionFairy(self)
self._threadconns.current = weakref.ref(agent)
return agent.checkout()
def _return_conn(self, record):
"""Given a _ConnectionRecord, return it to the :class:`.Pool`.
This method is called when an instrumented DBAPI connection
has its ``close()`` method called.
"""
if self._use_threadlocal:
try:
del self._threadconns.current
except AttributeError:
pass
self._do_return_conn(record)
def _do_get(self):
"""Implementation for :meth:`get`, supplied by subclasses."""
raise NotImplementedError()
def _do_return_conn(self, conn):
"""Implementation for :meth:`return_conn`, supplied by subclasses."""
raise NotImplementedError()
def status(self):
raise NotImplementedError()
class _ConnectionRecord(object):
finalize_callback = None
def __init__(self, pool):
self.__pool = pool
self.connection = self.__connect()
self.info = {}
pool.dispatch.first_connect.exec_once(self.connection, self)
pool.dispatch.connect(self.connection, self)
def close(self):
if self.connection is not None:
self.__pool.logger.debug("Closing connection %r", self.connection)
try:
self.connection.close()
except (SystemExit, KeyboardInterrupt):
raise
except:
self.__pool.logger.debug("Exception closing connection %r",
self.connection)
def invalidate(self, e=None):
if e is not None:
self.__pool.logger.info(
"Invalidate connection %r (reason: %s:%s)",
self.connection, e.__class__.__name__, e)
else:
self.__pool.logger.info(
"Invalidate connection %r", self.connection)
self.__close()
self.connection = None
def get_connection(self):
if self.connection is None:
self.connection = self.__connect()
self.info.clear()
if self.__pool.dispatch.connect:
self.__pool.dispatch.connect(self.connection, self)
elif self.__pool._recycle > -1 and \
time.time() - self.starttime > self.__pool._recycle:
self.__pool.logger.info(
"Connection %r exceeded timeout; recycling",
self.connection)
self.__close()
self.connection = self.__connect()
self.info.clear()
if self.__pool.dispatch.connect:
self.__pool.dispatch.connect(self.connection, self)
return self.connection
def __close(self):
try:
self.__pool.logger.debug("Closing connection %r", self.connection)
self.connection.close()
except (SystemExit, KeyboardInterrupt):
raise
except Exception, e:
self.__pool.logger.debug(
"Connection %r threw an error on close: %s",
self.connection, e)
def __connect(self):
try:
self.starttime = time.time()
connection = self.__pool._creator()
self.__pool.logger.debug("Created new connection %r", connection)
return connection
except Exception, e:
self.__pool.logger.debug("Error on connect(): %s", e)
raise
def _finalize_fairy(connection, connection_record, pool, ref, echo):
_refs.discard(connection_record)
if ref is not None and \
connection_record.fairy is not ref:
return
if connection is not None:
try:
if pool._reset_on_return:
connection.rollback()
# Immediately close detached instances
if connection_record is None:
connection.close()
except Exception, e:
if connection_record is not None:
connection_record.invalidate(e=e)
if isinstance(e, (SystemExit, KeyboardInterrupt)):
raise
if connection_record is not None:
connection_record.fairy = None
if echo:
pool.logger.debug("Connection %r being returned to pool",
connection)
if connection_record.finalize_callback:
connection_record.finalize_callback(connection)
del connection_record.finalize_callback
if pool.dispatch.checkin:
pool.dispatch.checkin(connection, connection_record)
pool._return_conn(connection_record)
_refs = set()
class _ConnectionFairy(object):
"""Proxies a DB-API connection and provides return-on-dereference
support."""
__slots__ = '_pool', '__counter', 'connection', \
'_connection_record', '__weakref__', \
'_detached_info', '_echo'
def __init__(self, pool):
self._pool = pool
self.__counter = 0
self._echo = _echo = pool._should_log_debug()
try:
rec = self._connection_record = pool._do_get()
conn = self.connection = self._connection_record.get_connection()
rec.fairy = weakref.ref(
self,
lambda ref:_finalize_fairy(conn, rec, pool, ref, _echo)
)
_refs.add(rec)
except:
# helps with endless __getattr__ loops later on
self.connection = None
self._connection_record = None
raise
if self._echo:
self._pool.logger.debug("Connection %r checked out from pool" %
self.connection)
@property
def _logger(self):
return self._pool.logger
@property
def is_valid(self):
return self.connection is not None
@property
def info(self):
"""An info collection unique to this DB-API connection."""
try:
return self._connection_record.info
except AttributeError:
if self.connection is None:
raise exc.InvalidRequestError("This connection is closed")
try:
return self._detached_info
except AttributeError:
self._detached_info = value = {}
return value
def invalidate(self, e=None):
"""Mark this connection as invalidated.
The connection will be immediately closed. The containing
ConnectionRecord will create a new connection when next used.
"""
if self.connection is None:
raise exc.InvalidRequestError("This connection is closed")
if self._connection_record is not None:
self._connection_record.invalidate(e=e)
self.connection = None
self._close()
def cursor(self, *args, **kwargs):
return self.connection.cursor(*args, **kwargs)
def __getattr__(self, key):
return getattr(self.connection, key)
def checkout(self):
if self.connection is None:
raise exc.InvalidRequestError("This connection is closed")
self.__counter += 1
if not self._pool.dispatch.checkout or self.__counter != 1:
return self
# Pool listeners can trigger a reconnection on checkout
attempts = 2
while attempts > 0:
try:
self._pool.dispatch.checkout(self.connection,
self._connection_record,
self)
return self
except exc.DisconnectionError, e:
self._pool.logger.info(
"Disconnection detected on checkout: %s", e)
self._connection_record.invalidate(e)
self.connection = self._connection_record.get_connection()
attempts -= 1
self._pool.logger.info("Reconnection attempts exhausted on checkout")
self.invalidate()
raise exc.InvalidRequestError("This connection is closed")
def detach(self):
"""Separate this connection from its Pool.
This means that the connection will no longer be returned to the
pool when closed, and will instead be literally closed. The
containing ConnectionRecord is separated from the DB-API connection,
and will create a new connection when next used.
Note that any overall connection limiting constraints imposed by a
Pool implementation may be violated after a detach, as the detached
connection is removed from the pool's knowledge and control.
"""
if self._connection_record is not None:
_refs.remove(self._connection_record)
self._connection_record.fairy = None
self._connection_record.connection = None
self._pool._do_return_conn(self._connection_record)
self._detached_info = \
self._connection_record.info.copy()
self._connection_record = None
def close(self):
self.__counter -= 1
if self.__counter == 0:
self._close()
def _close(self):
_finalize_fairy(self.connection, self._connection_record,
self._pool, None, self._echo)
self.connection = None
self._connection_record = None
class SingletonThreadPool(Pool):
"""A Pool that maintains one connection per thread.
    Maintains one connection per thread, never moving a connection to a
    thread other than the one in which it was created.
Options are the same as those of :class:`.Pool`, as well as:
:param pool_size: The number of threads in which to maintain connections
at once. Defaults to five.
:class:`.SingletonThreadPool` is used by the SQLite dialect
automatically when a memory-based database is used.
See :ref:`sqlite_toplevel`.
"""
def __init__(self, creator, pool_size=5, **kw):
kw['use_threadlocal'] = True
Pool.__init__(self, creator, **kw)
self._conn = threading.local()
self._all_conns = set()
self.size = pool_size
def recreate(self):
self.logger.info("Pool recreating")
return SingletonThreadPool(self._creator,
pool_size=self.size,
recycle=self._recycle,
echo=self.echo,
logging_name=self._orig_logging_name,
use_threadlocal=self._use_threadlocal,
_dispatch=self.dispatch)
def dispose(self):
"""Dispose of this pool."""
for conn in self._all_conns:
try:
conn.close()
except (SystemExit, KeyboardInterrupt):
raise
except:
# pysqlite won't even let you close a conn from a thread
# that didn't create it
pass
self._all_conns.clear()
def _cleanup(self):
while len(self._all_conns) > self.size:
c = self._all_conns.pop()
c.close()
def status(self):
return "SingletonThreadPool id:%d size: %d" % \
(id(self), len(self._all_conns))
def _do_return_conn(self, conn):
pass
def _do_get(self):
try:
c = self._conn.current()
if c:
return c
except AttributeError:
pass
c = self._create_connection()
self._conn.current = weakref.ref(c)
self._all_conns.add(c)
if len(self._all_conns) > self.size:
self._cleanup()
return c
class QueuePool(Pool):
"""A :class:`.Pool` that imposes a limit on the number of open connections.
:class:`.QueuePool` is the default pooling implementation used for
all :class:`.Engine` objects, unless the SQLite dialect is in use.
"""
def __init__(self, creator, pool_size=5, max_overflow=10, timeout=30,
**kw):
"""
Construct a QueuePool.
:param creator: a callable function that returns a DB-API
connection object. The function will be called with
parameters.
:param pool_size: The size of the pool to be maintained,
defaults to 5. This is the largest number of connections that
will be kept persistently in the pool. Note that the pool
begins with no connections; once this number of connections
is requested, that number of connections will remain.
``pool_size`` can be set to 0 to indicate no size limit; to
disable pooling, use a :class:`~sqlalchemy.pool.NullPool`
instead.
:param max_overflow: The maximum overflow size of the
pool. When the number of checked-out connections reaches the
size set in pool_size, additional connections will be
returned up to this limit. When those additional connections
are returned to the pool, they are disconnected and
discarded. It follows then that the total number of
simultaneous connections the pool will allow is pool_size +
`max_overflow`, and the total number of "sleeping"
connections the pool will allow is pool_size. `max_overflow`
can be set to -1 to indicate no overflow limit; no limit
will be placed on the total number of concurrent
connections. Defaults to 10.
:param timeout: The number of seconds to wait before giving up
on returning a connection. Defaults to 30.
:param recycle: If set to non -1, number of seconds between
connection recycling, which means upon checkout, if this
timeout is surpassed the connection will be closed and
replaced with a newly opened connection. Defaults to -1.
:param echo: If True, connections being pulled and retrieved
from the pool will be logged to the standard output, as well
as pool sizing information. Echoing can also be achieved by
enabling logging for the "sqlalchemy.pool"
namespace. Defaults to False.
:param use_threadlocal: If set to True, repeated calls to
:meth:`connect` within the same application thread will be
guaranteed to return the same connection object, if one has
already been retrieved from the pool and has not been
returned yet. Offers a slight performance advantage at the
cost of individual transactions by default. The
:meth:`unique_connection` method is provided to bypass the
threadlocal behavior installed into :meth:`connect`.
:param reset_on_return: If true, reset the database state of
connections returned to the pool. This is typically a
ROLLBACK to release locks and transaction resources.
Disable at your own peril. Defaults to True.
:param listeners: A list of
:class:`~sqlalchemy.interfaces.PoolListener`-like objects or
dictionaries of callables that receive events when DB-API
connections are created, checked out and checked in to the
pool.
"""
Pool.__init__(self, creator, **kw)
self._pool = sqla_queue.Queue(pool_size)
self._overflow = 0 - pool_size
self._max_overflow = max_overflow
self._timeout = timeout
self._overflow_lock = self._max_overflow > -1 and \
threading.Lock() or None
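    # A minimal construction sketch (the creator lambda is illustrative; any
    # zero-argument callable returning a DB-API connection will do):
    #
    #     import sqlite3
    #     pool = QueuePool(lambda: sqlite3.connect(':memory:'),
    #                      pool_size=5, max_overflow=10, timeout=30)
    #     conn = pool.connect()
    #     conn.close()   # returns the connection to the pool rather than closing it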
def recreate(self):
self.logger.info("Pool recreating")
return QueuePool(self._creator, pool_size=self._pool.maxsize,
max_overflow=self._max_overflow,
timeout=self._timeout,
recycle=self._recycle, echo=self.echo,
logging_name=self._orig_logging_name,
use_threadlocal=self._use_threadlocal,
_dispatch=self.dispatch)
def _do_return_conn(self, conn):
try:
self._pool.put(conn, False)
except sqla_queue.Full:
conn.close()
if self._overflow_lock is None:
self._overflow -= 1
else:
self._overflow_lock.acquire()
try:
self._overflow -= 1
finally:
self._overflow_lock.release()
def _do_get(self):
try:
wait = self._max_overflow > -1 and \
self._overflow >= self._max_overflow
return self._pool.get(wait, self._timeout)
except sqla_queue.Empty:
if self._max_overflow > -1 and \
self._overflow >= self._max_overflow:
if not wait:
return self._do_get()
else:
raise exc.TimeoutError(
"QueuePool limit of size %d overflow %d reached, "
"connection timed out, timeout %d" %
(self.size(), self.overflow(), self._timeout))
if self._overflow_lock is not None:
self._overflow_lock.acquire()
if self._max_overflow > -1 and \
self._overflow >= self._max_overflow:
if self._overflow_lock is not None:
self._overflow_lock.release()
return self._do_get()
try:
con = self._create_connection()
self._overflow += 1
finally:
if self._overflow_lock is not None:
self._overflow_lock.release()
return con
def dispose(self):
while True:
try:
conn = self._pool.get(False)
conn.close()
except sqla_queue.Empty:
break
self._overflow = 0 - self.size()
self.logger.info("Pool disposed. %s", self.status())
def status(self):
return "Pool size: %d Connections in pool: %d "\
"Current Overflow: %d Current Checked out "\
"connections: %d" % (self.size(),
self.checkedin(),
self.overflow(),
self.checkedout())
def size(self):
return self._pool.maxsize
def checkedin(self):
return self._pool.qsize()
def overflow(self):
return self._overflow
def checkedout(self):
return self._pool.maxsize - self._pool.qsize() + self._overflow
class NullPool(Pool):
"""A Pool which does not pool connections.
Instead it literally opens and closes the underlying DB-API connection
    for each connection open/close.
Reconnect-related functions such as ``recycle`` and connection
invalidation are not supported by this Pool implementation, since
no connections are held persistently.
    :class:`.NullPool` is used by the SQLite dialect automatically
when a file-based database is used (as of SQLAlchemy 0.7).
See :ref:`sqlite_toplevel`.
"""
def status(self):
return "NullPool"
def _do_return_conn(self, conn):
conn.close()
def _do_get(self):
return self._create_connection()
def recreate(self):
self.logger.info("Pool recreating")
return NullPool(self._creator,
recycle=self._recycle,
echo=self.echo,
logging_name=self._orig_logging_name,
use_threadlocal=self._use_threadlocal,
_dispatch=self.dispatch)
def dispose(self):
pass
class StaticPool(Pool):
"""A Pool of exactly one connection, used for all requests.
Reconnect-related functions such as ``recycle`` and connection
invalidation (which is also used to support auto-reconnect) are not
currently supported by this Pool implementation but may be implemented
in a future release.
"""
@memoized_property
def _conn(self):
return self._creator()
@memoized_property
def connection(self):
return _ConnectionRecord(self)
def status(self):
return "StaticPool"
def dispose(self):
if '_conn' in self.__dict__:
self._conn.close()
self._conn = None
def recreate(self):
self.logger.info("Pool recreating")
return self.__class__(creator=self._creator,
recycle=self._recycle,
use_threadlocal=self._use_threadlocal,
reset_on_return=self._reset_on_return,
echo=self.echo,
logging_name=self._orig_logging_name,
_dispatch=self.dispatch)
def _create_connection(self):
return self._conn
def _do_return_conn(self, conn):
pass
def _do_get(self):
return self.connection
class AssertionPool(Pool):
"""A :class:`.Pool` that allows at most one checked out connection at any given
time.
This will raise an exception if more than one connection is checked out
at a time. Useful for debugging code that is using more connections
than desired.
:class:`.AssertionPool` also logs a traceback of where
the original connection was checked out, and reports
this in the assertion error raised (new in 0.7).
"""
def __init__(self, *args, **kw):
self._conn = None
self._checked_out = False
self._store_traceback = kw.pop('store_traceback', True)
self._checkout_traceback = None
Pool.__init__(self, *args, **kw)
def status(self):
return "AssertionPool"
def _do_return_conn(self, conn):
if not self._checked_out:
raise AssertionError("connection is not checked out")
self._checked_out = False
assert conn is self._conn
def dispose(self):
self._checked_out = False
if self._conn:
self._conn.close()
def recreate(self):
self.logger.info("Pool recreating")
return AssertionPool(self._creator, echo=self.echo,
logging_name=self._orig_logging_name,
_dispatch=self.dispatch)
def _do_get(self):
if self._checked_out:
if self._checkout_traceback:
suffix = ' at:\n%s' % ''.join(
chop_traceback(self._checkout_traceback))
else:
suffix = ''
raise AssertionError("connection is already checked out" + suffix)
if not self._conn:
self._conn = self._create_connection()
self._checked_out = True
if self._store_traceback:
self._checkout_traceback = traceback.format_stack()
return self._conn
class _DBProxy(object):
"""Layers connection pooling behavior on top of a standard DB-API module.
Proxies a DB-API 2.0 connect() call to a connection pool keyed to the
specific connect parameters. Other functions and attributes are delegated
to the underlying DB-API module.
"""
def __init__(self, module, poolclass=QueuePool, **kw):
"""Initializes a new proxy.
module
a DB-API 2.0 module
poolclass
a Pool class, defaulting to QueuePool
Other parameters are sent to the Pool object's constructor.
"""
self.module = module
self.kw = kw
self.poolclass = poolclass
self.pools = {}
self._create_pool_mutex = threading.Lock()
def close(self):
for key in self.pools.keys():
del self.pools[key]
def __del__(self):
self.close()
def __getattr__(self, key):
return getattr(self.module, key)
def get_pool(self, *args, **kw):
key = self._serialize(*args, **kw)
try:
return self.pools[key]
except KeyError:
self._create_pool_mutex.acquire()
try:
if key not in self.pools:
pool = self.poolclass(lambda:
self.module.connect(*args, **kw), **self.kw)
self.pools[key] = pool
return pool
else:
return self.pools[key]
finally:
self._create_pool_mutex.release()
def connect(self, *args, **kw):
"""Activate a connection to the database.
Connect to the database using this DBProxy's module and the given
connect arguments. If the arguments match an existing pool, the
connection will be returned from the pool's current thread-local
connection instance, or if there is no thread-local connection
instance it will be checked out from the set of pooled connections.
If the pool has no available connections and allows new connections
to be created, a new database connection will be made.
"""
return self.get_pool(*args, **kw).connect()
def dispose(self, *args, **kw):
"""Dispose the pool referenced by the given connect arguments."""
key = self._serialize(*args, **kw)
try:
del self.pools[key]
except KeyError:
pass
def _serialize(self, *args, **kw):
return tuple(
list(args) +
[(k, kw[k]) for k in sorted(kw)]
)
| gpl-2.0 |
GoogleCloudPlatform/python-compat-runtime | appengine-compat/exported_appengine_sdk/google/appengine/ext/vmruntime/initialize.py | 1 | 3059 | #!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Functions that prepare GAE user code for running in a GCE VM."""
import json
import logging
import logging.handlers
import math
import sys
import traceback
from google.appengine import api
from google.appengine.api import app_logging
from google.appengine.api.logservice import logservice
from google.appengine.ext.vmruntime import background_thread
from google.appengine.runtime import request_environment
from google.appengine.runtime import runtime
APP_LOG_FILE = '/var/log/app_engine/app.log.json'
MAX_LOG_BYTES = 128 * 1024 * 1024
LOG_BACKUP_COUNT = 3
class JsonFormatter(logging.Formatter):
"""Class for logging to the cloud logging api with json metadata."""
def format(self, record):
"""Format the record as json the cloud logging agent understands.
Args:
record: A logging.LogRecord to format.
Returns:
A json string to log.
"""
float_frac_sec, float_sec = math.modf(record.created)
data = {'thread': record.thread,
'timestamp': {
'seconds': int(float_sec),
'nanos': int(float_frac_sec * 1000000000)}}
if record.exc_info:
data['message'] = '%s\n%s' % (record.getMessage(),
traceback.format_exc(
record.exc_info))
data['severity'] = 'CRITICAL'
else:
data['message'] = record.getMessage()
data['severity'] = record.levelname
return json.dumps(data)
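  # Illustrative output for an ordinary INFO record (field values are made up):
  #
  #   {"thread": 139875, "timestamp": {"seconds": 1438387200, "nanos": 250000000},
  #    "message": "request handled", "severity": "INFO"}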
def InitializeFileLogging():
"""Helper called from CreateAndRunService() to set up syslog logging."""
logging.basicConfig()
logger = logging.getLogger()
logger.handlers = []
file_handler = logging.handlers.RotatingFileHandler(
APP_LOG_FILE, maxBytes=MAX_LOG_BYTES, backupCount=LOG_BACKUP_COUNT)
file_handler.setFormatter(JsonFormatter())
logger.addHandler(file_handler)
logger.setLevel(logging.DEBUG)
def InitializeApiLogging():
"""Helper called from CreateAndRunService() to set up api logging."""
logservice.logs_buffer = lambda: request_environment.current_request.errors
logger = logging.getLogger()
app_log_handler = app_logging.AppLogsHandler()
logger.addHandler(app_log_handler)
def InitializeThreadingApis():
"""Helper to monkey-patch various threading APIs."""
runtime.PatchStartNewThread()
sys.modules[api.__name__ + '.background_thread'] = background_thread
api.background_thread = background_thread
| apache-2.0 |
mogotest/selenium | selenium/src/py/lib/docutils/readers/pep.py | 5 | 1666 | # Author: David Goodger
# Contact: [email protected]
# Revision: $Revision: 3892 $
# Date: $Date: 2005-09-20 22:04:53 +0200 (Tue, 20 Sep 2005) $
# Copyright: This module has been placed in the public domain.
"""
Python Enhancement Proposal (PEP) Reader.
"""
__docformat__ = 'reStructuredText'
from docutils.readers import standalone
from docutils.transforms import peps, references, misc, frontmatter
from docutils.parsers import rst
class Reader(standalone.Reader):
supported = ('pep',)
"""Contexts this reader supports."""
settings_spec = (
'PEP Reader Option Defaults',
'The --pep-references and --rfc-references options (for the '
'reStructuredText parser) are on by default.',
())
config_section = 'pep reader'
config_section_dependencies = ('readers', 'standalone reader')
def get_transforms(self):
transforms = standalone.Reader.get_transforms(self)
# We have PEP-specific frontmatter handling.
transforms.remove(frontmatter.DocTitle)
transforms.remove(frontmatter.SectionSubTitle)
transforms.remove(frontmatter.DocInfo)
transforms.extend([peps.Headers, peps.Contents, peps.TargetNotes])
return transforms
settings_default_overrides = {'pep_references': 1, 'rfc_references': 1}
inliner_class = rst.states.Inliner
def __init__(self, parser=None, parser_name=None):
"""`parser` should be ``None``."""
if parser is None:
parser = rst.Parser(rfc2822=1, inliner=self.inliner_class())
standalone.Reader.__init__(self, parser, '')
| apache-2.0 |
Timurdov/bionic | bionic/Lib/site-packages/django/contrib/sessions/backends/cache.py | 102 | 2499 | from django.conf import settings
from django.contrib.sessions.backends.base import SessionBase, CreateError
from django.core.cache import caches
from django.utils.six.moves import xrange
KEY_PREFIX = "django.contrib.sessions.cache"
class SessionStore(SessionBase):
"""
A cache-based session store.
"""
def __init__(self, session_key=None):
self._cache = caches[settings.SESSION_CACHE_ALIAS]
super(SessionStore, self).__init__(session_key)
@property
def cache_key(self):
return KEY_PREFIX + self._get_or_create_session_key()
def load(self):
try:
session_data = self._cache.get(self.cache_key, None)
except Exception:
# Some backends (e.g. memcache) raise an exception on invalid
# cache keys. If this happens, reset the session. See #17810.
session_data = None
if session_data is not None:
return session_data
self.create()
return {}
def create(self):
# Because a cache can fail silently (e.g. memcache), we don't know if
# we are failing to create a new session because of a key collision or
# because the cache is missing. So we try for a (large) number of times
# and then raise an exception. That's the risk you shoulder if using
# cache backing.
for i in xrange(10000):
self._session_key = self._get_new_session_key()
try:
self.save(must_create=True)
except CreateError:
continue
self.modified = True
return
raise RuntimeError(
"Unable to create a new session key. "
"It is likely that the cache is unavailable.")
def save(self, must_create=False):
if must_create:
func = self._cache.add
else:
func = self._cache.set
result = func(self.cache_key,
self._get_session(no_load=must_create),
self.get_expiry_age())
if must_create and not result:
raise CreateError
def exists(self, session_key):
return (KEY_PREFIX + session_key) in self._cache
def delete(self, session_key=None):
if session_key is None:
if self.session_key is None:
return
session_key = self.session_key
self._cache.delete(KEY_PREFIX + session_key)
@classmethod
def clear_expired(cls):
pass
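    # A minimal usage sketch (assumes SESSION_ENGINE points at this backend and a
    # cache alias is configured; the key name is illustrative):
    #
    #     store = SessionStore()
    #     store['cart_id'] = 42
    #     store.save()                                   # writes to the configured cache
    #     SessionStore(store.session_key)['cart_id']     # -> 42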
| apache-2.0 |
mushtaqak/edx-platform | cms/djangoapps/contentstore/features/transcripts.py | 46 | 8895 | # disable missing docstring
# pylint: disable=missing-docstring
import os
from lettuce import world, step
from django.conf import settings
from xmodule.contentstore.content import StaticContent
from xmodule.contentstore.django import contentstore
from xmodule.exceptions import NotFoundError
from splinter.request_handler.request_handler import RequestHandler
TEST_ROOT = settings.COMMON_TEST_DATA_ROOT
# We should wait 300 ms for event handler invocation + 200 ms for safety.
DELAY = 0.5
ERROR_MESSAGES = {
'url_format': u'Incorrect url format.',
'file_type': u'Link types should be unique.',
'links_duplication': u'Links should be unique.',
}
STATUSES = {
'found': u'Timed Transcript Found',
'not found on edx': u'No EdX Timed Transcript',
'not found': u'No Timed Transcript',
'replace': u'Timed Transcript Conflict',
'uploaded_successfully': u'Timed Transcript Uploaded Successfully',
'use existing': u'Confirm Timed Transcript',
}
SELECTORS = {
'error_bar': '.transcripts-error-message',
'url_inputs': '.videolist-settings-item input.input',
'collapse_link': '.collapse-action.collapse-setting',
'collapse_bar': '.videolist-extra-videos',
'status_bar': '.transcripts-message-status',
}
# button type , button css selector, button message
TRANSCRIPTS_BUTTONS = {
'import': ('.setting-import', 'Import YouTube Transcript'),
'download_to_edit': ('.setting-download', 'Download Transcript for Editing'),
'disabled_download_to_edit': ('.setting-download.is-disabled', 'Download Transcript for Editing'),
'upload_new_timed_transcripts': ('.setting-upload', 'Upload New Transcript'),
'replace': ('.setting-replace', 'Yes, replace the edX transcript with the YouTube transcript'),
'choose': ('.setting-choose', 'Timed Transcript from {}'),
'use_existing': ('.setting-use-existing', 'Use Current Transcript'),
}
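# Illustrative feature-file lines that the step definitions below match
# (scenario text is hypothetical, not taken from an actual .feature file):
#
#     I clear fields
#     I see status message "found"
#     I click transcript button "import"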
@step('I clear fields$')
def clear_fields(_step):
# Clear the input fields and trigger an 'input' event
script = """
$('{selector}')
.prop('disabled', false)
.removeClass('is-disabled')
.attr('aria-disabled', false)
.val('')
.trigger('input');
""".format(selector=SELECTORS['url_inputs'])
world.browser.execute_script(script)
world.wait(DELAY)
world.wait_for_ajax_complete()
@step('I clear field number (.+)$')
def clear_field(_step, index):
index = int(index) - 1
world.css_fill(SELECTORS['url_inputs'], '', index)
# For some reason ChromeDriver doesn't trigger an 'input' event after filling
# the field with an empty value. That's why we trigger it manually via jQuery.
world.trigger_event(SELECTORS['url_inputs'], event='input', index=index)
world.wait(DELAY)
world.wait_for_ajax_complete()
@step('I expect (.+) inputs are disabled$')
def inputs_are_disabled(_step, indexes):
index_list = [int(i.strip()) - 1 for i in indexes.split(',')]
for index in index_list:
el = world.css_find(SELECTORS['url_inputs'])[index]
assert el['disabled']
@step('I expect inputs are enabled$')
def inputs_are_enabled(_step):
for index in range(3):
el = world.css_find(SELECTORS['url_inputs'])[index]
assert not el['disabled']
@step('I do not see error message$')
def i_do_not_see_error_message(_step):
assert not world.css_visible(SELECTORS['error_bar'])
@step('I see error message "([^"]*)"$')
def i_see_error_message(_step, error):
assert world.css_has_text(SELECTORS['error_bar'], ERROR_MESSAGES[error])
@step('I do not see status message$')
def i_do_not_see_status_message(_step):
assert not world.css_visible(SELECTORS['status_bar'])
@step('I see status message "([^"]*)"$')
def i_see_status_message(_step, status):
assert not world.css_visible(SELECTORS['error_bar'])
assert world.css_has_text(SELECTORS['status_bar'], STATUSES[status])
DOWNLOAD_BUTTON = TRANSCRIPTS_BUTTONS["download_to_edit"][0]
if world.is_css_present(DOWNLOAD_BUTTON, wait_time=1) and not world.css_find(DOWNLOAD_BUTTON)[0].has_class('is-disabled'):
assert _transcripts_are_downloaded()
@step('I (.*)see button "([^"]*)"$')
def i_see_button(_step, not_see, button_type):
button = button_type.strip()
if not_see.strip():
assert world.is_css_not_present(TRANSCRIPTS_BUTTONS[button][0])
else:
assert world.css_has_text(TRANSCRIPTS_BUTTONS[button][0], TRANSCRIPTS_BUTTONS[button][1])
@step('I (.*)see (.*)button "([^"]*)" number (\d+)$')
def i_see_button_with_custom_text(_step, not_see, button_type, custom_text, index):
button = button_type.strip()
custom_text = custom_text.strip()
index = int(index.strip()) - 1
if not_see.strip():
assert world.is_css_not_present(TRANSCRIPTS_BUTTONS[button][0])
else:
assert world.css_has_text(TRANSCRIPTS_BUTTONS[button][0], TRANSCRIPTS_BUTTONS[button][1].format(custom_text), index)
@step('I click transcript button "([^"]*)"$')
def click_button_transcripts_variant(_step, button_type):
button = button_type.strip()
world.css_click(TRANSCRIPTS_BUTTONS[button][0])
world.wait_for_ajax_complete()
@step('I click transcript button "([^"]*)" number (\d+)$')
def click_button_index(_step, button_type, index):
button = button_type.strip()
index = int(index.strip()) - 1
world.css_click(TRANSCRIPTS_BUTTONS[button][0], index)
world.wait_for_ajax_complete()
@step('I remove "([^"]+)" transcripts id from store')
def remove_transcripts_from_store(_step, subs_id):
"""Remove from store, if transcripts content exists."""
filename = 'subs_{0}.srt.sjson'.format(subs_id.strip())
content_location = StaticContent.compute_location(
world.scenario_dict['COURSE'].id,
filename
)
try:
content = contentstore().find(content_location)
contentstore().delete(content.location)
print('Transcript file was removed from store.')
except NotFoundError:
print('Transcript file was NOT found and not removed.')
@step('I enter a "([^"]+)" source to field number (\d+)$')
def i_enter_a_source(_step, link, index):
index = int(index) - 1
    if index != 0 and not world.css_visible(SELECTORS['collapse_bar']):
world.css_click(SELECTORS['collapse_link'])
assert world.css_visible(SELECTORS['collapse_bar'])
world.css_fill(SELECTORS['url_inputs'], link, index)
world.wait(DELAY)
world.wait_for_ajax_complete()
@step('I upload the transcripts file "([^"]*)"$')
def upload_file(_step, file_name):
path = os.path.join(TEST_ROOT, 'uploads/', file_name.strip())
world.browser.execute_script("$('form.file-chooser').show()")
world.browser.attach_file('transcript-file', os.path.abspath(path))
world.wait_for_ajax_complete()
@step('I see "([^"]*)" text in the captions')
def check_text_in_the_captions(_step, text):
world.wait_for_present('.video.is-captions-rendered')
world.wait_for(lambda _: world.css_text('.subtitles'), timeout=30)
actual_text = world.css_text('.subtitles')
assert (text in actual_text)
@step('I see value "([^"]*)" in the field "([^"]*)"$')
def check_transcripts_field(_step, values, field_name):
world.select_editor_tab('Advanced')
tab = world.css_find('#settings-tab').first
field_id = '#' + tab.find_by_xpath('.//label[text()="%s"]' % field_name.strip())[0]['for']
values_list = [i.strip() == world.css_value(field_id) for i in values.split('|')]
assert any(values_list)
world.select_editor_tab('Basic')
@step('I save changes$')
def save_changes(_step):
world.save_component()
@step('I open tab "([^"]*)"$')
def open_tab(_step, tab_name):
world.select_editor_tab(tab_name)
@step('I set value "([^"]*)" to the field "([^"]*)"$')
def set_value_transcripts_field(_step, value, field_name):
tab = world.css_find('#settings-tab').first
XPATH = './/label[text()="{name}"]'.format(name=field_name)
SELECTOR = '#' + tab.find_by_xpath(XPATH)[0]['for']
element = world.css_find(SELECTOR).first
if element['type'] == 'text':
SCRIPT = '$("{selector}").val("{value}").change()'.format(
selector=SELECTOR,
value=value
)
world.browser.execute_script(SCRIPT)
assert world.css_has_value(SELECTOR, value)
else:
assert False, 'Incorrect element type.'
world.wait_for_ajax_complete()
@step('I revert the transcript field "([^"]*)"$')
def revert_transcripts_field(_step, field_name):
world.revert_setting_entry(field_name)
def _transcripts_are_downloaded():
world.wait_for_ajax_complete()
request = RequestHandler()
DOWNLOAD_BUTTON = world.css_find(TRANSCRIPTS_BUTTONS["download_to_edit"][0]).first
url = DOWNLOAD_BUTTON['href']
request.connect(url)
return request.status_code.is_success()
| agpl-3.0 |
ioanpocol/superdesk-core | tests/publish/ninjs_formatter_test.py | 1 | 44687 | # -*- coding: utf-8; -*-
#
# This file is part of Superdesk.
#
# Copyright 2013, 2014, 2015 Sourcefabric z.u. and contributors.
#
# For the full copyright and license information, please see the
# AUTHORS and LICENSE files distributed with this source code, or
# at https://www.sourcefabric.org/superdesk/license
import json
from unittest import mock
from datetime import timedelta
from superdesk.utc import utcnow
from superdesk.tests import TestCase
from superdesk.publish.formatters.ninjs_formatter import NINJSFormatter
from superdesk.publish import init_app
from bson import ObjectId
@mock.patch("superdesk.publish.subscribers.SubscribersService.generate_sequence_number", lambda self, subscriber: 1)
class NinjsFormatterTest(TestCase):
def setUp(self):
self.formatter = NINJSFormatter()
init_app(self.app)
self.maxDiff = None
def test_text_formatter(self):
self.app.data.insert(
"vocabularies",
[
{
"_id": "locators",
"display_name": "Locators",
"type": "unmanageable",
"unique_field": "qcode",
"items": [
{
"is_active": True,
"name": "NSW",
"qcode": "NSW",
"state": "New South Wales",
"country": "Australia",
"world_region": "Oceania",
"group": "Australia",
}
],
}
],
)
embargo_ts = utcnow() + timedelta(days=2)
article = {
"_id": "tag:aap.com.au:20150613:12345",
"guid": "tag:aap.com.au:20150613:12345",
"_current_version": 1,
"anpa_category": [{"qcode": "a"}],
"source": "AAP",
"headline": "This is a test headline",
"byline": "joe",
"slugline": "slugline",
"subject": [
{"qcode": "02011001", "name": "international court or tribunal", "parent": None},
{"qcode": "02011002", "name": "extradition"},
],
"anpa_take_key": "take_key",
"unique_id": "1",
"body_html": "The story body",
"type": "text",
"word_count": "1",
"priority": 1,
"profile": "snap",
"state": "published",
"urgency": 2,
"pubstatus": "usable",
"creditline": "sample creditline",
"keywords": ["traffic"],
"abstract": "<p>sample <b>abstract</b></p>",
"place": [{"name": "NSW", "qcode": "NSW"}],
"embargo": embargo_ts,
"body_footer": "<p>call helpline 999 if you are planning to quit smoking</p>",
"company_codes": [{"name": "YANCOAL AUSTRALIA LIMITED", "qcode": "YAL", "security_exchange": "ASX"}],
"genre": [{"name": "Article", "qcode": "article"}],
"flags": {"marked_for_legal": True},
"extra": {"foo": "test"},
"annotations": [{"msg": "test"}],
}
seq, doc = self.formatter.format(article, {"name": "Test Subscriber"})[0]
expected = {
"guid": "tag:aap.com.au:20150613:12345",
"version": "1",
"place": [{"code": "NSW", "name": "New South Wales"}],
"pubstatus": "usable",
"body_html": "The story body<p>call helpline 999 if you are planning to quit smoking</p>",
"type": "text",
"subject": [
{"code": "02011001", "name": "international court or tribunal"},
{"code": "02011002", "name": "extradition"},
],
"service": [{"code": "a"}],
"source": "AAP",
"headline": "This is a test headline",
"byline": "joe",
"urgency": 2,
"priority": 1,
"embargoed": embargo_ts.isoformat(),
"profile": "snap",
"slugline": "slugline",
"description_text": "sample abstract",
"description_html": "<p>sample <b>abstract</b></p>",
"keywords": ["traffic"],
"organisation": [
{
"name": "YANCOAL AUSTRALIA LIMITED",
"rel": "Securities Identifier",
"symbols": [{"ticker": "YAL", "exchange": "ASX"}],
}
],
"genre": [{"name": "Article", "code": "article"}],
"signal": [{"name": "Content Warning", "code": "cwarn", "scheme": "http://cv.iptc.org/newscodes/signal/"}],
"extra": {"foo": "test"},
"charcount": 67,
"wordcount": 13,
"readtime": 0,
"annotations": article["annotations"],
}
self.assertEqual(json.loads(doc), expected)
def test_picture_formatter(self):
article = {
"guid": "20150723001158606583",
"_current_version": 1,
"slugline": "AMAZING PICTURE",
"original_source": "AAP",
"renditions": {
"viewImage": {
"width": 640,
"href": "http://localhost:5000/api/upload/55b032041d41c8d278d21b6f/raw?_schema=http",
"mimetype": "image/jpeg",
"height": 401,
},
"original": {
"href": "https://one-api.aap.com.au/api/v3/Assets/20150723001158606583/Original/download",
"mimetype": "image/jpeg",
},
},
"byline": "MICKEY MOUSE",
"headline": "AMAZING PICTURE",
"versioncreated": "2015-07-23T00:15:00.000Z",
"ednote": "TEST ONLY",
"type": "picture",
"pubstatus": "usable",
"source": "AAP",
"description": "The most amazing picture you will ever see",
"guid": "20150723001158606583",
"body_footer": "<p>call helpline 999 if you are planning to quit smoking</p>",
}
seq, doc = self.formatter.format(article, {"name": "Test Subscriber"})[0]
expected = {
"byline": "MICKEY MOUSE",
"renditions": {
"original": {
"href": "https://one-api.aap.com.au/api/v3/Assets/20150723001158606583/Original/download",
"mimetype": "image/jpeg",
}
},
"headline": "AMAZING PICTURE",
"pubstatus": "usable",
"version": "1",
"versioncreated": "2015-07-23T00:15:00.000Z",
"guid": "20150723001158606583",
"description_html": "The most amazing picture you will ever see<p>call helpline 999 if you are planning to "
"quit smoking</p>",
"type": "picture",
"priority": 5,
"slugline": "AMAZING PICTURE",
"ednote": "TEST ONLY",
"source": "AAP",
}
self.assertEqual(expected, json.loads(doc))
self.assertNotIn("viewImage", json.loads(doc).get("renditions"))
def test_composite_formatter(self):
article = {
"guid": "urn:newsml:localhost:2015-07-24T15:05:00.116047:435c93c2-492c-4668-ab47-ae6e2b9b1c2c",
"groups": [
{"id": "root", "refs": [{"idRef": "main"}, {"idRef": "sidebars"}], "role": "grpRole:NEP"},
{
"id": "main",
"refs": [
{
"renditions": {},
"slugline": "Boat",
"guid": "tag:localhost:2015:515b895a-b336-48b2-a506-5ffaf561b916",
"headline": "WA:Navy steps in with WA asylum-seeker boat",
"location": "archive",
"type": "text",
"itemClass": "icls:text",
"residRef": "tag:localhost:2015:515b895a-b336-48b2-a506-5ffaf561b916",
}
],
"role": "grpRole:main",
},
{
"id": "sidebars",
"refs": [
{
"renditions": {
"original_source": {
"href": "https://one-api.aap.com.au\
/api/v3/Assets/20150723001158639795/Original/download",
"mimetype": "image/jpeg",
},
"original": {
"width": 2784,
"height": 4176,
"href": "http://localhost:5000\
/api/upload/55b078b21d41c8e974d17ec5/raw?_schema=http",
"mimetype": "image/jpeg",
"media": "55b078b21d41c8e974d17ec5",
},
"thumbnail": {
"width": 80,
"height": 120,
"href": "http://localhost:5000\
/api/upload/55b078b41d41c8e974d17ed3/raw?_schema=http",
"mimetype": "image/jpeg",
"media": "55b078b41d41c8e974d17ed3",
},
"viewImage": {
"width": 426,
"height": 640,
"href": "http://localhost:5000\
/api/upload/55b078b31d41c8e974d17ed1/raw?_schema=http",
"mimetype": "image/jpeg",
"media": "55b078b31d41c8e974d17ed1",
},
"baseImage": {
"width": 933,
"height": 1400,
"href": "http://localhost:5000\
/api/upload/55b078b31d41c8e974d17ecf/raw?_schema=http",
"mimetype": "image/jpeg",
"media": "55b078b31d41c8e974d17ecf",
},
},
"slugline": "ABC SHOP CLOSURES",
"type": "picture",
"guid": "urn:newsml:localhost:2015-07-24T15:04:29.589984:"
"af3bef9a-5002-492b-a15a-8b460e69b164",
"headline": "ABC SHOP CLOSURES",
"location": "archive",
"itemClass": "icls:picture",
"residRef": "urn:newsml:localhost:2015-07-24T15:04:29.589984:"
"af3bef9a-5002-492b-a15a-8b460e69b164",
}
],
"role": "grpRole:sidebars",
},
],
"description": "",
"operation": "update",
"sign_off": "mar",
"type": "composite",
"pubstatus": "usable",
"version_creator": "558379451d41c83ff598a3af",
"language": "en",
"guid": "urn:newsml:localhost:2015-07-24T15:05:00.116047:435c93c2-492c-4668-ab47-ae6e2b9b1c2c",
"unique_name": "#145",
"headline": "WA:Navy steps in with WA asylum-seeker boat",
"original_creator": "558379451d41c83ff598a3af",
"source": "AAP",
"_etag": "b41df79084304219524a092abf07ecba9e1bb2c5",
"slugline": "Boat",
"firstcreated": "2015-07-24T05:05:00.000Z",
"unique_id": 145,
"versioncreated": "2015-07-24T05:05:14.000Z",
"_updated": "2015-07-24T05:05:25.000Z",
"family_id": "urn:newsml:localhost:2015-07-24T15:05:00.116047:435c93c2-492c-4668-ab47-ae6e2b9b1c2c",
"_current_version": 2,
"_created": "2015-07-24T05:05:00.000Z",
"version": 2,
}
seq, doc = self.formatter.format(article, {"name": "Test Subscriber"})[0]
expected = {
"headline": "WA:Navy steps in with WA asylum-seeker boat",
"version": "2",
"guid": "urn:newsml:localhost:2015-07-24T15:05:00.116047:435c93c2-492c-4668-ab47-ae6e2b9b1c2c",
"associations": {
"main": {"guid": "tag:localhost:2015:515b895a-b336-48b2-a506-5ffaf561b916", "type": "text"},
"sidebars": {
"guid": "urn:newsml:localhost:2015-07-24T15:04:29.589984:af3bef9a-5002-492b-a15a-8b460e69b164",
"type": "picture",
},
},
"firstcreated": "2015-07-24T05:05:00.000Z",
"versioncreated": "2015-07-24T05:05:14.000Z",
"type": "composite",
"pubstatus": "usable",
"language": "en",
"priority": 5,
"slugline": "Boat",
"source": "AAP",
}
self.assertEqual(expected, json.loads(doc))
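# The composite expectations above show how package groups are flattened:
# each non-root group ("main", "sidebars") becomes an "associations" entry
# keyed by the group id, carrying only the guid and type of the referenced
# item, while renditions and other reference details are dropped.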
def test_item_with_usable_associations(self):
article = {
"_id": "urn:bar",
"guid": "urn:bar",
"_current_version": 1,
"type": "text",
"associations": {
"image": {
"_id": "urn:foo",
"guid": "urn:foo",
"pubstatus": "usable",
"headline": "Foo",
"type": "picture",
"task": {},
"copyrightholder": "Foo ltd.",
"description_text": "Foo picture",
"renditions": {
"original": {
"href": "http://example.com",
"width": 100,
"height": 80,
"mimetype": "image/jpeg",
"CropLeft": 0,
}
},
}
},
}
seq, doc = self.formatter.format(article, {"name": "Test Subscriber"})[0]
formatted = json.loads(doc)
self.assertIn("associations", formatted)
self.assertIn("image", formatted["associations"])
image = formatted["associations"]["image"]
self.assertEqual("urn:foo", image["guid"])
self.assertEqual("Foo", image["headline"])
self.assertEqual("usable", image["pubstatus"])
self.assertNotIn("task", image)
self.assertEqual("Foo ltd.", image["copyrightholder"])
self.assertEqual("Foo picture", image["description_text"])
rendition = image["renditions"]["original"]
self.assertEqual(100, rendition["width"])
self.assertEqual(80, rendition["height"])
self.assertEqual("image/jpeg", rendition["mimetype"])
self.assertNotIn("CropLeft", rendition)
def test_item_with_empty_associations(self):
article = {
"_id": "urn:bar",
"guid": "urn:bar",
"_current_version": 1,
"type": "text",
"associations": {"image": None},
}
_, doc = self.formatter.format(article, {"name": "Test Subscriber"})[0]
formatted = json.loads(doc)
self.assertIn("associations", formatted)
self.assertNotIn("image", formatted["associations"])
def test_vidible_formatting(self):
article = {
"_id": "tag:aap.com.au:20150613:12345",
"guid": "tag:aap.com.au:20150613:12345",
"_current_version": 1,
"source": "AAP",
"headline": "This is a test headline",
"slugline": "slugline",
"unique_id": "1",
"body_html": "The story body",
"type": "text",
"state": "published",
"pubstatus": "usable",
"associations": {
"embedded5346670761": {
"uri": "56ba77bde4b0568f54a1ce68",
"alt_text": "alternative",
"copyrightholder": "Edouard",
"copyrightnotice": "Edited with Gimp",
"usageterms": "indefinite-usage",
"type": "video",
"title": "Embed title",
"company": "Press Association",
"url": "https://videos.vidible.tv/prod/2016-02/09/56ba777ce4b0b6448ed478f5_640x360.mp4",
"thumbnail": "https://cdn-ssl.vidible.tv/2016-02/09/56ba777ce4b0b6448ed478f5_60x60.jpg",
"duration": 100,
"width": 400,
"height": 200,
}
},
}
seq, doc = self.formatter.format(article, {"name": "Test Subscriber"})[0]
expected = {
"guid": "tag:aap.com.au:20150613:12345",
"version": "1",
"pubstatus": "usable",
"body_html": "The story body",
"type": "text",
"headline": "This is a test headline",
"slugline": "slugline",
"priority": 5,
"source": "AAP",
"charcount": 14,
"wordcount": 3,
"readtime": 0,
"associations": {
"embedded5346670761": {
"guid": "56ba77bde4b0568f54a1ce68",
"type": "video",
"version": "1",
"priority": 5,
"body_text": "alternative",
"copyrightholder": "Edouard",
"copyrightnotice": "Edited with Gimp",
"usageterms": "indefinite-usage",
"headline": "Embed title",
"organisation": [{"name": "Press Association"}],
"renditions": {
"original": {
"href": "https://videos.vidible.tv/prod/2016-02/09/56ba777ce4b0b6448ed478f5_640x360.mp4",
"duration": 100,
"width": 400,
"height": 200,
},
"thumbnail": {
"href": "https://cdn-ssl.vidible.tv/2016-02/09/56ba777ce4b0b6448ed478f5_60x60.jpg"
},
},
}
},
}
self.assertEqual(json.loads(doc), expected)
def test_copyright_holder_notice(self):
self.app.data.insert(
"vocabularies",
[
{
"_id": "rightsinfo",
"items": [
{
"is_active": True,
"name": "default",
"copyrightHolder": "copyright holder",
"copyrightNotice": "copyright notice",
"usageTerms": "",
}
],
}
],
)
article = {"_id": "urn:bar", "_current_version": 1, "guid": "urn:bar", "type": "text"}
seq, doc = self.formatter.format(article, {"name": "Test Subscriber"})[0]
data = json.loads(doc)
self.assertEqual("copyright holder", data["copyrightholder"])
self.assertEqual("copyright notice", data["copyrightnotice"])
self.assertEqual("", data["usageterms"])
def test_body_html(self):
article = {
"_id": "urn:bar",
"_current_version": 1,
"guid": "urn:bar",
"type": "text",
"body_html": (250 * 6 - 40) * "word ",
}
seq, doc = self.formatter.format(article, {"name": "Test Subscriber"})[0]
data = json.loads(doc)
self.assertEqual(data["charcount"], 7300)
self.assertEqual(data["wordcount"], 1460)
self.assertEqual(data["readtime"], 6)
def test_body_text(self):
article = {
"_id": "urn:bar",
"_current_version": 1,
"guid": "urn:bar",
"type": "text",
"body_text": (250 * 7 - 40) * "word ",
}
data = self._format(article)
self.assertEqual(data["charcount"], 8550)
self.assertEqual(data["wordcount"], 1710)
self.assertEqual(data["readtime"], 7)
# check japanese
article["language"] = "ja"
article["body_text"] = 5000 * "x"
data = self._format(article)
self.assertEqual(data["readtime"], 8)
article["body_text"] = 5000 * " "
data = self._format(article)
self.assertEqual(data["readtime"], 0)
def _format(self, article):
seq, doc = self.formatter.format(article, {"name": "Test Subscriber"})[0]
return json.loads(doc)
def test_empty_abstract(self):
article = {"_id": "urn:bar", "_current_version": 1, "guid": "urn:bar", "type": "text", "abstract": ""}
seq, doc = self.formatter.format(article, {"name": "Test Subscriber"})[0]
data = json.loads(doc)
self.assertEqual(data["description_html"], "")
self.assertEqual(data["description_text"], "")
def test_authors(self):
self.app.data.insert(
"users",
[
{
"_id": "test_id",
"username": "author 1",
"display_name": "author 1",
"is_author": True,
"job_title": "writer_code",
"biography": "bio 1",
"facebook": "johnsmith",
"twitter": "@smith_john",
"instagram": "john_s",
"picture_url": "http://example.com",
},
{
"_id": "test_id_2",
"username": "author 2",
"display_name": "author 2",
"is_author": True,
"job_title": "reporter_code",
"biography": "bio 2",
},
],
)
self.app.data.insert(
"vocabularies",
[
{
"_id": "job_titles",
"display_name": "Job Titles",
"type": "manageable",
"unique_field": "qcode",
"items": [
{"is_active": True, "name": "Writer", "qcode": "writer_code"},
{"is_active": True, "name": "Reporter", "qcode": "reporter_code"},
],
"schema": {"name": {}, "qcode": {}},
}
],
)
article = {
"_id": "urn:bar",
"_current_version": 1,
"guid": "urn:bar",
"type": "text",
"authors": [
{
"_id": ["test_id", "writer"],
"role": "writer",
"name": "Writer",
"parent": "test_id",
"sub_label": "author 1",
},
{
"_id": ["test_id_2", "writer"],
"role": "photographer",
"name": "photographer",
"parent": "test_id_2",
"sub_label": "author 2",
},
],
}
seq, doc = self.formatter.format(article, {"name": "Test Subscriber"})[0]
data = json.loads(doc)
expected = [
{
"name": "author 1",
"role": "writer",
"jobtitle": {"qcode": "writer_code", "name": "Writer"},
"biography": "bio 1",
"facebook": "johnsmith",
"twitter": "@smith_john",
"instagram": "john_s",
"avatar_url": "http://example.com",
},
{
"name": "author 2",
"role": "photographer",
"jobtitle": {"qcode": "reporter_code", "name": "Reporter"},
"biography": "bio 2",
},
]
self.assertEqual(data["authors"], expected)
def test_author_missing_parent(self):
"""Test that older items with missing parent don't make the formatter crashing"""
article = {
"_id": "urn:bar",
"_current_version": 1,
"guid": "urn:bar",
"type": "text",
"authors": [
{"_id": ["test_id", "writer"], "role": "writer", "name": "Writer", "sub_label": "author 1"},
{
"_id": ["test_id_2", "writer"],
"role": "photographer",
"name": "photographer",
"sub_label": "author 2",
},
],
}
seq, doc = self.formatter.format(article, {"name": "Test Subscriber"})[0]
data = json.loads(doc)
expected = {
"guid": "urn:bar",
"version": "1",
"type": "text",
"priority": 5,
"authors": [
{"name": "Writer", "role": "writer", "biography": ""},
{"name": "photographer", "role": "photographer", "biography": ""},
],
}
self.assertEqual(data, expected)
def test_place(self):
self.app.data.insert(
"vocabularies",
[
{
"_id": "locators",
"display_name": "Locators",
"type": "unmanageable",
"unique_field": "qcode",
"items": [
{
"is_active": True,
"name": "JPN",
"qcode": "JPN",
"state": "",
"country": "Japan",
"world_region": "Asia",
"group": "Rest Of World",
},
{"is_active": True, "name": "SAM", "qcode": "SAM", "group": "Rest Of World"},
{
"is_active": True,
"name": "UK",
"qcode": "UK",
"state": "",
"country": "",
"world_region": "Europe",
"group": "Rest Of World",
},
],
}
],
)
article = {
"_id": "urn:bar",
"_current_version": 1,
"guid": "urn:bar",
"type": "text",
"place": [{"name": "JPN", "qcode": "JPN"}],
}
seq, doc = self.formatter.format(article, {"name": "Test Subscriber"})[0]
data = json.loads(doc)
self.assertEqual(data["place"], [{"code": "JPN", "name": "Japan"}])
article = {
"_id": "urn:bar",
"_current_version": 1,
"guid": "urn:bar",
"type": "text",
"place": [{"name": "SAM", "qcode": "SAM"}],
}
seq, doc = self.formatter.format(article, {"name": "Test Subscriber"})[0]
data = json.loads(doc)
self.assertEqual(data["place"], [{"code": "SAM", "name": "Rest Of World"}])
article = {
"_id": "urn:bar",
"_current_version": 1,
"guid": "urn:bar",
"type": "text",
"place": [{"name": "UK", "qcode": "UK"}],
}
seq, doc = self.formatter.format(article, {"name": "Test Subscriber"})[0]
data = json.loads(doc)
self.assertEqual(data["place"], [{"code": "UK", "name": "Europe"}])
def test_translations(self):
"""Check that fields are correctly translated"""
article = {
"_id": "5a68a134cc3a2d4bd6399177",
"type": "text",
"guid": "test",
"genre": [
{
"name": "Education",
"qcode": "genre_custom:Education",
"translations": {
"name": {"de": "Weiterbildung", "it": "Educazione finanziaria", "ja": "トレーニング用教材"}
},
"scheme": "genre_custom",
}
],
"language": "ja",
"headline": "test",
"body_html": "<p>test ter</p>",
"subject": [
{
"name": "Outcome orientated solutions",
"parent": "subject:01000000",
"qcode": "subject:01000002",
"translations": {
"name": {"de": "Ergebnisorientiert", "it": "Orientato ai risultati ", "ja": "アウトカム・オリエンティッド"}
},
"scheme": "subject_custom",
},
{
"name": "Austria",
"qcode": "country_custom:1001002",
"translations": {"name": {"de": "\u00d6sterreich", "it": "Austria", "ja": "オーストリア"}},
"scheme": "country_custom",
},
{
"name": "Asia ex Japan",
"qcode": "region_custom:Asia ex Japan",
"translations": {"name": {"de": "Asien exkl. Japan", "it": "Asia escl. Giappone", "ja": "日本除くアジア"}},
"scheme": "region_custom",
},
{"name": "no translations", "qcode": "test", "translations": None, "scheme": "test"},
],
}
seq, doc = self.formatter.format(article, {"name": "Test Subscriber"})[0]
ninjs = json.loads(doc)
expected_genre = [{"code": "genre_custom:Education", "name": "トレーニング用教材", "scheme": "genre_custom"}]
self.assertEqual(ninjs["genre"], expected_genre)
expected_subject = [
{"code": "subject:01000002", "name": "アウトカム・オリエンティッド", "scheme": "subject_custom"},
{"code": "country_custom:1001002", "name": "オーストリア", "scheme": "country_custom"},
{"code": "region_custom:Asia ex Japan", "name": "日本除くアジア", "scheme": "region_custom"},
{"code": "test", "name": "no translations", "scheme": "test"},
]
self.assertEqual(ninjs["subject"], expected_subject)
def test_place_geonames(self):
article = {
"_id": "urn:bar",
"_current_version": 1,
"guid": "urn:bar",
"type": "text",
"place": [
{
"name": "Kobeřice",
"code": "3073493",
"scheme": "geonames",
"state": "Moravskoslezský kraj",
"country": "Česko",
"state_code": "80",
"country_code": "CZ",
"location": {"lat": 49.98548, "lon": 18.05212},
}
],
}
seq, doc = self.formatter.format(article, {"name": "Test Subscriber"})[0]
ninjs = json.loads(doc)
self.assertEqual({"name": "Kobeřice", "code": "3073493", "scheme": "geonames"}, ninjs["place"][0])
def test_custom_media(self):
"""Test that custom media are put in "groups" field and not associations (SDESK-2955)"""
self.app.data.insert(
"content_types",
[
{
"_id": ObjectId("5ba11fec0d6f1301ac3cbd13"),
"label": "custom media field multi",
"editor": {
"slugline": {"order": 2, "sdWidth": "full"},
"headline": {"order": 3, "formatOptions": ["underline", "link", "bold"]},
"custom_media_field_multi_1": {"order": 1},
},
"schema": {
"headline": {"type": "string", "required": False, "maxlength": 64, "nullable": True},
"slugline": {"type": "string", "required": False, "maxlength": 24, "nullable": True},
"custom_media_field_multi_1": {
"type": "media",
"required": False,
"enabled": True,
"nullable": True,
},
},
}
],
)
article = {
"_id": "5ba1224e0d6f13056bd82d50",
"type": "text",
"version": 1,
"profile": "5ba11fec0d6f1301ac3cbd13",
"format": "HTML",
"template": "5ba11fec0d6f1301ac3cbd15",
"headline": "custom media field multi",
"slugline": "test custom media2",
"guid": "123",
"associations": {
"custom_media_field_multi_1--1": {
"renditions": {
"original": {
"href": "http://localhost:5000/api/upload-raw/123.jpg",
"media": "abc",
"mimetype": "image/jpeg",
"width": 550,
"height": 331,
}
},
"media": "abc",
"type": "picture",
"guid": "tag:localhost:5000:2018:3710ef88-9567-4dbb-a96b-cb53df15b66e",
},
"custom_media_field_multi_1--2": {
"renditions": {
"original": {
"href": "http://localhost:5000/api/upload-raw/456.jpg",
"media": "cde",
"mimetype": "image/jpeg",
"width": 550,
"height": 331,
}
},
"media": "cde",
"type": "picture",
"guid": "tag:localhost:5000:2018:3710ef88-9567-4dbb-a96b-cb53df15b66e",
},
},
}
expected = {
"associations": {
"custom_media_field_multi_1--1": {
"guid": "tag:localhost:5000:2018:3710ef88-9567-4dbb-a96b-cb53df15b66e",
"priority": 5,
"renditions": {
"original": {
"height": 331,
"href": "http://localhost:5000/api/upload-raw/123.jpg",
"media": "abc",
"mimetype": "image/jpeg",
"width": 550,
}
},
"type": "picture",
"version": "1",
},
"custom_media_field_multi_1--2": {
"guid": "tag:localhost:5000:2018:3710ef88-9567-4dbb-a96b-cb53df15b66e",
"priority": 5,
"renditions": {
"original": {
"height": 331,
"href": "http://localhost:5000/api/upload-raw/456.jpg",
"media": "cde",
"mimetype": "image/jpeg",
"width": 550,
}
},
"type": "picture",
"version": "1",
},
},
"extra_items": {
"custom_media_field_multi_1": {
"type": "media",
"items": [
{
"guid": "tag:localhost:5000:2018:3710ef88-9567-4dbb-a96b-cb53df15b66e",
"priority": 5,
"renditions": {
"original": {
"height": 331,
"href": "http://localhost:5000/api/upload-raw/123.jpg",
"media": "abc",
"mimetype": "image/jpeg",
"width": 550,
}
},
"type": "picture",
"version": "1",
},
{
"guid": "tag:localhost:5000:2018:3710ef88-9567-4dbb-a96b-cb53df15b66e",
"priority": 5,
"renditions": {
"original": {
"height": 331,
"href": "http://localhost:5000/api/upload-raw/456.jpg",
"media": "cde",
"mimetype": "image/jpeg",
"width": 550,
}
},
"type": "picture",
"version": "1",
},
]
}
},
"guid": "123",
"headline": "custom media field multi",
"priority": 5,
"profile": "custommediafieldmulti",
"slugline": "test custom media2",
"type": "text",
"version": "1",
}
seq, doc = self.formatter.format(article, {"name": "Test Subscriber"})[0]
ninjs = json.loads(doc)
self.assertEqual(ninjs, expected)
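# Inferred from the expected payload above (not a documented contract):
# associations keyed "<field>--<n>" for a custom field stay in
# "associations" and are additionally grouped under extra_items[<field>]
# with the field type and an ordered items list, while the profile id is
# replaced by a slug of the content type label ("custom media field multi"
# -> "custommediafieldmulti"). A rough sketch of the grouping step:
#
# field, _, _ = key.partition("--")
# extra_items.setdefault(field, {"type": field_type, "items": []})
# extra_items[field]["items"].append(formatted_association)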
def test_custom_related_items(self):
self.app.data.insert(
"content_types",
[
{
"_id": ObjectId("5ba11fec0d6f1301ac3cbd13"),
"label": "custom related content",
"editor": {
"slugline": {"order": 2, "sdWidth": "full"},
"headline": {"order": 3, "formatOptions": ["underline", "link", "bold"]},
"custom_related_content": {"order": 1},
},
"schema": {
"headline": {"type": "string", "required": False, "maxlength": 64, "nullable": True},
"slugline": {"type": "string", "required": False, "maxlength": 24, "nullable": True},
"custom_related_content": {
"type": "related_content",
"required": False,
"enabled": True,
"nullable": True,
},
},
}
],
)
article = {
"_id": "5ba1224e0d6f13056bd82d50",
"type": "text",
"version": 1,
"profile": "5ba11fec0d6f1301ac3cbd13",
"format": "HTML",
"template": "5ba11fec0d6f1301ac3cbd15",
"headline": "custom related content",
"slugline": "test custom related content",
"guid": "123",
"associations": {
"custom_related_content--1": {
"renditions": {
"original": {
"href": "http://localhost:5000/api/upload-raw/123.jpg",
"media": "abc",
"mimetype": "image/jpeg",
"width": 550,
"height": 331,
}
},
"media": "abc",
"type": "picture",
"guid": "tag:localhost:5000:2018:3710ef88-9567-4dbb-a96b-cb53df15b66e",
},
"custom_related_content--2": {
"renditions": {
"original": {
"href": "http://localhost:5000/api/upload-raw/456.jpg",
"media": "cde",
"mimetype": "image/jpeg",
"width": 550,
"height": 331,
}
},
"media": "cde",
"type": "picture",
"guid": "tag:localhost:5000:2018:3710ef88-9567-4dbb-a96b-cb53df15b66e",
},
},
}
expected = {
"associations": {
"custom_related_content--1": {
"guid": "tag:localhost:5000:2018:3710ef88-9567-4dbb-a96b-cb53df15b66e",
"priority": 5,
"renditions": {
"original": {
"height": 331,
"href": "http://localhost:5000/api/upload-raw/123.jpg",
"media": "abc",
"mimetype": "image/jpeg",
"width": 550,
}
},
"type": "picture",
"version": "1",
},
"custom_related_content--2": {
"guid": "tag:localhost:5000:2018:3710ef88-9567-4dbb-a96b-cb53df15b66e",
"priority": 5,
"renditions": {
"original": {
"height": 331,
"href": "http://localhost:5000/api/upload-raw/456.jpg",
"media": "cde",
"mimetype": "image/jpeg",
"width": 550,
}
},
"type": "picture",
"version": "1",
},
},
"extra_items": {
"custom_related_content": {
"type": "related_content",
"items": [
{
"guid": "tag:localhost:5000:2018:3710ef88-9567-4dbb-a96b-cb53df15b66e",
"priority": 5,
"renditions": {
"original": {
"height": 331,
"href": "http://localhost:5000/api/upload-raw/123.jpg",
"media": "abc",
"mimetype": "image/jpeg",
"width": 550,
}
},
"type": "picture",
"version": "1",
},
{
"guid": "tag:localhost:5000:2018:3710ef88-9567-4dbb-a96b-cb53df15b66e",
"priority": 5,
"renditions": {
"original": {
"height": 331,
"href": "http://localhost:5000/api/upload-raw/456.jpg",
"media": "cde",
"mimetype": "image/jpeg",
"width": 550,
}
},
"type": "picture",
"version": "1",
},
]
}
},
"guid": "123",
"headline": "custom related content",
"priority": 5,
"profile": "customrelatedcontent",
"slugline": "test custom related content",
"type": "text",
"version": "1",
}
seq, doc = self.formatter.format(article, {"name": "Test Subscriber"})[0]
ninjs = json.loads(doc)
self.assertEqual(ninjs, expected)
| agpl-3.0 |
zengenti/ansible | lib/ansible/utils/module_docs_fragments/dellos9.py | 42 | 3407 | #
# (c) 2015, Peter Sprygada <[email protected]>
#
# Copyright (c) 2016 Dell Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
class ModuleDocFragment(object):
# Standard files documentation fragment
DOCUMENTATION = """
options:
host:
description:
- Specifies the DNS host name or address for connecting to the remote
device over the specified transport. The value of host is used as
the destination address for the transport.
required: true
port:
description:
- Specifies the port to use when building the connection to the remote
device.
required: false
default: 22
username:
description:
- User to authenticate the SSH session to the remote device. If the
value is not specified in the task, the value of environment variable
C(ANSIBLE_NET_USERNAME) will be used instead.
required: false
password:
description:
- Password to authenticate the SSH session to the remote device. If the
value is not specified in the task, the value of environment variable
C(ANSIBLE_NET_PASSWORD) will be used instead.
required: false
default: null
ssh_keyfile:
description:
- Path to an ssh key used to authenticate the SSH session to the remote
device. If the value is not specified in the task, the value of
environment variable C(ANSIBLE_NET_SSH_KEYFILE) will be used instead.
required: false
authorize:
description:
- Instructs the module to enter privileged mode on the remote device
before sending any commands. If not specified, the device will
attempt to execute all commands in non-privileged mode. If the value
is not specified in the task, the value of environment variable
C(ANSIBLE_NET_AUTHORIZE) will be used instead.
required: false
default: no
choices: ['yes', 'no']
auth_pass:
description:
- Specifies the password to use if required to enter privileged mode
on the remote device. If I(authorize) is false, then this argument
does nothing. If the value is not specified in the task, the value of
environment variable C(ANSIBLE_NET_AUTH_PASS) will be used instead.
required: false
default: none
timeout:
description:
- Specifies the idle timeout (in seconds) for the connection. Useful if the
console freezes before continuing, for example when saving
configurations.
required: false
default: 10
provider:
description:
- Convenience method that allows all I(dellos9) arguments to be passed as
a dict object. All constraints (required, choices, etc.) must be
met either by individual arguments or values in this dict.
required: false
default: null
"""
| gpl-3.0 |
dag/genshi | genshi/filters/transform.py | 23 | 48218 | # -*- coding: utf-8 -*-
#
# Copyright (C) 2007-2009 Edgewall Software
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://genshi.edgewall.org/wiki/License.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://genshi.edgewall.org/log/.
"""A filter for functional-style transformations of markup streams.
The `Transformer` filter provides a variety of transformations that can be
applied to parts of streams that match given XPath expressions. These
transformations can be chained to achieve results that would be comparitively
tedious to achieve by writing stream filters by hand. The approach of chaining
node selection and transformation has been inspired by the `jQuery`_ Javascript
library.
.. _`jQuery`: http://jquery.com/
For example, the following transformation removes the ``<title>`` element from
the ``<head>`` of the input document:
>>> from genshi.builder import tag
>>> html = HTML('''<html>
... <head><title>Some Title</title></head>
... <body>
... Some <em>body</em> text.
... </body>
... </html>''',
... encoding='utf-8')
>>> print(html | Transformer('body/em').map(unicode.upper, TEXT)
... .unwrap().wrap(tag.u))
<html>
<head><title>Some Title</title></head>
<body>
Some <u>BODY</u> text.
</body>
</html>
The ``Transformer`` supports a large number of useful transformations out of the
box, but custom transformations can be added easily.
:since: version 0.5
"""
import re
import sys
from genshi.builder import Element
from genshi.core import Stream, Attrs, QName, TEXT, START, END, _ensure, Markup
from genshi.path import Path
__all__ = ['Transformer', 'StreamBuffer', 'InjectorTransformation', 'ENTER',
'EXIT', 'INSIDE', 'OUTSIDE', 'BREAK']
class TransformMark(str):
"""A mark on a transformation stream."""
__slots__ = []
_instances = {}
def __new__(cls, val):
return cls._instances.setdefault(val, str.__new__(cls, val))
ENTER = TransformMark('ENTER')
"""Stream augmentation mark indicating that a selected element is being
entered."""
INSIDE = TransformMark('INSIDE')
"""Stream augmentation mark indicating that processing is currently inside a
selected element."""
OUTSIDE = TransformMark('OUTSIDE')
"""Stream augmentation mark indicating that a match occurred outside a selected
element."""
ATTR = TransformMark('ATTR')
"""Stream augmentation mark indicating a selected element attribute."""
EXIT = TransformMark('EXIT')
"""Stream augmentation mark indicating that a selected element is being
exited."""
BREAK = TransformMark('BREAK')
"""Stream augmentation mark indicating a break between two otherwise contiguous
blocks of marked events.
This is used primarily by the cut() transform to provide later transforms with
an opportunity to operate on the cut buffer.
"""
class PushBackStream(object):
"""Allows a single event to be pushed back onto the stream and re-consumed.
"""
def __init__(self, stream):
self.stream = iter(stream)
self.peek = None
def push(self, event):
assert self.peek is None
self.peek = event
def __iter__(self):
while True:
if self.peek is not None:
peek = self.peek
self.peek = None
yield peek
else:
try:
event = self.stream.next()
yield event
except StopIteration:
if self.peek is None:
raise
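# Illustrative sketch of the push-back behaviour relied on by the Replace,
# Before and After transformations further down (ev1/ev2 are placeholder
# events, not part of the API):
#
# pbs = PushBackStream(iter([ev1, ev2]))
# events = iter(pbs)
# first = events.next()   # yields ev1
# pbs.push(first)         # ev1 will be yielded once more before ev2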
class Transformer(object):
"""Stream filter that can apply a variety of different transformations to
a stream.
This is achieved by selecting the events to be transformed using XPath,
then applying the transformations to the events matched by the path
expression. Each marked event is in the form (mark, (kind, data, pos)),
where mark can be any of `ENTER`, `INSIDE`, `EXIT`, `OUTSIDE`, or `None`.
The first three marks match `START` and `END` events, and any events
contained `INSIDE` any selected XML/HTML element. A non-element match
outside a `START`/`END` container (e.g. ``text()``) will yield an `OUTSIDE`
mark.
>>> html = HTML('<html><head><title>Some Title</title></head>'
... '<body>Some <em>body</em> text.</body></html>',
... encoding='utf-8')
Transformations act on selected stream events matching an XPath expression.
Here's an example of removing some markup (the title, in this case)
selected by an expression:
>>> print(html | Transformer('head/title').remove())
<html><head/><body>Some <em>body</em> text.</body></html>
Inserted content can be passed in the form of a string, or a markup event
stream, which includes streams generated programmatically via the
`builder` module:
>>> from genshi.builder import tag
>>> print(html | Transformer('body').prepend(tag.h1('Document Title')))
<html><head><title>Some Title</title></head><body><h1>Document
Title</h1>Some <em>body</em> text.</body></html>
Each XPath expression determines the set of tags that will be acted upon by
subsequent transformations. In this example we select the ``<title>`` text,
copy it into a buffer, then select the ``<body>`` element and paste the
copied text into the body as ``<h1>`` enclosed text:
>>> buffer = StreamBuffer()
>>> print(html | Transformer('head/title/text()').copy(buffer)
... .end().select('body').prepend(tag.h1(buffer)))
<html><head><title>Some Title</title></head><body><h1>Some Title</h1>Some
<em>body</em> text.</body></html>
Transformations can also be assigned and reused, although care must be
taken when using buffers, to ensure that buffers are cleared between
transforms:
>>> emphasis = Transformer('body//em').attr('class', 'emphasis')
>>> print(html | emphasis)
<html><head><title>Some Title</title></head><body>Some <em
class="emphasis">body</em> text.</body></html>
"""
__slots__ = ['transforms']
def __init__(self, path='.'):
"""Construct a new transformation filter.
:param path: an XPath expression (as string) or a `Path` instance
"""
self.transforms = [SelectTransformation(path)]
def __call__(self, stream, keep_marks=False):
"""Apply the transform filter to the marked stream.
:param stream: the marked event stream to filter
:param keep_marks: Do not strip transformer selection marks from the
stream. Useful for testing.
:return: the transformed stream
:rtype: `Stream`
"""
transforms = self._mark(stream)
for link in self.transforms:
transforms = link(transforms)
if not keep_marks:
transforms = self._unmark(transforms)
return Stream(transforms,
serializer=getattr(stream, 'serializer', None))
def apply(self, function):
"""Apply a transformation to the stream.
Transformations can be chained, similar to stream filters. Any callable
accepting a marked stream can be used as a transform.
As an example, here is a simple `TEXT` event upper-casing transform:
>>> def upper(stream):
... for mark, (kind, data, pos) in stream:
... if mark and kind is TEXT:
... yield mark, (kind, data.upper(), pos)
... else:
... yield mark, (kind, data, pos)
>>> short_stream = HTML('<body>Some <em>test</em> text</body>',
... encoding='utf-8')
>>> print(short_stream | Transformer('.//em/text()').apply(upper))
<body>Some <em>TEST</em> text</body>
"""
transformer = Transformer()
transformer.transforms = self.transforms[:]
if isinstance(function, Transformer):
transformer.transforms.extend(function.transforms)
else:
transformer.transforms.append(function)
return transformer
#{ Selection operations
def select(self, path):
"""Mark events matching the given XPath expression, within the current
selection.
>>> html = HTML('<body>Some <em>test</em> text</body>', encoding='utf-8')
>>> print(html | Transformer().select('.//em').trace())
(None, ('START', (QName('body'), Attrs()), (None, 1, 0)))
(None, ('TEXT', u'Some ', (None, 1, 6)))
('ENTER', ('START', (QName('em'), Attrs()), (None, 1, 11)))
('INSIDE', ('TEXT', u'test', (None, 1, 15)))
('EXIT', ('END', QName('em'), (None, 1, 19)))
(None, ('TEXT', u' text', (None, 1, 24)))
(None, ('END', QName('body'), (None, 1, 29)))
<body>Some <em>test</em> text</body>
:param path: an XPath expression (as string) or a `Path` instance
:return: the stream augmented by transformation marks
:rtype: `Transformer`
"""
return self.apply(SelectTransformation(path))
def invert(self):
"""Invert selection so that marked events become unmarked, and vice
versa.
Specifically, all marks are converted to null marks, and all null marks
are converted to OUTSIDE marks.
>>> html = HTML('<body>Some <em>test</em> text</body>', encoding='utf-8')
>>> print(html | Transformer('//em').invert().trace())
('OUTSIDE', ('START', (QName('body'), Attrs()), (None, 1, 0)))
('OUTSIDE', ('TEXT', u'Some ', (None, 1, 6)))
(None, ('START', (QName('em'), Attrs()), (None, 1, 11)))
(None, ('TEXT', u'test', (None, 1, 15)))
(None, ('END', QName('em'), (None, 1, 19)))
('OUTSIDE', ('TEXT', u' text', (None, 1, 24)))
('OUTSIDE', ('END', QName('body'), (None, 1, 29)))
<body>Some <em>test</em> text</body>
:rtype: `Transformer`
"""
return self.apply(InvertTransformation())
def end(self):
"""End current selection, allowing all events to be selected.
Example:
>>> html = HTML('<body>Some <em>test</em> text</body>', encoding='utf-8')
>>> print(html | Transformer('//em').end().trace())
('OUTSIDE', ('START', (QName('body'), Attrs()), (None, 1, 0)))
('OUTSIDE', ('TEXT', u'Some ', (None, 1, 6)))
('OUTSIDE', ('START', (QName('em'), Attrs()), (None, 1, 11)))
('OUTSIDE', ('TEXT', u'test', (None, 1, 15)))
('OUTSIDE', ('END', QName('em'), (None, 1, 19)))
('OUTSIDE', ('TEXT', u' text', (None, 1, 24)))
('OUTSIDE', ('END', QName('body'), (None, 1, 29)))
<body>Some <em>test</em> text</body>
:return: the stream augmented by transformation marks
:rtype: `Transformer`
"""
return self.apply(EndTransformation())
#{ Deletion operations
def empty(self):
"""Empty selected elements of all content.
Example:
>>> html = HTML('<html><head><title>Some Title</title></head>'
... '<body>Some <em>body</em> text.</body></html>',
... encoding='utf-8')
>>> print(html | Transformer('.//em').empty())
<html><head><title>Some Title</title></head><body>Some <em/>
text.</body></html>
:rtype: `Transformer`
"""
return self.apply(EmptyTransformation())
def remove(self):
"""Remove selection from the stream.
Example:
>>> html = HTML('<html><head><title>Some Title</title></head>'
... '<body>Some <em>body</em> text.</body></html>',
... encoding='utf-8')
>>> print(html | Transformer('.//em').remove())
<html><head><title>Some Title</title></head><body>Some
text.</body></html>
:rtype: `Transformer`
"""
return self.apply(RemoveTransformation())
#{ Direct element operations
def unwrap(self):
"""Remove outermost enclosing elements from selection.
Example:
>>> html = HTML('<html><head><title>Some Title</title></head>'
... '<body>Some <em>body</em> text.</body></html>',
... encoding='utf-8')
>>> print(html | Transformer('.//em').unwrap())
<html><head><title>Some Title</title></head><body>Some body
text.</body></html>
:rtype: `Transformer`
"""
return self.apply(UnwrapTransformation())
def wrap(self, element):
"""Wrap selection in an element.
>>> html = HTML('<html><head><title>Some Title</title></head>'
... '<body>Some <em>body</em> text.</body></html>',
... encoding='utf-8')
>>> print(html | Transformer('.//em').wrap('strong'))
<html><head><title>Some Title</title></head><body>Some
<strong><em>body</em></strong> text.</body></html>
:param element: either a tag name (as string) or an `Element` object
:rtype: `Transformer`
"""
return self.apply(WrapTransformation(element))
#{ Content insertion operations
def replace(self, content):
"""Replace selection with content.
>>> html = HTML('<html><head><title>Some Title</title></head>'
... '<body>Some <em>body</em> text.</body></html>',
... encoding='utf-8')
>>> print(html | Transformer('.//title/text()').replace('New Title'))
<html><head><title>New Title</title></head><body>Some <em>body</em>
text.</body></html>
:param content: Either a callable, an iterable of events, or a string
to insert.
:rtype: `Transformer`
"""
return self.apply(ReplaceTransformation(content))
def before(self, content):
"""Insert content before selection.
In this example we insert the word 'emphasised' before the <em> opening
tag:
>>> html = HTML('<html><head><title>Some Title</title></head>'
... '<body>Some <em>body</em> text.</body></html>',
... encoding='utf-8')
>>> print(html | Transformer('.//em').before('emphasised '))
<html><head><title>Some Title</title></head><body>Some emphasised
<em>body</em> text.</body></html>
:param content: Either a callable, an iterable of events, or a string
to insert.
:rtype: `Transformer`
"""
return self.apply(BeforeTransformation(content))
def after(self, content):
"""Insert content after selection.
Here, we insert some text after the </em> closing tag:
>>> html = HTML('<html><head><title>Some Title</title></head>'
... '<body>Some <em>body</em> text.</body></html>',
... encoding='utf-8')
>>> print(html | Transformer('.//em').after(' rock'))
<html><head><title>Some Title</title></head><body>Some <em>body</em>
rock text.</body></html>
:param content: Either a callable, an iterable of events, or a string
to insert.
:rtype: `Transformer`
"""
return self.apply(AfterTransformation(content))
def prepend(self, content):
"""Insert content after the ENTER event of the selection.
Inserting some new text at the start of the <body>:
>>> html = HTML('<html><head><title>Some Title</title></head>'
... '<body>Some <em>body</em> text.</body></html>',
... encoding='utf-8')
>>> print(html | Transformer('.//body').prepend('Some new body text. '))
<html><head><title>Some Title</title></head><body>Some new body text.
Some <em>body</em> text.</body></html>
:param content: Either a callable, an iterable of events, or a string
to insert.
:rtype: `Transformer`
"""
return self.apply(PrependTransformation(content))
def append(self, content):
"""Insert content before the END event of the selection.
>>> html = HTML('<html><head><title>Some Title</title></head>'
... '<body>Some <em>body</em> text.</body></html>',
... encoding='utf-8')
>>> print(html | Transformer('.//body').append(' Some new body text.'))
<html><head><title>Some Title</title></head><body>Some <em>body</em>
text. Some new body text.</body></html>
:param content: Either a callable, an iterable of events, or a string
to insert.
:rtype: `Transformer`
"""
return self.apply(AppendTransformation(content))
#{ Attribute manipulation
def attr(self, name, value):
"""Add, replace or delete an attribute on selected elements.
If `value` evaluates to `None` the attribute will be deleted from the
element:
>>> html = HTML('<html><head><title>Some Title</title></head>'
... '<body>Some <em class="before">body</em> <em>text</em>.</body>'
... '</html>', encoding='utf-8')
>>> print(html | Transformer('body/em').attr('class', None))
<html><head><title>Some Title</title></head><body>Some <em>body</em>
<em>text</em>.</body></html>
Otherwise the attribute will be set to `value`:
>>> print(html | Transformer('body/em').attr('class', 'emphasis'))
<html><head><title>Some Title</title></head><body>Some <em
class="emphasis">body</em> <em class="emphasis">text</em>.</body></html>
If `value` is a callable it will be called with the attribute name and
the `START` event for the matching element. Its return value will then
be used to set the attribute:
>>> def print_attr(name, event):
... attrs = event[1][1]
... print(attrs)
... return attrs.get(name)
>>> print(html | Transformer('body/em').attr('class', print_attr))
Attrs([(QName('class'), u'before')])
Attrs()
<html><head><title>Some Title</title></head><body>Some <em
class="before">body</em> <em>text</em>.</body></html>
:param name: the name of the attribute
:param value: the value that should be set for the attribute.
:rtype: `Transformer`
"""
return self.apply(AttrTransformation(name, value))
#{ Buffer operations
def copy(self, buffer, accumulate=False):
"""Copy selection into buffer.
The buffer is replaced by each *contiguous* selection before being passed
to the next transformation. If accumulate=True, further selections will
be appended to the buffer rather than replacing it.
>>> from genshi.builder import tag
>>> buffer = StreamBuffer()
>>> html = HTML('<html><head><title>Some Title</title></head>'
... '<body>Some <em>body</em> text.</body></html>',
... encoding='utf-8')
>>> print(html | Transformer('head/title/text()').copy(buffer)
... .end().select('body').prepend(tag.h1(buffer)))
<html><head><title>Some Title</title></head><body><h1>Some
Title</h1>Some <em>body</em> text.</body></html>
This example illustrates that only a single contiguous selection will
be buffered:
>>> print(html | Transformer('head/title/text()').copy(buffer)
... .end().select('body/em').copy(buffer).end().select('body')
... .prepend(tag.h1(buffer)))
<html><head><title>Some Title</title></head><body><h1>Some
Title</h1>Some <em>body</em> text.</body></html>
>>> print(buffer)
<em>body</em>
Element attributes can also be copied for later use:
>>> html = HTML('<html><head><title>Some Title</title></head>'
... '<body><em>Some</em> <em class="before">body</em>'
... '<em>text</em>.</body></html>',
... encoding='utf-8')
>>> buffer = StreamBuffer()
>>> def apply_attr(name, entry):
... return list(buffer)[0][1][1].get('class')
>>> print(html | Transformer('body/em[@class]/@class').copy(buffer)
... .end().buffer().select('body/em[not(@class)]')
... .attr('class', apply_attr))
<html><head><title>Some Title</title></head><body><em
class="before">Some</em> <em class="before">body</em><em
class="before">text</em>.</body></html>
:param buffer: the `StreamBuffer` in which the selection should be
stored
:rtype: `Transformer`
:note: Copy (and cut) copy each individual selected object into the
buffer before passing to the next transform. For example, the
XPath ``*|text()`` will select all elements and text, each
instance of which will be copied to the buffer individually
before passing to the next transform. This has implications for
how ``StreamBuffer`` objects can be used, so some
experimentation may be required.
"""
return self.apply(CopyTransformation(buffer, accumulate))
def cut(self, buffer, accumulate=False):
"""Copy selection into buffer and remove the selection from the stream.
>>> from genshi.builder import tag
>>> buffer = StreamBuffer()
>>> html = HTML('<html><head><title>Some Title</title></head>'
... '<body>Some <em>body</em> text.</body></html>',
... encoding='utf-8')
>>> print(html | Transformer('.//em/text()').cut(buffer)
... .end().select('.//em').after(tag.h1(buffer)))
<html><head><title>Some Title</title></head><body>Some
<em/><h1>body</h1> text.</body></html>
Specifying accumulate=True appends all selected intervals onto the
buffer. Combining this with the .buffer() operation allows us to operate
on all copied events rather than per-segment. See the documentation on
buffer() for more information.
:param buffer: the `StreamBuffer` in which the selection should be
stored
:rtype: `Transformer`
:note: this transformation will buffer the entire input stream
"""
return self.apply(CutTransformation(buffer, accumulate))
def buffer(self):
"""Buffer the entire stream (can consume a considerable amount of
memory).
Useful in conjunction with copy(accumulate=True) and
cut(accumulate=True) to ensure that all marked events in the entire
stream are copied to the buffer before further transformations are
applied.
For example, to move all <note> elements inside a <notes> tag at the
top of the document:
>>> doc = HTML('<doc><notes></notes><body>Some <note>one</note> '
... 'text <note>two</note>.</body></doc>',
... encoding='utf-8')
>>> buffer = StreamBuffer()
>>> print(doc | Transformer('body/note').cut(buffer, accumulate=True)
... .end().buffer().select('notes').prepend(buffer))
<doc><notes><note>one</note><note>two</note></notes><body>Some text
.</body></doc>
"""
return self.apply(list)
#{ Miscellaneous operations
def filter(self, filter):
"""Apply a normal stream filter to the selection. The filter is called
once for each contiguous block of marked events.
>>> from genshi.filters.html import HTMLSanitizer
>>> html = HTML('<html><body>Some text<script>alert(document.cookie)'
... '</script> and some more text</body></html>',
... encoding='utf-8')
>>> print(html | Transformer('body/*').filter(HTMLSanitizer()))
<html><body>Some text and some more text</body></html>
:param filter: The stream filter to apply.
:rtype: `Transformer`
"""
return self.apply(FilterTransformation(filter))
def map(self, function, kind):
"""Applies a function to the ``data`` element of events of ``kind`` in
the selection.
>>> html = HTML('<html><head><title>Some Title</title></head>'
... '<body>Some <em>body</em> text.</body></html>',
... encoding='utf-8')
>>> print(html | Transformer('head/title').map(unicode.upper, TEXT))
<html><head><title>SOME TITLE</title></head><body>Some <em>body</em>
text.</body></html>
:param function: the function to apply
:param kind: the kind of event the function should be applied to
:rtype: `Transformer`
"""
return self.apply(MapTransformation(function, kind))
def substitute(self, pattern, replace, count=1):
"""Replace text matching a regular expression.
Refer to the documentation for ``re.sub()`` for details.
>>> html = HTML('<html><body>Some text, some more text and '
... '<b>some bold text</b>\\n'
... '<i>some italicised text</i></body></html>',
... encoding='utf-8')
>>> print(html | Transformer('body/b').substitute('(?i)some', 'SOME'))
<html><body>Some text, some more text and <b>SOME bold text</b>
<i>some italicised text</i></body></html>
>>> tags = tag.html(tag.body('Some text, some more text and\\n',
... Markup('<b>some bold text</b>')))
>>> print(tags.generate() | Transformer('body').substitute(
... '(?i)some', 'SOME'))
<html><body>SOME text, some more text and
<b>SOME bold text</b></body></html>
:param pattern: A regular expression object or string.
:param replace: Replacement pattern.
:param count: Number of replacements to make in each text fragment.
:rtype: `Transformer`
"""
return self.apply(SubstituteTransformation(pattern, replace, count))
def rename(self, name):
"""Rename matching elements.
>>> html = HTML('<html><body>Some text, some more text and '
... '<b>some bold text</b></body></html>',
... encoding='utf-8')
>>> print(html | Transformer('body/b').rename('strong'))
<html><body>Some text, some more text and <strong>some bold text</strong></body></html>
"""
return self.apply(RenameTransformation(name))
def trace(self, prefix='', fileobj=None):
"""Print events as they pass through the transform.
>>> html = HTML('<body>Some <em>test</em> text</body>', encoding='utf-8')
>>> print(html | Transformer('em').trace())
(None, ('START', (QName('body'), Attrs()), (None, 1, 0)))
(None, ('TEXT', u'Some ', (None, 1, 6)))
('ENTER', ('START', (QName('em'), Attrs()), (None, 1, 11)))
('INSIDE', ('TEXT', u'test', (None, 1, 15)))
('EXIT', ('END', QName('em'), (None, 1, 19)))
(None, ('TEXT', u' text', (None, 1, 24)))
(None, ('END', QName('body'), (None, 1, 29)))
<body>Some <em>test</em> text</body>
:param prefix: a string to prefix each event with in the output
:param fileobj: the writable file-like object to write to; defaults to
the standard output stream
:rtype: `Transformer`
"""
return self.apply(TraceTransformation(prefix, fileobj=fileobj))
# Internal methods
def _mark(self, stream):
for event in stream:
yield OUTSIDE, event
def _unmark(self, stream):
for mark, event in stream:
kind = event[0]
if not (kind is None or kind is ATTR or kind is BREAK):
yield event
class SelectTransformation(object):
"""Select and mark events that match an XPath expression."""
def __init__(self, path):
"""Create selection.
:param path: an XPath expression (as string) or a `Path` object
"""
if not isinstance(path, Path):
path = Path(path)
self.path = path
def __call__(self, stream):
"""Apply the transform filter to the marked stream.
:param stream: the marked event stream to filter
"""
namespaces = {}
variables = {}
test = self.path.test()
stream = iter(stream)
next = stream.next
for mark, event in stream:
if mark is None:
yield mark, event
continue
result = test(event, namespaces, variables)
# XXX This is effectively genshi.core._ensure() for transform
# streams.
if result is True:
if event[0] is START:
yield ENTER, event
depth = 1
while depth > 0:
mark, subevent = next()
if subevent[0] is START:
depth += 1
elif subevent[0] is END:
depth -= 1
if depth == 0:
yield EXIT, subevent
else:
yield INSIDE, subevent
test(subevent, namespaces, variables, updateonly=True)
else:
yield OUTSIDE, event
elif isinstance(result, Attrs):
# XXX Selected *attributes* are given a "kind" of None to
# indicate they are not really part of the stream.
yield ATTR, (ATTR, (QName(event[1][0] + '@*'), result), event[2])
yield None, event
elif isinstance(result, tuple):
yield OUTSIDE, result
elif result:
# XXX Assume everything else is "text"?
yield None, (TEXT, unicode(result), (None, -1, -1))
else:
yield None, event
class InvertTransformation(object):
"""Invert selection so that marked events become unmarked, and vice versa.
Specifically, all input marks are converted to null marks, and all input
null marks are converted to OUTSIDE marks.
"""
def __call__(self, stream):
"""Apply the transform filter to the marked stream.
:param stream: the marked event stream to filter
"""
for mark, event in stream:
if mark:
yield None, event
else:
yield OUTSIDE, event
class EndTransformation(object):
"""End the current selection."""
def __call__(self, stream):
"""Apply the transform filter to the marked stream.
:param stream: the marked event stream to filter
"""
for mark, event in stream:
yield OUTSIDE, event
class EmptyTransformation(object):
"""Empty selected elements of all content."""
def __call__(self, stream):
"""Apply the transform filter to the marked stream.
:param stream: the marked event stream to filter
"""
for mark, event in stream:
yield mark, event
if mark is ENTER:
for mark, event in stream:
if mark is EXIT:
yield mark, event
break
class RemoveTransformation(object):
"""Remove selection from the stream."""
def __call__(self, stream):
"""Apply the transform filter to the marked stream.
:param stream: the marked event stream to filter
"""
for mark, event in stream:
if mark is None:
yield mark, event
class UnwrapTransformation(object):
"""Remove outtermost enclosing elements from selection."""
def __call__(self, stream):
"""Apply the transform filter to the marked stream.
:param stream: the marked event stream to filter
"""
for mark, event in stream:
if mark not in (ENTER, EXIT):
yield mark, event
class WrapTransformation(object):
"""Wrap selection in an element."""
def __init__(self, element):
if isinstance(element, Element):
self.element = element
else:
self.element = Element(element)
def __call__(self, stream):
for mark, event in stream:
if mark:
element = list(self.element.generate())
for prefix in element[:-1]:
yield None, prefix
yield mark, event
start = mark
stopped = False
for mark, event in stream:
if start is ENTER and mark is EXIT:
yield mark, event
stopped = True
break
if not mark:
break
yield mark, event
else:
stopped = True
yield None, element[-1]
if not stopped:
yield mark, event
else:
yield mark, event
class TraceTransformation(object):
"""Print events as they pass through the transform."""
def __init__(self, prefix='', fileobj=None):
"""Trace constructor.
:param prefix: text to prefix each traced line with.
:param fileobj: the writable file-like object to write to
"""
self.prefix = prefix
self.fileobj = fileobj or sys.stdout
def __call__(self, stream):
"""Apply the transform filter to the marked stream.
:param stream: the marked event stream to filter
"""
for event in stream:
self.fileobj.write('%s%s\n' % (self.prefix, event))
yield event
class FilterTransformation(object):
"""Apply a normal stream filter to the selection. The filter is called once
for each selection."""
def __init__(self, filter):
"""Create the transform.
:param filter: The stream filter to apply.
"""
self.filter = filter
def __call__(self, stream):
"""Apply the transform filter to the marked stream.
:param stream: The marked event stream to filter
"""
def flush(queue):
if queue:
for event in self.filter(queue):
yield OUTSIDE, event
del queue[:]
queue = []
for mark, event in stream:
if mark is ENTER:
queue.append(event)
for mark, event in stream:
queue.append(event)
if mark is EXIT:
break
for queue_event in flush(queue):
yield queue_event
elif mark is OUTSIDE:
stopped = False
queue.append(event)
for mark, event in stream:
if mark is not OUTSIDE:
break
queue.append(event)
else:
stopped = True
for queue_event in flush(queue):
yield queue_event
if not stopped:
yield mark, event
else:
yield mark, event
for queue_event in flush(queue):
yield queue_event
class MapTransformation(object):
"""Apply a function to the `data` element of events of ``kind`` in the
selection.
"""
def __init__(self, function, kind):
"""Create the transform.
:param function: the function to apply; the function must take one
argument, the `data` element of each selected event
:param kind: the stream event ``kind`` to apply the `function` to
"""
self.function = function
self.kind = kind
def __call__(self, stream):
"""Apply the transform filter to the marked stream.
:param stream: The marked event stream to filter
"""
for mark, (kind, data, pos) in stream:
if mark and self.kind in (None, kind):
yield mark, (kind, self.function(data), pos)
else:
yield mark, (kind, data, pos)
class SubstituteTransformation(object):
"""Replace text matching a regular expression.
Refer to the documentation for ``re.sub()`` for details.
"""
def __init__(self, pattern, replace, count=0):
"""Create the transform.
:param pattern: A regular expression object, or string.
:param replace: Replacement pattern.
:param count: Number of replacements to make in each text fragment.
"""
if isinstance(pattern, basestring):
self.pattern = re.compile(pattern)
else:
self.pattern = pattern
self.count = count
self.replace = replace
def __call__(self, stream):
"""Apply the transform filter to the marked stream.
:param stream: The marked event stream to filter
"""
for mark, (kind, data, pos) in stream:
if mark is not None and kind is TEXT:
new_data = self.pattern.sub(self.replace, data, self.count)
if isinstance(data, Markup):
data = Markup(new_data)
else:
data = new_data
yield mark, (kind, data, pos)
class RenameTransformation(object):
"""Rename matching elements."""
def __init__(self, name):
"""Create the transform.
:param name: New element name.
"""
self.name = QName(name)
def __call__(self, stream):
"""Apply the transform filter to the marked stream.
:param stream: The marked event stream to filter
"""
for mark, (kind, data, pos) in stream:
if mark is ENTER:
data = self.name, data[1]
elif mark is EXIT:
data = self.name
yield mark, (kind, data, pos)
class InjectorTransformation(object):
"""Abstract base class for transformations that inject content into a
stream.
>>> class Top(InjectorTransformation):
... def __call__(self, stream):
... for event in self._inject():
... yield event
... for event in stream:
... yield event
>>> html = HTML('<body>Some <em>test</em> text</body>', encoding='utf-8')
>>> print(html | Transformer('.//em').apply(Top('Prefix ')))
Prefix <body>Some <em>test</em> text</body>
"""
def __init__(self, content):
"""Create a new injector.
:param content: An iterable of Genshi stream events, or a string to be
injected.
"""
self.content = content
def _inject(self):
content = self.content
if hasattr(content, '__call__'):
content = content()
for event in _ensure(content):
yield None, event
class ReplaceTransformation(InjectorTransformation):
"""Replace selection with content."""
def __call__(self, stream):
"""Apply the transform filter to the marked stream.
:param stream: The marked event stream to filter
"""
stream = PushBackStream(stream)
for mark, event in stream:
if mark is not None:
start = mark
for subevent in self._inject():
yield subevent
for mark, event in stream:
if start is ENTER:
if mark is EXIT:
break
elif mark != start:
stream.push((mark, event))
break
else:
yield mark, event
class BeforeTransformation(InjectorTransformation):
"""Insert content before selection."""
def __call__(self, stream):
"""Apply the transform filter to the marked stream.
:param stream: The marked event stream to filter
"""
stream = PushBackStream(stream)
for mark, event in stream:
if mark is not None:
start = mark
for subevent in self._inject():
yield subevent
yield mark, event
for mark, event in stream:
if mark != start and start is not ENTER:
stream.push((mark, event))
break
yield mark, event
if start is ENTER and mark is EXIT:
break
else:
yield mark, event
class AfterTransformation(InjectorTransformation):
"""Insert content after selection."""
def __call__(self, stream):
"""Apply the transform filter to the marked stream.
:param stream: The marked event stream to filter
"""
stream = PushBackStream(stream)
for mark, event in stream:
yield mark, event
if mark:
start = mark
for mark, event in stream:
if start is not ENTER and mark != start:
stream.push((mark, event))
break
yield mark, event
if start is ENTER and mark is EXIT:
break
for subevent in self._inject():
yield subevent
class PrependTransformation(InjectorTransformation):
"""Prepend content to the inside of selected elements."""
def __call__(self, stream):
"""Apply the transform filter to the marked stream.
:param stream: The marked event stream to filter
"""
for mark, event in stream:
yield mark, event
if mark is ENTER:
for subevent in self._inject():
yield subevent
class AppendTransformation(InjectorTransformation):
"""Append content after the content of selected elements."""
def __call__(self, stream):
"""Apply the transform filter to the marked stream.
:param stream: The marked event stream to filter
"""
for mark, event in stream:
yield mark, event
if mark is ENTER:
for mark, event in stream:
if mark is EXIT:
break
yield mark, event
for subevent in self._inject():
yield subevent
yield mark, event
class AttrTransformation(object):
"""Set an attribute on selected elements."""
def __init__(self, name, value):
"""Construct transform.
:param name: name of the attribute that should be set
:param value: the value to set
"""
self.name = name
self.value = value
def __call__(self, stream):
"""Apply the transform filter to the marked stream.
:param stream: The marked event stream to filter
"""
callable_value = hasattr(self.value, '__call__')
for mark, (kind, data, pos) in stream:
if mark is ENTER:
if callable_value:
value = self.value(self.name, (kind, data, pos))
else:
value = self.value
if value is None:
attrs = data[1] - [QName(self.name)]
else:
attrs = data[1] | [(QName(self.name), value)]
data = (data[0], attrs)
yield mark, (kind, data, pos)
class StreamBuffer(Stream):
"""Stream event buffer used for cut and copy transformations."""
def __init__(self):
"""Create the buffer."""
Stream.__init__(self, [])
def append(self, event):
"""Add an event to the buffer.
:param event: the markup event to add
"""
self.events.append(event)
def reset(self):
"""Empty the buffer of events."""
del self.events[:]
class CopyTransformation(object):
"""Copy selected events into a buffer for later insertion."""
def __init__(self, buffer, accumulate=False):
"""Create the copy transformation.
:param buffer: the `StreamBuffer` in which the selection should be
stored
"""
if not accumulate:
buffer.reset()
self.buffer = buffer
self.accumulate = accumulate
def __call__(self, stream):
"""Apply the transformation to the marked stream.
:param stream: the marked event stream to filter
"""
stream = PushBackStream(stream)
for mark, event in stream:
if mark:
if not self.accumulate:
self.buffer.reset()
events = [(mark, event)]
self.buffer.append(event)
start = mark
for mark, event in stream:
if start is not ENTER and mark != start:
stream.push((mark, event))
break
events.append((mark, event))
self.buffer.append(event)
if start is ENTER and mark is EXIT:
break
for i in events:
yield i
else:
yield mark, event
class CutTransformation(object):
"""Cut selected events into a buffer for later insertion and remove the
selection.
"""
def __init__(self, buffer, accumulate=False):
"""Create the cut transformation.
:param buffer: the `StreamBuffer` in which the selection should be
stored
"""
self.buffer = buffer
self.accumulate = accumulate
def __call__(self, stream):
"""Apply the transform filter to the marked stream.
:param stream: the marked event stream to filter
"""
attributes = []
stream = PushBackStream(stream)
broken = False
if not self.accumulate:
self.buffer.reset()
for mark, event in stream:
if mark:
# Send a BREAK event if there was no other event sent between
if not self.accumulate:
if not broken and self.buffer:
yield BREAK, (BREAK, None, None)
self.buffer.reset()
self.buffer.append(event)
start = mark
if mark is ATTR:
attributes.extend([name for name, _ in event[1][1]])
for mark, event in stream:
if start is mark is ATTR:
attributes.extend([name for name, _ in event[1][1]])
# Handle non-element contiguous selection
if start is not ENTER and mark != start:
# Operating on the attributes of a START event
if start is ATTR:
kind, data, pos = event
assert kind is START
data = (data[0], data[1] - attributes)
attributes = None
stream.push((mark, (kind, data, pos)))
else:
stream.push((mark, event))
break
self.buffer.append(event)
if start is ENTER and mark is EXIT:
break
broken = False
else:
broken = True
yield mark, event
if not broken and self.buffer:
yield BREAK, (BREAK, None, None)
| bsd-3-clause |
silentfuzzle/calibre | src/chardet/sjisprober.py | 190 | 3549 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is mozilla.org code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from mbcharsetprober import MultiByteCharSetProber
from codingstatemachine import CodingStateMachine
from chardistribution import SJISDistributionAnalysis
from jpcntx import SJISContextAnalysis
from mbcssm import SJISSMModel
import constants, sys
from constants import eStart, eError, eItsMe
class SJISProber(MultiByteCharSetProber):
def __init__(self):
MultiByteCharSetProber.__init__(self)
self._mCodingSM = CodingStateMachine(SJISSMModel)
self._mDistributionAnalyzer = SJISDistributionAnalysis()
self._mContextAnalyzer = SJISContextAnalysis()
self.reset()
def reset(self):
MultiByteCharSetProber.reset(self)
self._mContextAnalyzer.reset()
def get_charset_name(self):
return "SHIFT_JIS"
def feed(self, aBuf):
aLen = len(aBuf)
for i in range(0, aLen):
codingState = self._mCodingSM.next_state(aBuf[i])
if codingState == eError:
if constants._debug:
sys.stderr.write(self.get_charset_name() + ' prober hit error at byte ' + str(i) + '\n')
self._mState = constants.eNotMe
break
elif codingState == eItsMe:
self._mState = constants.eFoundIt
break
elif codingState == eStart:
charLen = self._mCodingSM.get_current_charlen()
if i == 0:
self._mLastChar[1] = aBuf[0]
self._mContextAnalyzer.feed(self._mLastChar[2 - charLen :], charLen)
self._mDistributionAnalyzer.feed(self._mLastChar, charLen)
else:
self._mContextAnalyzer.feed(aBuf[i + 1 - charLen : i + 3 - charLen], charLen)
self._mDistributionAnalyzer.feed(aBuf[i - 1 : i + 1], charLen)
self._mLastChar[0] = aBuf[aLen - 1]
if self.get_state() == constants.eDetecting:
if self._mContextAnalyzer.got_enough_data() and \
(self.get_confidence() > constants.SHORTCUT_THRESHOLD):
self._mState = constants.eFoundIt
return self.get_state()
def get_confidence(self):
contxtCf = self._mContextAnalyzer.get_confidence()
distribCf = self._mDistributionAnalyzer.get_confidence()
return max(contxtCf, distribCf)
| gpl-3.0 |
sunzuolei/youtube-dl | youtube_dl/extractor/blinkx.py | 199 | 3217 | from __future__ import unicode_literals
import json
from .common import InfoExtractor
from ..utils import (
remove_start,
int_or_none,
)
class BlinkxIE(InfoExtractor):
_VALID_URL = r'(?:https?://(?:www\.)blinkx\.com/#?ce/|blinkx:)(?P<id>[^?]+)'
IE_NAME = 'blinkx'
_TEST = {
'url': 'http://www.blinkx.com/ce/Da0Gw3xc5ucpNduzLuDDlv4WC9PuI4fDi1-t6Y3LyfdY2SZS5Urbvn-UPJvrvbo8LTKTc67Wu2rPKSQDJyZeeORCR8bYkhs8lI7eqddznH2ofh5WEEdjYXnoRtj7ByQwt7atMErmXIeYKPsSDuMAAqJDlQZ-3Ff4HJVeH_s3Gh8oQ',
'md5': '337cf7a344663ec79bf93a526a2e06c7',
'info_dict': {
'id': 'Da0Gw3xc',
'ext': 'mp4',
'title': 'No Daily Show for John Oliver; HBO Show Renewed - IGN News',
'uploader': 'IGN News',
'upload_date': '20150217',
'timestamp': 1424215740,
'description': 'HBO has renewed Last Week Tonight With John Oliver for two more seasons.',
'duration': 47.743333,
},
}
def _real_extract(self, url):
video_id = self._match_id(url)
display_id = video_id[:8]
api_url = ('https://apib4.blinkx.com/api.php?action=play_video&' +
'video=%s' % video_id)
data_json = self._download_webpage(api_url, display_id)
data = json.loads(data_json)['api']['results'][0]
duration = None
thumbnails = []
formats = []
for m in data['media']:
if m['type'] == 'jpg':
thumbnails.append({
'url': m['link'],
'width': int(m['w']),
'height': int(m['h']),
})
elif m['type'] == 'original':
duration = float(m['d'])
elif m['type'] == 'youtube':
yt_id = m['link']
self.to_screen('Youtube video detected: %s' % yt_id)
return self.url_result(yt_id, 'Youtube', video_id=yt_id)
elif m['type'] in ('flv', 'mp4'):
vcodec = remove_start(m['vcodec'], 'ff')
acodec = remove_start(m['acodec'], 'ff')
vbr = int_or_none(m.get('vbr') or m.get('vbitrate'), 1000)
abr = int_or_none(m.get('abr') or m.get('abitrate'), 1000)
tbr = vbr + abr if vbr and abr else None
format_id = '%s-%sk-%s' % (vcodec, tbr, m['w'])
formats.append({
'format_id': format_id,
'url': m['link'],
'vcodec': vcodec,
'acodec': acodec,
'abr': abr,
'vbr': vbr,
'tbr': tbr,
'width': int_or_none(m.get('w')),
'height': int_or_none(m.get('h')),
})
self._sort_formats(formats)
return {
'id': display_id,
'fullid': video_id,
'title': data['title'],
'formats': formats,
'uploader': data['channel_name'],
'timestamp': data['pubdate_epoch'],
'description': data.get('description'),
'thumbnails': thumbnails,
'duration': duration,
}
| unlicense |
AOSC-Dev/aosc-os-abbs | extra-libs/nss/autobuild/certdata2pem.py | 6 | 7075 | #!/usr/bin/python
# vim:set et sw=4:
#
# certdata2pem.py - splits certdata.txt into multiple files
#
# Copyright (C) 2009 Philipp Kern <[email protected]>
# Copyright (C) 2013 Kai Engert <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301,
# USA.
import base64
import os.path
import re
import sys
import textwrap
import urllib
objects = []
def printable_serial(obj):
return ".".join(map(lambda x:str(ord(x)), obj['CKA_SERIAL_NUMBER']))
# Dirty file parser.
in_data, in_multiline, in_obj = False, False, False
field, type, value, obj = None, None, None, dict()
for line in open('certdata.txt', 'r'):
# Ignore the file header.
if not in_data:
if line.startswith('BEGINDATA'):
in_data = True
continue
# Ignore comment lines.
if line.startswith('#'):
continue
# Empty lines are significant if we are inside an object.
if in_obj and len(line.strip()) == 0:
objects.append(obj)
obj = dict()
in_obj = False
continue
if len(line.strip()) == 0:
continue
if in_multiline:
if not line.startswith('END'):
if type == 'MULTILINE_OCTAL':
line = line.strip()
for i in re.finditer(r'\\([0-3][0-7][0-7])', line):
value += chr(int(i.group(1), 8))
else:
value += line
continue
obj[field] = value
in_multiline = False
continue
if line.startswith('CKA_CLASS'):
in_obj = True
line_parts = line.strip().split(' ', 2)
if len(line_parts) > 2:
field, type = line_parts[0:2]
value = ' '.join(line_parts[2:])
elif len(line_parts) == 2:
field, type = line_parts
value = None
else:
raise NotImplementedError, 'line_parts < 2 not supported.\n' + line
if type == 'MULTILINE_OCTAL':
in_multiline = True
value = ""
continue
obj[field] = value
if len(obj.items()) > 0:
objects.append(obj)
# Build up trust database.
trustmap = dict()
for obj in objects:
if obj['CKA_CLASS'] != 'CKO_NSS_TRUST':
continue
key = obj['CKA_LABEL'] + printable_serial(obj)
trustmap[key] = obj
print " added trust", key
# Build up cert database.
certmap = dict()
for obj in objects:
if obj['CKA_CLASS'] != 'CKO_CERTIFICATE':
continue
key = obj['CKA_LABEL'] + printable_serial(obj)
certmap[key] = obj
print " added cert", key
def obj_to_filename(obj):
label = obj['CKA_LABEL'][1:-1]
label = label.replace('/', '_')\
.replace(' ', '_')\
.replace('(', '=')\
.replace(')', '=')\
.replace(',', '_')
label = re.sub(r'\\x[0-9a-fA-F]{2}', lambda m:chr(int(m.group(0)[2:], 16)), label)
serial = printable_serial(obj)
return label + ":" + serial
trust_types = {
"CKA_TRUST_DIGITAL_SIGNATURE": "digital-signature",
"CKA_TRUST_NON_REPUDIATION": "non-repudiation",
"CKA_TRUST_KEY_ENCIPHERMENT": "key-encipherment",
"CKA_TRUST_DATA_ENCIPHERMENT": "data-encipherment",
"CKA_TRUST_KEY_AGREEMENT": "key-agreement",
"CKA_TRUST_KEY_CERT_SIGN": "cert-sign",
"CKA_TRUST_CRL_SIGN": "crl-sign",
"CKA_TRUST_SERVER_AUTH": "server-auth",
"CKA_TRUST_CLIENT_AUTH": "client-auth",
"CKA_TRUST_CODE_SIGNING": "code-signing",
"CKA_TRUST_EMAIL_PROTECTION": "email-protection",
"CKA_TRUST_IPSEC_END_SYSTEM": "ipsec-end-system",
"CKA_TRUST_IPSEC_TUNNEL": "ipsec-tunnel",
"CKA_TRUST_IPSEC_USER": "ipsec-user",
"CKA_TRUST_TIME_STAMPING": "time-stamping",
"CKA_TRUST_STEP_UP_APPROVED": "step-up-approved",
}
openssl_trust = {
"CKA_TRUST_SERVER_AUTH": "serverAuth",
"CKA_TRUST_CLIENT_AUTH": "clientAuth",
"CKA_TRUST_CODE_SIGNING": "codeSigning",
"CKA_TRUST_EMAIL_PROTECTION": "emailProtection",
}
for tobj in objects:
if tobj['CKA_CLASS'] == 'CKO_NSS_TRUST':
key = tobj['CKA_LABEL'] + printable_serial(tobj)
print "producing trust for " + key
trustbits = []
distrustbits = []
openssl_trustflags = []
openssl_distrustflags = []
for t in trust_types.keys():
if tobj.has_key(t) and tobj[t] == 'CKT_NSS_TRUSTED_DELEGATOR':
trustbits.append(t)
if t in openssl_trust:
openssl_trustflags.append(openssl_trust[t])
if tobj.has_key(t) and tobj[t] == 'CKT_NSS_NOT_TRUSTED':
distrustbits.append(t)
if t in openssl_trust:
openssl_distrustflags.append(openssl_trust[t])
fname = obj_to_filename(tobj)
try:
obj = certmap[key]
except:
obj = None
if obj != None:
fname += ".crt"
else:
fname += ".p11-kit"
f = open(fname, 'w')
if obj != None:
f.write("# alias=%s\n"%tobj['CKA_LABEL'])
f.write("# trust=" + " ".join(trustbits) + "\n")
f.write("# distrust=" + " ".join(distrustbits) + "\n")
if openssl_trustflags:
f.write("# openssl-trust=" + " ".join(openssl_trustflags) + "\n")
if openssl_distrustflags:
f.write("# openssl-distrust=" + " ".join(openssl_distrustflags) + "\n")
f.write("-----BEGIN CERTIFICATE-----\n")
f.write("\n".join(textwrap.wrap(base64.b64encode(obj['CKA_VALUE']), 64)))
f.write("\n-----END CERTIFICATE-----\n")
else:
f.write("[p11-kit-object-v1]\n")
f.write("label: ");
f.write(tobj['CKA_LABEL']);
f.write("\n")
f.write("class: certificate\n")
f.write("certificate-type: x-509\n")
f.write("issuer: \"");
f.write(urllib.quote(tobj['CKA_ISSUER']));
f.write("\"\n")
f.write("serial-number: \"");
f.write(urllib.quote(tobj['CKA_SERIAL_NUMBER']));
f.write("\"\n")
if (tobj['CKA_TRUST_SERVER_AUTH'] == 'CKT_NSS_NOT_TRUSTED') or (tobj['CKA_TRUST_EMAIL_PROTECTION'] == 'CKT_NSS_NOT_TRUSTED') or (tobj['CKA_TRUST_CODE_SIGNING'] == 'CKT_NSS_NOT_TRUSTED'):
f.write("x-distrusted: true\n")
f.write("\n\n")
f.close()
print " -> written as '%s', trust = %s, openssl-trust = %s, distrust = %s, openssl-distrust = %s" % (fname, trustbits, openssl_trustflags, distrustbits, openssl_distrustflags)
| gpl-2.0 |
frohoff/Empire | lib/listeners/template.py | 2 | 9688 | import base64
import random
# Empire imports
from lib.common import helpers
from lib.common import agents
from lib.common import encryption
from lib.common import packets
from lib.common import messages
class Listener:
def __init__(self, mainMenu, params=[]):
self.info = {
'Name': 'Template',
'Author': ['@harmj0y'],
'Description': ("Listener template"),
# categories - client_server, peer_to_peer, broadcast, third_party
'Category' : ('client_server'),
'Comments': []
}
# any options needed by the stager, settable during runtime
self.options = {
# format:
# value_name : {description, required, default_value}
'Name' : {
'Description' : 'Listener name.',
'Required' : True,
'Value' : 'http_foreign'
},
'Host' : {
'Description' : 'Hostname/IP for staging.',
'Required' : True,
'Value' : "http://%s:%s" % (helpers.lhost(), 80)
},
'Port' : {
'Description' : 'Port for the listener.',
'Required' : True,
'Value' : 80
},
'Launcher' : {
'Description' : 'Launcher string.',
'Required' : True,
'Value' : 'powershell -noP -sta -w 1 -enc '
},
'StagingKey' : {
'Description' : 'Staging key for initial agent negotiation.',
'Required' : True,
'Value' : '2c103f2c4ed1e59c0b4e2e01821770fa'
},
'DefaultDelay' : {
'Description' : 'Agent delay/reach back interval (in seconds).',
'Required' : True,
'Value' : 5
},
'DefaultJitter' : {
'Description' : 'Jitter in agent reachback interval (0.0-1.0).',
'Required' : True,
'Value' : 0.0
},
'DefaultLostLimit' : {
'Description' : 'Number of missed checkins before exiting',
'Required' : True,
'Value' : 60
},
'DefaultProfile' : {
'Description' : 'Default communication profile for the agent.',
'Required' : True,
'Value' : "/admin/get.php,/news.php,/login/process.php|Mozilla/5.0 (Windows NT 6.1; WOW64; Trident/7.0; rv:11.0) like Gecko"
},
'KillDate' : {
'Description' : 'Date for the listener to exit (MM/dd/yyyy).',
'Required' : False,
'Value' : ''
},
'WorkingHours' : {
'Description' : 'Hours for the agent to operate (09:00-17:00).',
'Required' : False,
'Value' : ''
}
}
# required:
self.mainMenu = mainMenu
self.threads = {} # used to keep track of any threaded instances of this server
# optional/specific for this module
# set the default staging key to the controller db default
self.options['StagingKey']['Value'] = str(helpers.get_config('staging_key')[0])
def default_response(self):
"""
If there's a default response expected from the server that the client needs to ignore,
(i.e. a default HTTP page), put the generation here.
"""
print helpers.color("[!] default_response() not implemented for listeners/template")
return ''
def validate_options(self):
"""
Validate all options for this listener.
"""
for key in self.options:
if self.options[key]['Required'] and (str(self.options[key]['Value']).strip() == ''):
print helpers.color("[!] Option \"%s\" is required." % (key))
return False
return True
def generate_launcher(self, encode=True, obfuscate=False, obfuscationCommand="", userAgent='default', proxy='default', proxyCreds='default', stagerRetries='0', language=None, safeChecks='', listenerName=None):
"""
Generate a basic launcher for the specified listener.
"""
if not language:
print helpers.color('[!] listeners/template generate_launcher(): no language specified!')
return None
if listenerName and (listenerName in self.mainMenu.listeners.activeListeners):
# extract the set options for this instantiated listener
listenerOptions = self.mainMenu.listeners.activeListeners[listenerName]['options']
host = listenerOptions['Host']['Value']
stagingKey = listenerOptions['StagingKey']['Value']
profile = listenerOptions['DefaultProfile']['Value']
uris = [a.strip('/') for a in profile.split('|')[0].split(',')]
stage0 = random.choice(uris)
launchURI = "%s/%s" % (host, stage0)
if language.startswith('po'):
# PowerShell
return ''
if language.startswith('py'):
# Python
return ''
else:
print helpers.color("[!] listeners/template generate_launcher(): invalid language specification: only 'powershell' and 'python' are current supported for this module.")
else:
print helpers.color("[!] listeners/template generate_launcher(): invalid listener name specification!")
def generate_stager(self, listenerOptions, encode=False, encrypt=True, obfuscate=False, obfuscationCommand="", language=None):
"""
If you want to support staging for the listener module, generate_stager must be
implemented to return the stage1 key-negotiation stager code.
"""
print helpers.color("[!] generate_stager() not implemented for listeners/template")
return ''
def generate_agent(self, listenerOptions, language=None, obfuscate=False, obfuscationCommand=""):
"""
If you want to support staging for the listener module, generate_agent must be
implemented to return the actual staged agent code.
"""
print helpers.color("[!] generate_agent() not implemented for listeners/template")
return ''
def generate_comms(self, listenerOptions, language=None):
"""
Generate just the agent communication code block needed for communications with this listener.
This is so agents can easily be dynamically updated for the new listener.
This should be implemented for the module.
"""
if language:
if language.lower() == 'powershell':
updateServers = """
$Script:ControlServers = @("%s");
$Script:ServerIndex = 0;
""" % (listenerOptions['Host']['Value'])
getTask = """
function script:Get-Task {
}
"""
sendMessage = """
function script:Send-Message {
param($Packets)
if($Packets) {
}
}
"""
return updateServers + getTask + sendMessage + "\n'New agent comms registered!'"
elif language.lower() == 'python':
# send_message()
pass
else:
print helpers.color("[!] listeners/template generate_comms(): invalid language specification, only 'powershell' and 'python' are current supported for this module.")
else:
print helpers.color('[!] listeners/template generate_comms(): no language specified!')
def start(self, name=''):
"""
If a server component needs to be started, implement the kick off logic
here and the actual server code in another function to facilitate threading
(i.e. start_server() in the http listener).
"""
# listenerOptions = self.options
# if name and name != '':
# self.threads[name] = helpers.KThread(target=self.start_server, args=(listenerOptions,))
# self.threads[name].start()
# time.sleep(1)
# # returns True if the listener successfully started, false otherwise
# return self.threads[name].is_alive()
# else:
# name = listenerOptions['Name']['Value']
# self.threads[name] = helpers.KThread(target=self.start_server, args=(listenerOptions,))
# self.threads[name].start()
# time.sleep(1)
# # returns True if the listener successfully started, false otherwise
# return self.threads[name].is_alive()
return True
def shutdown(self, name=''):
"""
If a server component was started, implement the logic that kills the particular
named listener here.
"""
# if name and name != '':
# print helpers.color("[!] Killing listener '%s'" % (name))
# self.threads[name].kill()
# else:
# print helpers.color("[!] Killing listener '%s'" % (self.options['Name']['Value']))
# self.threads[self.options['Name']['Value']].kill()
pass
| bsd-3-clause |
YAOSP/kernel_huawei_angler | scripts/rt-tester/rt-tester.py | 11005 | 5307 | #!/usr/bin/python
#
# rt-mutex tester
#
# (C) 2006 Thomas Gleixner <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
#
import os
import sys
import getopt
import shutil
import string
# Globals
quiet = 0
test = 0
comments = 0
sysfsprefix = "/sys/devices/system/rttest/rttest"
statusfile = "/status"
commandfile = "/command"
# Command opcodes
cmd_opcodes = {
"schedother" : "1",
"schedfifo" : "2",
"lock" : "3",
"locknowait" : "4",
"lockint" : "5",
"lockintnowait" : "6",
"lockcont" : "7",
"unlock" : "8",
"signal" : "11",
"resetevent" : "98",
"reset" : "99",
}
test_opcodes = {
"prioeq" : ["P" , "eq" , None],
"priolt" : ["P" , "lt" , None],
"priogt" : ["P" , "gt" , None],
"nprioeq" : ["N" , "eq" , None],
"npriolt" : ["N" , "lt" , None],
"npriogt" : ["N" , "gt" , None],
"unlocked" : ["M" , "eq" , 0],
"trylock" : ["M" , "eq" , 1],
"blocked" : ["M" , "eq" , 2],
"blockedwake" : ["M" , "eq" , 3],
"locked" : ["M" , "eq" , 4],
"opcodeeq" : ["O" , "eq" , None],
"opcodelt" : ["O" , "lt" , None],
"opcodegt" : ["O" , "gt" , None],
"eventeq" : ["E" , "eq" , None],
"eventlt" : ["E" , "lt" , None],
"eventgt" : ["E" , "gt" , None],
}
# Print usage information
def usage():
print "rt-tester.py <-c -h -q -t> <testfile>"
print " -c display comments after first command"
print " -h help"
print " -q quiet mode"
print " -t test mode (syntax check)"
print " testfile: read test specification from testfile"
print " otherwise from stdin"
return
# Print progress when not in quiet mode
def progress(str):
if not quiet:
print str
# Analyse a status value
def analyse(val, top, arg):
intval = int(val)
if top[0] == "M":
intval = intval / (10 ** int(arg))
intval = intval % 10
argval = top[2]
elif top[0] == "O":
argval = int(cmd_opcodes.get(arg, arg))
else:
argval = int(arg)
# progress("%d %s %d" %(intval, top[1], argval))
if top[1] == "eq" and intval == argval:
return 1
if top[1] == "lt" and intval < argval:
return 1
if top[1] == "gt" and intval > argval:
return 1
return 0
# Parse the commandline
try:
(options, arguments) = getopt.getopt(sys.argv[1:],'chqt')
except getopt.GetoptError, ex:
usage()
sys.exit(1)
# Parse commandline options
for option, value in options:
if option == "-c":
comments = 1
elif option == "-q":
quiet = 1
elif option == "-t":
test = 1
elif option == '-h':
usage()
sys.exit(0)
# Select the input source
if arguments:
try:
fd = open(arguments[0])
except Exception,ex:
sys.stderr.write("File not found %s\n" %(arguments[0]))
sys.exit(1)
else:
fd = sys.stdin
linenr = 0
# Read the test patterns
while 1:
linenr = linenr + 1
line = fd.readline()
if not len(line):
break
line = line.strip()
parts = line.split(":")
if not parts or len(parts) < 1:
continue
if len(parts[0]) == 0:
continue
if parts[0].startswith("#"):
if comments > 1:
progress(line)
continue
if comments == 1:
comments = 2
progress(line)
cmd = parts[0].strip().lower()
opc = parts[1].strip().lower()
tid = parts[2].strip()
dat = parts[3].strip()
try:
# Test or wait for a status value
if cmd == "t" or cmd == "w":
testop = test_opcodes[opc]
fname = "%s%s%s" %(sysfsprefix, tid, statusfile)
if test:
print fname
continue
while 1:
query = 1
fsta = open(fname, 'r')
status = fsta.readline().strip()
fsta.close()
stat = status.split(",")
for s in stat:
s = s.strip()
if s.startswith(testop[0]):
# Separate status value
val = s[2:].strip()
query = analyse(val, testop, dat)
break
if query or cmd == "t":
break
progress(" " + status)
if not query:
sys.stderr.write("Test failed in line %d\n" %(linenr))
sys.exit(1)
# Issue a command to the tester
elif cmd == "c":
cmdnr = cmd_opcodes[opc]
# Build command string and sys filename
cmdstr = "%s:%s" %(cmdnr, dat)
fname = "%s%s%s" %(sysfsprefix, tid, commandfile)
if test:
print fname
continue
fcmd = open(fname, 'w')
fcmd.write(cmdstr)
fcmd.close()
except Exception,ex:
sys.stderr.write(str(ex))
sys.stderr.write("\nSyntax error in line %d\n" %(linenr))
if not test:
fd.close()
sys.exit(1)
# Normal exit pass
print "Pass"
sys.exit(0)
| gpl-2.0 |
lamastex/scalable-data-science | db/xtraResources/edXBigDataSeries2015/CS100-1x/Module 4: Text Analysis and Entity Resolution Lab Solutions.py | 2 | 73278 | # Databricks notebook source exported at Mon, 14 Mar 2016 03:33:29 UTC
# MAGIC %md
# MAGIC **SOURCE:** This is from the Community Edition of databricks and has been added to this databricks shard at [/#workspace/scalable-data-science/xtraResources/edXBigDataSeries2015/CS100-1x](/#workspace/scalable-data-science/xtraResources/edXBigDataSeries2015/CS100-1x) as extra resources for the project-focussed course [Scalable Data Science](http://www.math.canterbury.ac.nz/~r.sainudiin/courses/ScalableDataScience/) that is prepared by [Raazesh Sainudiin](https://nz.linkedin.com/in/raazesh-sainudiin-45955845) and [Sivanand Sivaram](https://www.linkedin.com/in/sivanand), and *supported by* [](https://databricks.com/)
# MAGIC and
# MAGIC [](https://www.awseducate.com/microsite/CommunitiesEngageHome).
# COMMAND ----------
# MAGIC %md
# MAGIC <a rel="license" href="http://creativecommons.org/licenses/by-nc-nd/4.0/"><img alt="Creative Commons License" style="border-width:0" src="https://i.creativecommons.org/l/by-nc-nd/4.0/88x31.png" /></a><br />This work is licensed under a <a rel="license" href="http://creativecommons.org/licenses/by-nc-nd/4.0/">Creative Commons Attribution-NonCommercial-NoDerivatives 4.0 International License</a>.
# COMMAND ----------
# MAGIC %md
# MAGIC # **Text Analysis and Entity Resolution**
# MAGIC Entity resolution is a common, yet difficult problem in data cleaning and integration. This lab will demonstrate how we can use Apache Spark to apply powerful and scalable text analysis techniques and perform entity resolution across two datasets of commercial products.
# COMMAND ----------
# MAGIC %md
# MAGIC Entity Resolution, or "[Record linkage][wiki]" is the term used by statisticians, epidemiologists, and historians, among others, to describe the process of joining records from one data source with another that describe the same entity. Our terms with the same meaning include, "entity disambiguation/linking", duplicate detection", "deduplication", "record matching", "(reference) reconciliation", "object identification", "data/information integration", and "conflation".
# MAGIC
# MAGIC Entity Resolution (ER) refers to the task of finding records in a dataset that refer to the same entity across different data sources (e.g., data files, books, websites, databases). ER is necessary when joining datasets based on entities that may or may not share a common identifier (e.g., database key, URI, National identification number), as may be the case due to differences in record shape, storage location, and/or curator style or preference. A dataset that has undergone ER may be referred to as being cross-linked.
# MAGIC [wiki]: https://en.wikipedia.org/wiki/Record_linkage
# COMMAND ----------
labVersion = 'cs100.1x-lab3-1.0.4'
# COMMAND ----------
# MAGIC %md
# MAGIC #### Code
# MAGIC This assignment can be completed using basic Python, pySpark Transformations and actions, and the plotting library matplotlib. Other libraries are not allowed.
# MAGIC
# MAGIC #### Files
# MAGIC Data files for this assignment are from the [metric-learning](https://code.google.com/p/metric-learning/) project and can be found at:
# MAGIC `cs100/lab3`
# MAGIC
# MAGIC The directory contains the following files:
# MAGIC * **Google.csv**, the Google Products dataset
# MAGIC * **Amazon.csv**, the Amazon dataset
# MAGIC * **Google_small.csv**, 200 records sampled from the Google data
# MAGIC * **Amazon_small.csv**, 200 records sampled from the Amazon data
# MAGIC * **Amazon_Google_perfectMapping.csv**, the "gold standard" mapping
# MAGIC * **stopwords.txt**, a list of common English words
# MAGIC
# MAGIC Besides the complete data files, there are "sample" data files for each dataset - we will use these for **Part 1**. In addition, there is a "gold standard" file that contains all of the true mappings between entities in the two datasets. Every row in the gold standard file has a pair of record IDs (one Google, one Amazon) that belong to two records that describe the same thing in the real world. We will use the gold standard to evaluate our algorithms.
# COMMAND ----------
# MAGIC %md
# MAGIC #### **Part 0: Preliminaries**
# MAGIC We read in each of the files and create an RDD consisting of lines.
# MAGIC For each of the data files ("Google.csv", "Amazon.csv", and the samples), we want to parse the IDs out of each record. The IDs are the first column of the file (they are URLs for Google, and alphanumeric strings for Amazon). Omitting the headers, we load these data files into pair RDDs where the *mapping ID* is the key, and the value is a string consisting of the name/title, description, and manufacturer from the record.
# MAGIC
# MAGIC The file format of an Amazon line is:
# MAGIC
# MAGIC `"id","title","description","manufacturer","price"`
# MAGIC
# MAGIC The file format of a Google line is:
# MAGIC
# MAGIC `"id","name","description","manufacturer","price"`
# COMMAND ----------
import re
DATAFILE_PATTERN = '^(.+),"(.+)",(.*),(.*),(.*)'
def removeQuotes(s):
""" Remove quotation marks from an input string
Args:
s (str): input string that might have the quote "" characters
Returns:
str: a string without the quote characters
"""
return ''.join(i for i in s if i!='"')
def parseDatafileLine(datafileLine):
""" Parse a line of the data file using the specified regular expression pattern
Args:
datafileLine (str): input string that is a line from the data file
Returns:
str: a string parsed using the given regular expression and without the quote characters
"""
match = re.search(DATAFILE_PATTERN, datafileLine)
if match is None:
print 'Invalid datafile line: %s' % datafileLine
return (datafileLine, -1)
elif match.group(1) == '"id"':
print 'Header datafile line: %s' % datafileLine
return (datafileLine, 0)
else:
product = '%s %s %s' % (match.group(2), match.group(3), match.group(4))
return ((removeQuotes(match.group(1)), product), 1)
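# COMMAND ----------
# MAGIC %md
# MAGIC *Illustrative aside (not part of the graded exercises):* the next cell applies `parseDatafileLine`, defined above, to a single hand-written line in the Google file format described in Part 0. The URL, name, description, manufacturer, and price are made-up values, not a record from the actual datasets.
# COMMAND ----------
# Parse one hypothetical Google-format line: "id","name","description","manufacturer",price
sampleLine = '"http://www.example.com/p/123","sample product name","a short description","acme corp",19.99'
print parseDatafileLine(sampleLine)  # a ((id, combined text), 1) tuple for a successfully parsed line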
# COMMAND ----------
display(dbutils.fs.ls('/databricks-datasets/cs100/lab3/data-001/'))
# COMMAND ----------
# MAGIC %md **WARNING:** If *test_helper*, required in the cell below, is not installed, follow the instructions [here](https://databricks-staging-cloudfront.staging.cloud.databricks.com/public/c65da9a2fa40e45a2028cddebe45b54c/8637560089690848/4187311313936645/6977722904629137/05f3c2ecc3.html).
# COMMAND ----------
import sys
import os
from test_helper import Test
baseDir = os.path.join('databricks-datasets')
inputPath = os.path.join('cs100', 'lab3', 'data-001')
GOOGLE_PATH = 'Google.csv'
GOOGLE_SMALL_PATH = 'Google_small.csv'
AMAZON_PATH = 'Amazon.csv'
AMAZON_SMALL_PATH = 'Amazon_small.csv'
GOLD_STANDARD_PATH = 'Amazon_Google_perfectMapping.csv'
STOPWORDS_PATH = 'stopwords.txt'
def parseData(filename):
""" Parse a data file
Args:
filename (str): input file name of the data file
Returns:
RDD: a RDD of parsed lines
"""
return (sc
.textFile(filename, 4, 0)
.map(parseDatafileLine))
def loadData(path):
""" Load a data file
Args:
path (str): input file name of the data file
Returns:
RDD: a RDD of parsed valid lines
"""
filename = os.path.join(baseDir, inputPath, path)
raw = parseData(filename).cache()
failed = (raw
.filter(lambda s: s[1] == -1)
.map(lambda s: s[0]))
for line in failed.take(10):
print '%s - Invalid datafile line: %s' % (path, line)
valid = (raw
.filter(lambda s: s[1] == 1)
.map(lambda s: s[0])
.cache())
print '%s - Read %d lines, successfully parsed %d lines, failed to parse %d lines' % (path,
raw.count(),
valid.count(),
failed.count())
assert failed.count() == 0
assert raw.count() == (valid.count() + 1)
return valid
googleSmall = loadData(GOOGLE_SMALL_PATH)
google = loadData(GOOGLE_PATH)
amazonSmall = loadData(AMAZON_SMALL_PATH)
amazon = loadData(AMAZON_PATH)
# COMMAND ----------
# MAGIC %md
# MAGIC Let's examine the lines that were just loaded in the two subset (small) files - one from Google and one from Amazon
# COMMAND ----------
for line in googleSmall.take(3):
print 'google: %s: %s\n' % (line[0], line[1])
for line in amazonSmall.take(3):
print 'amazon: %s: %s\n' % (line[0], line[1])
# COMMAND ----------
# MAGIC %md
# MAGIC #### **Part 1: ER as Text Similarity - Bags of Words**
# MAGIC
# MAGIC A simple approach to entity resolution is to treat all records as strings and compute their similarity with a string distance function. In this part, we will build some components for performing bag-of-words text-analysis, and then use them to compute record similarity.
# MAGIC [Bag-of-words][bag-of-words] is a conceptually simple yet powerful approach to text analysis.
# MAGIC
# MAGIC The idea is to treat strings, a.k.a. **documents**, as *unordered collections* of words, or **tokens**, i.e., as bags of words.
# MAGIC > **Note on terminology**: a "token" is the result of parsing the document down to the elements we consider "atomic" for the task at hand. Tokens can be things like words, numbers, acronyms, or other exotica like word-roots or fixed-length character strings.
# MAGIC > Bag of words techniques all apply to any sort of token, so when we say "bag-of-words" we really mean "bag-of-tokens," strictly speaking.
# MAGIC Tokens become the atomic unit of text comparison. If we want to compare two documents, we count how many tokens they share in common. If we want to search for documents with keyword queries (this is what Google does), then we turn the keywords into tokens and find documents that contain them. The power of this approach is that it makes string comparisons insensitive to small differences that probably do not affect meaning much, for example, punctuation and word order.
# MAGIC [bag-of-words]: https://en.wikipedia.org/wiki/Bag-of-words_model
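# COMMAND ----------
# MAGIC %md
# MAGIC *Illustrative aside (not part of the graded exercises):* the next cell sketches the bag-of-words idea on two made-up product titles, counting the lower-cased words they share. The titles are hypothetical strings, not records from the datasets.
# COMMAND ----------
# A minimal bag-of-words comparison of two hypothetical product titles
bagA = set('Adobe Photoshop CS3 for Windows'.lower().split())
bagB = set('photoshop cs3 full version for windows'.lower().split())
print 'Shared tokens: %s' % (bagA & bagB)
print 'Number of shared tokens: %d' % len(bagA & bagB)  # 4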
# COMMAND ----------
# MAGIC %md
# MAGIC #### **1(a) Tokenize a String**
# MAGIC Implement the function `simpleTokenize(string)` that takes a string and returns a list of non-empty tokens in the string. `simpleTokenize` should split strings using the provided regular expression. Since we want to make token-matching case insensitive, make sure all tokens are turned lower-case. Give an interpretation, in natural language, of what the regular expression, `split_regex`, matches.
# MAGIC If you need help with Regular Expressions, try the site [regex101](https://regex101.com/) where you can interactively explore the results of applying different regular expressions to strings. *Note that \W includes the "_" character*. You should use [re.split()](https://docs.python.org/2/library/re.html#re.split) to perform the string split. Also, make sure you remove any empty tokens.
# COMMAND ----------
# ANSWER
quickbrownfox = 'A quick brown fox jumps over the lazy dog.'
split_regex = r'\W+'
def simpleTokenize(string):
""" A simple implementation of input string tokenization
Args:
string (str): input string
Returns:
list: a list of tokens
"""
return [t for t in re.split(split_regex, string.lower()) if len(t)]
print simpleTokenize(quickbrownfox) # Should give ['a', 'quick', 'brown', ... ]
# COMMAND ----------
# TEST Tokenize a String (1a)
Test.assertEquals(simpleTokenize(quickbrownfox),
['a','quick','brown','fox','jumps','over','the','lazy','dog'],
'simpleTokenize should handle sample text')
Test.assertEquals(simpleTokenize(' '), [], 'simpleTokenize should handle empty string')
Test.assertEquals(simpleTokenize('!!!!123A/456_B/789C.123A'), ['123a','456_b','789c','123a'],
'simpleTokenize should handle punctuations and lowercase result')
Test.assertEquals(simpleTokenize('fox fox'), ['fox', 'fox'],
'simpleTokenize should not remove duplicates')
# COMMAND ----------
# PRIVATE_TEST Tokenize a String (1a)
Test.assertEquals(simpleTokenize(quickbrownfox),
['a','quick','brown','fox','jumps','over','the','lazy','dog'],
'simpleTokenize should handle sample text')
Test.assertEquals(simpleTokenize(' '), [], 'simpleTokenize should handle empty string')
Test.assertEquals(simpleTokenize('!!!!123A/456_B/789C.123A'), ['123a','456_b','789c','123a'],
'simpleTokenize should handle punctuations and lowercase result')
Test.assertEquals(simpleTokenize('fox fox'), ['fox', 'fox'],
'simpleTokenize should not remove duplicates')
# COMMAND ----------
# MAGIC %md
# MAGIC #### **(1b) Removing stopwords**
# MAGIC *[Stopwords][stopwords]* are common (English) words that do not contribute much to the content or meaning of a document (e.g., "the", "a", "is", "to", etc.). Stopwords add noise to bag-of-words comparisons, so they are usually excluded.
# MAGIC Using the included file "stopwords.txt", implement `tokenize`, an improved tokenizer that does not emit stopwords.
# MAGIC [stopwords]: https://en.wikipedia.org/wiki/Stop_words
# COMMAND ----------
# ANSWER
stopfile = os.path.join(baseDir, inputPath, STOPWORDS_PATH)
stopwords = set(sc.textFile(stopfile).collect())
print 'These are the stopwords: %s' % stopwords
def tokenize(string):
""" An implementation of input string tokenization that excludes stopwords
Args:
string (str): input string
Returns:
list: a list of tokens without stopwords
"""
return [t for t in simpleTokenize(string) if t not in stopwords]
print tokenize(quickbrownfox) # Should give ['quick', 'brown', ... ]
# COMMAND ----------
# TEST Removing stopwords (1b)
Test.assertEquals(tokenize("Why a the?"), [], 'tokenize should remove all stopwords')
Test.assertEquals(tokenize("Being at the_?"), ['the_'], 'tokenize should handle non-stopwords')
Test.assertEquals(tokenize(quickbrownfox), ['quick','brown','fox','jumps','lazy','dog'],
'tokenize should handle sample text')
# COMMAND ----------
# PRIVATE_TEST Removing stopwords (1b)
Test.assertEquals(tokenize("Why a the?"), [], 'tokenize should remove all stopwords')
Test.assertEquals(tokenize("Being at the_?"), ['the_'], 'tokenize should handle non-stopwords')
Test.assertEquals(tokenize(quickbrownfox), ['quick','brown','fox','jumps','lazy','dog'],
'tokenize should handle sample text')
# COMMAND ----------
# MAGIC %md
# MAGIC #### **(1c) Tokenizing the small datasets**
# MAGIC Now let's tokenize the two *small* datasets. For each ID in a dataset, `tokenize` the values, and then count the total number of tokens.
# MAGIC How many tokens, total, are there in the two datasets?
# COMMAND ----------
# ANSWER
amazonRecToToken = amazonSmall.map(lambda s: (s[0], tokenize(s[1])))
googleRecToToken = googleSmall.map(lambda s: (s[0], tokenize(s[1])))
def countTokens(vendorRDD):
""" Count and return the number of tokens
Args:
vendorRDD (RDD of (recordId, tokenizedValue)): Pair tuple of record ID to tokenized output
Returns:
count: count of all tokens
"""
recordCount = vendorRDD.map(lambda s: len(s[1]))
recordSum = recordCount.reduce(lambda a, b : a + b)
return recordSum
totalTokens = countTokens(amazonRecToToken) + countTokens(googleRecToToken)
print 'There are %s tokens in the combined datasets' % totalTokens
# COMMAND ----------
# TEST Tokenizing the small datasets (1c)
Test.assertEquals(totalTokens, 22520, 'incorrect totalTokens')
# COMMAND ----------
# PRIVATE_TEST Tokenizing the small datasets (1c)
Test.assertEquals(totalTokens, 22520, 'incorrect totalTokens')
Test.assertEquals(countTokens(amazonRecToToken), 16707, 'incorrect token count for Amazon records')
# COMMAND ----------
# MAGIC %md
# MAGIC #### **(1d) Amazon record with the most tokens**
# MAGIC Which Amazon record has the biggest number of tokens?
# MAGIC In other words, you want to sort the records and get the one with the largest count of tokens.
# COMMAND ----------
# ANSWER
def findBiggestRecord(vendorRDD):
""" Find and return the record with the largest number of tokens
Args:
vendorRDD (RDD of (recordId, tokens)): input Pair Tuple of record ID and tokens
Returns:
list: a list of 1 Pair Tuple of record ID and tokens
"""
return(vendorRDD.takeOrdered(1, lambda s: -1 * len(s[1])))
biggestRecordAmazon = findBiggestRecord(amazonRecToToken)
print 'The Amazon record with ID "%s" has the most tokens (%s)' % (biggestRecordAmazon[0][0],
len(biggestRecordAmazon[0][1]))
# COMMAND ----------
# TEST Amazon record with the most tokens (1d)
Test.assertEquals(biggestRecordAmazon[0][0], 'b000o24l3q', 'incorrect biggestRecordAmazon')
Test.assertEquals(len(biggestRecordAmazon[0][1]), 1547, 'incorrect len for biggestRecordAmazon')
# COMMAND ----------
# PRIVATE_TEST Amazon record with the most tokens (1d)
Test.assertEquals(biggestRecordAmazon[0][0], 'b000o24l3q', 'incorrect biggestRecordAmazon')
Test.assertEquals(len(biggestRecordAmazon[0][1]), 1547, 'incorrect len for biggestRecordAmazon')
# COMMAND ----------
# MAGIC %md
# MAGIC #### **Part 2: ER as Text Similarity - Weighted Bag-of-Words using TF-IDF**
# MAGIC Bag-of-words comparisons are not very good when all tokens are treated the same: some tokens are more important than others. Weights give us a way to specify which tokens to favor. With weights, when we compare documents, instead of counting common tokens, we sum up the weights of common tokens. A good heuristic for assigning weights is called "Term-Frequency/Inverse-Document-Frequency," or [TF-IDF][tfidf] for short.
# MAGIC
# MAGIC **TF**
# MAGIC
# MAGIC TF rewards tokens that appear many times in the same document. It is computed as the frequency of a token in a document, that is, if document *d* contains 100 tokens and token *t* appears in *d* 5 times, then the TF weight of *t* in *d* is *5/100 = 1/20*. The intuition for TF is that if a word occurs often in a document, then it is more important to the meaning of the document.
# MAGIC
# MAGIC **IDF**
# MAGIC
# MAGIC IDF rewards tokens that are rare overall in a dataset. The intuition is that it is more significant if two documents share a rare word than a common one. IDF weight for a token, *t*, in a set of documents, *U*, is computed as follows:
# MAGIC * Let *N* be the total number of documents in *U*
# MAGIC * Find *n(t)*, the number of documents in *U* that contain *t*
# MAGIC * Then *IDF(t) = N/n(t)*.
# MAGIC
# MAGIC Note that *n(t)/N* is the frequency of *t* in *U*, and *N/n(t)* is the inverse frequency.
# MAGIC
# MAGIC > **Note on terminology**: Sometimes token weights depend on the document the token belongs to, that is, the same token may have a different weight when it's found in different documents. We call these weights *local* weights. TF is an example of a local weight, because it depends on the length of the source. On the other hand, some token weights only depend on the token, and are the same everywhere that token is found. We call these weights *global*, and IDF is one such weight.
# MAGIC
# MAGIC **TF-IDF**
# MAGIC
# MAGIC Finally, to bring it all together, the total TF-IDF weight for a token in a document is the product of its TF and IDF weights.
# MAGIC [tfidf]: https://en.wikipedia.org/wiki/Tf%E2%80%93idf
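# COMMAND ----------
# MAGIC %md
# MAGIC *Illustrative aside (not part of the graded exercises):* the next cell hand-computes TF, IDF, and TF-IDF weights for a tiny made-up corpus of three tokenized documents, following the definitions above. The toy corpus and variable names are assumptions for illustration only.
# COMMAND ----------
# A toy worked example of the TF, IDF, and TF-IDF definitions above (hypothetical corpus)
toyCorpus = [['quick', 'fox'], ['lazy', 'fox'], ['lazy', 'dog']]
N = float(len(toyCorpus))  # N = 3 documents
docFreq = {}               # n(t): number of documents containing token t
for doc in toyCorpus:
    for t in set(doc):
        docFreq[t] = docFreq.get(t, 0) + 1
idfToy = { t: N / docFreq[t] for t in docFreq }  # e.g. 'fox' -> 3/2 = 1.5, 'quick' -> 3/1 = 3.0
doc0 = toyCorpus[0]
tfToy = { t: doc0.count(t) / float(len(doc0)) for t in set(doc0) }  # 'quick' -> 0.5, 'fox' -> 0.5
tfidfToy = { t: tfToy[t] * idfToy[t] for t in tfToy }
print tfidfToy  # 'quick' -> 1.5, 'fox' -> 0.75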
# COMMAND ----------
# MAGIC %md
# MAGIC #### **(2a) Implement a TF function**
# MAGIC
# MAGIC Implement `tf(tokens)` that takes a list of tokens and returns a Python [dictionary](https://docs.python.org/2/tutorial/datastructures.html#dictionaries) mapping tokens to TF weights.
# MAGIC
# MAGIC The steps your function should perform are:
# MAGIC * Create an empty Python dictionary
# MAGIC * For each of the tokens in the input `tokens` list, count 1 for each occurrence and add the token to the dictionary
# MAGIC * For each of the tokens in the dictionary, divide the token's count by the total number of tokens in the input `tokens` list
# COMMAND ----------
# ANSWER
def tf(tokens):
""" Compute TF
Args:
tokens (list of str): input list of tokens from tokenize
Returns:
dictionary: a dictionary of tokens to its TF values
"""
counts = {}
length = len(tokens)
for t in tokens:
counts.setdefault(t, 0.0)
counts[t] += 1
return { t: counts[t] / length for t in counts }
print tf(tokenize(quickbrownfox)) # Should give { 'quick': 0.1666 ... }
# COMMAND ----------
# TEST Implement a TF function (2a)
tf_test = tf(tokenize(quickbrownfox))
Test.assertEquals(tf_test, {'brown': 0.16666666666666666, 'lazy': 0.16666666666666666,
'jumps': 0.16666666666666666, 'fox': 0.16666666666666666,
'dog': 0.16666666666666666, 'quick': 0.16666666666666666},
'incorrect result for tf on sample text')
tf_test2 = tf(tokenize('one_ one_ two!'))
Test.assertEquals(tf_test2, {'one_': 0.6666666666666666, 'two': 0.3333333333333333},
'incorrect result for tf test')
# COMMAND ----------
# PRIVATE_TEST Implement a TF function (2a)
tf_test = tf(tokenize(quickbrownfox))
Test.assertEquals(tf_test, {'brown': 0.16666666666666666, 'lazy': 0.16666666666666666,
'jumps': 0.16666666666666666, 'fox': 0.16666666666666666,
'dog': 0.16666666666666666, 'quick': 0.16666666666666666},
'incorrect result for tf on sample text')
tf_test2 = tf(tokenize('one_ one_ two!'))
Test.assertEquals(tf_test2, {'one_': 0.6666666666666666, 'two': 0.3333333333333333},
'incorrect result for tf test')
# COMMAND ----------
# MAGIC %md
# MAGIC #### **(2b) Create a corpus**
# MAGIC Create a pair RDD called `corpusRDD`, consisting of a combination of the two small datasets, `amazonRecToToken` and `googleRecToToken`. Each element of the `corpusRDD` should be a pair consisting of a key from one of the small datasets (ID or URL) and the associated list of tokens for that key.
# COMMAND ----------
# ANSWER
corpusRDD = amazonRecToToken.union(googleRecToToken)
# COMMAND ----------
# TEST Create a corpus (2b)
Test.assertEquals(corpusRDD.count(), 400, 'incorrect corpusRDD.count()')
# COMMAND ----------
# PRIVATE_TEST Create a corpus (2b)
Test.assertEquals(corpusRDD.count(), 400, 'incorrect corpusRDD.count()')
# COMMAND ----------
# MAGIC %md
# MAGIC #### **(2c) Implement an IDFs function**
# MAGIC Implement `idfs` that assigns an IDF weight to every unique token in an RDD called `corpus`. The function should return a pair RDD where the `key` is the unique token and the value is the IDF weight for the token.
# MAGIC
# MAGIC Recall that the IDF weight for a token, *t*, in a set of documents, *U*, is computed as follows:
# MAGIC * Let *N* be the total number of documents in *U*.
# MAGIC * Find *n(t)*, the number of documents in *U* that contain *t*.
# MAGIC * Then *IDF(t) = N/n(t)*.
# MAGIC
# MAGIC The steps your function should perform are:
# MAGIC * Calculate *N*. Think about how you can calculate *N* from the input RDD.
# MAGIC * Create an RDD (*not a pair RDD*) containing the unique tokens from each document in the input `corpus`. For each document, you should only include a token once, *even if it appears multiple times in that document.*
# MAGIC * For each of the unique tokens, count how many documents it appears in to get *n(t)*, and then compute the IDF for that token: *N/n(t)*
# MAGIC
# MAGIC Use your `idfs` to compute the IDF weights for all tokens in `corpusRDD` (the combined small datasets).
# MAGIC How many unique tokens are there?
# COMMAND ----------
# ANSWER
def idfs(corpus):
""" Compute IDF
Args:
corpus (RDD): input corpus
Returns:
RDD: a RDD of (token, IDF value)
"""
uniqueTokens = corpus.flatMap(lambda s: list(set(s[1])))
tokenCountPairTuple = uniqueTokens.map(lambda token: (token, 1))
tokenSumPairTuple = tokenCountPairTuple.reduceByKey(lambda a, b : a + b)
N = float(corpus.count())
return (tokenSumPairTuple.map(lambda s: (s[0], float(N/s[1]))))
idfsSmall = idfs(amazonRecToToken.union(googleRecToToken))
uniqueTokenCount = idfsSmall.count()
print 'There are %s unique tokens in the small datasets.' % uniqueTokenCount
# COMMAND ----------
# TEST Implement an IDFs function (2c)
Test.assertEquals(uniqueTokenCount, 4772, 'incorrect uniqueTokenCount')
tokenSmallestIdf = idfsSmall.takeOrdered(1, lambda s: s[1])[0]
Test.assertEquals(tokenSmallestIdf[0], 'software', 'incorrect smallest IDF token')
Test.assertTrue(abs(tokenSmallestIdf[1] - 4.25531914894) < 0.0000000001,
'incorrect smallest IDF value')
# COMMAND ----------
# PRIVATE_TEST Implement an IDFs function (2c)
Test.assertEquals(uniqueTokenCount, 4772, 'incorrect uniqueTokenCount')
tokenSmallestIdf = idfsSmall.takeOrdered(1, lambda s: s[1])[0]
Test.assertEquals(tokenSmallestIdf[0], 'software', 'incorrect smallest IDF token')
Test.assertTrue(abs(tokenSmallestIdf[1] - 4.25531914894) < 0.0000000001,
'incorrect smallest IDF value')
firstElevenTokens = set(idfsSmall.takeOrdered(11, lambda s: s[1]))
Test.assertEquals(len(firstElevenTokens - set([('software', 4.25531914893617),('new', 6.896551724137931),('features', 6.896551724137931),('use', 7.017543859649122),('complete', 7.2727272727272725),('easy', 7.6923076923076925),('create', 8.333333333333334),('system', 8.333333333333334),('cd', 8.333333333333334),('1', 8.51063829787234), ('windows', 8.51063829787234)])), 0, 'incorrect firstTenTokens')
# COMMAND ----------
# MAGIC %md
# MAGIC #### **(2d) Tokens with the smallest IDF**
# MAGIC Print out the 11 tokens with the smallest IDF in the combined small dataset.
# COMMAND ----------
smallIDFTokens = idfsSmall.takeOrdered(11, lambda s: s[1])
print smallIDFTokens
# COMMAND ----------
# ANSWER
#*answer*: The 10 smallest IDFs are for: (1) software, (2) new, (3) features, (4) use, (5) complete, (6) easy, (7 tie) cd, (7 tie) system, (7 tie) create, (10 tie) windows, (10 tie) 1.
#These terms are not useful for entity resolution because they are generic terms for marketing, prices, and product categories.
# COMMAND ----------
# ANSWER
# Quiz question:
# For part (2d), do you think the terms are useful for entity resolution?
# ( ) Yes
# (*) No
#
# Why or why not?
# ( ) These terms are useful for entity resolution because they describe distinguishing tokens in product descriptions
# ( ) These terms are not useful for entity resolution because they are generic terms for marketing, prices, and product categories.
# COMMAND ----------
# MAGIC %md
# MAGIC #### **(2e) IDF Histogram**
# MAGIC Plot a histogram of IDF values. Be sure to use appropriate scaling and bucketing for the data.
# MAGIC First plot the histogram using `matplotlib`
# COMMAND ----------
import matplotlib.pyplot as plt
small_idf_values = idfsSmall.map(lambda s: s[1]).collect()
fig = plt.figure(figsize=(8,3))
plt.hist(small_idf_values, 50, log=True)
display(fig)
pass
# COMMAND ----------
from pyspark.sql import Row
# Create a DataFrame and visualize using display()
idfsToCountRow = idfsSmall.map(lambda (x, y): Row(token=x, value=y))
idfsToCountDF = sqlContext.createDataFrame(idfsToCountRow)
display(idfsToCountDF)
# COMMAND ----------
# ANSWER
# Quiz question:
# Using the plot in (2e), what conclusions can you draw from the distribution of weights?
#
# *ANSWER:* There is a long tail of rare words in the corpus (these have large IDF values).
# [explanation]
# There are gaps between IDF values because IDF is a function of a discrete variable, i.e., a document count.
# [explanation]
# COMMAND ----------
# MAGIC %md
# MAGIC #### **(2f) Implement a TF-IDF function**
# MAGIC Use your `tf` function to implement a `tfidf(tokens, idfs)` function that takes a list of tokens from a document and a Python dictionary of IDF weights and returns a Python dictionary mapping individual tokens to total TF-IDF weights.
# MAGIC
# MAGIC The steps your function should perform are:
# MAGIC * Calculate the token frequencies (TF) for `tokens`
# MAGIC * Create a Python dictionary where each token maps to the token's frequency times the token's IDF weight
# MAGIC
# MAGIC Use your `tfidf` function to compute the weights of Amazon product record 'b000hkgj8k'. To do this, we need to extract the record for that product from the tokenized small Amazon dataset, and we need to convert the IDFs for the small dataset into a Python dictionary. We can do the first part by using a `filter()` transformation to extract the matching record and a `collect()` action to return the value to the driver.
# MAGIC
# MAGIC For the second part, we use the [`collectAsMap()` action](http://spark.apache.org/docs/latest/api/python/pyspark.html#pyspark.RDD.collectAsMap) to return the IDFs to the driver as a Python dictionary.
# COMMAND ----------
# ANSWER
def tfidf(tokens, idfs):
""" Compute TF-IDF
Args:
tokens (list of str): input list of tokens from tokenize
        idfs (dictionary): token to IDF value
Returns:
        dictionary: a dictionary of tokens to TF-IDF values
"""
tfs = tf(tokens)
return { t: tfs[t] * idfs[t] for t in tfs }
rec_b000hkgj8k = amazonRecToToken.filter(lambda x: x[0] == 'b000hkgj8k').collect()[0][1]
idfsSmallWeights = idfsSmall.collectAsMap()
rec_b000hkgj8k_weights = tfidf(rec_b000hkgj8k, idfsSmallWeights)
print 'Amazon record "b000hkgj8k" has tokens and weights:\n%s' % rec_b000hkgj8k_weights
# COMMAND ----------
# TEST Implement a TF-IDF function (2f)
Test.assertEquals(rec_b000hkgj8k_weights,
{'autocad': 33.33333333333333, 'autodesk': 8.333333333333332,
'courseware': 66.66666666666666, 'psg': 33.33333333333333,
'2007': 3.5087719298245617, 'customizing': 16.666666666666664,
'interface': 3.0303030303030303}, 'incorrect rec_b000hkgj8k_weights')
# COMMAND ----------
# PRIVATE_TEST Implement a TF-IDF function (2f)
Test.assertEquals(rec_b000hkgj8k_weights, {'autocad': 33.33333333333333, 'autodesk': 8.333333333333332, 'courseware': 66.66666666666666, 'psg': 33.33333333333333, '2007': 3.5087719298245617, 'customizing': 16.666666666666664, 'interface': 3.0303030303030303}, 'incorrect rec_b000hkgj8k_weights')
# COMMAND ----------
# MAGIC %md
# MAGIC #### **Part 3: ER as Text Similarity - Cosine Similarity**
# MAGIC Now we are ready to do text comparisons in a formal way. The metric of string distance we will use is called **[cosine similarity][cosine]**. We will treat each document as a vector in some high dimensional space. Then, to compare two documents we compute the cosine of the angle between their two document vectors. This is *much* easier than it sounds.
# MAGIC
# MAGIC The first question to answer is how do we represent documents as vectors? The answer is familiar: bag-of-words! We treat each unique token as a dimension, and treat token weights as magnitudes in their respective token dimensions. For example, suppose we use simple counts as weights, and we want to interpret the string "Hello, world! Goodbye, world!" as a vector. Then in the "hello" and "goodbye" dimensions the vector has value 1, in the "world" dimension it has value 2, and it is zero in all other dimensions.
# MAGIC
# MAGIC The next question is: given two vectors how do we find the cosine of the angle between them? Recall the formula for the dot product of two vectors:
# MAGIC \\[ a \cdot b = \| a \| \| b \| \cos \theta \\]
# MAGIC Here \\( a \cdot b = \sum a_i b_i \\) is the ordinary dot product of two vectors, and \\( \|a\| = \sqrt{ \sum a_i^2 } \\) is the norm of \\( a \\).
# MAGIC
# MAGIC We can rearrange terms and solve for the cosine to find it is simply the normalized dot product of the vectors. With our vector model, the dot product and norm computations are simple functions of the bag-of-words document representations, so we now have a formal way to compute similarity:
# MAGIC \\[ similarity = \cos \theta = \frac{a \cdot b}{\|a\| \|b\|} = \frac{\sum a_i b_i}{\sqrt{\sum a_i^2} \sqrt{\sum b_i^2}} \\]
# MAGIC
# MAGIC Setting aside the algebra, the geometric interpretation is more intuitive. The angle between two document vectors is small if they share many tokens in common, because they are pointing in roughly the same direction. For that case, the cosine of the angle will be large. Otherwise, if the angle is large (and they have few words in common), the cosine is small. Therefore, cosine similarity scales proportionally with our intuitive sense of similarity.
# MAGIC [cosine]: https://en.wikipedia.org/wiki/Cosine_similarity
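# COMMAND ----------
# (Illustrative aside, not part of the original lab.) The bag-of-words vector for
# "Hello, world! Goodbye, world!" from the text above, and the cosine of the angle
# between it and a second toy vector, computed directly from the formula
# cos(theta) = (a . b) / (||a|| ||b||). The second vector is made up for illustration.
import math
helloVec = {'hello': 1, 'goodbye': 1, 'world': 2}
otherVec = {'hello': 1, 'world': 1}
dotProduct = sum(helloVec[t] * otherVec[t] for t in helloVec if t in otherVec)
normHello = math.sqrt(sum(v * v for v in helloVec.values()))
normOther = math.sqrt(sum(v * v for v in otherVec.values()))
print dotProduct / (normHello * normOther)  # ~0.866: the vectors point in similar directions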
# COMMAND ----------
# MAGIC %md
# MAGIC #### **(3a) Implement the components of a `cosineSimilarity` function**
# MAGIC Implement the components of a `cosineSimilarity` function.
# MAGIC Use the `tokenize` and `tfidf` functions, and the IDF weights from Part 2 for extracting tokens and assigning them weights.
# MAGIC The steps you should perform are:
# MAGIC * Define a function `dotprod` that takes two Python dictionaries and produces the dot product of them, where the dot product is defined as the sum of the product of values for tokens that appear in *both* dictionaries
# MAGIC * Define a function `norm` that returns the square root of the dot product of a dictionary and itself
# MAGIC * Define a function `cossim` that returns the dot product of two dictionaries divided by the norm of the first dictionary and then by the norm of the second dictionary
# COMMAND ----------
# ANSWER
import math
def dotprod(a, b):
return sum([a[t] * b[t] for t in a if t in b])
def norm(a):
return math.sqrt(dotprod(a, a))
def cossim(a, b):
return dotprod(a, b) / norm(a) / norm(b)
testVec1 = {'foo': 2, 'bar': 3, 'baz': 5 }
testVec2 = {'foo': 1, 'bar': 0, 'baz': 20 }
dp = dotprod(testVec1, testVec2)
nm = norm(testVec1)
print dp, nm
# COMMAND ----------
# TEST Implement the components of a cosineSimilarity function (3a)
Test.assertEquals(dp, 102, 'incorrect dp')
Test.assertTrue(abs(nm - 6.16441400297) < 0.0000001, 'incorrect nm')
# COMMAND ----------
# PRIVATE_TEST Implement the components of a cosineSimilarity function (3a)
Test.assertEquals(dp, 102, 'incorrect dp')
Test.assertTrue(abs(nm - 6.16441400297) < 0.0000001, 'incorrect nm')
# COMMAND ----------
# MAGIC %md
# MAGIC #### **(3b) Implement a `cosineSimilarity` function**
# MAGIC Implement a `cosineSimilarity(string1, string2, idfsDictionary)` function that takes two strings and a dictionary of IDF weights, and computes their cosine similarity in the context of some global IDF weights.
# MAGIC
# MAGIC The steps you should perform are:
# MAGIC * Apply your `tfidf` function to the tokenized first and second strings, using the dictionary of IDF weights
# MAGIC * Compute and return your `cossim` function applied to the results of the two `tfidf` functions
# COMMAND ----------
# ANSWER
def cosineSimilarity(string1, string2, idfsDictionary):
""" Compute cosine similarity between two strings
Args:
string1 (str): first string
string2 (str): second string
idfsDictionary (dictionary): a dictionary of IDF values
Returns:
cossim: cosine similarity value
"""
w1 = tfidf(tokenize(string1), idfsDictionary)
w2 = tfidf(tokenize(string2), idfsDictionary)
return cossim(w1, w2)
cossimAdobe = cosineSimilarity('Adobe Photoshop',
'Adobe Illustrator',
idfsSmallWeights)
print cossimAdobe
# COMMAND ----------
# TEST Implement a cosineSimilarity function (3b)
Test.assertTrue(abs(cossimAdobe - 0.0577243382163) < 0.0000001, 'incorrect cossimAdobe')
# COMMAND ----------
# PRIVATE_TEST Implement a cosineSimilarity function (3b)
Test.assertTrue(abs(cossimAdobe - 0.0577243382163) < 0.0000001, 'incorrect cossimAdobe')
# COMMAND ----------
# MAGIC %md
# MAGIC #### **(3c) Perform Entity Resolution**
# MAGIC Now we can finally do some entity resolution!
# MAGIC For *every* product record in the small Google dataset, use your `cosineSimilarity` function to compute its similarity to every record in the small Amazon dataset. Then, build a dictionary mapping `(Google URL, Amazon ID)` tuples to similarity scores between 0 and 1.
# MAGIC We'll do this computation two different ways: first without a broadcast variable, and then using a broadcast variable.
# MAGIC
# MAGIC The steps you should perform are:
# MAGIC * Create an RDD that is a combination of the small Google and small Amazon datasets and has as elements all pairs (a, b), where a is a record from the small Google dataset and b is a record from the small Amazon dataset. The result will be an RDD of the form: `[ ((Google URL1, Google String1), (Amazon ID1, Amazon String1)), ((Google URL1, Google String1), (Amazon ID2, Amazon String2)), ((Google URL2, Google String2), (Amazon ID1, Amazon String1)), ... ]`
# MAGIC * Define a worker function that, given an element from the combination RDD, computes the cosine similarity for the two records in the element
# MAGIC * Apply the worker function to every element in the RDD
# MAGIC
# MAGIC Now, compute the similarity between Amazon record `b000o24l3q` and Google record `http://www.google.com/base/feeds/snippets/17242822440574356561`.
# COMMAND ----------
# ANSWER
crossSmall = (googleSmall
.cartesian(amazonSmall)
.cache())
def computeSimilarity(record):
""" Compute similarity on a combination record
Args:
record: a pair, (google record, amazon record)
Returns:
        triple: a tuple of (google URL, amazon ID, cosine similarity value)
"""
googleRec = record[0]
amazonRec = record[1]
googleURL = googleRec[0]
amazonID = amazonRec[0]
googleValue = googleRec[1]
amazonValue = amazonRec[1]
cs = cosineSimilarity(googleValue, amazonValue, idfsSmallWeights)
return (googleURL, amazonID, cs)
similarities = (crossSmall
.map(computeSimilarity)
.cache())
def similar(amazonID, googleURL):
""" Return similarity value
Args:
amazonID: amazon ID
googleURL: google URL
Returns:
similar: cosine similarity value
"""
return (similarities
.filter(lambda record: (record[0] == googleURL and record[1] == amazonID))
.collect()[0][2])
similarityAmazonGoogle = similar('b000o24l3q', 'http://www.google.com/base/feeds/snippets/17242822440574356561')
print 'Requested similarity is %s.' % similarityAmazonGoogle
# COMMAND ----------
# TEST Perform Entity Resolution (3c)
Test.assertTrue(abs(similarityAmazonGoogle - 0.000303171940451) < 0.0000001,
'incorrect similarityAmazonGoogle')
# COMMAND ----------
# PRIVATE_TEST Perform Entity Resolution (3c)
Test.assertTrue(abs(similarityAmazonGoogle - 0.000303171940451) < 0.0000001,
'incorrect similarityAmazonGoogle')
similarityAnother = similar('b000o24l3q', 'http://www.google.com/base/feeds/snippets/18274317756231697680')
Test.assertTrue(abs(similarityAnother - 0.093899589276) < 0.0000001, 'incorrect another similarity test')
# COMMAND ----------
# MAGIC %md
# MAGIC #### **(3d) Perform Entity Resolution with Broadcast Variables**
# MAGIC The solution in (3c) works well for small datasets, but it requires Spark to (automatically) send the `idfsSmallWeights` variable to all the workers. If we didn't `cache()` similarities, then it might have to be recreated if we run `similar()` multiple times. This would cause Spark to send `idfsSmallWeights` every time.
# MAGIC
# MAGIC Instead, we can use a broadcast variable - we define the broadcast variable in the driver and then we can refer to it in each worker. Spark saves the broadcast variable at each worker, so it is only sent once.
# MAGIC
# MAGIC The steps you should perform are:
# MAGIC * Define a `computeSimilarityBroadcast` function that, given an element from the combination RDD, computes the cosine similarity for the two records in the element. This will be the same as the worker function `computeSimilarity` in (3c) except that it uses a broadcast variable.
# MAGIC * Apply the worker function to every element in the RDD
# MAGIC
# MAGIC Again, compute the similarity between Amazon record `b000o24l3q` and Google record `http://www.google.com/base/feeds/snippets/17242822440574356561`.
# COMMAND ----------
# ANSWER
def computeSimilarityBroadcast(record):
""" Compute similarity on a combination record, using Broadcast variable
Args:
record: a pair, (google record, amazon record)
Returns:
        triple: a tuple of (google URL, amazon ID, cosine similarity value)
"""
googleRec = record[0]
amazonRec = record[1]
googleURL = googleRec[0]
amazonID = amazonRec[0]
googleValue = googleRec[1]
amazonValue = amazonRec[1]
cs = cosineSimilarity(googleValue, amazonValue, idfsSmallBroadcast.value)
return (googleURL, amazonID, cs)
idfsSmallBroadcast = sc.broadcast(idfsSmallWeights)
similaritiesBroadcast = (crossSmall
.map(computeSimilarityBroadcast)
.cache())
def similarBroadcast(amazonID, googleURL):
""" Return similarity value, computed using Broadcast variable
Args:
amazonID: amazon ID
googleURL: google URL
Returns:
similar: cosine similarity value
"""
return (similaritiesBroadcast
.filter(lambda record: (record[0] == googleURL and record[1] == amazonID))
.collect()[0][2])
similarityAmazonGoogleBroadcast = similarBroadcast('b000o24l3q', 'http://www.google.com/base/feeds/snippets/17242822440574356561')
print 'Requested similarity is %s.' % similarityAmazonGoogleBroadcast
# COMMAND ----------
# TEST Perform Entity Resolution with Broadcast Variables (3d)
from pyspark import Broadcast
Test.assertTrue(isinstance(idfsSmallBroadcast, Broadcast), 'incorrect idfsSmallBroadcast')
Test.assertEquals(len(idfsSmallBroadcast.value), 4772, 'incorrect idfsSmallBroadcast value')
Test.assertTrue(abs(similarityAmazonGoogleBroadcast - 0.000303171940451) < 0.0000001,
                'incorrect similarityAmazonGoogleBroadcast')
# COMMAND ----------
# PRIVATE_TEST Perform Entity Resolution with Broadcast Variables (3d)
from pyspark import Broadcast
Test.assertTrue(isinstance(idfsSmallBroadcast, Broadcast), 'incorrect idfsSmallBroadcast')
Test.assertEquals(len(idfsSmallBroadcast.value), 4772, 'incorrect idfsSmallBroadcast value')
Test.assertTrue(abs(similarityAmazonGoogleBroadcast - 0.000303171940451) < 0.0000001,
                'incorrect similarityAmazonGoogleBroadcast')
similarityAnotherBroadcast = similarBroadcast('b000o24l3q', 'http://www.google.com/base/feeds/snippets/18274317756231697680')
Test.assertTrue(abs(similarityAnotherBroadcast - 0.093899589276) < 0.0000001,
'incorrect another similarity test')
# COMMAND ----------
# MAGIC %md
# MAGIC #### **(3e) Perform a Gold Standard evaluation**
# MAGIC
# MAGIC First, we'll load the "gold standard" data and use it to answer several questions. We read and parse the Gold Standard data, where the format of each line is "Amazon Product ID","Google URL". The resulting RDD has elements of the form ("AmazonID GoogleURL", 'gold')
# COMMAND ----------
GOLDFILE_PATTERN = '^(.+),(.+)'
# Parse each line of a data file using the specified regular expression pattern
def parse_goldfile_line(goldfile_line):
    """ Parse a line from the 'gold standard' data file
Args:
goldfile_line: a line of data
Returns:
        pair: ((key, 'gold'), 1) if the line parses successfully; (goldfile_line, 0) for the header line; (goldfile_line, -1) if parsing fails
"""
match = re.search(GOLDFILE_PATTERN, goldfile_line)
if match is None:
print 'Invalid goldfile line: %s' % goldfile_line
return (goldfile_line, -1)
elif match.group(1) == '"idAmazon"':
print 'Header datafile line: %s' % goldfile_line
return (goldfile_line, 0)
else:
key = '%s %s' % (removeQuotes(match.group(1)), removeQuotes(match.group(2)))
return ((key, 'gold'), 1)
goldfile = os.path.join(baseDir, inputPath, GOLD_STANDARD_PATH)
gsRaw = (sc
.textFile(goldfile)
.map(parse_goldfile_line)
.cache())
gsFailed = (gsRaw
.filter(lambda s: s[1] == -1)
.map(lambda s: s[0]))
for line in gsFailed.take(10):
print 'Invalid goldfile line: %s' % line
goldStandard = (gsRaw
.filter(lambda s: s[1] == 1)
.map(lambda s: s[0])
.cache())
print 'Read %d lines, successfully parsed %d lines, failed to parse %d lines' % (gsRaw.count(),
goldStandard.count(),
gsFailed.count())
assert (gsFailed.count() == 0)
assert (gsRaw.count() == (goldStandard.count() + 1))
# COMMAND ----------
# MAGIC %md
# MAGIC #### Using the "gold standard" data we can answer the following questions:
# MAGIC
# MAGIC * How many true duplicate pairs are there in the small datasets?
# MAGIC * What is the average similarity score for true duplicates?
# MAGIC * What about for non-duplicates?
# MAGIC The steps you should perform are:
# MAGIC * Create a new `sims` RDD from the `similaritiesBroadcast` RDD, where each element consists of a pair of the form ("AmazonID GoogleURL", cosineSimilarityScore). An example entry from `sims` is: ('b000bi7uqs http://www.google.com/base/feeds/snippets/18403148885652932189', 0.40202896125621296)
# MAGIC * Combine the `sims` RDD with the `goldStandard` RDD by creating a new `trueDupsRDD` RDD that has just the cosine similarity scores for those "AmazonID GoogleURL" pairs that appear in both the `sims` RDD and `goldStandard` RDD. Hint: you can do this using the join() transformation.
# MAGIC * Count the number of true duplicate pairs in the `trueDupsRDD` dataset
# MAGIC * Compute the average similarity score for true duplicates in the `trueDupsRDD` dataset. Remember to use `float` for the calculation
# MAGIC * Create a new `nonDupsRDD` RDD that has just the cosine similarity scores for those "AmazonID GoogleURL" pairs from the `similaritiesBroadcast` RDD that **do not** appear in both the `sims` RDD and the `goldStandard` RDD.
# MAGIC * Compute the average similarity score for non-duplicates in the last dataset. Remember to use `float` for the calculation
# COMMAND ----------
# ANSWER
sims = similaritiesBroadcast.map(lambda x: ("%s %s" % (x[1], x[0]), x[2]))
trueDupsRDD = (sims
.join(goldStandard)
.map(lambda a: a[1][0]))
trueDupsCount = trueDupsRDD.count()
avgSimDups = float(trueDupsRDD.reduce(lambda a, b: a + b)) / float(trueDupsCount)
nonDupsRDD = (sims
.leftOuterJoin(goldStandard)
.filter(lambda x: (x[1][1] is None))
.map(lambda a: a[1][0]))
avgSimNon = float(nonDupsRDD.reduce(lambda a, b: a + b)) / float(sims.count() - trueDupsCount)
print 'There are %s true duplicates.' % trueDupsCount
print 'The average similarity of true duplicates is %s.' % avgSimDups
print 'And for non duplicates, it is %s.' % avgSimNon
# COMMAND ----------
# TEST Perform a Gold Standard evaluation (3e)
Test.assertEquals(trueDupsCount, 146, 'incorrect trueDupsCount')
Test.assertTrue(abs(avgSimDups - 0.264332573435) < 0.0000001, 'incorrect avgSimDups')
Test.assertTrue(abs(avgSimNon - 0.00123476304656) < 0.0000001, 'incorrect avgSimNon')
# COMMAND ----------
# PRIVATE_TEST Perform a Gold Standard evaluation (3e)
Test.assertEquals(trueDupsCount, 146, 'incorrect trueDupsCount')
Test.assertTrue(abs(avgSimDups - 0.264332573435) < 0.0000001, 'incorrect avgSimDups')
Test.assertTrue(abs(avgSimNon - 0.00123476304656) < 0.0000001, 'incorrect avgSimNon')
# COMMAND ----------
# ANSWER
# Quiz question:
# Based on the answers to the questions in part (3e), is cosine similarity doing a good job, qualitatively speaking, of identifying duplicates?
# (*) Yes
# ( ) No
# *answer*: Cosine similarity looks useful, because duplicates are on average more than 200X as similar as non-duplicates. As long as variance isn't too high, that's a good signal.
# COMMAND ----------
# MAGIC %md
# MAGIC #### **Part 4: Scalable ER**
# MAGIC In the previous parts, we built a text similarity function and used it for small scale entity resolution. Our implementation is limited by its quadratic run time complexity, and is not practical for even modestly sized datasets. In this part, we will implement a more scalable algorithm and use it to do entity resolution on the full dataset.
# MAGIC
# MAGIC #### Inverted Indices
# MAGIC To improve our ER algorithm from the earlier parts, we should begin by analyzing its running time. In particular, the algorithm above is quadratic in two ways. First, we did a lot of redundant computation of tokens and weights, since each record was reprocessed every time it was compared. Second, we made quadratically many token comparisons between records.
# MAGIC
# MAGIC The first source of quadratic overhead can be eliminated with precomputation and look-up tables, but the second source is a little more tricky. In the worst case, every token in every record in one dataset exists in every record in the other dataset, and therefore every token makes a non-zero contribution to the cosine similarity. In this case, token comparison is unavoidably quadratic.
# MAGIC
# MAGIC But in reality most records have nothing (or very little) in common. Moreover, it is typical for a record in one dataset to have at most one duplicate record in the other dataset (this is the case assuming each dataset has been de-duplicated against itself). In this case, the output is linear in the size of the input and we can hope to achieve linear running time.
# MAGIC
# MAGIC An [**inverted index**](https://en.wikipedia.org/wiki/Inverted_index) is a data structure that will allow us to avoid making quadratically many token comparisons. It maps each token in the dataset to the list of documents that contain the token. So, instead of comparing, record by record, each token to every other token to see if they match, we will use inverted indices to *look up* records that match on a particular token.
# MAGIC
# MAGIC > **Note on terminology**: In text search, a *forward* index maps documents in a dataset to the tokens they contain. An *inverted* index supports the inverse mapping.
# MAGIC
# MAGIC > **Note**: For this section, use the complete Google and Amazon datasets, not the samples
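# COMMAND ----------
# (Illustrative aside, not part of the original lab.) A tiny inverted index built with
# plain Python dictionaries. The forward index maps each document to its tokens; the
# inverted index maps each token to the documents that contain it, so only documents
# sharing at least one token ever need to be compared. The documents are made up.
toyForwardIndex = {'doc1': ['adobe', 'photoshop'], 'doc2': ['adobe', 'illustrator']}
toyInvertedIndex = {}
for docID, tokens in toyForwardIndex.items():
    for token in tokens:
        toyInvertedIndex.setdefault(token, []).append(docID)
print toyInvertedIndex  # e.g. {'adobe': ['doc1', 'doc2'], 'photoshop': ['doc1'], 'illustrator': ['doc2']}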
# COMMAND ----------
# MAGIC %md
# MAGIC #### **(4a) Tokenize the full dataset**
# MAGIC Tokenize each of the two full datasets for Google and Amazon.
# COMMAND ----------
# ANSWER
amazonFullRecToToken = amazon.map(lambda s: (s[0], tokenize(s[1])))
googleFullRecToToken = google.map(lambda s: (s[0], tokenize(s[1])))
print 'Amazon full dataset is %s products, Google full dataset is %s products' % (amazonFullRecToToken.count(),
googleFullRecToToken.count())
# COMMAND ----------
# TEST Tokenize the full dataset (4a)
Test.assertEquals(amazonFullRecToToken.count(), 1363, 'incorrect amazonFullRecToToken.count()')
Test.assertEquals(googleFullRecToToken.count(), 3226, 'incorrect googleFullRecToToken.count()')
# COMMAND ----------
# PRIVATE_TEST Tokenize the full dataset (4a)
Test.assertEquals(amazonFullRecToToken.count(), 1363, 'incorrect amazonFullRecToToken.count()')
Test.assertEquals(googleFullRecToToken.count(), 3226, 'incorrect googleFullRecToToken.count()')
# COMMAND ----------
# MAGIC %md
# MAGIC #### **(4b) Compute IDFs and TF-IDFs for the full datasets**
# MAGIC
# MAGIC We will reuse your code from above to compute IDF weights for the complete combined datasets.
# MAGIC The steps you should perform are:
# MAGIC * Create a new `fullCorpusRDD` that contains the tokens from the full Amazon and Google datasets.
# MAGIC * Apply your `idfs` function to the `fullCorpusRDD`
# MAGIC * Create a broadcast variable containing a dictionary of the IDF weights for the full dataset.
# MAGIC * For each of the Amazon and Google full datasets, create weight RDDs that map IDs/URLs to TF-IDF weighted token vectors.
# COMMAND ----------
# ANSWER
fullCorpusRDD = amazonFullRecToToken.union(googleFullRecToToken)
idfsFull = idfs(fullCorpusRDD)
idfsFullCount = idfsFull.count()
print 'There are %s unique tokens in the full datasets.' % idfsFullCount
# Collect the IDF weights for the full dataset into a dictionary and broadcast it
idfsFullWeights = idfsFull.collectAsMap()
idfsFullBroadcast = sc.broadcast(idfsFullWeights)
# Pre-compute TF-IDF weights. Build mappings from record ID to TF-IDF weight vector.
amazonWeightsRDD = amazonFullRecToToken.map(lambda x: (x[0], tfidf(x[1], idfsFullBroadcast.value)))
googleWeightsRDD = googleFullRecToToken.map(lambda x: (x[0], tfidf(x[1], idfsFullBroadcast.value)))
print 'There are %s Amazon weights and %s Google weights.' % (amazonWeightsRDD.count(),
googleWeightsRDD.count())
# COMMAND ----------
# TEST Compute IDFs and TF-IDFs for the full datasets (4b)
Test.assertEquals(idfsFullCount, 17078, 'incorrect idfsFullCount')
Test.assertEquals(amazonWeightsRDD.count(), 1363, 'incorrect amazonWeightsRDD.count()')
Test.assertEquals(googleWeightsRDD.count(), 3226, 'incorrect googleWeightsRDD.count()')
# COMMAND ----------
# PRIVATE_TEST Compute IDFs and TF-IDFs for the full datasets (4b)
Test.assertEquals(idfsFullCount, 17078, 'incorrect idfsFullCount')
Test.assertEquals(amazonWeightsRDD.count(), 1363, 'incorrect amazonWeightsRDD.count()')
Test.assertEquals(googleWeightsRDD.count(), 3226, 'incorrect googleWeightsRDD.count()')
# COMMAND ----------
# MAGIC %md
# MAGIC #### **(4c) Compute Norms for the weights from the full datasets**
# MAGIC
# MAGIC We will reuse your code from above to compute norms of the IDF weights for the complete combined dataset.
# MAGIC The steps you should perform are:
# MAGIC * Create two collections, one for each of the full Amazon and Google datasets, where IDs/URLs map to the norm of the associated TF-IDF weighted token vectors.
# MAGIC * Convert each collection into a broadcast variable, containing a dictionary of the norm of IDF weights for the full dataset
# COMMAND ----------
# ANSWER
amazonNorms = amazonWeightsRDD.map(lambda x: (x[0], norm(x[1]))).collectAsMap()
amazonNormsBroadcast = sc.broadcast(amazonNorms)
googleNorms = googleWeightsRDD.map(lambda x: (x[0], norm(x[1]))).collectAsMap()
googleNormsBroadcast = sc.broadcast(googleNorms)
print 'There are %s Amazon norms and %s Google norms.' % (len(amazonNorms), len(googleNorms))
# COMMAND ----------
# TEST Compute Norms for the weights from the full datasets (4c)
Test.assertTrue(isinstance(amazonNormsBroadcast, Broadcast), 'incorrect amazonNormsBroadcast')
Test.assertEquals(len(amazonNormsBroadcast.value), 1363, 'incorrect amazonNormsBroadcast.value')
Test.assertTrue(isinstance(googleNormsBroadcast, Broadcast), 'incorrect googleNormsBroadcast')
Test.assertEquals(len(googleNormsBroadcast.value), 3226, 'incorrect googleNormsBroadcast.value')
# COMMAND ----------
# PRIVATE_TEST Compute Norms for the weights from the full datasets (4c)
Test.assertTrue(isinstance(amazonNormsBroadcast, Broadcast), 'incorrect amazonNormsBroadcast')
Test.assertEquals(len(amazonNormsBroadcast.value), 1363, 'incorrect amazonNormsBroadcast.value')
Test.assertTrue(isinstance(googleNormsBroadcast, Broadcast), 'incorrect googleNormsBroadcast')
Test.assertEquals(len(googleNormsBroadcast.value), 3226, 'incorrect googleNormsBroadcast.value')
# COMMAND ----------
# MAGIC %md
# MAGIC #### **(4d) Create inverted indices from the full datasets**
# MAGIC
# MAGIC Build inverted indices of both data sources.
# MAGIC The steps you should perform are:
# MAGIC * Create an invert function that given a pair of (ID/URL, TF-IDF weighted token vector), returns a list of pairs of (token, ID/URL). Recall that the TF-IDF weighted token vector is a Python dictionary with keys that are tokens and values that are weights.
# MAGIC * Use your invert function to convert the full Amazon and Google TF-IDF weighted token vector datasets into two RDDs where each element is a pair of a token and an ID/URL that contains that token. These are inverted indices.
# COMMAND ----------
# ANSWER
def invert(record):
""" Invert (ID, tokens) to a list of (token, ID)
Args:
record: a pair, (ID, token vector)
Returns:
pairs: a list of pairs of token to ID
"""
value = record[0]
keys = record[1].keys()
pairs = []
for key in keys:
pairs.append((key, value))
return (pairs)
amazonInvPairsRDD = (amazonWeightsRDD
.flatMap(invert)
.cache())
googleInvPairsRDD = (googleWeightsRDD
.flatMap(invert)
.cache())
print 'There are %s Amazon inverted pairs and %s Google inverted pairs.' % (amazonInvPairsRDD.count(),
googleInvPairsRDD.count())
# COMMAND ----------
# TEST Create inverted indices from the full datasets (4d)
invertedPair = invert((1, {'foo': 2}))
Test.assertEquals(invertedPair[0][1], 1, 'incorrect invert result')
Test.assertEquals(amazonInvPairsRDD.count(), 111387, 'incorrect amazonInvPairsRDD.count()')
Test.assertEquals(googleInvPairsRDD.count(), 77678, 'incorrect googleInvPairsRDD.count()')
# COMMAND ----------
# PRIVATE_TEST Create inverted indices from the full datasets (4d)
invertedPair = invert((1, {'foo': 2}))
Test.assertEquals(invertedPair[0][1], 1, 'incorrect invert result')
Test.assertEquals(amazonInvPairsRDD.count(), 111387, 'incorrect amazonInvPairsRDD.count()')
Test.assertEquals(googleInvPairsRDD.count(), 77678, 'incorrect googleInvPairsRDD.count()')
# COMMAND ----------
# MAGIC %md
# MAGIC #### **(4e) Identify common tokens from the full dataset**
# MAGIC
# MAGIC We are now in position to efficiently perform ER on the full datasets. Implement the following algorithm to build an RDD that maps a pair of (ID, URL) to a list of tokens they share in common:
# MAGIC * Using the two inverted indices (RDDs where each element is a pair of a token and an ID or URL that contains that token), create a new RDD that contains only tokens that appear in both datasets. This will yield an RDD of pairs of (token, iterable(ID, URL)).
# MAGIC * We need a mapping from (ID, URL) to token, so create a function that will swap the elements of the RDD you just created to create this new RDD consisting of ((ID, URL), token) pairs.
# MAGIC * Finally, create an RDD consisting of pairs mapping (ID, URL) to all the tokens the pair shares in common
# COMMAND ----------
# ANSWER
def swap(record):
""" Swap (token, (ID, URL)) to ((ID, URL), token)
Args:
record: a pair, (token, (ID, URL))
Returns:
pair: ((ID, URL), token)
"""
token = record[0]
keys = (record[1][0], record[1][1])
return (keys, token)
commonTokens = (amazonInvPairsRDD.join(googleInvPairsRDD)
.map(swap)
.groupByKey()
.map(lambda rec: (rec[0], list(rec[1])))
.cache())
print 'Found %d common tokens' % commonTokens.count()
# COMMAND ----------
# TEST Identify common tokens from the full dataset (4e)
Test.assertEquals(commonTokens.count(), 2441100, 'incorrect commonTokens.count()')
# COMMAND ----------
# PRIVATE_TEST Identify common tokens from the full dataset (4e)
Test.assertEquals(commonTokens.count(), 2441100, 'incorrect commonTokens.count()')
# COMMAND ----------
# MAGIC %md
# MAGIC #### **(4f) Compute cosine similarities for the full dataset**
# MAGIC
# MAGIC Use the data structures from parts **(4a)** and **(4e)** to build a dictionary to map record pairs to cosine similarity scores.
# MAGIC The steps you should perform are:
# MAGIC * Create two broadcast dictionaries from the amazonWeights and googleWeights RDDs
# MAGIC * Create a `fastCosineSimilarity` function that takes in a record consisting of the pair ((Amazon ID, Google URL), tokens list) and computes, for each token in the token list, the product of the Amazon weight and the Google weight for that token, summing these products. The sum should then be divided by the norm for the Google URL and then by the norm for the Amazon ID. The function should return this value in a pair with the key being the (Amazon ID, Google URL). *Make sure you use the broadcast variables you created for both the weights and norms*
# MAGIC * Apply your `fastCosineSimilarity` function to the common tokens from the full dataset
# COMMAND ----------
# ANSWER
amazonWeightsBroadcast = sc.broadcast(amazonWeightsRDD.collectAsMap())
googleWeightsBroadcast = sc.broadcast(googleWeightsRDD.collectAsMap())
def fastCosineSimilarity(record):
""" Compute Cosine Similarity using Broadcast variables
Args:
        record: ((ID, URL), list of tokens)
Returns:
pair: ((ID, URL), cosine similarity value)
"""
amazonRec = record[0][0]
googleRec = record[0][1]
tokens = record[1]
s = sum([amazonWeightsBroadcast.value[amazonRec][t] * googleWeightsBroadcast.value[googleRec][t]
for t in tokens])
value = s / googleNormsBroadcast.value[googleRec] / amazonNormsBroadcast.value[amazonRec]
key = (amazonRec, googleRec)
return (key, value)
similaritiesFullRDD = (commonTokens
.map(fastCosineSimilarity)
.cache())
print similaritiesFullRDD.count()
# COMMAND ----------
# TEST Compute cosine similarities for the full dataset (4f)
similarityTest = similaritiesFullRDD.filter(lambda ((aID, gURL), cs): aID == 'b00005lzly' and gURL == 'http://www.google.com/base/feeds/snippets/13823221823254120257').collect()
Test.assertEquals(len(similarityTest), 1, 'incorrect len(similarityTest)')
Test.assertTrue(abs(similarityTest[0][1] - 4.286548414e-06) < 0.000000000001, 'incorrect similarityTest fastCosineSimilarity')
Test.assertEquals(similaritiesFullRDD.count(), 2441100, 'incorrect similaritiesFullRDD.count()')
# COMMAND ----------
# PRIVATE_TEST Compute cosine similarities for the full dataset (4f)
similarityTest = similaritiesFullRDD.filter(lambda ((aID, gURL), cs): aID == 'b00005lzly' and gURL == 'http://www.google.com/base/feeds/snippets/13823221823254120257').collect()
Test.assertEquals(len(similarityTest), 1, 'incorrect len(similarityTest)')
Test.assertTrue(abs(similarityTest[0][1] - 4.286548414e-06) < 0.000000000001, 'incorrect similarityTest fastCosineSimilarity')
Test.assertEquals(similaritiesFullRDD.count(), 2441100, 'incorrect similaritiesFullRDD.count()')
# COMMAND ----------
# MAGIC %md
# MAGIC #### **Part 5: Analysis**
# MAGIC
# MAGIC Now we have an authoritative list of record-pair similarities, but we need a way to use those similarities to decide if two records are duplicates or not. The simplest approach is to pick a **threshold**. Pairs whose similarity is above the threshold are declared duplicates, and pairs below the threshold are declared distinct.
# MAGIC
# MAGIC To decide where to set the threshold we need to understand what kind of errors result at different levels. If we set the threshold too low, we get more **false positives**, that is, record-pairs we say are duplicates that in reality are not. If we set the threshold too high, we get more **false negatives**, that is, record-pairs that really are duplicates but that we miss.
# MAGIC
# MAGIC ER algorithms are evaluated by the common metrics of information retrieval and search called **precision** and **recall**. Precision asks of all the record-pairs marked duplicates, what fraction are true duplicates? Recall asks of all the true duplicates in the data, what fraction did we successfully find? As with false positives and false negatives, there is a trade-off between precision and recall. A third metric, called **F-measure**, takes the harmonic mean of precision and recall to measure overall goodness in a single value:
# MAGIC \\[ Fmeasure = 2 \frac{precision * recall}{precision + recall} \\]
# MAGIC
# MAGIC > **Note**: In this part, we use the "gold standard" mapping from the included file to look up true duplicates, and the results of Part 4.
# MAGIC
# MAGIC > **Note**: In this part, you will not be writing any code. We've written all of the code for you. Run each cell and then answer the quiz questions on Studio.
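# COMMAND ----------
# (Illustrative aside, not part of the original lab.) The F-measure is the harmonic
# mean of precision and recall, so it is only large when both are large. The
# (precision, recall) pairs below are hypothetical, purely to illustrate the formula.
for p, r in [(0.9, 0.1), (0.5, 0.5), (0.9, 0.9)]:
    print 'precision=%.1f recall=%.1f F-measure=%.3f' % (p, r, 2 * p * r / (p + r))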
# COMMAND ----------
# MAGIC %md
# MAGIC #### **(5a) Counting True Positives, False Positives, and False Negatives**
# MAGIC
# MAGIC We need functions that count True Positives (true duplicates above the threshold), and False Positives and False Negatives:
# MAGIC * We start by creating the `simsFullRDD` from our `similaritiesFullRDD` that consists of a pair of ((Amazon ID, Google URL), similarity score)
# MAGIC * From this RDD, we create an RDD consisting of only the similarity scores
# MAGIC * To look up the similarity scores for true duplicates, we perform a left outer join using the `goldStandard` RDD and `simsFullRDD` and extract the similarity scores, using 0 for true duplicates that do not appear in `simsFullRDD`
# COMMAND ----------
# Create an RDD of ((Amazon ID, Google URL), similarity score)
simsFullRDD = similaritiesFullRDD.map(lambda x: ("%s %s" % (x[0][0], x[0][1]), x[1]))
assert (simsFullRDD.count() == 2441100)
# Create an RDD of just the similarity scores
simsFullValuesRDD = (simsFullRDD
.map(lambda x: x[1])
.cache())
assert (simsFullValuesRDD.count() == 2441100)
# Look up all similarity scores for true duplicates
# This helper function will return the similarity score for records that are in the gold standard and the simsFullRDD (True positives), and will return 0 for records that are in the gold standard but not in simsFullRDD (False Negatives).
def gs_value(record):
if (record[1][1] is None):
return 0
else:
return record[1][1]
# Join the gold standard and simsFullRDD, and then extract the similarity scores using the helper function
trueDupSimsRDD = (goldStandard
.leftOuterJoin(simsFullRDD)
.map(gs_value)
.cache())
print 'There are %s true duplicates.' % trueDupSimsRDD.count()
assert(trueDupSimsRDD.count() == 1300)
# COMMAND ----------
# MAGIC %md
# MAGIC The next step is to pick a threshold between 0 and 1 for the count of True Positives (true duplicates above the threshold). However, we would like to explore many different thresholds.
# MAGIC
# MAGIC To do this, we divide the space of thresholds into 100 bins, and take the following actions:
# MAGIC * We use Spark Accumulators to implement our counting function. We define a custom accumulator type, `VectorAccumulatorParam`, along with functions to initialize the accumulator's vector to zero, and to add two vectors. Note that we have to use the += operator because you can only add to an accumulator.
# MAGIC * We create a helper function to create a list with one entry (bit) set to a value and all others set to 0.
# MAGIC * We create 101 bins for the 100 threshold values between 0 and 1.
# MAGIC * Now, for each similarity score, we can count possible false positives. We do this by adding 1 to the bin of the vector that corresponds to each similarity score. Then we remove true positives from the vector by using the gold standard data.
# MAGIC * We define functions for computing false positives, false negatives, and true positives for a given threshold.
# COMMAND ----------
from pyspark.accumulators import AccumulatorParam
class VectorAccumulatorParam(AccumulatorParam):
# Initialize the VectorAccumulator to 0
def zero(self, value):
return [0] * len(value)
# Add two VectorAccumulator variables
def addInPlace(self, val1, val2):
for i in xrange(len(val1)):
val1[i] += val2[i]
return val1
# Return a list with entry x set to value and all other entries set to 0
def set_bit(x, value, length):
bits = []
for y in xrange(length):
if (x == y):
bits.append(value)
else:
bits.append(0)
return bits
# Pre-bin counts of false positives for different threshold ranges
BINS = 101
nthresholds = 100
def bin(similarity):
return int(similarity * nthresholds)
# fpCounts[i] = number of entries (possible false positives) where bin(similarity) == i
zeros = [0] * BINS
fpCounts = sc.accumulator(zeros, VectorAccumulatorParam())
def add_element(score):
global fpCounts
b = bin(score)
fpCounts += set_bit(b, 1, BINS)
simsFullValuesRDD.foreach(add_element)
# Remove true positives from FP counts
def sub_element(score):
global fpCounts
b = bin(score)
fpCounts += set_bit(b, -1, BINS)
trueDupSimsRDD.foreach(sub_element)
def falsepos(threshold):
fpList = fpCounts.value
return sum([fpList[b] for b in range(0, BINS) if float(b) / nthresholds >= threshold])
def falseneg(threshold):
return trueDupSimsRDD.filter(lambda x: x < threshold).count()
def truepos(threshold):
return trueDupSimsRDD.count() - falsenegDict[threshold]
# COMMAND ----------
# MAGIC %md
# MAGIC #### **(5b) Precision, Recall, and F-measures**
# MAGIC We define functions so that we can compute the [Precision](https://en.wikipedia.org/wiki/Precision_and_recall), [Recall](https://en.wikipedia.org/wiki/Precision_and_recall), and [F-measure](https://en.wikipedia.org/wiki/Precision_and_recall#F-measure) as a function of threshold value:
# MAGIC * Precision = true-positives / (true-positives + false-positives)
# MAGIC * Recall = true-positives / (true-positives + false-negatives)
# MAGIC * F-measure = 2 x Recall x Precision / (Recall + Precision)
# COMMAND ----------
# Precision = true-positives / (true-positives + false-positives)
# Recall = true-positives / (true-positives + false-negatives)
# F-measure = 2 x Recall x Precision / (Recall + Precision)
def precision(threshold):
tp = trueposDict[threshold]
return float(tp) / (tp + falseposDict[threshold])
def recall(threshold):
tp = trueposDict[threshold]
return float(tp) / (tp + falsenegDict[threshold])
def fmeasure(threshold):
r = recall(threshold)
p = precision(threshold)
return 2 * r * p / (r + p)
# COMMAND ----------
# MAGIC %md
# MAGIC #### **(5c) Line Plots**
# MAGIC We can make line plots of precision, recall, and F-measure as a function of threshold value, for thresholds between 0.0 and 1.0. You can change `nthresholds` (above in part **(5a)**) to change the threshold values to plot.
# COMMAND ----------
thresholds = [float(n) / nthresholds for n in range(0, nthresholds)]
falseposDict = dict([(t, falsepos(t)) for t in thresholds])
falsenegDict = dict([(t, falseneg(t)) for t in thresholds])
trueposDict = dict([(t, truepos(t)) for t in thresholds])
precisions = [precision(t) for t in thresholds]
recalls = [recall(t) for t in thresholds]
fmeasures = [fmeasure(t) for t in thresholds]
print precisions[0], fmeasures[0]
assert (abs(precisions[0] - 0.000532546802671) < 0.0000001)
assert (abs(fmeasures[0] - 0.00106452669505) < 0.0000001)
fig = plt.figure()
plt.plot(thresholds, precisions)
plt.plot(thresholds, recalls)
plt.plot(thresholds, fmeasures)
plt.legend(['Precision', 'Recall', 'F-measure'])
display(fig)
pass
# COMMAND ----------
# Create a DataFrame and visualize using display()
graph = [(t, precision(t), recall(t),fmeasure(t)) for t in thresholds]
graphRDD = sc.parallelize(graph)
graphRow = graphRDD.map(lambda (t, x, y, z): Row(threshold=t, precision=x, recall=y, fmeasure=z))
graphDF = sqlContext.createDataFrame(graphRow)
display(graphDF)
# COMMAND ----------
# MAGIC %md
# MAGIC #### Discussion
# MAGIC
# MAGIC State-of-the-art tools can get an F-measure of about 60% on this dataset. In this lab exercise, our best F-measure is closer to 40%. Look at some examples of errors (both False Positives and False Negatives) and think about what went wrong.
# MAGIC
# MAGIC #### There are several ways we might improve our simple classifier, including:
# MAGIC * Using additional attributes
# MAGIC * Performing better featurization of our textual data (e.g., stemming, n-grams, etc.)
# MAGIC * Using different similarity functions | unlicense |
wilvk/ansible | lib/ansible/modules/cloud/amazon/ecs_service_facts.py | 14 | 8170 | #!/usr/bin/python
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: ecs_service_facts
short_description: list or describe services in ecs
notes:
- for details of the parameters and returns see U(http://boto3.readthedocs.org/en/latest/reference/services/ecs.html)
description:
- Lists or describes services in ecs.
version_added: "2.1"
author:
- "Mark Chance (@java1guy)"
- "Darek Kaczynski (@kaczynskid)"
requirements: [ json, botocore, boto3 ]
options:
details:
description:
- Set this to true if you want detailed information about the services.
required: false
default: 'false'
choices: ['true', 'false']
cluster:
description:
- The cluster ARNS in which to list the services.
required: false
default: 'default'
service:
description:
- The service to get details for (required if details is true)
required: false
extends_documentation_fragment:
- aws
- ec2
'''
EXAMPLES = '''
# Note: These examples do not set authentication details, see the AWS Guide for details.
# Detailed listing example
- ecs_service_facts:
cluster: test-cluster
service: console-test-service
details: true
# Basic listing example
- ecs_service_facts:
cluster: test-cluster
'''
RETURN = '''
services:
description: When details is false, returns an array of service ARNs, otherwise an array of complex objects as described below.
returned: success
type: complex
contains:
clusterArn:
description: The Amazon Resource Name (ARN) of the of the cluster that hosts the service.
returned: always
type: string
desiredCount:
description: The desired number of instantiations of the task definition to keep running on the service.
returned: always
type: int
loadBalancers:
description: A list of load balancer objects
returned: always
type: complex
contains:
loadBalancerName:
description: the name
returned: always
type: string
containerName:
description: The name of the container to associate with the load balancer.
returned: always
type: string
containerPort:
description: The port on the container to associate with the load balancer.
returned: always
type: int
pendingCount:
description: The number of tasks in the cluster that are in the PENDING state.
returned: always
type: int
runningCount:
description: The number of tasks in the cluster that are in the RUNNING state.
returned: always
type: int
serviceArn:
description: The Amazon Resource Name (ARN) that identifies the service. The ARN contains the arn:aws:ecs namespace, followed by the region of the service, the AWS account ID of the service owner, the service namespace, and then the service name. For example, arn:aws:ecs:region :012345678910 :service/my-service .
returned: always
type: string
serviceName:
description: A user-generated string used to identify the service
returned: always
type: string
status:
description: The valid values are ACTIVE, DRAINING, or INACTIVE.
returned: always
type: string
taskDefinition:
description: The ARN of a task definition to use for tasks in the service.
returned: always
type: string
deployments:
description: list of service deployments
returned: always
type: list of complex
events:
            description: list of service events
returned: always
type: list of complex
''' # NOQA
try:
import botocore
HAS_BOTO3 = True
except ImportError:
HAS_BOTO3 = False
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ec2 import boto3_conn, ec2_argument_spec, get_aws_connection_info
class EcsServiceManager:
"""Handles ECS Services"""
def __init__(self, module):
self.module = module
# self.ecs = boto3.client('ecs')
region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True)
self.ecs = boto3_conn(module, conn_type='client', resource='ecs', region=region, endpoint=ec2_url, **aws_connect_kwargs)
# def list_clusters(self):
# return self.client.list_clusters()
# {'failures': [],
# 'ResponseMetadata': {'HTTPStatusCode': 200, 'RequestId': 'ce7b5880-1c41-11e5-8a31-47a93a8a98eb'},
# 'clusters': [{'activeServicesCount': 0, 'clusterArn': 'arn:aws:ecs:us-west-2:777110527155:cluster/default',
# 'status': 'ACTIVE', 'pendingTasksCount': 0, 'runningTasksCount': 0, 'registeredContainerInstancesCount': 0, 'clusterName': 'default'}]}
# {'failures': [{'arn': 'arn:aws:ecs:us-west-2:777110527155:cluster/bogus', 'reason': 'MISSING'}],
# 'ResponseMetadata': {'HTTPStatusCode': 200, 'RequestId': '0f66c219-1c42-11e5-8a31-47a93a8a98eb'},
# 'clusters': []}
def list_services(self, cluster):
fn_args = dict()
if cluster and cluster is not None:
fn_args['cluster'] = cluster
response = self.ecs.list_services(**fn_args)
relevant_response = dict(services=response['serviceArns'])
return relevant_response
def describe_services(self, cluster, services):
fn_args = dict()
if cluster and cluster is not None:
fn_args['cluster'] = cluster
fn_args['services'] = services.split(",")
response = self.ecs.describe_services(**fn_args)
relevant_response = dict(services=map(self.extract_service_from, response['services']))
if 'failures' in response and len(response['failures']) > 0:
relevant_response['services_not_running'] = response['failures']
return relevant_response
def extract_service_from(self, service):
# some fields are datetime which is not JSON serializable
# make them strings
if 'deployments' in service:
for d in service['deployments']:
if 'createdAt' in d:
d['createdAt'] = str(d['createdAt'])
if 'updatedAt' in d:
d['updatedAt'] = str(d['updatedAt'])
if 'events' in service:
for e in service['events']:
if 'createdAt' in e:
e['createdAt'] = str(e['createdAt'])
return service
def main():
argument_spec = ec2_argument_spec()
argument_spec.update(dict(
details=dict(required=False, type='bool', default=False),
cluster=dict(required=False, type='str'),
service=dict(required=False, type='str')
))
module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True)
if not HAS_BOTO3:
module.fail_json(msg='boto3 is required.')
show_details = module.params.get('details', False)
task_mgr = EcsServiceManager(module)
if show_details:
if 'service' not in module.params or not module.params['service']:
module.fail_json(msg="service must be specified for ecs_service_facts")
ecs_facts = task_mgr.describe_services(module.params['cluster'], module.params['service'])
else:
ecs_facts = task_mgr.list_services(module.params['cluster'])
ecs_facts_result = dict(changed=False, ansible_facts=ecs_facts)
module.exit_json(**ecs_facts_result)
if __name__ == '__main__':
main()
| gpl-3.0 |
Muxi-X/muxi_site | muxiwebsite/api/signup.py | 2 | 1117 | # -*- coding: utf-8 -*-
"""
signup.py
~~~~~~~~~
    Muxi official website signup API
"""
from flask import jsonify, g, request
from . import api
from muxiwebsite.models import User
from .authentication import auth
from muxiwebsite import db
from werkzeug.security import generate_password_hash
import base64
@api.route('/signup/', methods=['POST'])
def signup():
    """User signup."""
un = request.get_json().get("username")
email = request.get_json().get("email")
password = request.get_json().get("password")
if User.query.filter_by(username=un).first() is not None:
return jsonify ({}), 401
if User.query.filter_by(email=email).first() is not None:
return jsonify ({}), 402
if un is None or email is None or password is None:
return jsonify ({}), 403
user = User(
username = un,
email = email,
password = base64.b64encode(password),
avatar_url = "http://7xrvvt.com1.z0.glb.clouddn.com/shakedog.gif",
)
db.session.add(user)
db.session.commit()
return jsonify({
"created": user.id ,
}), 200
| mit |
mlucchini/electricitymap | parsers/CA_YT.py | 1 | 4487 | import arrow
from bs4 import BeautifulSoup
import requests
timezone = 'Canada/Pacific'
def fetch_production(country_code='CA-YT', session=None):
"""Requests the last known production mix (in MW) of a given region
Arguments:
country_code -- ignored here, only information for CA-YT is returned
session (optional) -- request session passed in order to re-use an existing session
"""
"""
We are using Yukon Energy's data from
http://www.yukonenergy.ca/energy-in-yukon/electricity-101/current-energy-consumption
Generation in Yukon is done with hydro, diesel oil, and LNG.
There are two companies, Yukon Energy and ATCO aka Yukon Electric aka YECL.
Yukon Energy does most of the generation and feeds into Yukon's grid.
ATCO does operations, billing, and generation in some of the off-grid communities.
See schema of the grid at http://www.atcoelectricyukon.com/About-Us/
Per https://en.wikipedia.org/wiki/Yukon#Municipalities_by_population
of total population 35874 (2016 census), 28238 are in municipalities
that are connected to the grid - that is 78.7%.
Off-grid generation is with diesel generators, this is not reported online as of 2017-06-23
and is not included in this calculation.
Yukon Energy reports only "hydro" and "thermal" generation.
Per http://www.yukonenergy.ca/ask-janet/lng-and-boil-off-gas,
in 2016 the thermal generation was about 50% diesel and 50% LNG.
But since Yukon Energy doesn't break it down on their website,
we return all thermal as "unknown".
Per https://en.wikipedia.org/wiki/List_of_generating_stations_in_Yukon
Yukon Energy operates about 98% of Yukon's hydro capacity, the only exception is
the small 1.3 MW Fish Lake dam operated by ATCO/Yukon Electrical. That's small enough
to not matter, I think.
There is also a small 0.81 MW wind farm, its current generation is not available.
"""
requests_obj = session or requests.session()
url = 'http://www.yukonenergy.ca/consumption/chart_current.php?chart=current&width=420'
response = requests_obj.get(url)
soup = BeautifulSoup(response.text, 'html.parser')
def find_div_by_class(soup_obj, cls):
return soup_obj.find('div', attrs={'class': cls})
def parse_mw(text):
try:
return float(text[:text.index('MW')])
except ValueError:
return 0
# date is specified like "Thursday, June 22, 2017"
source_date = find_div_by_class(soup, 'current_date').text
# time is specified like "11:55 pm" or "2:25 am"
source_time = find_div_by_class(soup, 'current_time').text
datetime_text = '{} {}'.format(source_date, source_time)
datetime_arrow = arrow.get(datetime_text, 'dddd, MMMM D, YYYY h:mm A')
datetime_datetime = arrow.get(datetime_arrow.datetime, timezone).datetime
# generation is specified like "37.69 MW - hydro"
hydro_div = find_div_by_class(soup, 'load_hydro')
hydro_text = hydro_div.div.text
hydro_generation = parse_mw(hydro_text)
hydro_cap_div = find_div_by_class(soup, 'avail_hydro')
if hydro_cap_div:
hydro_cap_text = hydro_cap_div.div.text
hydro_capacity = parse_mw(hydro_cap_text)
else:
# hydro capacity is not provided when thermal is used
hydro_capacity = None
thermal_div = find_div_by_class(soup, 'load_thermal')
if thermal_div.div:
thermal_text = thermal_div.div.text
thermal_generation = parse_mw(thermal_text)
else:
# thermal is not always used and when it's not used, it's not specified in HTML
thermal_generation = 0
data = {
'datetime': datetime_datetime,
'countryCode': country_code,
'production': {
'unknown': thermal_generation,
'hydro': hydro_generation,
# specify some sources that aren't present in Yukon as zero,
# this allows the analyzer to better estimate CO2eq
'coal': 0,
'nuclear': 0,
'geothermal': 0
},
'storage': {},
'source': 'www.yukonenergy.ca'
}
if hydro_capacity:
data.update({
'capacity': {
'hydro': hydro_capacity
}
})
return data
if __name__ == '__main__':
"""Main method, never used by the Electricity Map backend, but handy for testing."""
print('fetch_production() ->')
print(fetch_production())
| gpl-3.0 |
stamhe/zulip | zproject/test_settings.py | 115 | 1817 | from settings import *
import os
DATABASES["default"] = {"NAME": "zulip_test",
"USER": "zulip_test",
"PASSWORD": LOCAL_DATABASE_PASSWORD,
"HOST": "localhost",
"SCHEMA": "zulip",
"ENGINE": "django.db.backends.postgresql_psycopg2",
"TEST_NAME": "django_zulip_tests",
"OPTIONS": {"connection_factory": TimeTrackingConnection },}
if "TORNADO_SERVER" in os.environ:
TORNADO_SERVER = os.environ["TORNADO_SERVER"]
else:
TORNADO_SERVER = None
# Decrease the get_updates timeout to 1 second.
# This allows CasperJS to proceed quickly to the next test step.
POLL_TIMEOUT = 1000
# Don't use the real message log for tests
EVENT_LOG_DIR = '/tmp/zulip-test-event-log'
# Print our emails rather than sending them
EMAIL_BACKEND = 'django.core.mail.backends.locmem.EmailBackend'
# The test suite uses EmailAuthBackend
AUTHENTICATION_BACKENDS += ('zproject.backends.EmailAuthBackend',)
TEST_SUITE = True
RATE_LIMITING = False
# Don't use rabbitmq from the test suite -- the user_profile_ids for
# any generated queue elements won't match those being used by the
# real app.
USING_RABBITMQ = False
# Disable the tutorial because it confuses the client tests.
TUTORIAL_ENABLED = False
# Disable use of memcached for caching
CACHES['database'] = {
'BACKEND': 'django.core.cache.backends.dummy.DummyCache',
'LOCATION': 'zulip-database-test-cache',
'TIMEOUT': 3600,
'CONN_MAX_AGE': 600,
'OPTIONS': {
'MAX_ENTRIES': 100000
}
}
LOGGING['loggers']['zulip.requests']['level'] = 'CRITICAL'
LOGGING['loggers']['zulip.management']['level'] = 'CRITICAL'
CAMO_URI = 'https://external-content.zulipcdn.net/'
CAMO_KEY = 'dummy'
| apache-2.0 |
fusionpig/ansible | lib/ansible/plugins/action/win_template.py | 117 | 1168 | # (c) 2012-2014, Michael DeHaan <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible.plugins.action import ActionBase
from ansible.plugins.action.template import ActionModule as TemplateActionModule
# Even though TemplateActionModule inherits from ActionBase, we still need to
# directly inherit from ActionBase to appease the plugin loader.
class ActionModule(TemplateActionModule, ActionBase):
pass
| gpl-3.0 |
elkingtonmcb/rethinkdb | external/v8_3.30.33.16/tools/testrunner/local/progress.py | 41 | 10716 | # Copyright 2012 the V8 project authors. All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import json
import os
import sys
import time
from . import junit_output
ABS_PATH_PREFIX = os.getcwd() + os.sep
def EscapeCommand(command):
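  """Formats a command list as a single printable string, quoting parts that contain spaces."""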
parts = []
for part in command:
if ' ' in part:
# Escape spaces. We may need to escape more characters for this
# to work properly.
parts.append('"%s"' % part)
else:
parts.append(part)
return " ".join(parts)
class ProgressIndicator(object):
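  """Abstract base class for progress indicators; a test runner is attached externally as self.runner."""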
def __init__(self):
self.runner = None
def Starting(self):
pass
def Done(self):
pass
def AboutToRun(self, test):
pass
def HasRun(self, test, has_unexpected_output):
pass
def PrintFailureHeader(self, test):
if test.suite.IsNegativeTest(test):
negative_marker = '[negative] '
else:
negative_marker = ''
print "=== %(label)s %(negative)s===" % {
'label': test.GetLabel(),
'negative': negative_marker
}
class SimpleProgressIndicator(ProgressIndicator):
"""Abstract base class for {Verbose,Dots}ProgressIndicator"""
def Starting(self):
print 'Running %i tests' % self.runner.total
def Done(self):
print
for failed in self.runner.failed:
self.PrintFailureHeader(failed)
if failed.output.stderr:
print "--- stderr ---"
print failed.output.stderr.strip()
if failed.output.stdout:
print "--- stdout ---"
print failed.output.stdout.strip()
print "Command: %s" % EscapeCommand(self.runner.GetCommand(failed))
if failed.output.HasCrashed():
print "exit code: %d" % failed.output.exit_code
print "--- CRASHED ---"
if failed.output.HasTimedOut():
print "--- TIMEOUT ---"
if len(self.runner.failed) == 0:
print "==="
print "=== All tests succeeded"
print "==="
else:
print
print "==="
print "=== %i tests failed" % len(self.runner.failed)
if self.runner.crashed > 0:
print "=== %i tests CRASHED" % self.runner.crashed
print "==="
class VerboseProgressIndicator(SimpleProgressIndicator):
def AboutToRun(self, test):
print 'Starting %s...' % test.GetLabel()
sys.stdout.flush()
def HasRun(self, test, has_unexpected_output):
if has_unexpected_output:
if test.output.HasCrashed():
outcome = 'CRASH'
else:
outcome = 'FAIL'
else:
outcome = 'pass'
print 'Done running %s: %s' % (test.GetLabel(), outcome)
class DotsProgressIndicator(SimpleProgressIndicator):
def HasRun(self, test, has_unexpected_output):
total = self.runner.succeeded + len(self.runner.failed)
if (total > 1) and (total % 50 == 1):
sys.stdout.write('\n')
if has_unexpected_output:
if test.output.HasCrashed():
sys.stdout.write('C')
sys.stdout.flush()
elif test.output.HasTimedOut():
sys.stdout.write('T')
sys.stdout.flush()
else:
sys.stdout.write('F')
sys.stdout.flush()
else:
sys.stdout.write('.')
sys.stdout.flush()
class CompactProgressIndicator(ProgressIndicator):
"""Abstract base class for {Color,Monochrome}ProgressIndicator"""
def __init__(self, templates):
super(CompactProgressIndicator, self).__init__()
self.templates = templates
self.last_status_length = 0
self.start_time = time.time()
def Done(self):
self.PrintProgress('Done')
print "" # Line break.
def AboutToRun(self, test):
self.PrintProgress(test.GetLabel())
def HasRun(self, test, has_unexpected_output):
if has_unexpected_output:
self.ClearLine(self.last_status_length)
self.PrintFailureHeader(test)
stdout = test.output.stdout.strip()
if len(stdout):
print self.templates['stdout'] % stdout
stderr = test.output.stderr.strip()
if len(stderr):
print self.templates['stderr'] % stderr
print "Command: %s" % EscapeCommand(self.runner.GetCommand(test))
if test.output.HasCrashed():
print "exit code: %d" % test.output.exit_code
print "--- CRASHED ---"
if test.output.HasTimedOut():
print "--- TIMEOUT ---"
def Truncate(self, string, length):
if length and (len(string) > (length - 3)):
return string[:(length - 3)] + "..."
else:
return string
def PrintProgress(self, name):
self.ClearLine(self.last_status_length)
elapsed = time.time() - self.start_time
status = self.templates['status_line'] % {
'passed': self.runner.succeeded,
'remaining': (((self.runner.total - self.runner.remaining) * 100) //
self.runner.total),
'failed': len(self.runner.failed),
'test': name,
'mins': int(elapsed) / 60,
'secs': int(elapsed) % 60
}
status = self.Truncate(status, 78)
self.last_status_length = len(status)
print status,
sys.stdout.flush()
class ColorProgressIndicator(CompactProgressIndicator):
def __init__(self):
templates = {
'status_line': ("[%(mins)02i:%(secs)02i|"
"\033[34m%%%(remaining) 4d\033[0m|"
"\033[32m+%(passed) 4d\033[0m|"
"\033[31m-%(failed) 4d\033[0m]: %(test)s"),
'stdout': "\033[1m%s\033[0m",
'stderr': "\033[31m%s\033[0m",
}
super(ColorProgressIndicator, self).__init__(templates)
def ClearLine(self, last_line_length):
print "\033[1K\r",
class MonochromeProgressIndicator(CompactProgressIndicator):
def __init__(self):
templates = {
'status_line': ("[%(mins)02i:%(secs)02i|%%%(remaining) 4d|"
"+%(passed) 4d|-%(failed) 4d]: %(test)s"),
'stdout': '%s',
'stderr': '%s',
}
super(MonochromeProgressIndicator, self).__init__(templates)
def ClearLine(self, last_line_length):
print ("\r" + (" " * last_line_length) + "\r"),
class JUnitTestProgressIndicator(ProgressIndicator):
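  """Wraps another progress indicator and additionally writes results as JUnit XML."""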
def __init__(self, progress_indicator, junitout, junittestsuite):
self.progress_indicator = progress_indicator
self.outputter = junit_output.JUnitTestOutput(junittestsuite)
if junitout:
self.outfile = open(junitout, "w")
else:
self.outfile = sys.stdout
def Starting(self):
self.progress_indicator.runner = self.runner
self.progress_indicator.Starting()
def Done(self):
self.progress_indicator.Done()
self.outputter.FinishAndWrite(self.outfile)
if self.outfile != sys.stdout:
self.outfile.close()
def AboutToRun(self, test):
self.progress_indicator.AboutToRun(test)
def HasRun(self, test, has_unexpected_output):
self.progress_indicator.HasRun(test, has_unexpected_output)
fail_text = ""
if has_unexpected_output:
stdout = test.output.stdout.strip()
if len(stdout):
fail_text += "stdout:\n%s\n" % stdout
stderr = test.output.stderr.strip()
if len(stderr):
fail_text += "stderr:\n%s\n" % stderr
fail_text += "Command: %s" % EscapeCommand(self.runner.GetCommand(test))
if test.output.HasCrashed():
fail_text += "exit code: %d\n--- CRASHED ---" % test.output.exit_code
if test.output.HasTimedOut():
fail_text += "--- TIMEOUT ---"
self.outputter.HasRunTest(
[test.GetLabel()] + self.runner.context.mode_flags + test.flags,
test.duration,
fail_text)
class JsonTestProgressIndicator(ProgressIndicator):
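  """Wraps another progress indicator and appends unexpected test results to a JSON file."""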
def __init__(self, progress_indicator, json_test_results, arch, mode):
self.progress_indicator = progress_indicator
self.json_test_results = json_test_results
self.arch = arch
self.mode = mode
self.results = []
def Starting(self):
self.progress_indicator.runner = self.runner
self.progress_indicator.Starting()
def Done(self):
self.progress_indicator.Done()
complete_results = []
if os.path.exists(self.json_test_results):
with open(self.json_test_results, "r") as f:
# Buildbot might start out with an empty file.
complete_results = json.loads(f.read() or "[]")
complete_results.append({
"arch": self.arch,
"mode": self.mode,
"results": self.results,
})
with open(self.json_test_results, "w") as f:
f.write(json.dumps(complete_results))
def AboutToRun(self, test):
self.progress_indicator.AboutToRun(test)
def HasRun(self, test, has_unexpected_output):
self.progress_indicator.HasRun(test, has_unexpected_output)
if not has_unexpected_output:
# Omit tests that run as expected. Passing tests of reruns after failures
      # will have unexpected_output and will be reported here as well.
return
self.results.append({
"name": test.GetLabel(),
"flags": test.flags,
"command": EscapeCommand(self.runner.GetCommand(test)).replace(
ABS_PATH_PREFIX, ""),
"run": test.run,
"stdout": test.output.stdout,
"stderr": test.output.stderr,
"exit_code": test.output.exit_code,
"result": test.suite.GetOutcome(test),
})
PROGRESS_INDICATORS = {
'verbose': VerboseProgressIndicator,
'dots': DotsProgressIndicator,
'color': ColorProgressIndicator,
'mono': MonochromeProgressIndicator
}
| agpl-3.0 |
ScienceStacks/SciSheets | mysite/scisheets/core/helpers/block_execution_controller.py | 2 | 7374 | """
This class interacts with the code generated for evaluating a
scisheet to control the execution of blocks of code. A block
of code (hereafter, just block) can be a formula, prologue, or
epilogue.
"""
from Files.logger import Logger
from mysite import settings
import inspect
import os
import sys
class BlockExecutionController(object):
"""
Assists with:
1. Controlling the execution of a code block, such as
making exceptions precise by identifying the
code block and line number at which an exception occurs.
See: startBlock, endBlock, exceptionForBlock
2. Managing loop iterations.
     See: initializeLoop, startAnIteration, endAnIteration, isTerminateLoop
"""
def __init__(self, scisheets_api, is_logging=False, debug=False):
"""
:param ApiFormula scisheets_api:
:param bool is_logging: creates a log file
"""
self.debug = debug
self._api = scisheets_api
self._block_linenumber = None # Where exception occurred in block
self._block_name = None
self._block_start_linenumber = None # Start of block in source
self._caller_filename = None
self._exception = None
self._exception_filename = None
if is_logging:
self._logger = Logger(settings.SCISHEETS_LOG,
"controller")
else:
self._logger = None
self._iterations = 0
self._is_first = True
self._table = None
if self._api is not None:
self._table = self._api.getTable()
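  # A minimal usage sketch (hypothetical generated code, not part of this module),
  # illustrating how the controller is typically driven:
  #
  #   controller = BlockExecutionController(scisheets_api)
  #   controller.initializeLoop()
  #   while not controller.isTerminateLoop():
  #     controller.startAnIteration()
  #     controller.startBlock('A_FORMULA')
  #     try:
  #       pass  # evaluate the formula block here
  #     except Exception as exc:
  #       controller.exceptionForBlock(exc)
  #     controller.endBlock()
  #     controller.endAnIteration()
  #   controller.endProgram()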
def _log(self, name, details):
if self._logger is not None:
self._logger.log(name, details=details)
# TODO: Handle different file for caller
def startBlock(self, name):
"""
Called at the start of a block that is being evaluated.
:param str name: User oriented identifier of the code block
"""
if self.debug:
if name == 'V_MAX':
import pdb; pdb.set_trace()
self._block_name = name
context = inspect.getouterframes(inspect.currentframe())[1]
linenumber = context[2]
self._caller_filename = context[1]
self._block_start_linenumber = linenumber + 1
self._exception_filename = None
self._log("start/%s" % self._block_name, "")
def endBlock(self):
"""
Called at the end of a block
"""
self._log("end/%s" % self._block_name, "")
self._block_start_linenumber = None
self._caller_filename = None
self._exception_filename = None
self._block_name = None
def exceptionForBlock(self, exception):
"""
Called when an exception has occurred.
:param Exception exception:
    Records the block name and the line number within the block at which
    the exception occurred, for later reporting by formatError().
"""
if self.debug:
import pdb; pdb.set_trace()
if self._block_name is None:
self._block_name = "Unknown"
self._exception = exception
_, _, exc_tb = sys.exc_info()
self._exception_filename = exc_tb.tb_frame.f_code.co_filename
# Check for compile error
if 'lineno' in dir(self._exception):
abs_linenumber = self._exception.lineno
is_runtime_error = False
# Must be runtime error
else:
abs_linenumber = exc_tb.tb_lineno
is_runtime_error = True
# Compute the line number of the exception
if is_runtime_error and \
self._exception_filename == self._caller_filename:
self._block_linenumber = abs_linenumber \
- self._block_start_linenumber + 1
else:
self._block_linenumber = abs_linenumber
self._log("exception/%s" % self._block_name, self.formatError())
def formatError(self,
is_absolute_linenumber=False,
is_use_block_name=True):
"""
Formats the exception to include the block and line number.
:param bool is_absolute_linenumber: Forces message to be
an absolute line number
:param bool is_use_block_name: Use the block name in the message
:return str/None:
"""
if self._exception is None:
return None
if is_use_block_name:
if (not is_absolute_linenumber) \
and self._caller_filename == self._exception_filename:
        if "Computing" not in str(self._exception):
msg = "Computing %s near line %d: %s" % (self._block_name,
self._block_linenumber, str(self._exception))
else:
msg = str(self._exception)
else:
msg = "In %s near line %d: %s" % (self._exception_filename,
self._block_linenumber, str(self._exception))
else:
msg = "near line %d: %s" % (self._block_linenumber, str(self._exception))
return msg
def initializeLoop(self):
"""
Initializes variables before loop begins
"""
self._iterations = 0
self._log("initializeLoop", "")
def startAnIteration(self):
"""
Beginning of a loop iteration
"""
self._iterations += 1
self._exception = None
for cv in self._api.getColumnVariables():
try:
cv.setIterationStartValue()
except Exception as err:
        if self.debug:
          import pdb; pdb.set_trace()
        pass
self._log("startAnIteration", "iterations=%d" % self._iterations)
def endAnIteration(self):
"""
End of a loop iteration
"""
self._log("endAnIteration", "iterations=%d" % self._iterations)
def endProgram(self, details=""):
"""
    End of the program
"""
self._log("endProgram", details)
def _isEquivalentValues(self):
"""
    Checks that no namespace variable has changed since the start of the iteration.
:return bool, cv/None: True if no change; cv of first ColumnVariable that failed
"""
for cv in self._api.getColumnVariables():
if not cv.isNamespaceValueEquivalentToIterationStartValue():
return False, cv
return True, None
def isTerminateLoop(self):
"""
Determines if the loop should terminate
:return bool: terminate loop if True
"""
num_formula_columns = len(self._table.getFormulaColumns())
outcome = ""
done = None
is_first = self._is_first
if is_first:
self._is_first = False
done = False
is_not_evaluate = None
      is_not_except = None
is_equiv = None
is_large = None
cv_bad = None
else:
is_not_evaluate = not self._table.getIsEvaluateFormulas()
      is_not_except = self._exception is None
is_equiv, cv_bad = self._isEquivalentValues()
is_large = self._iterations >= num_formula_columns
if is_not_evaluate:
outcome = "True - not isEvaluateFormulas"
done = True
elif is_not_except and is_equiv:
outcome = "True - not exception & equivalent values"
done = True
elif is_large:
outcome = "True - iterations >= num_formula_columns"
done = True
else:
outcome = "False"
done = False
details = "%s: not_evaluate: %s; not_except: %s;" \
% (outcome, is_not_evaluate, is_not_except)
cv_msg = str(is_equiv)
if cv_bad is not None:
cv_msg = "%s,col=%s" % (is_equiv, cv_bad.getColumn().getName())
details = "%s equiv: %s; first: %s; large: %s." \
% (details, cv_msg, is_first, is_large)
self._log("isTerminateLoop", details)
return done
def getException(self):
return self._exception
def getExceptionLineNumber(self):
return self._block_linenumber
def setTable(self, table):
self._table = table
| apache-2.0 |
yukim/cassandra-dtest | udtencoding_test.py | 2 | 2105 | from dtest import Tester
from assertions import assert_invalid
from tools import since
import os, sys, time
from ccmlib.cluster import Cluster
@since('2.1')
class TestUDTEncoding(Tester):
    def udt_test(self):
        """ Test (somewhat indirectly) that user queries involving UDTs are properly encoded (since the driver does not recognize UDT syntax) """
cluster = self.cluster
cluster.populate(3).start()
[node1, node2, node3] = cluster.nodelist()
time.sleep(.5)
cursor = self.patient_cql_connection(node1)
self.create_ks(cursor, 'ks', 3)
# create udt and insert correctly (should be successful)
cursor.execute('CREATE TYPE address (city text,zip int);')
cursor.execute('CREATE TABLE user_profiles (login text PRIMARY KEY, addresses map<text, frozen<address>>);')
cursor.execute("INSERT INTO user_profiles(login, addresses) VALUES ('tsmith', { 'home': {city: 'San Fransisco',zip: 94110 }});")
        # Note: here 'addresses' looks like a map, which is what the driver thinks it is. The UDT is encoded server side; we test whether the encoder recognizes the errors when 'addresses' is changed slightly.
# try adding a field - see if will be encoded to a udt (should return error)
assert_invalid(cursor, "INSERT INTO user_profiles(login, addresses) VALUES ('jsmith', { 'home': {street: 'El Camino Real', city: 'San Fransisco', zip: 94110 }});", "Unknown field 'street' in value of user defined type address")
# try modifying a field name - see if will be encoded to a udt (should return error)
assert_invalid(cursor, "INSERT INTO user_profiles(login, addresses) VALUES ('fsmith', { 'home': {cityname: 'San Fransisco', zip: 94110 }});", "Unknown field 'cityname' in value of user defined type address")
# try modifying a type within the collection - see if will be encoded to a udt (should return error)
assert_invalid(cursor, "INSERT INTO user_profiles(login, addresses) VALUES ('fsmith', { 'home': {city: 'San Fransisco', zip: '94110' }});", "Invalid map literal for addresses")
| apache-2.0 |
lyarwood/bugwarrior | tests/test_config.py | 2 | 3581 | # coding: utf-8
from __future__ import unicode_literals
import os
import configparser
from unittest import TestCase
import bugwarrior.config as config
from .base import ConfigTest
class TestGetConfigPath(ConfigTest):
def create(self, path):
"""
Create an empty file in the temporary directory, return the full path.
"""
fpath = os.path.join(self.tempdir, path)
if not os.path.exists(os.path.dirname(fpath)):
os.makedirs(os.path.dirname(fpath))
open(fpath, 'a').close()
return fpath
def test_default(self):
"""
If it exists, use the file at $XDG_CONFIG_HOME/bugwarrior/bugwarriorrc
"""
rc = self.create('.config/bugwarrior/bugwarriorrc')
self.assertEquals(config.get_config_path(), rc)
def test_legacy(self):
"""
Falls back on .bugwarriorrc if it exists
"""
rc = self.create('.bugwarriorrc')
self.assertEquals(config.get_config_path(), rc)
def test_xdg_first(self):
"""
If both files above exist, the one in $XDG_CONFIG_HOME takes precedence
"""
self.create('.bugwarriorrc')
rc = self.create('.config/bugwarrior/bugwarriorrc')
self.assertEquals(config.get_config_path(), rc)
def test_no_file(self):
"""
        If no bugwarriorrc exists anywhere, the path to the preferred one is
        returned.
"""
self.assertEquals(
config.get_config_path(),
os.path.join(self.tempdir, '.config/bugwarrior/bugwarriorrc'))
def test_BUGWARRIORRC(self):
"""
If $BUGWARRIORRC is set, it takes precedence over everything else (even
if the file doesn't exist).
"""
rc = os.path.join(self.tempdir, 'my-bugwarriorc')
os.environ['BUGWARRIORRC'] = rc
self.create('.bugwarriorrc')
self.create('.config/bugwarrior/bugwarriorrc')
self.assertEquals(config.get_config_path(), rc)
def test_BUGWARRIORRC_empty(self):
"""
        If $BUGWARRIORRC is set but empty, it is not used and the default file
is used instead.
"""
os.environ['BUGWARRIORRC'] = ''
rc = self.create('.config/bugwarrior/bugwarriorrc')
self.assertEquals(config.get_config_path(), rc)
class TestGetDataPath(ConfigTest):
def setUp(self):
super(TestGetDataPath, self).setUp()
self.config = configparser.RawConfigParser()
self.config.add_section('general')
def assertDataPath(self, expected_datapath):
self.assertEqual(
expected_datapath, config.get_data_path(self.config, 'general'))
def test_TASKDATA(self):
"""
TASKDATA should be respected, even when taskrc's data.location is set.
"""
datapath = os.environ['TASKDATA'] = os.path.join(self.tempdir, 'data')
self.assertDataPath(datapath)
def test_taskrc_datalocation(self):
"""
When TASKDATA is not set, data.location in taskrc should be respected.
"""
os.environ['TASKDATA'] = ''
self.assertDataPath(self.lists_path)
def test_unassigned(self):
"""
When data path is not assigned, use default location.
"""
# Empty taskrc.
with open(self.taskrc, 'w'):
pass
os.environ['TASKDATA'] = ''
self.assertDataPath(os.path.expanduser('~/.task'))
class TestOracleEval(TestCase):
def test_echo(self):
self.assertEqual(config.oracle_eval("echo fööbår"), "fööbår")
| gpl-3.0 |
TangXT/GreatCatMOOC | lms/djangoapps/courseware/tests/test_model_data.py | 3 | 14099 | """
Tests for the lms courseware app's module data (runtime data storage for XBlocks)
"""
import json
from mock import Mock, patch
from functools import partial
from courseware.model_data import DjangoKeyValueStore
from courseware.model_data import InvalidScopeError, FieldDataCache
from courseware.models import StudentModule, XModuleUserStateSummaryField
from courseware.models import XModuleStudentInfoField, XModuleStudentPrefsField
from student.tests.factories import UserFactory
from courseware.tests.factories import StudentModuleFactory as cmfStudentModuleFactory
from courseware.tests.factories import UserStateSummaryFactory
from courseware.tests.factories import StudentPrefsFactory, StudentInfoFactory
from xblock.fields import Scope, BlockScope
from xmodule.modulestore import Location
from django.test import TestCase
from django.db import DatabaseError
from xblock.core import KeyValueMultiSaveError
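# Helpers for building mock XBlock fields and descriptors used by the tests below.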
def mock_field(scope, name):
field = Mock()
field.scope = scope
field.name = name
return field
def mock_descriptor(fields=[]):
descriptor = Mock()
descriptor.location = location('def_id')
descriptor.module_class.fields.values.return_value = fields
descriptor.fields.values.return_value = fields
descriptor.module_class.__name__ = 'MockProblemModule'
return descriptor
location = partial(Location, 'i4x', 'edX', 'test_course', 'problem')
course_id = 'edX/test_course/test'
# The user ids here are 1 because we make a student in the setUp functions, and
# they get an id of 1. There's an assertion in setUp to ensure that assumption
# is still true.
user_state_summary_key = partial(DjangoKeyValueStore.Key, Scope.user_state_summary, None, location('def_id'))
settings_key = partial(DjangoKeyValueStore.Key, Scope.settings, None, location('def_id'))
user_state_key = partial(DjangoKeyValueStore.Key, Scope.user_state, 1, location('def_id'))
prefs_key = partial(DjangoKeyValueStore.Key, Scope.preferences, 1, 'MockProblemModule')
user_info_key = partial(DjangoKeyValueStore.Key, Scope.user_info, 1, None)
class StudentModuleFactory(cmfStudentModuleFactory):
module_state_key = location('def_id').url()
course_id = course_id
class TestInvalidScopes(TestCase):
def setUp(self):
self.user = UserFactory.create(username='user')
self.field_data_cache = FieldDataCache([mock_descriptor([mock_field(Scope.user_state, 'a_field')])], course_id, self.user)
self.kvs = DjangoKeyValueStore(self.field_data_cache)
def test_invalid_scopes(self):
for scope in (Scope(user=True, block=BlockScope.DEFINITION),
Scope(user=False, block=BlockScope.TYPE),
Scope(user=False, block=BlockScope.ALL)):
key = DjangoKeyValueStore.Key(scope, None, None, 'field')
self.assertRaises(InvalidScopeError, self.kvs.get, key)
self.assertRaises(InvalidScopeError, self.kvs.set, key, 'value')
self.assertRaises(InvalidScopeError, self.kvs.delete, key)
self.assertRaises(InvalidScopeError, self.kvs.has, key)
self.assertRaises(InvalidScopeError, self.kvs.set_many, {key: 'value'})
class TestStudentModuleStorage(TestCase):
def setUp(self):
student_module = StudentModuleFactory(state=json.dumps({'a_field': 'a_value', 'b_field': 'b_value'}))
self.user = student_module.student
self.assertEqual(self.user.id, 1) # check our assumption hard-coded in the key functions above.
self.field_data_cache = FieldDataCache([mock_descriptor([mock_field(Scope.user_state, 'a_field')])], course_id, self.user)
self.kvs = DjangoKeyValueStore(self.field_data_cache)
def test_get_existing_field(self):
"Test that getting an existing field in an existing StudentModule works"
self.assertEquals('a_value', self.kvs.get(user_state_key('a_field')))
def test_get_missing_field(self):
"Test that getting a missing field from an existing StudentModule raises a KeyError"
self.assertRaises(KeyError, self.kvs.get, user_state_key('not_a_field'))
def test_set_existing_field(self):
"Test that setting an existing user_state field changes the value"
self.kvs.set(user_state_key('a_field'), 'new_value')
self.assertEquals(1, StudentModule.objects.all().count())
self.assertEquals({'b_field': 'b_value', 'a_field': 'new_value'}, json.loads(StudentModule.objects.all()[0].state))
def test_set_missing_field(self):
"Test that setting a new user_state field changes the value"
self.kvs.set(user_state_key('not_a_field'), 'new_value')
self.assertEquals(1, StudentModule.objects.all().count())
self.assertEquals({'b_field': 'b_value', 'a_field': 'a_value', 'not_a_field': 'new_value'}, json.loads(StudentModule.objects.all()[0].state))
def test_delete_existing_field(self):
"Test that deleting an existing field removes it from the StudentModule"
self.kvs.delete(user_state_key('a_field'))
self.assertEquals(1, StudentModule.objects.all().count())
        self.assertRaises(KeyError, self.kvs.get, user_state_key('a_field'))
def test_delete_missing_field(self):
"Test that deleting a missing field from an existing StudentModule raises a KeyError"
self.assertRaises(KeyError, self.kvs.delete, user_state_key('not_a_field'))
self.assertEquals(1, StudentModule.objects.all().count())
self.assertEquals({'b_field': 'b_value', 'a_field': 'a_value'}, json.loads(StudentModule.objects.all()[0].state))
def test_has_existing_field(self):
"Test that `has` returns True for existing fields in StudentModules"
self.assertTrue(self.kvs.has(user_state_key('a_field')))
def test_has_missing_field(self):
"Test that `has` returns False for missing fields in StudentModule"
self.assertFalse(self.kvs.has(user_state_key('not_a_field')))
def construct_kv_dict(self):
"""Construct a kv_dict that can be passed to set_many"""
key1 = user_state_key('field_a')
key2 = user_state_key('field_b')
new_value = 'new value'
newer_value = 'newer value'
return {key1: new_value, key2: newer_value}
def test_set_many(self):
"Test setting many fields that are scoped to Scope.user_state"
kv_dict = self.construct_kv_dict()
self.kvs.set_many(kv_dict)
for key in kv_dict:
self.assertEquals(self.kvs.get(key), kv_dict[key])
def test_set_many_failure(self):
"Test failures when setting many fields that are scoped to Scope.user_state"
kv_dict = self.construct_kv_dict()
# because we're patching the underlying save, we need to ensure the
# fields are in the cache
for key in kv_dict:
self.kvs.set(key, 'test_value')
with patch('django.db.models.Model.save', side_effect=DatabaseError):
with self.assertRaises(KeyValueMultiSaveError) as exception_context:
self.kvs.set_many(kv_dict)
self.assertEquals(len(exception_context.exception.saved_field_names), 0)
class TestMissingStudentModule(TestCase):
def setUp(self):
self.user = UserFactory.create(username='user')
self.assertEqual(self.user.id, 1) # check our assumption hard-coded in the key functions above.
self.field_data_cache = FieldDataCache([mock_descriptor()], course_id, self.user)
self.kvs = DjangoKeyValueStore(self.field_data_cache)
def test_get_field_from_missing_student_module(self):
"Test that getting a field from a missing StudentModule raises a KeyError"
self.assertRaises(KeyError, self.kvs.get, user_state_key('a_field'))
def test_set_field_in_missing_student_module(self):
"Test that setting a field in a missing StudentModule creates the student module"
self.assertEquals(0, len(self.field_data_cache.cache))
self.assertEquals(0, StudentModule.objects.all().count())
self.kvs.set(user_state_key('a_field'), 'a_value')
self.assertEquals(1, len(self.field_data_cache.cache))
self.assertEquals(1, StudentModule.objects.all().count())
student_module = StudentModule.objects.all()[0]
self.assertEquals({'a_field': 'a_value'}, json.loads(student_module.state))
self.assertEquals(self.user, student_module.student)
self.assertEquals(location('def_id').url(), student_module.module_state_key)
self.assertEquals(course_id, student_module.course_id)
def test_delete_field_from_missing_student_module(self):
"Test that deleting a field from a missing StudentModule raises a KeyError"
self.assertRaises(KeyError, self.kvs.delete, user_state_key('a_field'))
def test_has_field_for_missing_student_module(self):
"Test that `has` returns False for missing StudentModules"
self.assertFalse(self.kvs.has(user_state_key('a_field')))
class StorageTestBase(object):
"""
    A base class that gets subclassed when testing each of the scopes.
"""
# Disable pylint warnings that arise because of the way the child classes call
# this base class -- pylint's static analysis can't keep up with it.
# pylint: disable=E1101, E1102
factory = None
scope = None
key_factory = None
storage_class = None
def setUp(self):
field_storage = self.factory.create()
if hasattr(field_storage, 'student'):
self.user = field_storage.student
else:
self.user = UserFactory.create()
self.mock_descriptor = mock_descriptor([
mock_field(self.scope, 'existing_field'),
mock_field(self.scope, 'other_existing_field')])
self.field_data_cache = FieldDataCache([self.mock_descriptor], course_id, self.user)
self.kvs = DjangoKeyValueStore(self.field_data_cache)
def test_set_and_get_existing_field(self):
self.kvs.set(self.key_factory('existing_field'), 'test_value')
self.assertEquals('test_value', self.kvs.get(self.key_factory('existing_field')))
def test_get_existing_field(self):
"Test that getting an existing field in an existing Storage Field works"
self.assertEquals('old_value', self.kvs.get(self.key_factory('existing_field')))
def test_get_missing_field(self):
"Test that getting a missing field from an existing Storage Field raises a KeyError"
self.assertRaises(KeyError, self.kvs.get, self.key_factory('missing_field'))
def test_set_existing_field(self):
"Test that setting an existing field changes the value"
self.kvs.set(self.key_factory('existing_field'), 'new_value')
self.assertEquals(1, self.storage_class.objects.all().count())
self.assertEquals('new_value', json.loads(self.storage_class.objects.all()[0].value))
def test_set_missing_field(self):
"Test that setting a new field changes the value"
self.kvs.set(self.key_factory('missing_field'), 'new_value')
self.assertEquals(2, self.storage_class.objects.all().count())
self.assertEquals('old_value', json.loads(self.storage_class.objects.get(field_name='existing_field').value))
self.assertEquals('new_value', json.loads(self.storage_class.objects.get(field_name='missing_field').value))
def test_delete_existing_field(self):
"Test that deleting an existing field removes it"
self.kvs.delete(self.key_factory('existing_field'))
self.assertEquals(0, self.storage_class.objects.all().count())
def test_delete_missing_field(self):
"Test that deleting a missing field from an existing Storage Field raises a KeyError"
self.assertRaises(KeyError, self.kvs.delete, self.key_factory('missing_field'))
self.assertEquals(1, self.storage_class.objects.all().count())
def test_has_existing_field(self):
"Test that `has` returns True for an existing Storage Field"
self.assertTrue(self.kvs.has(self.key_factory('existing_field')))
    def test_has_missing_field(self):
        "Test that `has` returns False for a missing Storage Field"
self.assertFalse(self.kvs.has(self.key_factory('missing_field')))
def construct_kv_dict(self):
"""Construct a kv_dict that can be passed to set_many"""
key1 = self.key_factory('existing_field')
key2 = self.key_factory('other_existing_field')
new_value = 'new value'
newer_value = 'newer value'
return {key1: new_value, key2: newer_value}
def test_set_many(self):
"""Test that setting many regular fields at the same time works"""
kv_dict = self.construct_kv_dict()
self.kvs.set_many(kv_dict)
for key in kv_dict:
self.assertEquals(self.kvs.get(key), kv_dict[key])
def test_set_many_failure(self):
"""Test that setting many regular fields with a DB error """
kv_dict = self.construct_kv_dict()
for key in kv_dict:
self.kvs.set(key, 'test value')
with patch('django.db.models.Model.save', side_effect=[None, DatabaseError]):
with self.assertRaises(KeyValueMultiSaveError) as exception_context:
self.kvs.set_many(kv_dict)
exception = exception_context.exception
self.assertEquals(len(exception.saved_field_names), 1)
self.assertEquals(exception.saved_field_names[0], 'existing_field')
class TestContentStorage(StorageTestBase, TestCase):
factory = UserStateSummaryFactory
scope = Scope.user_state_summary
key_factory = user_state_summary_key
storage_class = XModuleUserStateSummaryField
class TestStudentPrefsStorage(StorageTestBase, TestCase):
factory = StudentPrefsFactory
scope = Scope.preferences
key_factory = prefs_key
storage_class = XModuleStudentPrefsField
class TestStudentInfoStorage(StorageTestBase, TestCase):
factory = StudentInfoFactory
scope = Scope.user_info
key_factory = user_info_key
storage_class = XModuleStudentInfoField
| agpl-3.0 |
zero323/spark | python/pyspark/mllib/random.py | 22 | 19517 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Python package for random data generation.
"""
import sys
from functools import wraps
from pyspark.mllib.common import callMLlibFunc
__all__ = ['RandomRDDs', ]
def toArray(f):
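    """Decorator mapping an RDD of MLlib Vectors to an RDD of NumPy arrays via Vector.toArray()."""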
@wraps(f)
def func(sc, *a, **kw):
rdd = f(sc, *a, **kw)
return rdd.map(lambda vec: vec.toArray())
return func
class RandomRDDs(object):
"""
Generator methods for creating RDDs comprised of i.i.d samples from
some distribution.
.. versionadded:: 1.1.0
"""
@staticmethod
def uniformRDD(sc, size, numPartitions=None, seed=None):
"""
Generates an RDD comprised of i.i.d. samples from the
uniform distribution U(0.0, 1.0).
To transform the distribution in the generated RDD from U(0.0, 1.0)
to U(a, b), use
``RandomRDDs.uniformRDD(sc, n, p, seed).map(lambda v: a + (b - a) * v)``
.. versionadded:: 1.1.0
Parameters
----------
sc : :py:class:`pyspark.SparkContext`
used to create the RDD.
size : int
Size of the RDD.
numPartitions : int, optional
Number of partitions in the RDD (default: `sc.defaultParallelism`).
seed : int, optional
Random seed (default: a random long integer).
Returns
-------
:py:class:`pyspark.RDD`
RDD of float comprised of i.i.d. samples ~ `U(0.0, 1.0)`.
Examples
--------
>>> x = RandomRDDs.uniformRDD(sc, 100).collect()
>>> len(x)
100
>>> max(x) <= 1.0 and min(x) >= 0.0
True
>>> RandomRDDs.uniformRDD(sc, 100, 4).getNumPartitions()
4
>>> parts = RandomRDDs.uniformRDD(sc, 100, seed=4).getNumPartitions()
>>> parts == sc.defaultParallelism
True
"""
return callMLlibFunc("uniformRDD", sc._jsc, size, numPartitions, seed)
@staticmethod
def normalRDD(sc, size, numPartitions=None, seed=None):
"""
Generates an RDD comprised of i.i.d. samples from the standard normal
distribution.
To transform the distribution in the generated RDD from standard normal
to some other normal N(mean, sigma^2), use
        ``RandomRDDs.normalRDD(sc, n, p, seed).map(lambda v: mean + sigma * v)``
.. versionadded:: 1.1.0
Parameters
----------
sc : :py:class:`pyspark.SparkContext`
used to create the RDD.
size : int
Size of the RDD.
numPartitions : int, optional
Number of partitions in the RDD (default: `sc.defaultParallelism`).
seed : int, optional
Random seed (default: a random long integer).
Returns
-------
:py:class:`pyspark.RDD`
RDD of float comprised of i.i.d. samples ~ N(0.0, 1.0).
Examples
--------
>>> x = RandomRDDs.normalRDD(sc, 1000, seed=1)
>>> stats = x.stats()
>>> stats.count()
1000
>>> abs(stats.mean() - 0.0) < 0.1
True
>>> abs(stats.stdev() - 1.0) < 0.1
True
"""
return callMLlibFunc("normalRDD", sc._jsc, size, numPartitions, seed)
@staticmethod
def logNormalRDD(sc, mean, std, size, numPartitions=None, seed=None):
"""
Generates an RDD comprised of i.i.d. samples from the log normal
        distribution with the input mean and standard deviation.
.. versionadded:: 1.3.0
Parameters
----------
sc : :py:class:`pyspark.SparkContext`
used to create the RDD.
mean : float
mean for the log Normal distribution
std : float
std for the log Normal distribution
size : int
Size of the RDD.
numPartitions : int, optional
Number of partitions in the RDD (default: `sc.defaultParallelism`).
seed : int, optional
Random seed (default: a random long integer).
Returns
-------
        :py:class:`pyspark.RDD`
            RDD of float comprised of i.i.d. samples ~ log N(mean, std).
Examples
--------
>>> from math import sqrt, exp
>>> mean = 0.0
>>> std = 1.0
>>> expMean = exp(mean + 0.5 * std * std)
>>> expStd = sqrt((exp(std * std) - 1.0) * exp(2.0 * mean + std * std))
>>> x = RandomRDDs.logNormalRDD(sc, mean, std, 1000, seed=2)
>>> stats = x.stats()
>>> stats.count()
1000
>>> abs(stats.mean() - expMean) < 0.5
True
>>> from math import sqrt
>>> abs(stats.stdev() - expStd) < 0.5
True
"""
return callMLlibFunc("logNormalRDD", sc._jsc, float(mean), float(std),
size, numPartitions, seed)
@staticmethod
def poissonRDD(sc, mean, size, numPartitions=None, seed=None):
"""
Generates an RDD comprised of i.i.d. samples from the Poisson
distribution with the input mean.
.. versionadded:: 1.1.0
Parameters
----------
sc : :py:class:`pyspark.SparkContext`
SparkContext used to create the RDD.
mean : float
Mean, or lambda, for the Poisson distribution.
size : int
Size of the RDD.
numPartitions : int, optional
Number of partitions in the RDD (default: `sc.defaultParallelism`).
seed : int, optional
Random seed (default: a random long integer).
Returns
-------
:py:class:`pyspark.RDD`
RDD of float comprised of i.i.d. samples ~ Pois(mean).
Examples
--------
>>> mean = 100.0
>>> x = RandomRDDs.poissonRDD(sc, mean, 1000, seed=2)
>>> stats = x.stats()
>>> stats.count()
1000
>>> abs(stats.mean() - mean) < 0.5
True
>>> from math import sqrt
>>> abs(stats.stdev() - sqrt(mean)) < 0.5
True
"""
return callMLlibFunc("poissonRDD", sc._jsc, float(mean), size, numPartitions, seed)
@staticmethod
def exponentialRDD(sc, mean, size, numPartitions=None, seed=None):
"""
Generates an RDD comprised of i.i.d. samples from the Exponential
distribution with the input mean.
.. versionadded:: 1.3.0
Parameters
----------
sc : :py:class:`pyspark.SparkContext`
SparkContext used to create the RDD.
mean : float
Mean, or 1 / lambda, for the Exponential distribution.
size : int
Size of the RDD.
numPartitions : int, optional
Number of partitions in the RDD (default: `sc.defaultParallelism`).
seed : int, optional
Random seed (default: a random long integer).
Returns
-------
:py:class:`pyspark.RDD`
RDD of float comprised of i.i.d. samples ~ Exp(mean).
Examples
--------
>>> mean = 2.0
>>> x = RandomRDDs.exponentialRDD(sc, mean, 1000, seed=2)
>>> stats = x.stats()
>>> stats.count()
1000
>>> abs(stats.mean() - mean) < 0.5
True
>>> from math import sqrt
>>> abs(stats.stdev() - sqrt(mean)) < 0.5
True
"""
return callMLlibFunc("exponentialRDD", sc._jsc, float(mean), size, numPartitions, seed)
@staticmethod
def gammaRDD(sc, shape, scale, size, numPartitions=None, seed=None):
"""
Generates an RDD comprised of i.i.d. samples from the Gamma
distribution with the input shape and scale.
.. versionadded:: 1.3.0
Parameters
----------
sc : :py:class:`pyspark.SparkContext`
SparkContext used to create the RDD.
shape : float
shape (> 0) parameter for the Gamma distribution
scale : float
scale (> 0) parameter for the Gamma distribution
size : int
Size of the RDD.
numPartitions : int, optional
Number of partitions in the RDD (default: `sc.defaultParallelism`).
seed : int, optional
Random seed (default: a random long integer).
Returns
-------
:py:class:`pyspark.RDD`
RDD of float comprised of i.i.d. samples ~ Gamma(shape, scale).
Examples
--------
>>> from math import sqrt
>>> shape = 1.0
>>> scale = 2.0
>>> expMean = shape * scale
>>> expStd = sqrt(shape * scale * scale)
>>> x = RandomRDDs.gammaRDD(sc, shape, scale, 1000, seed=2)
>>> stats = x.stats()
>>> stats.count()
1000
>>> abs(stats.mean() - expMean) < 0.5
True
>>> abs(stats.stdev() - expStd) < 0.5
True
"""
return callMLlibFunc("gammaRDD", sc._jsc, float(shape),
float(scale), size, numPartitions, seed)
@staticmethod
@toArray
def uniformVectorRDD(sc, numRows, numCols, numPartitions=None, seed=None):
"""
Generates an RDD comprised of vectors containing i.i.d. samples drawn
from the uniform distribution U(0.0, 1.0).
.. versionadded:: 1.1.0
Parameters
----------
sc : :py:class:`pyspark.SparkContext`
SparkContext used to create the RDD.
numRows : int
Number of Vectors in the RDD.
numCols : int
Number of elements in each Vector.
numPartitions : int, optional
Number of partitions in the RDD.
seed : int, optional
Seed for the RNG that generates the seed for the generator in each partition.
Returns
-------
:py:class:`pyspark.RDD`
RDD of Vector with vectors containing i.i.d samples ~ `U(0.0, 1.0)`.
Examples
--------
>>> import numpy as np
>>> mat = np.matrix(RandomRDDs.uniformVectorRDD(sc, 10, 10).collect())
>>> mat.shape
(10, 10)
>>> mat.max() <= 1.0 and mat.min() >= 0.0
True
>>> RandomRDDs.uniformVectorRDD(sc, 10, 10, 4).getNumPartitions()
4
"""
return callMLlibFunc("uniformVectorRDD", sc._jsc, numRows, numCols, numPartitions, seed)
@staticmethod
@toArray
def normalVectorRDD(sc, numRows, numCols, numPartitions=None, seed=None):
"""
Generates an RDD comprised of vectors containing i.i.d. samples drawn
from the standard normal distribution.
.. versionadded:: 1.1.0
Parameters
----------
sc : :py:class:`pyspark.SparkContext`
SparkContext used to create the RDD.
numRows : int
Number of Vectors in the RDD.
numCols : int
Number of elements in each Vector.
numPartitions : int, optional
Number of partitions in the RDD (default: `sc.defaultParallelism`).
seed : int, optional
Random seed (default: a random long integer).
Returns
-------
:py:class:`pyspark.RDD`
RDD of Vector with vectors containing i.i.d. samples ~ `N(0.0, 1.0)`.
Examples
--------
>>> import numpy as np
>>> mat = np.matrix(RandomRDDs.normalVectorRDD(sc, 100, 100, seed=1).collect())
>>> mat.shape
(100, 100)
>>> abs(mat.mean() - 0.0) < 0.1
True
>>> abs(mat.std() - 1.0) < 0.1
True
"""
return callMLlibFunc("normalVectorRDD", sc._jsc, numRows, numCols, numPartitions, seed)
@staticmethod
@toArray
def logNormalVectorRDD(sc, mean, std, numRows, numCols, numPartitions=None, seed=None):
"""
Generates an RDD comprised of vectors containing i.i.d. samples drawn
from the log normal distribution.
.. versionadded:: 1.3.0
Parameters
----------
sc : :py:class:`pyspark.SparkContext`
SparkContext used to create the RDD.
mean : float
Mean of the log normal distribution
std : float
Standard Deviation of the log normal distribution
numRows : int
Number of Vectors in the RDD.
numCols : int
Number of elements in each Vector.
numPartitions : int, optional
Number of partitions in the RDD (default: `sc.defaultParallelism`).
seed : int, optional
Random seed (default: a random long integer).
Returns
-------
:py:class:`pyspark.RDD`
RDD of Vector with vectors containing i.i.d. samples ~ log `N(mean, std)`.
Examples
--------
>>> import numpy as np
>>> from math import sqrt, exp
>>> mean = 0.0
>>> std = 1.0
>>> expMean = exp(mean + 0.5 * std * std)
>>> expStd = sqrt((exp(std * std) - 1.0) * exp(2.0 * mean + std * std))
>>> m = RandomRDDs.logNormalVectorRDD(sc, mean, std, 100, 100, seed=1).collect()
>>> mat = np.matrix(m)
>>> mat.shape
(100, 100)
>>> abs(mat.mean() - expMean) < 0.1
True
>>> abs(mat.std() - expStd) < 0.1
True
"""
return callMLlibFunc("logNormalVectorRDD", sc._jsc, float(mean), float(std),
numRows, numCols, numPartitions, seed)
@staticmethod
@toArray
def poissonVectorRDD(sc, mean, numRows, numCols, numPartitions=None, seed=None):
"""
Generates an RDD comprised of vectors containing i.i.d. samples drawn
from the Poisson distribution with the input mean.
.. versionadded:: 1.1.0
Parameters
----------
sc : :py:class:`pyspark.SparkContext`
SparkContext used to create the RDD.
mean : float
Mean, or lambda, for the Poisson distribution.
        numRows : int
Number of Vectors in the RDD.
numCols : int
Number of elements in each Vector.
numPartitions : int, optional
Number of partitions in the RDD (default: `sc.defaultParallelism`)
seed : int, optional
Random seed (default: a random long integer).
Returns
-------
:py:class:`pyspark.RDD`
RDD of Vector with vectors containing i.i.d. samples ~ Pois(mean).
Examples
--------
>>> import numpy as np
>>> mean = 100.0
>>> rdd = RandomRDDs.poissonVectorRDD(sc, mean, 100, 100, seed=1)
>>> mat = np.mat(rdd.collect())
>>> mat.shape
(100, 100)
>>> abs(mat.mean() - mean) < 0.5
True
>>> from math import sqrt
>>> abs(mat.std() - sqrt(mean)) < 0.5
True
"""
return callMLlibFunc("poissonVectorRDD", sc._jsc, float(mean), numRows, numCols,
numPartitions, seed)
@staticmethod
@toArray
def exponentialVectorRDD(sc, mean, numRows, numCols, numPartitions=None, seed=None):
"""
Generates an RDD comprised of vectors containing i.i.d. samples drawn
from the Exponential distribution with the input mean.
.. versionadded:: 1.3.0
Parameters
----------
sc : :py:class:`pyspark.SparkContext`
SparkContext used to create the RDD.
mean : float
Mean, or 1 / lambda, for the Exponential distribution.
numRows : int
Number of Vectors in the RDD.
numCols : int
Number of elements in each Vector.
numPartitions : int, optional
Number of partitions in the RDD (default: `sc.defaultParallelism`)
seed : int, optional
Random seed (default: a random long integer).
Returns
-------
:py:class:`pyspark.RDD`
RDD of Vector with vectors containing i.i.d. samples ~ Exp(mean).
Examples
--------
>>> import numpy as np
>>> mean = 0.5
>>> rdd = RandomRDDs.exponentialVectorRDD(sc, mean, 100, 100, seed=1)
>>> mat = np.mat(rdd.collect())
>>> mat.shape
(100, 100)
>>> abs(mat.mean() - mean) < 0.5
True
>>> from math import sqrt
>>> abs(mat.std() - sqrt(mean)) < 0.5
True
"""
return callMLlibFunc("exponentialVectorRDD", sc._jsc, float(mean), numRows, numCols,
numPartitions, seed)
@staticmethod
@toArray
def gammaVectorRDD(sc, shape, scale, numRows, numCols, numPartitions=None, seed=None):
"""
Generates an RDD comprised of vectors containing i.i.d. samples drawn
from the Gamma distribution.
.. versionadded:: 1.3.0
Parameters
----------
sc : :py:class:`pyspark.SparkContext`
SparkContext used to create the RDD.
shape : float
Shape (> 0) of the Gamma distribution
scale : float
Scale (> 0) of the Gamma distribution
numRows : int
Number of Vectors in the RDD.
numCols : int
Number of elements in each Vector.
numPartitions : int, optional
Number of partitions in the RDD (default: `sc.defaultParallelism`).
        seed : int, optional
Random seed (default: a random long integer).
Returns
-------
:py:class:`pyspark.RDD`
RDD of Vector with vectors containing i.i.d. samples ~ Gamma(shape, scale).
Examples
--------
>>> import numpy as np
>>> from math import sqrt
>>> shape = 1.0
>>> scale = 2.0
>>> expMean = shape * scale
>>> expStd = sqrt(shape * scale * scale)
>>> mat = np.matrix(RandomRDDs.gammaVectorRDD(sc, shape, scale, 100, 100, seed=1).collect())
>>> mat.shape
(100, 100)
>>> abs(mat.mean() - expMean) < 0.1
True
>>> abs(mat.std() - expStd) < 0.1
True
"""
return callMLlibFunc("gammaVectorRDD", sc._jsc, float(shape), float(scale),
numRows, numCols, numPartitions, seed)
def _test():
import doctest
from pyspark.sql import SparkSession
globs = globals().copy()
# The small batch size here ensures that we see multiple batches,
# even in these small test examples:
spark = SparkSession.builder\
.master("local[2]")\
.appName("mllib.random tests")\
.getOrCreate()
globs['sc'] = spark.sparkContext
(failure_count, test_count) = doctest.testmod(globs=globs, optionflags=doctest.ELLIPSIS)
spark.stop()
if failure_count:
sys.exit(-1)
if __name__ == "__main__":
_test()
| apache-2.0 |
toolforger/sympy | sympy/plotting/pygletplot/tests/test_plotting.py | 109 | 2653 | from sympy.external.importtools import import_module
disabled = False
# if pyglet.gl fails to import, e.g. opengl is missing, we disable the tests
pyglet_gl = import_module("pyglet.gl", catch=(OSError,))
pyglet_window = import_module("pyglet.window", catch=(OSError,))
if not pyglet_gl or not pyglet_window:
disabled = True
from sympy import symbols, sin, cos, log
x, y, z = symbols('x, y, z')
def test_import():
from sympy.plotting.pygletplot import PygletPlot
def test_plot_2d():
from sympy.plotting.pygletplot import PygletPlot
p = PygletPlot(x, [x, -5, 5, 4], visible=False)
p.wait_for_calculations()
def test_plot_2d_discontinuous():
from sympy.plotting.pygletplot import PygletPlot
p = PygletPlot(1/x, [x, -1, 1, 2], visible=False)
p.wait_for_calculations()
def test_plot_3d():
from sympy.plotting.pygletplot import PygletPlot
p = PygletPlot(x*y, [x, -5, 5, 5], [y, -5, 5, 5], visible=False)
p.wait_for_calculations()
def test_plot_3d_discontinuous():
from sympy.plotting.pygletplot import PygletPlot
p = PygletPlot(1/x, [x, -3, 3, 6], [y, -1, 1, 1], visible=False)
p.wait_for_calculations()
def test_plot_2d_polar():
from sympy.plotting.pygletplot import PygletPlot
p = PygletPlot(1/x, [x, -1, 1, 4], 'mode=polar', visible=False)
p.wait_for_calculations()
def test_plot_3d_cylinder():
from sympy.plotting.pygletplot import PygletPlot
p = PygletPlot(
1/y, [x, 0, 6.282, 4], [y, -1, 1, 4], 'mode=polar;style=solid',
visible=False)
p.wait_for_calculations()
def test_plot_3d_spherical():
from sympy.plotting.pygletplot import PygletPlot
p = PygletPlot(
1, [x, 0, 6.282, 4], [y, 0, 3.141,
4], 'mode=spherical;style=wireframe',
visible=False)
p.wait_for_calculations()
def test_plot_2d_parametric():
from sympy.plotting.pygletplot import PygletPlot
p = PygletPlot(sin(x), cos(x), [x, 0, 6.282, 4], visible=False)
p.wait_for_calculations()
def test_plot_3d_parametric():
from sympy.plotting.pygletplot import PygletPlot
p = PygletPlot(sin(x), cos(x), x/5.0, [x, 0, 6.282, 4], visible=False)
p.wait_for_calculations()
def _test_plot_log():
from sympy.plotting.pygletplot import PygletPlot
p = PygletPlot(log(x), [x, 0, 6.282, 4], 'mode=polar', visible=False)
p.wait_for_calculations()
def test_plot_integral():
# Make sure it doesn't treat x as an independent variable
from sympy.plotting.pygletplot import PygletPlot
from sympy import Integral
p = PygletPlot(Integral(z*x, (x, 1, z), (z, 1, y)), visible=False)
p.wait_for_calculations()
| bsd-3-clause |
dvliman/jaikuengine | .google_appengine/lib/django-1.5/tests/regressiontests/syndication/tests.py | 47 | 13086 | from __future__ import absolute_import, unicode_literals
from xml.dom import minidom
from django.contrib.syndication import views
from django.core.exceptions import ImproperlyConfigured
from django.test import TestCase
from django.utils import tzinfo
from django.utils.feedgenerator import rfc2822_date, rfc3339_date
from .models import Entry
class FeedTestCase(TestCase):
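    """Base TestCase providing helpers for asserting on the structure of generated feed XML."""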
fixtures = ['feeddata.json']
def assertChildNodes(self, elem, expected):
actual = set([n.nodeName for n in elem.childNodes])
expected = set(expected)
self.assertEqual(actual, expected)
def assertChildNodeContent(self, elem, expected):
for k, v in expected.items():
self.assertEqual(
elem.getElementsByTagName(k)[0].firstChild.wholeText, v)
def assertCategories(self, elem, expected):
self.assertEqual(set(i.firstChild.wholeText for i in elem.childNodes if i.nodeName == 'category'), set(expected))
######################################
# Feed view
######################################
class SyndicationFeedTest(FeedTestCase):
"""
Tests for the high-level syndication feed framework.
"""
urls = 'regressiontests.syndication.urls'
def test_rss2_feed(self):
"""
Test the structure and content of feeds generated by Rss201rev2Feed.
"""
response = self.client.get('/syndication/rss2/')
doc = minidom.parseString(response.content)
# Making sure there's only 1 `rss` element and that the correct
# RSS version was specified.
feed_elem = doc.getElementsByTagName('rss')
self.assertEqual(len(feed_elem), 1)
feed = feed_elem[0]
self.assertEqual(feed.getAttribute('version'), '2.0')
# Making sure there's only one `channel` element w/in the
# `rss` element.
chan_elem = feed.getElementsByTagName('channel')
self.assertEqual(len(chan_elem), 1)
chan = chan_elem[0]
# Find the last build date
d = Entry.objects.latest('date').date
ltz = tzinfo.LocalTimezone(d)
last_build_date = rfc2822_date(d.replace(tzinfo=ltz))
self.assertChildNodes(chan, ['title', 'link', 'description', 'language', 'lastBuildDate', 'item', 'atom:link', 'ttl', 'copyright', 'category'])
self.assertChildNodeContent(chan, {
'title': 'My blog',
'description': 'A more thorough description of my blog.',
'link': 'http://example.com/blog/',
'language': 'en',
'lastBuildDate': last_build_date,
#'atom:link': '',
'ttl': '600',
'copyright': 'Copyright (c) 2007, Sally Smith',
})
self.assertCategories(chan, ['python', 'django'])
# Ensure the content of the channel is correct
self.assertChildNodeContent(chan, {
'title': 'My blog',
'link': 'http://example.com/blog/',
})
# Check feed_url is passed
self.assertEqual(
chan.getElementsByTagName('atom:link')[0].getAttribute('href'),
'http://example.com/syndication/rss2/'
)
# Find the pubdate of the first feed item
d = Entry.objects.get(pk=1).date
ltz = tzinfo.LocalTimezone(d)
pub_date = rfc2822_date(d.replace(tzinfo=ltz))
items = chan.getElementsByTagName('item')
self.assertEqual(len(items), Entry.objects.count())
self.assertChildNodeContent(items[0], {
'title': 'My first entry',
'description': 'Overridden description: My first entry',
'link': 'http://example.com/blog/1/',
'guid': 'http://example.com/blog/1/',
'pubDate': pub_date,
'author': '[email protected] (Sally Smith)',
})
self.assertCategories(items[0], ['python', 'testing'])
for item in items:
self.assertChildNodes(item, ['title', 'link', 'description', 'guid', 'category', 'pubDate', 'author'])
def test_rss091_feed(self):
"""
Test the structure and content of feeds generated by RssUserland091Feed.
"""
response = self.client.get('/syndication/rss091/')
doc = minidom.parseString(response.content)
# Making sure there's only 1 `rss` element and that the correct
# RSS version was specified.
feed_elem = doc.getElementsByTagName('rss')
self.assertEqual(len(feed_elem), 1)
feed = feed_elem[0]
self.assertEqual(feed.getAttribute('version'), '0.91')
# Making sure there's only one `channel` element w/in the
# `rss` element.
chan_elem = feed.getElementsByTagName('channel')
self.assertEqual(len(chan_elem), 1)
chan = chan_elem[0]
self.assertChildNodes(chan, ['title', 'link', 'description', 'language', 'lastBuildDate', 'item', 'atom:link', 'ttl', 'copyright', 'category'])
# Ensure the content of the channel is correct
self.assertChildNodeContent(chan, {
'title': 'My blog',
'link': 'http://example.com/blog/',
})
self.assertCategories(chan, ['python', 'django'])
# Check feed_url is passed
self.assertEqual(
chan.getElementsByTagName('atom:link')[0].getAttribute('href'),
'http://example.com/syndication/rss091/'
)
items = chan.getElementsByTagName('item')
self.assertEqual(len(items), Entry.objects.count())
self.assertChildNodeContent(items[0], {
'title': 'My first entry',
'description': 'Overridden description: My first entry',
'link': 'http://example.com/blog/1/',
})
for item in items:
self.assertChildNodes(item, ['title', 'link', 'description'])
self.assertCategories(item, [])
def test_atom_feed(self):
"""
Test the structure and content of feeds generated by Atom1Feed.
"""
response = self.client.get('/syndication/atom/')
feed = minidom.parseString(response.content).firstChild
self.assertEqual(feed.nodeName, 'feed')
self.assertEqual(feed.getAttribute('xmlns'), 'http://www.w3.org/2005/Atom')
self.assertChildNodes(feed, ['title', 'subtitle', 'link', 'id', 'updated', 'entry', 'rights', 'category', 'author'])
for link in feed.getElementsByTagName('link'):
if link.getAttribute('rel') == 'self':
self.assertEqual(link.getAttribute('href'), 'http://example.com/syndication/atom/')
entries = feed.getElementsByTagName('entry')
self.assertEqual(len(entries), Entry.objects.count())
for entry in entries:
self.assertChildNodes(entry, ['title', 'link', 'id', 'summary', 'category', 'updated', 'rights', 'author'])
summary = entry.getElementsByTagName('summary')[0]
self.assertEqual(summary.getAttribute('type'), 'html')
def test_custom_feed_generator(self):
response = self.client.get('/syndication/custom/')
feed = minidom.parseString(response.content).firstChild
self.assertEqual(feed.nodeName, 'feed')
self.assertEqual(feed.getAttribute('django'), 'rocks')
self.assertChildNodes(feed, ['title', 'subtitle', 'link', 'id', 'updated', 'entry', 'spam', 'rights', 'category', 'author'])
entries = feed.getElementsByTagName('entry')
self.assertEqual(len(entries), Entry.objects.count())
for entry in entries:
self.assertEqual(entry.getAttribute('bacon'), 'yum')
self.assertChildNodes(entry, ['title', 'link', 'id', 'summary', 'ministry', 'rights', 'author', 'updated', 'category'])
summary = entry.getElementsByTagName('summary')[0]
self.assertEqual(summary.getAttribute('type'), 'html')
def test_title_escaping(self):
"""
Tests that titles are escaped correctly in RSS feeds.
"""
response = self.client.get('/syndication/rss2/')
doc = minidom.parseString(response.content)
for item in doc.getElementsByTagName('item'):
link = item.getElementsByTagName('link')[0]
if link.firstChild.wholeText == 'http://example.com/blog/4/':
title = item.getElementsByTagName('title')[0]
self.assertEqual(title.firstChild.wholeText, 'A & B < C > D')
def test_naive_datetime_conversion(self):
"""
Test that datetimes are correctly converted to the local time zone.
"""
# Naive datetimes passed in get converted to the local time zone, so
# check the received zone offset against the local offset.
response = self.client.get('/syndication/naive-dates/')
doc = minidom.parseString(response.content)
updated = doc.getElementsByTagName('updated')[0].firstChild.wholeText
d = Entry.objects.latest('date').date
ltz = tzinfo.LocalTimezone(d)
latest = rfc3339_date(d.replace(tzinfo=ltz))
self.assertEqual(updated, latest)
def test_aware_datetime_conversion(self):
"""
Test that datetimes with timezones don't get trodden on.
"""
response = self.client.get('/syndication/aware-dates/')
doc = minidom.parseString(response.content)
updated = doc.getElementsByTagName('updated')[0].firstChild.wholeText
self.assertEqual(updated[-6:], '+00:42')
def test_feed_last_modified_time(self):
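"""
Test that the Last-Modified header reflects the latest item and is
omitted when the feed defines no item_pubdate.
"""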
response = self.client.get('/syndication/naive-dates/')
self.assertEqual(response['Last-Modified'], 'Thu, 03 Jan 2008 19:30:00 GMT')
# No last-modified when feed has no item_pubdate
response = self.client.get('/syndication/no_pubdate/')
self.assertFalse(response.has_header('Last-Modified'))
def test_feed_url(self):
"""
Test that the feed_url can be overridden.
"""
response = self.client.get('/syndication/feedurl/')
doc = minidom.parseString(response.content)
for link in doc.getElementsByTagName('link'):
if link.getAttribute('rel') == 'self':
self.assertEqual(link.getAttribute('href'), 'http://example.com/customfeedurl/')
def test_secure_urls(self):
"""
Test URLs are prefixed with https:// when feed is requested over HTTPS.
"""
response = self.client.get('/syndication/rss2/', **{
'wsgi.url_scheme': 'https',
})
doc = minidom.parseString(response.content)
chan = doc.getElementsByTagName('channel')[0]
self.assertEqual(
chan.getElementsByTagName('link')[0].firstChild.wholeText[0:5],
'https'
)
atom_link = chan.getElementsByTagName('atom:link')[0]
self.assertEqual(atom_link.getAttribute('href')[0:5], 'https')
for link in doc.getElementsByTagName('link'):
if link.getAttribute('rel') == 'self':
self.assertEqual(link.getAttribute('href')[0:5], 'https')
def test_item_link_error(self):
"""
Test that an ImproperlyConfigured is raised if no link could be found
for the item(s).
"""
self.assertRaises(ImproperlyConfigured,
self.client.get,
'/syndication/articles/')
def test_template_feed(self):
"""
Test that the item title and description can be overridden with
templates.
"""
response = self.client.get('/syndication/template/')
doc = minidom.parseString(response.content)
feed = doc.getElementsByTagName('rss')[0]
chan = feed.getElementsByTagName('channel')[0]
items = chan.getElementsByTagName('item')
self.assertChildNodeContent(items[0], {
'title': 'Title in your templates: My first entry',
'description': 'Description in your templates: My first entry',
'link': 'http://example.com/blog/1/',
})
def test_add_domain(self):
"""
Test add_domain() prefixes domains onto the correct URLs.
"""
self.assertEqual(
views.add_domain('example.com', '/foo/?arg=value'),
'http://example.com/foo/?arg=value'
)
self.assertEqual(
views.add_domain('example.com', '/foo/?arg=value', True),
'https://example.com/foo/?arg=value'
)
self.assertEqual(
views.add_domain('example.com', 'http://djangoproject.com/doc/'),
'http://djangoproject.com/doc/'
)
self.assertEqual(
views.add_domain('example.com', 'https://djangoproject.com/doc/'),
'https://djangoproject.com/doc/'
)
self.assertEqual(
views.add_domain('example.com', 'mailto:[email protected]'),
'mailto:[email protected]'
)
self.assertEqual(
views.add_domain('example.com', '//example.com/foo/?arg=value'),
'http://example.com/foo/?arg=value'
)
| apache-2.0 |
AntonKhorev/BudgetSpb | main.py | 1 | 1072 | #!/usr/bin/env python3
from linker import Linker
import htmlPage
import content.index,content.db,content.fincom
# TODO put into config
spbBudgetXlsPath='../spb-budget-xls'
if __name__=='__main__':
linker=Linker('filelists',{
'csv':['csv'],
'xls':['xls'],
'db':['zip','sql','xlsx'],
})
htmlPage.HtmlPage('index.html','Данные бюджета Санкт-Петербурга',content.index.content,linker).write('output/index.html')
htmlPage.HtmlPage('xls.html','Ведомственная структура расходов бюджета Санкт-Петербурга в csv и xls',htmlPage.importContent(spbBudgetXlsPath+'/index.html'),linker).write('output/xls.html')
htmlPage.HtmlPage('db.html','БД и таблицы расходов бюджета Санкт-Петербурга из разных источников',content.db.content,linker).write('output/db.html')
htmlPage.HtmlPage('fincom.html','Что можно найти на сайте Комитета финансов',content.fincom.content,linker).write('output/fincom.html')
| bsd-2-clause |
postla/e2-gui | lib/python/Plugins/SystemPlugins/WirelessAccessPoint/plugin.py | 6 | 26631 | from Screens.Screen import Screen
from Components.ConfigList import ConfigListScreen, ConfigList
from Components.config import config, ConfigSubsection, getConfigListEntry, ConfigSelection, ConfigIP, ConfigInteger
from Components.config import ConfigText, ConfigYesNo, NoSave, ConfigPassword, ConfigNothing, ConfigSequence
from Components.ActionMap import ActionMap
from Screens.MessageBox import MessageBox
from Components.Sources.StaticText import StaticText
from Plugins.Plugin import PluginDescriptor
from Tools.Directories import fileExists
from math import pow as math_pow
from Components.Network import iNetwork
from Components.PluginComponent import plugins
from Components.Console import Console
from os import path as os_path, system as os_system, listdir
from Tools.Directories import resolveFilename, SCOPE_PLUGINS
from enigma import eTimer
import wirelessap
debug_msg_on = False
def printDebugMsg(msg):
global debug_msg_on
if debug_msg_on:
print "[Wireless Access Point] ", msg
class fixedValue:
def __init__(self, value = ""):
self.value = value
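# Global AP-mode configuration tree: hostapd parameters (SSID, channel, encryption)
# plus the bridge/network settings this plugin writes to /etc/network/interfaces.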
apModeConfig = ConfigSubsection()
apModeConfig.useap = ConfigYesNo(default = False)
apModeConfig.setupmode = ConfigSelection(default = "simple", choices = [ ("simple", "Simple"), ("advanced", "Advanced") ] )
#apModeConfig.wirelessdevice = fixedValue(value = "")
apModeConfig.branch = fixedValue(value = "br0")
apModeConfig.driver = fixedValue(value = "nl80211")
apModeConfig.wirelessmode = ConfigSelection(default = "g", choices = [ ("b", "802.11b"), ("a", "802.11a"), ("g", "802.11g") ] )
apModeConfig.channel = ConfigInteger(default = 1, limits = (1,13) )
apModeConfig.ssid = ConfigText(default = "Input SSID", visible_width = 50, fixed_size = False)
apModeConfig.beacon = ConfigInteger(default = 100, limits = (15,65535))
apModeConfig.rts_threshold = ConfigInteger(default = 2347, limits = (0,2347) )
apModeConfig.fragm_threshold = ConfigInteger(default = 2346, limits = (256,2346) )
apModeConfig.preamble = ConfigSelection(default = "0", choices = [ ("0", "Long"), ("1", "Short") ] )
apModeConfig.ignore_broadcast_ssid = ConfigSelection(default = "0", choices = [ ("0", _("disabled")), ("1", _("enabled")) ])
apModeConfig.encrypt = ConfigYesNo(default = False)
apModeConfig.method = ConfigSelection(default = "0", choices = [
("0", _("WEP")), ("1", _("WPA")), ("2", _("WPA2")),("3", _("WPA/WPA2"))])
apModeConfig.wep = ConfigYesNo(default = False)
#apModeConfig.wep_default_key = ConfigSelection(default = "0", choices = [ ("0", "0"), ("1", "1"), ("2", "2"), ("3", "3") ] )
apModeConfig.wep_default_key = fixedValue(value = "0")
apModeConfig.wepType = ConfigSelection(default = "64", choices = [
("64", _("Enable 64 bit (Input 10 hex keys)")), ("128", _("Enable 128 bit (Input 26 hex keys)"))])
apModeConfig.wep_key0 = ConfigPassword(default = "", visible_width = 50, fixed_size = False)
apModeConfig.wpa = ConfigSelection(default = "0", choices = [
("0", _("not set")), ("1", _("WPA")), ("2", _("WPA2")),("3", _("WPA/WPA2"))])
apModeConfig.wpa_passphrase = ConfigPassword(default = "", visible_width = 50, fixed_size = False)
apModeConfig.wpagrouprekey = ConfigInteger(default = 600, limits = (0,3600))
apModeConfig.wpa_key_mgmt = fixedValue(value = "WPA-PSK")
apModeConfig.wpa_pairwise = fixedValue(value = "TKIP CCMP")
apModeConfig.rsn_pairwise = fixedValue(value = "CCMP")
apModeConfig.usedhcp = ConfigYesNo(default=True)
apModeConfig.address = ConfigIP(default = [0,0,0,0])
apModeConfig.netmask = ConfigIP(default = [255,0,0,0])
apModeConfig.gateway = ConfigIP(default = [0,0,0,0])
class WirelessAccessPoint(Screen,ConfigListScreen):
skin = """
<screen position="center,center" size="590,450" title="Wireless Access Point" >
<ePixmap pixmap="skin_default/buttons/red.png" position="20,0" size="140,40" alphatest="on" />
<ePixmap pixmap="skin_default/buttons/green.png" position="160,0" size="140,40" alphatest="on" />
<ePixmap pixmap="skin_default/buttons/yellow.png" position="300,0" size="140,40" alphatest="on" />
<ePixmap pixmap="skin_default/buttons/blue.png" position="440,0" size="140,40" alphatest="on" />
<widget source="key_red" render="Label" position="20,0" zPosition="1" size="140,40" font="Regular;20" halign="center" valign="center" foregroundColor="#ffffff" backgroundColor="#9f1313" transparent="1" />
<widget source="key_green" render="Label" position="160,0" zPosition="1" size="140,40" font="Regular;20" halign="center" valign="center" foregroundColor="#ffffff" backgroundColor="#1f771f" transparent="1" />
<widget source="key_yellow" render="Label" position="300,0" zPosition="1" size="140,40" font="Regular;20" halign="center" valign="center" foregroundColor="#ffffff" backgroundColor="#a08500" transparent="1" />
<widget source="key_blue" render="Label" position="440,0" zPosition="1" size="140,40" font="Regular;20" halign="center" valign="center" foregroundColor="#ffffff" backgroundColor="#18188b" transparent="1" />
<widget name="config" zPosition="2" position="10,70" size="580,270" scrollbarMode="showOnDemand" transparent="1" />
<widget source="current_settings" render="Label" position="10,340" size="570,20" font="Regular;19" halign="center" valign="center" transparent="1" />
<widget source="IPAddress_text" render="Label" position="130,370" size="190,21" font="Regular;19" transparent="1" />
<widget source="Netmask_text" render="Label" position="130,395" size="190,21" font="Regular;19" transparent="1" />
<widget source="Gateway_text" render="Label" position="130,420" size="190,21" font="Regular;19" transparent="1" />
<widget source="IPAddress" render="Label" position="340,370" size="240,21" font="Regular;19" transparent="1" />
<widget source="Netmask" render="Label" position="340,395" size="240,21" font="Regular;19" transparent="1" />
<widget source="Gateway" render="Label" position="340,420" size="240,21" font="Regular;19" transparent="1" />
</screen>"""
def __init__(self,session):
Screen.__init__(self,session)
self.session = session
self["shortcuts"] = ActionMap(["ShortcutActions", "SetupActions" ],
{
"ok": self.doConfigMsg,
"cancel": self.keyCancel,
"red": self.keyCancel,
"green": self.doConfigMsg,
}, -2)
self.list = []
ConfigListScreen.__init__(self, self.list,session = self.session)
self["key_red"] = StaticText(_("Cancel"))
self["key_green"] = StaticText(_("Ok"))
self["key_yellow"] = StaticText(_(" "))
self["key_blue"] = StaticText(_(" "))
self["current_settings"] = StaticText(_("Current settings (interface : br0)"))
self["IPAddress_text"] = StaticText(_("IP Address"))
self["Netmask_text"] = StaticText(_("Netmask"))
self["Gateway_text"] = StaticText(_("Gateway"))
self["IPAddress"] = StaticText(_("N/A"))
self["Netmask"] = StaticText(_("N/A"))
self["Gateway"] = StaticText(_("N/A"))
self.wirelessAP = wirelessap.wirelessAP()
self.checkRunHostapd()
self.checkWirelessDevices()
self.makeConfigList()
self.loadInterfacesConfig()
self.loadHostapConfig()
self.setupCurrentEncryption()
self.createConfigEntry()
self.createConfig()
self.onClose.append(self.__onClose)
self.onLayoutFinish.append(self.checkwlanDeviceList)
self.onLayoutFinish.append(self.currentNetworkSettings)
self.checkwlanDeviceListTimer = eTimer()
self.checkwlanDeviceListTimer.callback.append(self.WirelessDeviceNotDetectedMsg)
def checkwlanDeviceList(self):
if len(self.wlanDeviceList) == 0:
self.checkwlanDeviceListTimer.start(100,True)
def WirelessDeviceNotDetectedMsg(self):
self.session.openWithCallback(self.close ,MessageBox, _("Wireless Lan Device is not detected."), MessageBox.TYPE_ERROR)
def currentNetworkSettings(self):
self["IPAddress"].setText(self.formatAddr(iNetwork.getAdapterAttribute("br0", "ip")))
self["Netmask"].setText(self.formatAddr(iNetwork.getAdapterAttribute("br0", "netmask")))
self["Gateway"].setText(self.formatAddr(iNetwork.getAdapterAttribute("br0", "gateway")))
def formatAddr(self, address = [0,0,0,0]):
if address is None:
return "N/A"
return "%d.%d.%d.%d" % (address[0], address[1], address[2], address[3])
def checkRunHostapd(self):
global apModeConfig
if fileExists("/var/run/hostapd", 0):
apModeConfig.useap.value = True
def makeConfigList(self):
global apModeConfig
self.hostapdConfigList = {}
self.hostapdConfigList["interface"] = apModeConfig.wirelessdevice
self.hostapdConfigList["bridge"] = apModeConfig.branch # "br0"
self.hostapdConfigList["driver"] = apModeConfig.driver # "nl80211"
self.hostapdConfigList["hw_mode"] = apModeConfig.wirelessmode
self.hostapdConfigList["channel"] = apModeConfig.channel
self.hostapdConfigList["ssid"] = apModeConfig.ssid
self.hostapdConfigList["beacon_int"] = apModeConfig.beacon
self.hostapdConfigList["rts_threshold"] = apModeConfig.rts_threshold
self.hostapdConfigList["fragm_threshold"] = apModeConfig.fragm_threshold
self.hostapdConfigList["preamble"] = apModeConfig.preamble
# self.hostapdConfigList["macaddr_acl"] = "" # fix to add Access Control List Editer
# self.hostapdConfigList["accept_mac_file"] = "" # fix to add Access Control List Editer
# self.hostapdConfigList["deny_mac_file"] = "" # fix to add Access Control List Editer
self.hostapdConfigList["ignore_broadcast_ssid"] = apModeConfig.ignore_broadcast_ssid
# self.hostapdConfigList["wmm_enabled"] = ""
# self.hostapdConfigList["ieee80211n"] = ""
# self.hostapdConfigList["ht_capab"] = ""
self.hostapdConfigList["wep_default_key"] = apModeConfig.wep_default_key
self.hostapdConfigList["wep_key0"] = apModeConfig.wep_key0
self.hostapdConfigList["wpa"] = apModeConfig.wpa
self.hostapdConfigList["wpa_passphrase"] = apModeConfig.wpa_passphrase
self.hostapdConfigList["wpa_key_mgmt"] = apModeConfig.wpa_key_mgmt # "WPA-PSK"
self.hostapdConfigList["wpa_pairwise"] = apModeConfig.wpa_pairwise # "TKIP CCMP"
self.hostapdConfigList["rsn_pairwise"] = apModeConfig.rsn_pairwise # "CCMP"
self.hostapdConfigList["wpa_group_rekey"] = apModeConfig.wpagrouprekey
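# Parse /etc/network/interfaces to recover the current br0/eth0 addressing (DHCP vs. static) into apModeConfig.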
def loadInterfacesConfig(self):
global apModeConfig
try:
fp = file('/etc/network/interfaces', 'r')
datas = fp.readlines()
fp.close()
except:
printDebugMsg("interfaces - file open failed")
datas = [] # avoid referencing an undefined name below if the file could not be read
# check br0 configuration
current_iface = ""
ifaceConf = {}
try:
for line in datas:
split = line.strip().split(' ')
if (split[0] == "iface"):
current_iface = split[1]
if (current_iface == "br0") and (len(split) == 4 and split[3] == "dhcp"):
apModeConfig.usedhcp.value = True
else:
apModeConfig.usedhcp.value = False
if (current_iface == "br0" or current_iface == "eth0"):
if (split[0] == "address"):
apModeConfig.address.value = map(int, split[1].split('.'))
if (split[0] == "netmask"):
apModeConfig.netmask.value = map(int, split[1].split('.'))
if (split[0] == "gateway"):
apModeConfig.gateway.value = map(int, split[1].split('.'))
except:
printDebugMsg("configuration parsing error! - /etc/network/interfaces")
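# Load the existing hostapd configuration (via the wirelessap helper) into the ConfigList entries.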
def loadHostapConfig(self):
hostapdConf = { }
ret = self.wirelessAP.loadHostapConfig(hostapdConf)
if ret != 0:
printDebugMsg("configuration opening failed!!")
return
for (key,value) in hostapdConf.items():
if key == "config.wep":
apModeConfig.wep.value = int(value)
elif key in ["channel", "beacon_int", "rts_threshold", "fragm_threshold", "wpa_group_rekey"]:
self.hostapdConfigList[key].value = int(value)
elif key in self.hostapdConfigList.keys():
self.hostapdConfigList[key].value = value
if key == "channel" and int(value) not in range(14):
self.hostapdConfigList[key].value = 1
# for key in self.hostapdConfigList.keys():
# printDebugMsg("[cofigList] key : %s, value : %s"%(key, str(self.hostapdConfigList[key].value)) )
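# Derive the encryption UI fields (encrypt/method/key type) from the WPA/WEP values found in hostapd.conf.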
def setupCurrentEncryption(self):
if apModeConfig.wpa.value != "0" and apModeConfig.wpa_passphrase.value: # (1,WPA), (2,WPA2), (3,WPA/WPA2)
apModeConfig.encrypt.value = True
apModeConfig.method.value = apModeConfig.wpa.value
elif apModeConfig.wep.value and apModeConfig.wep_key0.value:
apModeConfig.encrypt.value = True
apModeConfig.method.value = "0"
if len(apModeConfig.wep_key0.value) > 10:
apModeConfig.wepType.value = "128"
else:
apModeConfig.encrypt.value = False
def createConfigEntry(self):
global apModeConfig
#hostap settings
self.useApEntry = getConfigListEntry(_("Use AP Mode"), apModeConfig.useap)
self.setupModeEntry = getConfigListEntry(_("Setup Mode"), apModeConfig.setupmode)
self.wirelessDeviceEntry = getConfigListEntry(_("AP Device"), apModeConfig.wirelessdevice)
self.wirelessModeEntry = getConfigListEntry(_("AP Mode"), apModeConfig.wirelessmode)
self.channelEntry = getConfigListEntry(_("Channel (1~13)"), apModeConfig.channel)
self.ssidEntry = getConfigListEntry(_("SSID (1~32 Characters)"), apModeConfig.ssid)
self.beaconEntry = getConfigListEntry(_("Beacon (15~65535)"), apModeConfig.beacon)
self.rtsThresholdEntry = getConfigListEntry(_("RTS Threshold (0~2347)"), apModeConfig.rts_threshold)
self.fragmThresholdEntry = getConfigListEntry(_("FRAGM Threshold (256~2346)"), apModeConfig.fragm_threshold)
self.prambleEntry = getConfigListEntry(_("Preamble"), apModeConfig.preamble)
self.ignoreBroadcastSsid = getConfigListEntry(_("Ignore Broadcast SSID"), apModeConfig.ignore_broadcast_ssid)
# hostap encryption
self.encryptEntry = getConfigListEntry(_("Encrypt"), apModeConfig.encrypt)
self.methodEntry = getConfigListEntry(_("Method"), apModeConfig.method)
self.wepKeyTypeEntry = getConfigListEntry(_("KeyType"), apModeConfig.wepType)
self.wepKey0Entry = getConfigListEntry(_("WEP Key (HEX)"), apModeConfig.wep_key0)
self.wpaKeyEntry = getConfigListEntry(_("KEY (8~63 Characters)"), apModeConfig.wpa_passphrase)
self.groupRekeyEntry = getConfigListEntry(_("Group Rekey Interval"), apModeConfig.wpagrouprekey)
# interface settings
self.usedhcpEntry = getConfigListEntry(_("Use DHCP"), apModeConfig.usedhcp)
self.ipEntry = getConfigListEntry(_("IP Address"), apModeConfig.address)
self.netmaskEntry = getConfigListEntry(_("NetMask"), apModeConfig.netmask)
self.gatewayEntry = getConfigListEntry(_("Gateway"), apModeConfig.gateway)
def createConfig(self):
global apModeConfig
self.configList = []
self.configList.append( self.useApEntry )
if apModeConfig.useap.value is True:
self.configList.append( self.setupModeEntry )
self.configList.append( self.wirelessDeviceEntry )
self.configList.append( self.wirelessModeEntry )
self.configList.append( self.channelEntry )
self.configList.append( self.ssidEntry )
if apModeConfig.setupmode.value == "advanced":
self.configList.append( self.beaconEntry )
self.configList.append( self.rtsThresholdEntry )
self.configList.append( self.fragmThresholdEntry )
self.configList.append( self.prambleEntry )
self.configList.append( self.ignoreBroadcastSsid )
self.configList.append( self.encryptEntry )
if apModeConfig.encrypt.value is True:
self.configList.append( self.methodEntry )
if apModeConfig.method.value == "0": # wep
self.configList.append( self.wepKeyTypeEntry )
self.configList.append( self.wepKey0Entry )
else:
self.configList.append( self.wpaKeyEntry )
if apModeConfig.setupmode.value == "advanced":
self.configList.append( self.groupRekeyEntry )
## set network interfaces
self.configList.append( self.usedhcpEntry )
if apModeConfig.usedhcp.value is False:
self.configList.append( self.ipEntry )
self.configList.append( self.netmaskEntry )
self.configList.append( self.gatewayEntry )
self["config"].list = self.configList
self["config"].l.setList(self.configList)
def keyLeft(self):
ConfigListScreen.keyLeft(self)
self.newConfig()
def keyRight(self):
ConfigListScreen.keyRight(self)
self.newConfig()
def newConfig(self):
if self["config"].getCurrent() in [ self.encryptEntry, self.methodEntry, self.useApEntry, self.usedhcpEntry, self.setupModeEntry]:
self.createConfig()
def doConfigMsg(self):
try:
self.session.openWithCallback(self.doConfig, MessageBox, (_("Are you sure you want to set up your AP?\n\n") ) )
except:
printDebugMsg("doConfig failed")
def doConfig(self, ret = False):
global apModeConfig
if ret is not True:
return
if apModeConfig.useap.value is True and apModeConfig.encrypt.value is True:
if not self.checkEncrypKey():
return
if not self.checkConfig():
return
self.configStartMsg = self.session.openWithCallback(self.ConfigFinishedMsg, MessageBox, _("Please wait for AP Configuration....\n") , type = MessageBox.TYPE_INFO, enable_input = False)
if apModeConfig.useap.value is True:
self.networkRestart( nextFunc = self.makeConf )
else:
self.networkRestart( nextFunc = self.removeConf )
def checkEncrypKey(self):
if apModeConfig.method.value == "0":
if self.checkWep(apModeConfig.wep_key0.value) is False:
self.session.open(MessageBox, _("Invalid WEP key\n\n"), type = MessageBox.TYPE_ERROR, timeout = 10 )
else:
return True
else:
if not len(apModeConfig.wpa_passphrase.value) in range(8,65):
self.session.open(MessageBox, _("Invalid WPA key\n\n"), type = MessageBox.TYPE_ERROR, timeout = 10)
else:
return True
return False
def checkWep(self, key):
length = len(key)
if length == 0:
return False
elif apModeConfig.wepType.value == "64" and length == 10:
return True
elif apModeConfig.wepType.value == "128" and length == 26:
return True
else:
return False
def checkConfig(self):
# ssid Check
if len(apModeConfig.ssid.value) == 0 or len(apModeConfig.ssid.value) > 32:
self.session.open(MessageBox, _("Invalid SSID\n"), type = MessageBox.TYPE_ERROR, timeout = 10)
return False
elif apModeConfig.channel.value not in range(1,14):
self.session.open(MessageBox, _("Invalid channel\n"), type = MessageBox.TYPE_ERROR, timeout = 10)
return False
elif apModeConfig.beacon.value < 15 or apModeConfig.beacon.value > 65535:
self.session.open(MessageBox, _("Invalid beacon\n"), type = MessageBox.TYPE_ERROR, timeout = 10)
return False
elif apModeConfig.rts_threshold.value < 0 or apModeConfig.rts_threshold.value > 2347:
self.session.open(MessageBox, _("Invalid RTS Threshold\n"), type = MessageBox.TYPE_ERROR, timeout = 10)
return False
elif apModeConfig.fragm_threshold.value < 256 or apModeConfig.fragm_threshold.value > 2346:
self.session.open(MessageBox, _("Invalid Fragm Threshold\n"), type = MessageBox.TYPE_ERROR, timeout = 10)
return False
elif apModeConfig.wpagrouprekey.value < 0 or apModeConfig.wpagrouprekey.value > 3600:
self.session.open(MessageBox, _("Invalid wpagrouprekey\n"), type = MessageBox.TYPE_ERROR, timeout = 10)
return False
return True
def networkRestart(self, nextFunc = None ):
self.networkRestart_stop( nextFunc = nextFunc )
def networkRestart_stop(self, nextFunc = None ):
printDebugMsg("networkRestart_stop")
self.msgPlugins(False)
self.commands = [] # stop current network
self.networkRestartConsole = Console()
self.commands.append("/etc/init.d/avahi-daemon stop")
for iface in iNetwork.getAdapterList():
if iface != 'eth0' or not iNetwork.onRemoteRootFS():
self.commands.append("ifdown " + iface)
self.commands.append("ip addr flush dev " + iface)
self.commands.append("/etc/init.d/hostapd stop")
self.commands.append("/etc/init.d/networking stop")
self.commands.append("killall -9 udhcpc")
self.commands.append("rm /var/run/udhcpc*")
self.networkRestartConsole.eBatch(self.commands, nextFunc, debug = True)
def makeConf(self,extra_args):
printDebugMsg("makeConf")
self.writeNetworkInterfaces()
result = self.writeHostapdConfig()
if result == -1:
self.configStartMsg.close(False)
return
self.setIpForward(1)
self.networkRestart_start()
def removeConf(self,extra_args):
printDebugMsg("removeConf")
if fileExists("/etc/hostapd.conf", 0):
os_system("mv /etc/hostapd.conf /etc/hostapd.conf.linuxap.back")
fp = file("/etc/network/interfaces", 'w')
fp.write("# automatically generated by AP Setup Plugin\n# do NOT change manually!\n\n")
fp.write("auto lo\n")
fp.write("iface lo inet loopback\n\n")
# eth0 setup
fp.write("auto eth0\n")
if apModeConfig.usedhcp.value is True:
fp.write("iface eth0 inet dhcp\n")
else:
fp.write("iface eth0 inet static\n")
fp.write(" address %d.%d.%d.%d\n" % tuple(apModeConfig.address.value) )
fp.write(" netmask %d.%d.%d.%d\n" % tuple(apModeConfig.netmask.value) )
fp.write(" gateway %d.%d.%d.%d\n" % tuple(apModeConfig.gateway.value) )
fp.close()
self.setIpForward(0)
self.networkRestart_start()
def networkRestart_start(self):
printDebugMsg("networkRestart_start")
self.restartConsole = Console()
self.commands = []
self.commands.append("/etc/init.d/networking start")
self.commands.append("/etc/init.d/avahi-daemon start")
self.commands.append("/etc/init.d/hostapd start")
self.restartConsole.eBatch(self.commands, self.networkRestartFinished, debug=True)
def networkRestartFinished(self, data):
printDebugMsg("networkRestartFinished")
iNetwork.removeAdapterAttribute('br0',"ip")
iNetwork.removeAdapterAttribute('br0',"netmask")
iNetwork.removeAdapterAttribute('br0',"gateway")
iNetwork.getInterfaces(self.getInterfacesDataAvail)
def getInterfacesDataAvail(self, data):
if data is True and self.configStartMsg is not None:
self.configStartMsg.close(True)
def ConfigFinishedMsg(self, ret):
if ret is True:
self.session.openWithCallback(self.ConfigFinishedMsgCallback, MessageBox, _("Configuration of your AP is finished"), type = MessageBox.TYPE_INFO, timeout = 5, default = False)
else:
self.session.openWithCallback(self.close ,MessageBox, _("Invalid model or Image."), MessageBox.TYPE_ERROR)
def ConfigFinishedMsgCallback(self,data):
self.close()
def msgPlugins(self,reason = False):
for p in plugins.getPlugins(PluginDescriptor.WHERE_NETWORKCONFIG_READ):
p(reason=reason)
def writeNetworkInterfaces(self):
global apModeConfig
fp = file("/etc/network/interfaces", 'w')
fp.write("# automatically generated by AP Setup Plugin\n# do NOT change manually!\n\n")
fp.write("auto lo\n")
fp.write("iface lo inet loopback\n\n")
# eth0 setup
fp.write("auto eth0\n")
fp.write("iface eth0 inet manual\n")
fp.write(" up ip link set $IFACE up\n")
fp.write(" down ip link set $IFACE down\n\n")
# Wireless device setup
fp.write("auto %s\n" % apModeConfig.wirelessdevice.value)
fp.write("iface %s inet manual\n" % apModeConfig.wirelessdevice.value)
fp.write(" up ip link set $IFACE up\n")
fp.write(" down ip link set $IFACE down\n")
# branch setup
fp.write("auto br0\n")
if apModeConfig.usedhcp.value is True:
fp.write("iface br0 inet dhcp\n")
else:
fp.write("iface br0 inet static\n")
fp.write(" address %d.%d.%d.%d\n" % tuple(apModeConfig.address.value) )
fp.write(" netmask %d.%d.%d.%d\n" % tuple(apModeConfig.netmask.value) )
fp.write(" gateway %d.%d.%d.%d\n" % tuple(apModeConfig.gateway.value) )
fp.write(" pre-up brctl addbr br0\n")
fp.write(" pre-up brctl addif br0 eth0\n")
# fp.write(" pre-up brctl addif br0 wlan0\n") # handled by hostapd
fp.write(" post-down brctl delif br0 eth0\n")
# fp.write(" post-down brctl delif br0 wlan0\n") # handled by hostapd
fp.write(" post-down brctl delbr br0\n\n")
fp.write("\n")
fp.close()
def writeHostapdConfig(self): #c++
global apModeConfig
configDict = {}
for key in self.hostapdConfigList.keys():
configDict[key] = str(self.hostapdConfigList[key].value)
configDict["config.encrypt"] = str(int(apModeConfig.encrypt.value))
configDict["config.method"] = apModeConfig.method.value
ret = self.wirelessAP.writeHostapdConfig(configDict)
if(ret != 0):
return -1
return 0
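# Toggle kernel IPv4 forwarding immediately via /proc and persist the setting in /etc/sysctl.conf.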
def setIpForward(self, setValue = 0):
ipForwardFilePath = "/proc/sys/net/ipv4/ip_forward"
if not fileExists(ipForwardFilePath):
return -1
printDebugMsg("set %s to %d" % (ipForwardFilePath, setValue))
f = open(ipForwardFilePath, "w")
f.write("%d" % setValue)
f.close()
sysctlPath = "/etc/sysctl.conf"
sysctlLines = []
if fileExists(sysctlPath):
fp = file(sysctlPath, "r")
sysctlLines = fp.readlines()
fp.close()
sysctlList = {}
for line in sysctlLines:
line = line.strip()
if not line or line.startswith("#") or "=" not in line:
continue # skip blank lines and comments in sysctl.conf
(key, value) = line.split("=", 1)
key = key.strip()
value = value.strip()
sysctlList[key] = value
sysctlList["net.ipv4.ip_forward"] = str(setValue)
fp = file(sysctlPath, "w")
for (key,value) in sysctlList.items():
fp.write("%s=%s\n"%(key,value))
fp.close()
return 0
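# Build the list of candidate wireless interfaces (everything except eth*/br*/mon*) for the AP device selection.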
def checkWirelessDevices(self):
global apModeConfig
self.wlanDeviceList = []
wlanIfaces =[]
for x in iNetwork.getInstalledAdapters():
if x.startswith('eth') or x.startswith('br') or x.startswith('mon'):
continue
wlanIfaces.append(x)
description=self.getAdapterDescription(x)
if description == "Unknown network adapter":
self.wlanDeviceList.append((x, x))
else:
self.wlanDeviceList.append(( x, description + " (%s)"%x ))
apModeConfig.wirelessdevice = ConfigSelection( choices = self.wlanDeviceList )
def getAdapterDescription(self, iface):
classdir = "/sys/class/net/" + iface + "/device/"
driverdir = "/sys/class/net/" + iface + "/device/driver/"
if os_path.exists(classdir):
files = listdir(classdir)
if 'driver' in files:
if os_path.realpath(driverdir).endswith('rtw_usb_drv'):
return _("Realtek")+ " " + _("WLAN adapter.")
elif os_path.realpath(driverdir).endswith('ath_pci'):
return _("Atheros")+ " " + _("WLAN adapter.")
elif os_path.realpath(driverdir).endswith('zd1211b'):
return _("Zydas")+ " " + _("WLAN adapter.")
elif os_path.realpath(driverdir).endswith('rt73'):
return _("Ralink")+ " " + _("WLAN adapter.")
elif os_path.realpath(driverdir).endswith('rt73usb'):
return _("Ralink")+ " " + _("WLAN adapter.")
else:
return str(os_path.basename(os_path.realpath(driverdir))) + " " + _("WLAN adapter")
else:
return _("Unknown network adapter")
else:
return _("Unknown network adapter")
def __onClose(self):
for x in self["config"].list:
x[1].cancel()
apModeConfig.wpa.value = "0"
apModeConfig.wep.value = False
def keyCancel(self):
self.close()
def main(session, **kwargs):
session.open(WirelessAccessPoint)
def Plugins(**kwargs):
return [PluginDescriptor(name=_("Wireless Access Point"), description="Using a Wireless module as access point.", where = PluginDescriptor.WHERE_PLUGINMENU, needsRestart = True, fnc=main)]
| gpl-2.0 |
Remper/learningbyreading | src/mappings.py | 2 | 1612 | import logging as log
import os
import re
# builds dictionaries mapping between WordNet offsets/ids, BabelNet synsets and DBpedia entities
offset2bn = dict()
bn2offset = dict()
offset2wn = dict()
wn2offset = dict()
wn2bn = dict()
bn2wn = dict()
wn30wn31 = dict()
wn31wn30 = dict()
bn2dbpedia = dict()
dbpedia2bn = dict()
# the mapping is in a tabular file, e.g.:
# s00069798n Scout-n#2-n 110582611-n
with open(os.path.join(os.path.dirname(__file__), '../resources/bn35-wn31.map')) as f:
for line in f:
bn_id, wn_id, wn_offset = line.rstrip().split(' ')
if wn_offset.endswith("-s"): wn_offset = wn_offset.replace("-s", "-a") # To use only the tag "a" for adjectives
if wn_id.endswith("-s"): wn_id = re.sub("(-s)(#\d+)(-s)", "-a\\2-a", wn_id) # To use only the tag "a" for adjectives
offset2bn[wn_offset[1:]] = bn_id
bn2offset[bn_id] = wn_offset[1:]
offset2wn[wn_offset[1:]] = wn_id
wn2offset[wn_id] = wn_offset[1:]
wn2bn[wn_id] = bn_id
bn2wn[bn_id] = wn_id
# Mapping different WN versions
# 00013662-a 00013681-a
with open(os.path.join(os.path.dirname(__file__), '../resources/wn30-31')) as f:
for line in f:
wn30, wn31 = line.rstrip().split(' ')
wn30wn31[wn30] = wn31
wn31wn30[wn31] = wn30
# Mapping BabelNet-DBpedia
# s00000006n Dodecanol
for i in range(4):
filename = os.path.join(os.path.dirname(__file__), '../resources/bn-dbpedia{0}'.format(i+1))
with open(filename) as f:
for line in f:
bn_id, dbpedia_id = line.rstrip().split(' ')
dbpedia2bn[dbpedia_id] = bn_id
bn2dbpedia[bn_id] = dbpedia_id | gpl-2.0 |
hofschroeer/gnuradio | gr-filter/examples/resampler.py | 7 | 4489 | #!/usr/bin/env python
#
# Copyright 2009,2012,2013 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
from gnuradio import gr
from gnuradio import filter
from gnuradio import blocks
import sys
import numpy
try:
from gnuradio import analog
except ImportError:
sys.stderr.write("Error: Program requires gr-analog.\n")
sys.exit(1)
try:
from matplotlib import pyplot
except ImportError:
sys.stderr.write("Error: Program requires matplotlib (see: matplotlib.sourceforge.net).\n")
sys.exit(1)
class mytb(gr.top_block):
def __init__(self, fs_in, fs_out, fc, N=10000):
gr.top_block.__init__(self)
rerate = float(fs_out) / float(fs_in)
print("Resampling from %f to %f by %f " %(fs_in, fs_out, rerate))
# Creating our own taps
taps = filter.firdes.low_pass_2(32, 32, 0.25, 0.1, 80)
self.src = analog.sig_source_c(fs_in, analog.GR_SIN_WAVE, fc, 1)
#self.src = analog.noise_source_c(analog.GR_GAUSSIAN, 1)
self.head = blocks.head(gr.sizeof_gr_complex, N)
# A resampler with our taps
self.resamp_0 = filter.pfb.arb_resampler_ccf(rerate, taps,
flt_size=32)
# A resampler that just needs a resampling rate.
# Filter is created for us and designed to cover
# entire bandwidth of the input signal.
# An optional atten=XX rate can be used here to
# specify the out-of-band rejection (default=80).
self.resamp_1 = filter.pfb.arb_resampler_ccf(rerate)
self.snk_in = blocks.vector_sink_c()
self.snk_0 = blocks.vector_sink_c()
self.snk_1 = blocks.vector_sink_c()
self.connect(self.src, self.head, self.snk_in)
self.connect(self.head, self.resamp_0, self.snk_0)
self.connect(self.head, self.resamp_1, self.snk_1)
def main():
fs_in = 8000
fs_out = 20000
fc = 1000
N = 10000
tb = mytb(fs_in, fs_out, fc, N)
tb.run()
# Plot PSD of signals
nfftsize = 2048
fig1 = pyplot.figure(1, figsize=(10,10), facecolor="w")
sp1 = fig1.add_subplot(2,1,1)
sp1.psd(tb.snk_in.data(), NFFT=nfftsize,
noverlap=nfftsize / 4, Fs = fs_in)
sp1.set_title(("Input Signal at f_s=%.2f kHz" % (fs_in / 1000.0)))
sp1.set_xlim([-fs_in / 2, fs_in / 2])
sp2 = fig1.add_subplot(2,1,2)
sp2.psd(tb.snk_0.data(), NFFT=nfftsize,
noverlap=nfftsize / 4, Fs = fs_out,
label="With our filter")
sp2.psd(tb.snk_1.data(), NFFT=nfftsize,
noverlap=nfftsize / 4, Fs = fs_out,
label="With auto-generated filter")
sp2.set_title(("Output Signals at f_s=%.2f kHz" % (fs_out / 1000.0)))
sp2.set_xlim([-fs_out / 2, fs_out / 2])
sp2.legend()
# Plot signals in time
Ts_in = 1.0 / fs_in
Ts_out = 1.0 / fs_out
t_in = numpy.arange(0, len(tb.snk_in.data())*Ts_in, Ts_in)
t_out = numpy.arange(0, len(tb.snk_0.data())*Ts_out, Ts_out)
fig2 = pyplot.figure(2, figsize=(10,10), facecolor="w")
sp21 = fig2.add_subplot(2,1,1)
sp21.plot(t_in, tb.snk_in.data())
sp21.set_title(("Input Signal at f_s=%.2f kHz" % (fs_in / 1000.0)))
sp21.set_xlim([t_in[100], t_in[200]])
sp22 = fig2.add_subplot(2,1,2)
sp22.plot(t_out, tb.snk_0.data(),
label="With our filter")
sp22.plot(t_out, tb.snk_1.data(),
label="With auto-generated filter")
sp22.set_title(("Output Signals at f_s=%.2f kHz" % (fs_out / 1000.0)))
r = float(fs_out) / float(fs_in)
sp22.set_xlim([t_out[int(r * 100)], t_out[int(r * 200)]])
sp22.legend()
pyplot.show()
if __name__ == "__main__":
main()
| gpl-3.0 |
fusionapp/entropy | entropy/ientropy.py | 1 | 3968 | """
@copyright: 2007-2014 Quotemaster cc. See LICENSE for details.
Interface definitions for Entropy.
"""
from zope.interface import Interface, Attribute
class IContentObject(Interface):
"""
Immutable content object.
"""
hash = Attribute("""The hash function used to calculate the content digest.""")
contentDigest = Attribute("""A digest of the object content.""")
contentType = Attribute("""The MIME type describing the content of this object.""")
created = Attribute("""Creation timestamp of this object.""")
metadata = Attribute("""Object metadata.""")
def getContent():
"""
Get the data contained in this object.
@rtype: C{str}
"""
class IContentStore(Interface):
"""
Interface for storing and retrieving immutable content objects.
"""
def storeObject(content, contentType, metadata={}, created=None):
"""
Store an object.
@param content: the data to store.
@type content: C{str}
@param contentType: the MIME type of the content.
@type contentType: C{unicode}
@param metadata: a dictionary of metadata entries.
@type metadata: C{dict} of C{unicode}:C{unicode}
@param created: the creation timestamp; defaults to the current time.
@type created: L{epsilon.extime.Time} or C{None}
@returns: the object identifier.
@rtype: C{Deferred<unicode>}
"""
def getObject(objectID):
"""
Retrieve an object.
@param objectID: the object identifier.
@type objectID: C{unicode}
@returns: the content object.
@rtype: C{Deferred<IContentObject>}
"""
def migrateTo(destination):
"""
Initiate a migration to another content store.
All objects present in this content store at the moment the migration
is initiated MUST be replicated to the destination store before the
migration is considered complete. Objects created after the migration
is initiated MUST NOT be replicated.
NOTE: This method is optional, as some storage backends may be unable
to support enumerating all objects which is usually necessary to
implement migration.
@type destination: L{IContentStore}
@param destination: The destination store.
@rtype: L{IMigration}
@return: The migration powerup tracking the requested migration.
@raise NotImplementedError: if this implementation does not support
migration.
"""
class ISiblingStore(IContentStore):
"""
Sibling content store.
"""
class IBackendStore(IContentStore):
"""
Backend content store.
"""
class IUploadScheduler(Interface):
"""
Manager of pending uploads.
"""
def scheduleUpload(objectId, backend):
"""
Notify the scheduler that an object needs to be uploaded to a backend.
"""
class IMigrationManager(Interface):
"""
Manager for migrations from one content store to another.
"""
def migrate(source, destination):
"""
Initiate a migration between two content stores. Some content stores
may not support migration, as some storage backends cannot support
enumerating all stored objects.
@type source: L{IContentStore}
@param source: The source content store; must support migration.
@type destination: L{IContentStore}
@param destination: The destination store; does not need any special
support for migration.
@rtype: L{IMigration}
@return: The migration powerup responsible for tracking the requested
migration.
"""
class IMigration(Interface):
"""
Powerup tracking a migration in progress.
"""
def run():
"""
Run this migration.
If the migration is already running, this is a noop.
"""
| mit |
ruibarreira/linuxtrail | usr/lib/python3/dist-packages/orca/scripts/apps/gcalctool/script.py | 5 | 3292 | # Orca
#
# Copyright 2004-2008 Sun Microsystems Inc.
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc., Franklin Street, Fifth Floor,
# Boston MA 02110-1301 USA.
"""Provides a custom script for gcalctool."""
__id__ = "$Id$"
__version__ = "$Revision$"
__date__ = "$Date$"
__copyright__ = "Copyright (c) 2005-2008 Sun Microsystems Inc."
__license__ = "LGPL"
import pyatspi
import orca.scripts.toolkits.gtk as gtk
import orca.messages as messages
########################################################################
# #
# The GCalcTool script class. #
# #
########################################################################
class Script(gtk.Script):
def __init__(self, app):
"""Creates a new script for the given application. Callers
should use the getScript factory method instead of calling
this constructor directly.
Arguments:
- app: the application to create a script for.
"""
gtk.Script.__init__(self, app)
self._resultsDisplay = None
self._statusLine = None
def onWindowActivated(self, event):
"""Called whenever one of gcalctool's toplevel windows is activated.
Arguments:
- event: the window activated Event
"""
if self._resultsDisplay and self._statusLine:
gtk.Script.onWindowActivated(self, event)
return
obj = event.source
role = obj.getRole()
if role != pyatspi.ROLE_FRAME:
gtk.Script.onWindowActivated(self, event)
return
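# Locate the calculator's results display (edit bar) and read-only status line so later events can be presented.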
isEditbar = lambda x: x and x.getRole() == pyatspi.ROLE_EDITBAR
self._resultsDisplay = pyatspi.findDescendant(obj, isEditbar)
if not self._resultsDisplay:
self.presentMessage(messages.CALCULATOR_DISPLAY_NOT_FOUND)
isStatusLine = lambda x: x and x.getRole() == pyatspi.ROLE_TEXT \
and not x.getState().contains(pyatspi.STATE_EDITABLE)
self._statusLine = pyatspi.findDescendant(obj, isStatusLine)
gtk.Script.onWindowActivated(self, event)
def onTextInserted(self, event):
"""Called whenever text is inserted into gcalctool's text display.
Arguments:
- event: the text inserted Event
"""
if self.utilities.isSameObject(event.source, self._statusLine):
self.presentMessage(self.utilities.displayedText(self._statusLine))
return
gtk.Script.onTextInserted(self, event)
| gpl-3.0 |
vponomaryov/manila | manila/tests/share/drivers/netapp/dataontap/client/test_client_base.py | 1 | 6337 | # Copyright (c) 2014 Alex Meade. All rights reserved.
# Copyright (c) 2014 Clinton Knight. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import ddt
import mock
from oslo_log import log
from manila.share.drivers.netapp.dataontap.client import api as netapp_api
from manila.share.drivers.netapp.dataontap.client import client_base
from manila import test
from manila.tests.share.drivers.netapp.dataontap.client import fakes as fake
@ddt.ddt
class NetAppBaseClientTestCase(test.TestCase):
def setUp(self):
super(NetAppBaseClientTestCase, self).setUp()
# Mock loggers as themselves to allow logger arg validation
mock_logger = log.getLogger('mock_logger')
self.mock_object(client_base.LOG,
'error',
mock.Mock(side_effect=mock_logger.error))
self.mock_object(client_base.LOG,
'exception',
mock.Mock(side_effect=mock_logger.error))
self.client = client_base.NetAppBaseClient(**fake.CONNECTION_INFO)
self.client.connection = mock.MagicMock()
self.connection = self.client.connection
def test_get_ontapi_version(self):
version_response = netapp_api.NaElement(fake.ONTAPI_VERSION_RESPONSE)
self.connection.invoke_successfully.return_value = version_response
major, minor = self.client.get_ontapi_version(cached=False)
self.assertEqual('1', major)
self.assertEqual('19', minor)
def test_get_ontapi_version_cached(self):
self.connection.get_api_version.return_value = (1, 20)
major, minor = self.client.get_ontapi_version()
self.assertEqual(1, self.connection.get_api_version.call_count)
self.assertEqual(1, major)
self.assertEqual(20, minor)
def test_get_system_version(self):
version_response = netapp_api.NaElement(
fake.SYSTEM_GET_VERSION_RESPONSE)
self.connection.invoke_successfully.return_value = version_response
result = self.client.get_system_version()
self.assertEqual(fake.VERSION, result['version'])
self.assertEqual(('8', '2', '1'), result['version-tuple'])
def test_init_features(self):
self.client._init_features()
self.assertSetEqual(set(), self.client.features.defined_features)
@ddt.data('tag_name', '{http://www.netapp.com/filer/admin}tag_name')
def test_strip_xml_namespace(self, element):
result = self.client._strip_xml_namespace(element)
self.assertEqual('tag_name', result)
def test_send_request(self):
element = netapp_api.NaElement('fake-api')
self.client.send_request('fake-api')
self.assertEqual(
element.to_string(),
self.connection.invoke_successfully.call_args[0][0].to_string())
self.assertTrue(self.connection.invoke_successfully.call_args[0][1])
def test_send_request_no_tunneling(self):
element = netapp_api.NaElement('fake-api')
self.client.send_request('fake-api', enable_tunneling=False)
self.assertEqual(
element.to_string(),
self.connection.invoke_successfully.call_args[0][0].to_string())
self.assertFalse(self.connection.invoke_successfully.call_args[0][1])
def test_send_request_with_args(self):
element = netapp_api.NaElement('fake-api')
api_args = {'arg1': 'data1', 'arg2': 'data2'}
element.translate_struct(api_args)
self.client.send_request('fake-api', api_args=api_args)
self.assertEqual(
element.to_string(),
self.connection.invoke_successfully.call_args[0][0].to_string())
self.assertTrue(self.connection.invoke_successfully.call_args[0][1])
def test_get_licenses(self):
api_response = netapp_api.NaElement(fake.LICENSE_V2_LIST_INFO_RESPONSE)
self.mock_object(
self.client, 'send_request', mock.Mock(return_value=api_response))
response = self.client.get_licenses()
self.assertSequenceEqual(fake.LICENSES, response)
def test_get_licenses_api_error(self):
self.mock_object(self.client,
'send_request',
mock.Mock(side_effect=netapp_api.NaApiError))
self.assertRaises(netapp_api.NaApiError, self.client.get_licenses)
self.assertEqual(1, client_base.LOG.exception.call_count)
def test_send_ems_log_message(self):
self.assertRaises(NotImplementedError,
self.client.send_ems_log_message,
{})
@ddt.ddt
class FeaturesTestCase(test.TestCase):
def setUp(self):
super(FeaturesTestCase, self).setUp()
self.features = client_base.Features()
def test_init(self):
self.assertSetEqual(set(), self.features.defined_features)
def test_add_feature_default(self):
self.features.add_feature('FEATURE_1')
self.assertTrue(self.features.FEATURE_1)
self.assertIn('FEATURE_1', self.features.defined_features)
@ddt.data(True, False)
def test_add_feature(self, value):
self.features.add_feature('FEATURE_2', value)
self.assertEqual(value, self.features.FEATURE_2)
self.assertIn('FEATURE_2', self.features.defined_features)
@ddt.data('True', 'False', 0, 1, 1.0, None, [], {}, (True,))
def test_add_feature_type_error(self, value):
self.assertRaises(TypeError,
self.features.add_feature,
'FEATURE_3',
value)
self.assertNotIn('FEATURE_3', self.features.defined_features)
def test_get_attr_missing(self):
self.assertRaises(AttributeError, getattr, self.features, 'FEATURE_4')
| apache-2.0 |