repo_name | path | copies | size | content | license
---|---|---|---|---|---
terranodo/geonode | geonode/security/tests.py | 13 | 19666 | # -*- coding: utf-8 -*-
#########################################################################
#
# Copyright (C) 2016 OSGeo
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#########################################################################
import json
from django.core.urlresolvers import reverse
from django.test import TestCase
from tastypie.test import ResourceTestCase
from django.contrib.auth import get_user_model
from guardian.shortcuts import get_anonymous_user, assign_perm, remove_perm
from geonode.base.populate_test_data import create_models, all_public
from geonode.maps.tests_populate_maplayers import create_maplayers
from geonode.people.models import Profile
from geonode.layers.models import Layer
from geonode.maps.models import Map
from geonode.layers.populate_layers_data import create_layer_data
from geonode.groups.models import Group
class BulkPermissionsTests(ResourceTestCase):
fixtures = ['initial_data.json', 'bobby']
def setUp(self):
super(BulkPermissionsTests, self).setUp()
self.user = 'admin'
self.passwd = 'admin'
self.list_url = reverse(
'api_dispatch_list',
kwargs={
'api_name': 'api',
'resource_name': 'layers'})
self.bulk_perms_url = reverse('bulk_permissions')
create_models(type='layer')
all_public()
self.perm_spec = {
"users": {"admin": ["view_resourcebase"]}, "groups": {}}
def test_set_bulk_permissions(self):
"""Test that after restrict view permissions on two layers
bobby is unable to see them"""
layers = Layer.objects.all()[:2].values_list('id', flat=True)
layers_id = map(lambda x: str(x), layers)
self.client.login(username='admin', password='admin')
resp = self.client.get(self.list_url)
self.assertEquals(len(self.deserialize(resp)['objects']), 8)
data = {
'permissions': json.dumps(self.perm_spec),
'resources': layers_id
}
resp = self.client.post(self.bulk_perms_url, data)
self.assertHttpOK(resp)
self.client.logout()
self.client.login(username='bobby', password='bob')
resp = self.client.get(self.list_url)
self.assertEquals(len(self.deserialize(resp)['objects']), 6)
def test_bobby_cannot_set_all(self):
"""Test that Bobby can set the permissions only only on the ones
for which he has the right"""
layer = Layer.objects.all()[0]
self.client.login(username='admin', password='admin')
# give bobby the right to change the layer permissions
assign_perm('change_resourcebase', Profile.objects.get(username='bobby'), layer.get_self_resource())
self.client.logout()
self.client.login(username='bobby', password='bob')
layer2 = Layer.objects.all()[1]
data = {
'permissions': json.dumps({"users": {"bobby": ["view_resourcebase"]}, "groups": {}}),
'resources': [layer.id, layer2.id]
}
resp = self.client.post(self.bulk_perms_url, data)
self.assertTrue(layer2.title in json.loads(resp.content)['not_changed'])
class PermissionsTest(TestCase):
"""Tests GeoNode permissions
"""
fixtures = ['initial_data.json', 'bobby']
perm_spec = {
"users": {
"admin": [
"change_resourcebase",
"change_resourcebase_permissions",
"view_resourcebase"]},
"groups": {}}
# Permissions Tests
# Users
# - admin (pk=2)
# - bobby (pk=1)
def setUp(self):
self.user = 'admin'
self.passwd = 'admin'
create_models(type='layer')
create_layer_data()
self.anonymous_user = get_anonymous_user()
def test_layer_set_default_permissions(self):
"""Verify that Layer.set_default_permissions is behaving as expected
"""
# Get a Layer object to work with
layer = Layer.objects.all()[0]
# Set the default permissions
layer.set_default_permissions()
# Test that the anonymous user can read
self.assertTrue(
self.anonymous_user.has_perm(
'view_resourcebase',
layer.get_self_resource()))
# Test that the owner user can read
self.assertTrue(
layer.owner.has_perm(
'view_resourcebase',
layer.get_self_resource()))
# Test that the owner user can download it
self.assertTrue(
layer.owner.has_perm(
'download_resourcebase',
layer.get_self_resource()))
# Test that the owner user can edit metadata
self.assertTrue(
layer.owner.has_perm(
'change_resourcebase_metadata',
layer.get_self_resource()))
# Test that the owner user can edit data if the layer is a vector type
if layer.storeType == 'dataStore':
self.assertTrue(
layer.owner.has_perm(
'change_layer_data',
layer))
# Test that the owner user can edit styles
self.assertTrue(
layer.owner.has_perm(
'change_layer_style',
layer))
# Test that the owner can manage the layer
self.assertTrue(
layer.owner.has_perm(
'change_resourcebase',
layer.get_self_resource()))
self.assertTrue(
layer.owner.has_perm(
'delete_resourcebase',
layer.get_self_resource()))
self.assertTrue(
layer.owner.has_perm(
'change_resourcebase_permissions',
layer.get_self_resource()))
self.assertTrue(
layer.owner.has_perm(
'publish_resourcebase',
layer.get_self_resource()))
def test_set_layer_permissions(self):
"""Verify that the set_layer_permissions view is behaving as expected
"""
# Get a layer to work with
layer = Layer.objects.all()[0]
# FIXME Test a comprehensive set of permissions specifications
# Set the Permissions
layer.set_permissions(self.perm_spec)
# Test that the permissions for the anonymous user are set
self.assertFalse(
self.anonymous_user.has_perm(
'view_resourcebase',
layer.get_self_resource()))
# Test that previous permissions for users other than the ones specified in
# the perm_spec (and the layer's owner) were removed
current_perms = layer.get_all_level_info()
self.assertEqual(len(current_perms['users'].keys()), 2)
# Test that the User permissions specified in the perm_spec were
# applied properly
for username, perm in self.perm_spec['users'].items():
user = get_user_model().objects.get(username=username)
self.assertTrue(user.has_perm(perm, layer.get_self_resource()))
def test_ajax_layer_permissions(self):
"""Verify that the ajax_layer_permissions view is behaving as expected
"""
# Setup some layer names to work with
valid_layer_typename = Layer.objects.all()[0].id
invalid_layer_id = 9999999
# Test that an invalid layer id is handled properly
response = self.client.post(
reverse(
'resource_permissions', args=(
invalid_layer_id,)), data=json.dumps(
self.perm_spec), content_type="application/json")
self.assertEquals(response.status_code, 404)
# Test that GET returns permissions
response = self.client.get(
reverse(
'resource_permissions',
args=(
valid_layer_typename,
)))
assert('permissions' in response.content)
# Test that a user is required to have maps.change_layer_permissions
# First test un-authenticated
response = self.client.post(
reverse(
'resource_permissions', args=(
valid_layer_typename,)), data=json.dumps(
self.perm_spec), content_type="application/json")
self.assertEquals(response.status_code, 401)
# Next Test with a user that does NOT have the proper perms
logged_in = self.client.login(username='bobby', password='bob')
self.assertEquals(logged_in, True)
response = self.client.post(
reverse(
'resource_permissions', args=(
valid_layer_typename,)), data=json.dumps(
self.perm_spec), content_type="application/json")
self.assertEquals(response.status_code, 401)
# Login as a user with the proper permission and test the endpoint
logged_in = self.client.login(username='admin', password='admin')
self.assertEquals(logged_in, True)
response = self.client.post(
reverse(
'resource_permissions', args=(
valid_layer_typename,)), data=json.dumps(
self.perm_spec), content_type="application/json")
# Test that the method returns 200
self.assertEquals(response.status_code, 200)
# Test that the permissions specification is applied
# Should we do this here, or assume the tests in
# test_set_layer_permissions will handle that?
def test_perms_info(self):
""" Verify that the perms_info view is behaving as expected
"""
# Test with a Layer object
layer = Layer.objects.all()[0]
layer.set_default_permissions()
# Test that the anonymous user can read
self.assertTrue(
self.anonymous_user.has_perm(
'view_resourcebase',
layer.get_self_resource()))
# Test that layer owner can edit layer
self.assertTrue(
layer.owner.has_perm(
'change_resourcebase',
layer.get_self_resource()))
# TODO Much more to do here once jj0hns0n understands the ACL system
# better
# Test with a Map object
# TODO
# now we test permissions, first on an authenticated user and then on the
# anonymous user
# 1. view_resourcebase
# 2. change_resourcebase
# 3. delete_resourcebase
# 4. change_resourcebase_metadata
# 5. change_resourcebase_permissions
# 6. change_layer_data
# 7. change_layer_style
def test_not_superuser_permissions(self):
# grab bobby
bob = get_user_model().objects.get(username='bobby')
# grab a layer
layer = Layer.objects.all()[0]
layer.set_default_permissions()
# verify bobby has view/change permissions on it but not manage
self.assertFalse(
bob.has_perm(
'change_resourcebase_permissions',
layer.get_self_resource()))
self.assertTrue(self.client.login(username='bobby', password='bob'))
# 1. view_resourcebase
# 1.1 has view_resourcebase: verify that bobby can access the layer
# detail page
self.assertTrue(
bob.has_perm(
'view_resourcebase',
layer.get_self_resource()))
response = self.client.get(reverse('layer_detail', args=(layer.typename,)))
self.assertEquals(response.status_code, 200)
# 1.2 has not view_resourcebase: verify that bobby can not access the
# layer detail page
remove_perm('view_resourcebase', bob, layer.get_self_resource())
anonymous_group = Group.objects.get(name='anonymous')
remove_perm('view_resourcebase', anonymous_group, layer.get_self_resource())
response = self.client.get(reverse('layer_detail', args=(layer.typename,)))
self.assertEquals(response.status_code, 401)
# 2. change_resourcebase
# 2.1 has not change_resourcebase: verify that bobby cannot access the
# layer replace page
response = self.client.get(reverse('layer_replace', args=(layer.typename,)))
self.assertEquals(response.status_code, 401)
# 2.2 has change_resourcebase: verify that bobby can access the layer
# replace page
assign_perm('change_resourcebase', bob, layer.get_self_resource())
self.assertTrue(
bob.has_perm(
'change_resourcebase',
layer.get_self_resource()))
response = self.client.get(reverse('layer_replace', args=(layer.typename,)))
self.assertEquals(response.status_code, 200)
# 3. delete_resourcebase
# 3.1 has not delete_resourcebase: verify that bobby cannot access the
# layer delete page
response = self.client.get(reverse('layer_remove', args=(layer.typename,)))
self.assertEquals(response.status_code, 401)
# 3.2 has delete_resourcebase: verify that bobby can access the layer
# delete page
assign_perm('delete_resourcebase', bob, layer.get_self_resource())
self.assertTrue(
bob.has_perm(
'delete_resourcebase',
layer.get_self_resource()))
response = self.client.get(reverse('layer_remove', args=(layer.typename,)))
self.assertEquals(response.status_code, 200)
# 4. change_resourcebase_metadata
# 4.1 has not change_resourcebase_metadata: verify that bobby cannot
# access the layer metadata page
response = self.client.get(reverse('layer_metadata', args=(layer.typename,)))
self.assertEquals(response.status_code, 401)
# 4.2 has change_resourcebase_metadata: verify that bobby can access the
# layer metadata page
assign_perm('change_resourcebase_metadata', bob, layer.get_self_resource())
self.assertTrue(
bob.has_perm(
'change_resourcebase_metadata',
layer.get_self_resource()))
response = self.client.get(reverse('layer_metadata', args=(layer.typename,)))
self.assertEquals(response.status_code, 200)
# 5. change_resourcebase_permissions
# should be impossible for the user without change_resourcebase_permissions
# to change permissions as the permission form is not available in the
# layer detail page?
# 6. change_layer_data
# must be done in integration test sending a WFS-T request with CURL
# 7. change_layer_style
# 7.1 has not change_layer_style: verify that bobby cannot access
# the layer style page
response = self.client.get(reverse('layer_style_manage', args=(layer.typename,)))
self.assertEquals(response.status_code, 401)
# 7.2 has change_layer_style: verify that bobby can access the
# change layer style page
assign_perm('change_layer_style', bob, layer)
self.assertTrue(
bob.has_perm(
'change_layer_style',
layer))
response = self.client.get(reverse('layer_style_manage', args=(layer.typename,)))
self.assertEquals(response.status_code, 200)
def test_anonymous_permissions(self):
# grab a layer
layer = Layer.objects.all()[0]
layer.set_default_permissions()
# 1. view_resourcebase
# 1.1 has view_resourcebase: verify that anonymous user can access
# the layer detail page
self.assertTrue(
self.anonymous_user.has_perm(
'view_resourcebase',
layer.get_self_resource()))
response = self.client.get(reverse('layer_detail', args=(layer.typename,)))
self.assertEquals(response.status_code, 200)
# 1.2 has not view_resourcebase: verify that anonymous user can not
# access the layer detail page
remove_perm('view_resourcebase', self.anonymous_user, layer.get_self_resource())
anonymous_group = Group.objects.get(name='anonymous')
remove_perm('view_resourcebase', anonymous_group, layer.get_self_resource())
response = self.client.get(reverse('layer_detail', args=(layer.typename,)))
self.assertEquals(response.status_code, 302)
# 2. change_resourcebase
# 2.1 has not change_resourcebase: verify that anonymous user cannot
# access the layer replace page but redirected to login
response = self.client.get(reverse('layer_replace', args=(layer.typename,)))
self.assertEquals(response.status_code, 302)
# 3. delete_resourcebase
# 3.1 has not delete_resourcebase: verify that anonymous user cannot
# access the layer delete page but redirected to login
response = self.client.get(reverse('layer_remove', args=(layer.typename,)))
self.assertEquals(response.status_code, 302)
# 4. change_resourcebase_metadata
# 4.1 has not change_resourcebase_metadata: verify that anonymous user
# cannot access the layer metadata page but redirected to login
response = self.client.get(reverse('layer_metadata', args=(layer.typename,)))
self.assertEquals(response.status_code, 302)
# 5 N/A; 6 is an integration test...
# 7. change_layer_style
# 7.1 has not change_layer_style: verify that anonymous user cannot access
# the layer style page but redirected to login
response = self.client.get(reverse('layer_style_manage', args=(layer.typename,)))
self.assertEquals(response.status_code, 302)
def test_map_download(self):
"""Test the correct permissions on layers on map download"""
create_models(type='map')
create_maplayers()
# Get a Map
the_map = Map.objects.get(title='GeoNode Default Map')
# Get a MapLayer, mark it as local and not a background layer,
# and make it the only layer in the map
map_layer = the_map.layer_set.get(name='geonode:CA')
map_layer.local = True
map_layer.group = 'overlay'
map_layer.save()
the_map.layer_set.all().delete()
the_map.layer_set.add(map_layer)
# Get the Layer and set bobby's permissions on it and on the map
bobby = Profile.objects.get(username='bobby')
the_layer = Layer.objects.get(typename='geonode:CA')
remove_perm('download_resourcebase', bobby, the_layer.get_self_resource())
remove_perm('download_resourcebase', Group.objects.get(name='anonymous'),
the_layer.get_self_resource())
assign_perm('view_resourcebase', bobby, the_layer.get_self_resource())
assign_perm('download_resourcebase', bobby, the_map.get_self_resource())
self.client.login(username='bobby', password='bob')
response = self.client.get(reverse('map_download', args=(the_map.id,)))
self.assertTrue('Could not find downloadable layers for this map' in response.content)
| gpl-3.0 |
keitaroyam/yamtbx | dxtbx_formats/FormatEMD.py | 1 | 7738 | # Class for reading .emd files written by Velox.
# This code was written based on FormatSER.py from https://github.com/cctbx/dxtbx/blob/master/format/FormatSER.py
from __future__ import absolute_import, division, print_function
import struct
import h5py
import numpy
import os
import json
from scitbx.array_family import flex
from dxtbx.format.Format import Format
from dxtbx.format.FormatMultiImage import FormatMultiImage
def get_metadata(metadata):
mds = []
for i in range(metadata.shape[1]):
metadata_array = metadata[:, i].T
mdata_string = metadata_array.tostring().decode("utf-8")
mds.append(json.loads(mdata_string.rstrip('\x00')))
return mds
# get_metadata()
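# A sketch of the layout get_metadata() assumes (inferred from this code, not
# from a Velox file specification): the Metadata dataset is a 2-D uint8 array
# of shape (n_bytes, n_frames) where column i is frame i's NUL-padded JSON
# document, e.g. something like
#   {"Stage": {"AlphaTilt": "0.1745"}, "Optics": {...}, "BinaryResult": {...}}
# so mds[i]["Stage"]["AlphaTilt"] is frame i's tilt angle in radians.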
def analyse_angle(metadata):
alphas = []
for i, md in enumerate(metadata):
alpha = numpy.rad2deg(float(md["Stage"]["AlphaTilt"]))
alphas.append(alpha)
if len(alphas) < 2:
return [0,0], 0.
d_alphas = numpy.diff(alphas)
q25, q50, q75 = numpy.percentile(d_alphas, [25, 50, 75])
iqr = q75-q25
iqrc = 1.5
lowlim, highlim = q25 - iqrc*iqr, q75 + iqrc*iqr
d_alphas2 = d_alphas[numpy.where(numpy.logical_and(d_alphas>lowlim, d_alphas<highlim))] # outlier rejected
d_alpha_z = abs(d_alphas-numpy.mean(d_alphas2))/numpy.std(d_alphas2)
valid_range = [0, len(metadata)-1]
for i in range(len(metadata)-1):
if d_alpha_z[i] < 3: break
valid_range[0] = i+1
for i in reversed(range(len(metadata)-1)):
if d_alpha_z[i] < 3: break
valid_range[1] = i
if valid_range[0] > valid_range[1]:
valid_range = [0, len(metadata)-1] # reset
mean_alpha_step = (alphas[valid_range[1]] - alphas[valid_range[0]])/(valid_range[1]-valid_range[0])
return valid_range, mean_alpha_step
# analyse_angle()
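# Illustrative behaviour (hypothetical tilt series): for a stage ramping at
# roughly 0.5 deg/frame with small readout noise, where the final frame simply
# repeated the last angle (step ~0.0), the IQR filter rejects that 0.0 step as
# an outlier, the trailing loop trims the frame, and the function returns
# approximately ([0, n-2], 0.5): the usable frame range and the mean tilt step.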
class FormatEMD(FormatMultiImage, Format):
def __init__(self, image_file, **kwargs):
from dxtbx import IncorrectFormatError
if not self.understand(image_file):
raise IncorrectFormatError(self, image_file)
FormatMultiImage.__init__(self, **kwargs)
Format.__init__(self, image_file, **kwargs)
@staticmethod
def understand(image_file):
try:
h = h5py.File(image_file, "r")
except IOError:
return False
if not "/Data/Image" in h:
return False
keys = list(h["/Data/Image"].keys())
if len(keys) != 1: return False
d = h["/Data/Image"][keys[0]]
if "Data" in d and "Metadata" in d and len(d["Data"].shape) == 3:
return True
return False
@staticmethod
def _read_metadata(image_file):
h = h5py.File(image_file, "r")
ret = {}
image_path = h["/Data/Image"]
assert len(image_path.keys()) == 1
k = list(image_path.keys())[0]
ret["image_file"] = image_file
ret["file_handle"] = h
ret["data_path"] = "/Data/Image/%s/Data" % k
ret["metadata_path"] = "/Data/Image/%s/Metadata" % k
metadata = get_metadata(h[ret["metadata_path"]])
valid_range, mean_alpha_step = analyse_angle(metadata)
data = h[ret["data_path"]]
ret["n_frames"] = data.shape[2]
ret["valid_range"] = valid_range
ret["mean_alpha_step"] = mean_alpha_step
ret["width"], ret["height"] = data.shape[:2]
ret["binning"] = int(metadata[0]["BinaryResult"]["ImageSize"]["width"])//ret["width"]
h_planck, m0, e, c = 6.62607004e-34, 9.10938356e-31, 1.6021766208e-19, 299792458.0
voltage = float(metadata[0]["Optics"]["AccelerationVoltage"])
ret["wavelength"] = h_planck/numpy.sqrt(2*m0*e*voltage*(1.+e*voltage/2./m0/c**2)) * 1.e10
return ret
def _start(self):
"""Open the image file, read useful metadata into an internal dictionary
self._header_dictionary"""
self._header_dictionary = self._read_metadata(self._image_file)
return
def _goniometer(self):
"""Dummy goniometer, 'vertical' as the images are viewed. Not completely
sure about the handedness yet"""
if self._header_dictionary["mean_alpha_step"] > 0: # XXX is this really OK??
return self._goniometer_factory.known_axis((0, -1, 0))
else:
return self._goniometer_factory.known_axis((0, 1, 0))
def _detector(self):
"""Dummy detector"""
image_size = (self._header_dictionary["width"], self._header_dictionary["height"])
binning = self._header_dictionary["binning"]
pixel_size = 0.014*binning, 0.014*binning
distance = 2000
trusted_range = (-4, 65535)
beam_centre = [(p * i) / 2 for p, i in zip(pixel_size, image_size)]
d = self._detector_factory.simple(
"PAD",
distance,
beam_centre,
"+x",
"-y",
pixel_size,
image_size,
trusted_range,
)
# Not sure what the gain is
# for p in d: p.set_gain(8)
return d
def _beam(self):
return self._beam_factory.make_polarized_beam(
sample_to_source=(0.0, 0.0, 1.0),
wavelength=self._header_dictionary["wavelength"],
polarization=(0, 1, 0),
polarization_fraction=0.5,
)
def _scan(self):
"""Dummy scan for this stack"""
vr = self._header_dictionary["valid_range"]
image_range = (vr[0]+1, vr[1]+1)
print("Recommended image_raneg=", image_range)
image_range = (1, self._header_dictionary["n_frames"])
exposure_times = 0.0
nframes = self._header_dictionary["n_frames"] #vr[1]-vr[0]+1
#nframes = vr[1]-vr[0]+1
osc_step = abs(self._header_dictionary["mean_alpha_step"])
oscillation = (osc_step*(vr[0]-1), osc_step)
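# In the dxtbx scan model, oscillation is (start angle, per-image width) in
# degrees, so the scan nominally starts where the outlier-trimmed valid
# range begins.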
# FIXME we do actually have acquisition times, might as well use them
epochs = [0] * nframes
#print(len(epochs), self.get_num_images())
return self._scan_factory.make_scan(
image_range, exposure_times, oscillation, epochs, deg=True
)
def get_num_images(self):
#h = self._header_dictionary["file_handle"]
#data_path = self._header_dictionary["data_path"]
#return h[data_path].shape[2]
#vr = self._header_dictionary["valid_range"]
return self._header_dictionary["n_frames"] # vr[1] - vr[0] + 1
#return vr[1] - vr[0] + 1
# This is still required for dials_regression/test.py
def get_detectorbase(self):
pass
def get_goniometer(self, index=None):
return Format.get_goniometer(self)
def get_detector(self, index=None):
return Format.get_detector(self)
def get_beam(self, index=None):
return Format.get_beam(self)
def get_scan(self, index=None):
if index is None:
return Format.get_scan(self)
else:
scan = Format.get_scan(self)
return scan[index]
def get_image_file(self, index=None):
return Format.get_image_file(self)
def get_raw_data(self, index):
#print(self._header_dictionary["valid_range"][0])
#index += self._header_dictionary["valid_range"][0]
h = h5py.File(self._header_dictionary["image_file"], "r")
data_path = self._header_dictionary["data_path"]
raw_data = h[data_path][:,:,index].astype(numpy.int32) # flex.int does not support int16
offset_key = "DXTBX_EMD_OFFSET"
if os.environ.get(offset_key):
print("DEBUG:: adding %s for %d"%(os.environ[offset_key], index))
raw_data += int(os.environ[offset_key])
return flex.int(raw_data)
| bsd-3-clause |
ec-geolink/glharvest | glharvest/tests/test_scenarios.py | 1 | 6841 | """test_scenarios.py
End-to-end tests for the Harvester.
"""
import sys
import os
import RDF
from glharvest import jobs, registry, void
def test_can_update_a_provider_with_a_new_resource(repository):
"""This test tests the case where a provider gives informationa about one
resource at time t0 then, at time t1, their data dump no longer contains
information about the old resource. In this case, we keep the previous
knowledge and add the new knowledge because we don't allow providers to
completely remove a resource.
"""
# Setup
repository.clear()
provider = 'test'
infile_fmt = 'turtle'
base_uri = 'http://example.org/test/'
parser = RDF.TurtleParser()
state_t0 = """
@prefix void: <http://rdfs.org/ns/void#> .
@prefix rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#> .
@prefix test: <http://example.org/test/> .
test:A a test:Thing ;
test:someProperty 'some property' .
"""
state_t1 = """@prefix void: <http://rdfs.org/ns/void#> .
@prefix rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#> .
@prefix test: <http://example.org/test/> .
test:B a test:Thing ;
test:someProperty 'some other property' .
"""
# State t0
for statement in parser.parse_string_as_stream(state_t0, base_uri=base_uri):
print(statement.subject)
repository.delete_triples_about(statement.subject, context=provider)
repository.import_from_string(state_t0, context=provider, fmt=infile_fmt)
assert repository.size() == 2
# State t1
for statement in parser.parse_string_as_stream(state_t1, base_uri=base_uri):
print(statement.subject)
repository.delete_triples_about(statement.subject, context=provider)
repository.import_from_string(state_t1, context=provider, fmt=infile_fmt)
assert repository.size() == 4
def test_provider_can_change_knowledge_about_a_previous_resource(repository):
"""This test tests the case where a provider wishes to change the knowledge
about a resource. They do this by making an update datadump with at least
one statement about that resource.
"""
# Setup
repository.clear()
provider = 'test'
infile_fmt = 'turtle'
base_uri = 'http://example.org/test/'
parser = RDF.TurtleParser()
state_t0 = """
@prefix void: <http://rdfs.org/ns/void#> .
@prefix rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#> .
@prefix test: <http://example.org/test/> .
test:A a test:Thing ;
test:someProperty 'some property' ;
test:anotherProperty 'just another thing' .
"""
state_t1 = """@prefix void: <http://rdfs.org/ns/void#> .
@prefix rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#> .
@prefix test: <http://example.org/test/> .
test:A a test:Thing ;
test:someProperty 'some other property' .
"""
# State t0
for statement in parser.parse_string_as_stream(state_t0, base_uri=base_uri):
repository.delete_triples_about(statement.subject, context=provider)
repository.import_from_string(state_t0, context=provider, fmt=infile_fmt)
assert repository.size() == 3
# State t1
for statement in parser.parse_string_as_stream(state_t1, base_uri=base_uri):
repository.delete_triples_about(statement.subject, context=provider)
assert repository.size() == 0
repository.import_from_string(state_t1, context=provider, fmt=infile_fmt)
assert repository.size() == 2
def test_can_handle_multiple_duplicate_updates(repository):
"""This tests the case where a provider's datadump is updated but contains
the same information as the datadump at a previous time. We'd assume the
result would be that all statements would be first removed then just added
again so the size would go from N to 0 back to N.
"""
# Setup
repository.clear()
provider = 'test'
infile_fmt = 'turtle'
base_uri = 'http://example.org/test/'
parser = RDF.TurtleParser()
state_t0 = """
@prefix void: <http://rdfs.org/ns/void#> .
@prefix rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#> .
@prefix test: <http://example.org/test/> .
test:A a test:Thing ;
test:someProperty 'some property' ;
test:anotherProperty 'just another thing' .
"""
state_t1 = """
@prefix void: <http://rdfs.org/ns/void#> .
@prefix rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#> .
@prefix test: <http://example.org/test/> .
test:A a test:Thing ;
test:someProperty 'some property' ;
test:anotherProperty 'just another thing' .
"""
# State t0
for statement in parser.parse_string_as_stream(state_t0, base_uri=base_uri):
repository.delete_triples_about(statement.subject, context=provider)
repository.import_from_string(state_t0, context=provider, fmt=infile_fmt)
assert repository.size() == 3
# State t1
for statement in parser.parse_string_as_stream(state_t1, base_uri=base_uri):
repository.delete_triples_about(statement.subject, context=provider)
assert repository.size() == 0
repository.import_from_string(state_t1, context=provider, fmt=infile_fmt)
assert repository.size() == 3
def test_can_handle_multiple_providers(repository):
"""This test tests the case where there are two registered providers. Each
provider should have triples in their respective named graph.
"""
# Setup
repository.clear()
infile_fmt = 'turtle'
base_uri = 'http://example.org/test/'
parser = RDF.TurtleParser()
state_t0 = """
@prefix void: <http://rdfs.org/ns/void#> .
@prefix rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#> .
@prefix test: <http://example.org/test/> .
test:A a test:Thing ;
test:someProperty 'some property' ;
test:anotherProperty 'just another thing' .
"""
state_t1 = """
@prefix void: <http://rdfs.org/ns/void#> .
@prefix rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#> .
@prefix test: <http://example.org/test/> .
test:A a test:Thing ;
test:someProperty 'some property' ;
test:anotherProperty 'just another thing' .
"""
# State t0
provider = 'providerA'
for statement in parser.parse_string_as_stream(state_t0, base_uri=base_uri):
repository.delete_triples_about(statement.subject, context=provider)
repository.import_from_string(state_t0, context=provider, fmt=infile_fmt)
assert repository.size() == 3
# State t1
provider = 'providerB'
for statement in parser.parse_string_as_stream(state_t1, base_uri=base_uri):
repository.delete_triples_about(statement.subject, context=provider)
assert repository.size() == 3
repository.import_from_string(state_t1, context=provider, fmt=infile_fmt)
assert repository.size() == 6
| apache-2.0 |
Duoxilian/home-assistant | homeassistant/helpers/event_decorators.py | 7 | 2746 | """Event Decorators for custom components."""
import functools
import logging
# pylint: disable=unused-import
from typing import Optional # NOQA
from homeassistant.core import HomeAssistant # NOQA
from homeassistant.helpers import event
HASS = None # type: Optional[HomeAssistant]
_LOGGER = logging.getLogger(__name__)
_MSG = 'Event decorators are deprecated. Support will be removed in 0.40.'
def track_state_change(entity_ids, from_state=None, to_state=None):
"""Decorator factory to track state changes for entity id."""
_LOGGER.warning(_MSG)
def track_state_change_decorator(action):
"""Decorator to track state changes."""
event.track_state_change(HASS, entity_ids,
functools.partial(action, HASS),
from_state, to_state)
return action
return track_state_change_decorator
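# Hypothetical usage in a custom component (the entity id and function name
# are illustrative only):
#
#     @track_state_change('light.kitchen', from_state='off', to_state='on')
#     def kitchen_light_turned_on(hass, entity_id, old_state, new_state):
#         """Run whenever light.kitchen switches from 'off' to 'on'."""
#
# Note that functools.partial prepends HASS, so the decorated action receives
# it as the first argument before the usual state-change callback arguments.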
def track_sunrise(offset=None):
"""Decorator factory to track sunrise events."""
_LOGGER.warning(_MSG)
def track_sunrise_decorator(action):
"""Decorator to track sunrise events."""
event.track_sunrise(HASS,
functools.partial(action, HASS),
offset)
return action
return track_sunrise_decorator
def track_sunset(offset=None):
"""Decorator factory to track sunset events."""
_LOGGER.warning(_MSG)
def track_sunset_decorator(action):
"""Decorator to track sunset events."""
event.track_sunset(HASS,
functools.partial(action, HASS),
offset)
return action
return track_sunset_decorator
def track_time_change(year=None, month=None, day=None, hour=None, minute=None,
second=None):
"""Decorator factory to track time changes."""
_LOGGER.warning(_MSG)
def track_time_change_decorator(action):
"""Decorator to track time changes."""
event.track_time_change(HASS,
functools.partial(action, HASS),
year, month, day, hour, minute, second)
return action
return track_time_change_decorator
def track_utc_time_change(year=None, month=None, day=None, hour=None,
minute=None, second=None):
"""Decorator factory to track time changes."""
_LOGGER.warning(_MSG)
def track_utc_time_change_decorator(action):
"""Decorator to track time changes."""
event.track_utc_time_change(HASS,
functools.partial(action, HASS),
year, month, day, hour, minute, second)
return action
return track_utc_time_change_decorator
| mit |
imply/chuu | build/android/pylib/constants.py | 23 | 3998 | # Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Defines a set of constants shared by test runners and other scripts."""
import os
import subprocess
import sys
DIR_SOURCE_ROOT = os.path.abspath(os.path.join(os.path.dirname(__file__),
os.pardir, os.pardir, os.pardir))
ISOLATE_DEPS_DIR = os.path.join(DIR_SOURCE_ROOT, 'isolate_deps_dir')
EMULATOR_SDK_ROOT = os.path.abspath(os.path.join(DIR_SOURCE_ROOT, os.pardir,
os.pardir))
CHROME_PACKAGE = 'com.google.android.apps.chrome'
CHROME_ACTIVITY = 'com.google.android.apps.chrome.Main'
CHROME_DEVTOOLS_SOCKET = 'chrome_devtools_remote'
CHROME_TESTS_PACKAGE = 'com.google.android.apps.chrome.tests'
LEGACY_BROWSER_PACKAGE = 'com.google.android.browser'
LEGACY_BROWSER_ACTIVITY = 'com.android.browser.BrowserActivity'
CONTENT_SHELL_PACKAGE = 'org.chromium.content_shell_apk'
CONTENT_SHELL_ACTIVITY = 'org.chromium.content_shell_apk.ContentShellActivity'
CHROME_SHELL_PACKAGE = 'org.chromium.chrome.browser.test'
CHROMIUM_TEST_SHELL_PACKAGE = 'org.chromium.chrome.testshell'
CHROMIUM_TEST_SHELL_ACTIVITY = (
'org.chromium.chrome.testshell.ChromiumTestShellActivity')
CHROMIUM_TEST_SHELL_DEVTOOLS_SOCKET = 'chromium_testshell_devtools_remote'
CHROMIUM_TEST_SHELL_HOST_DRIVEN_DIR = os.path.join(
DIR_SOURCE_ROOT, 'chrome', 'android')
GTEST_TEST_PACKAGE_NAME = 'org.chromium.native_test'
GTEST_TEST_ACTIVITY_NAME = 'org.chromium.native_test.ChromeNativeTestActivity'
GTEST_COMMAND_LINE_FILE = 'chrome-native-tests-command-line'
BROWSERTEST_TEST_PACKAGE_NAME = 'org.chromium.content_browsertests_apk'
BROWSERTEST_TEST_ACTIVITY_NAME = (
'org.chromium.content_browsertests_apk.ContentBrowserTestsActivity')
BROWSERTEST_COMMAND_LINE_FILE = 'content-browser-tests-command-line'
# Ports arrangement for various test servers used in Chrome for Android.
# Lighttpd server will attempt to use 9000 as default port, if unavailable it
# will find a free port from 8001 - 8999.
LIGHTTPD_DEFAULT_PORT = 9000
LIGHTTPD_RANDOM_PORT_FIRST = 8001
LIGHTTPD_RANDOM_PORT_LAST = 8999
TEST_SYNC_SERVER_PORT = 9031
# The net test server is started from port 10201.
# TODO(pliard): http://crbug.com/239014. Remove this dirty workaround once
# http://crbug.com/239014 is fixed properly.
TEST_SERVER_PORT_FIRST = 10201
TEST_SERVER_PORT_LAST = 30000
# A file to record next valid port of test server.
TEST_SERVER_PORT_FILE = '/tmp/test_server_port'
TEST_SERVER_PORT_LOCKFILE = '/tmp/test_server_port.lock'
TEST_EXECUTABLE_DIR = '/data/local/tmp'
# Directories for common java libraries for SDK build.
# These constants are defined in build/android/ant/common.xml
SDK_BUILD_JAVALIB_DIR = 'lib.java'
SDK_BUILD_TEST_JAVALIB_DIR = 'test.lib.java'
SDK_BUILD_APKS_DIR = 'apks'
# The directory on the device where perf test output gets saved to.
DEVICE_PERF_OUTPUT_DIR = '/data/data/' + CHROME_PACKAGE + '/files'
SCREENSHOTS_DIR = os.path.join(DIR_SOURCE_ROOT, 'out_screenshots')
ANDROID_SDK_VERSION = 18
ANDROID_SDK_ROOT = os.path.join(DIR_SOURCE_ROOT,
'third_party/android_tools/sdk')
ANDROID_NDK_ROOT = os.path.join(DIR_SOURCE_ROOT,
'third_party/android_tools/ndk')
UPSTREAM_FLAKINESS_SERVER = 'test-results.appspot.com'
def _GetADBPath():
if os.environ.get('ANDROID_SDK_ROOT'):
return 'adb'
# If envsetup.sh hasn't been sourced and there's no adb in the path,
# set it here.
try:
with file(os.devnull, 'w') as devnull:
subprocess.call(['adb', 'version'], stdout=devnull, stderr=devnull)
return 'adb'
except OSError:
print >> sys.stderr, 'No adb found in $PATH, fallback to checked in binary.'
return os.path.join(ANDROID_SDK_ROOT, 'platform-tools', 'adb')
ADB_PATH = _GetADBPath()
# Exit codes
ERROR_EXIT_CODE = 1
WARNING_EXIT_CODE = 88
| bsd-3-clause |
Laurawly/tvm-1 | python/tvm/te/hybrid/preprocessor.py | 4 | 4750 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Determines the declaration, r/w status, and last use of each variable"""
import ast
import sys
from .runtime import HYBRID_GLOBALS
from .utils import _internal_assert
class PyVariableUsage(ast.NodeVisitor):
"""The vistor class to determine the declaration, r/w status, and last use of each variable"""
# pylint: disable=invalid-name
# pylint: disable=missing-docstring
def __init__(self, args, symbols, closure_vars):
self.status = {}
self.scope_level = []
self._args = {}
self.args = args
self.aug_assign_ = False
self.symbols = symbols
self.closure_vars = closure_vars
def visit_FunctionDef(self, node):
self.scope_level.append(node)
_internal_assert(
len(node.args.args) == len(self.args),
"#arguments passed should be the same as #arguments defined",
)
for idx, arg in enumerate(node.args.args):
_attr = "id" if sys.version_info[0] < 3 else "arg" # To make py2 and 3 compatible
self._args[getattr(arg, _attr)] = self.args[idx]
for i in node.body:
self.visit(i)
def visit_For(self, node):
_internal_assert(isinstance(node.target, ast.Name), "For's iterator should be an id")
self.visit(node.iter)
self.scope_level.append(node)
for i in node.body:
self.visit(i)
self.scope_level.pop()
def visit_Call(self, node):
# No function pointer supported so far
_internal_assert(isinstance(node.func, ast.Name), "Function call should be an id")
func_id = node.func.id
_internal_assert(
func_id
in list(HYBRID_GLOBALS.keys())
+ ["range", "max", "min", "len"]
+ list(self.symbols.keys()),
"Function call id " + func_id + " not in intrinsics' list",
)
for elem in node.args:
self.visit(elem)
def visit_AugAssign(self, node):
self.aug_assign_ = True
self.generic_visit(node)
self.aug_assign_ = False
def visit_Name(self, node):
# If it is True or False, we do not worry about it!
if sys.version_info[0] == 2 and node.id in ["True", "False"]:
return
# If it is from the argument list or loop variable, we do not worry about it!
if node.id in self._args.keys():
return
fors = [loop.target.id for loop in self.scope_level if isinstance(loop, ast.For)]
if node.id in fors:
return
# The loop variable cannot be overwritten when iteration
_internal_assert(
not isinstance(node.ctx, ast.Store) or node.id not in fors,
"Iter var cannot be overwritten",
)
if node.id not in self.status.keys():
# It is a captured value in closure
if node.id in self.closure_vars:
try:
ast.literal_eval(str(self.closure_vars[node.id]))
except ValueError:
raise ValueError("Only support capturing constant values in closure")
return
_internal_assert(isinstance(node.ctx, ast.Store), "Undeclared variable %s" % node.id)
if self.aug_assign_:
raise ValueError('"First store" cannot be an AugAssign')
self.status[node.id] = (node, self.scope_level[-1], set())
else:
decl, loop, usage = self.status[node.id]
usage.add(type(node.ctx))
_internal_assert(
loop in self.scope_level, "%s is used out of the scope it is defined!" % node.id
)
self.status[node.id] = (decl, loop, usage)
def determine_variable_usage(root, args, symbols, closure_vars):
"""The helper function for calling the dedicated visitor."""
visitor = PyVariableUsage(args, symbols, closure_vars)
visitor.visit(root)
return visitor.status
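# Illustrative result (hypothetical hybrid-script body):
#
#     b = a + 1
#     c = b * 2
#
# determine_variable_usage maps each locally assigned name to a tuple of
# (first ast.Name node that stored it, enclosing scope node, set of context
# types seen after the declaration), e.g. status['b'] looks like
# (<ast.Name>, <scope>, {ast.Load}).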
| apache-2.0 |
Klafyvel/ISN_2015 | nom molecule.py | 1 | 5422 | # Nomenclature
# Copyright (C) 2015 BOUVIN Valentin, HONNORATY Vincent, LEVY-FALK Hugo
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# Created by honnoraty on 26/01/2015, in Python 3.2
from molecule import *
sourcechaine = dict()
sourcechaine["methan"] = 1,4
sourcechaine["ethan"] = 2,6
sourcechaine["propan"] = 3,8
sourcechaine["butan"] = 4,10
sourcechaine["pentan"] = 5,12
sourcechaine["hexan"] = 6,14
sourcechaine["heptan"] = 7,16
sourcechaine["octan"] = 8,18
sourcechaine["nonan"] = 9,20
sourcechaine["decan"] = 10,22
sourcechaine["undecan"] = 11,24
sourcechaine["dodecan"] = 12,26
sourcechaine["tridecan"] = 13,28
sourcechaine["methyl"] = 1,3
sourcechaine["ethyl"] = 2,5
sourcechaine["dimethyl"] = 2,5
sourcechaine["diethyl"] = 4,9
sourcechaine["trimethyl"] = 3,7
sourcechaine["triethyl"] = 6,12
chainegenerique = ["methan","ethan","propan","butan","hexan","octan","nonan","decan","undecan","dodecan","tridecan"]
branche = ["methyl","ethyl","dimethyl","diethyl","trimethyl","triethyl"]
nomentre = input("- entre les parties du nom")
nomentre = nomentre.split("-")
print(nomentre)
nbchainegen = 0
listbranche = [""]
ChAtome = [""]
positionbranche = [""]
nomMole = ""
for n, elt in enumerate(chainegenerique):
for i, elt in enumerate(nomentre):
if nomentre[i] == chainegenerique[n]:
nbchainegen = n
nbbranche = 0
n = 0
i = 0
lasti = 0
z = 0
y = 0
positionasign = 1
position = 0
hydroSurC = 0
decahydro = 0
decacarbo = 0
decachaine = 0
for n, elt in enumerate(branche):
for i, elt in enumerate(nomentre):
if nomentre[i] == branche[n]:
listbranche[nbbranche] = branche[n]
nbbranche += 1
listbranche.append(1)
while nomentre[i-positionasign].isdecimal():
positionbranche[position] = int(nomentre[i - positionasign])
positionbranche.append(1)
positionasign += 1
position += 1
positionasign = 0
(carb,hydro) = sourcechaine[chainegenerique[nbchainegen]]
for n in range(nbbranche):
carb, hydro = carb + sourcechaine[listbranche[n]][0], hydro + sourcechaine[listbranche[n]][1] - nbbranche
nomMole = "C" + str(carb) + "H" + str(hydro)
print(nomMole)
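# Worked example: for "2-methyl-butan", butan contributes (4 C, 10 H) and
# methyl contributes (1 C, 3 H); the loop above subtracts one hydrogen for
# the branch attachment, giving C5H12 -- the formula of 2-methylbutane.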
for n in range(position):
print(positionbranche[n])
for n in range(carb): # Build the atom lists
ChAtome.append(1)
ChAtome[n] = CARBONE()
for n in range(hydro):
ChAtome.append(1)
ChAtome[n + carb] = HYDROGENE()
carbChaineg = int(sourcechaine[chainegenerique[nbchainegen]][0])
for n in range(carbChaineg - 1): # Build the main chain
ChAtome[n].link(ChAtome[n + 1])
#decacarbo = carbChaineg
print("décalage:" ,carbChaineg)
lasti = 0
for n in range(nbbranche): # Attach the branches
ChAtome[positionbranche[n] - 1].link(ChAtome[carbChaineg + n])
for i in range(sourcechaine[listbranche[n]][1] + sourcechaine[listbranche[n]][0] - 1):
print("Posi hydro: ",carbChaineg + decachaine + decacarbo)
print("chaine",*ChAtome)
decacarbo += 1
ChAtome[carbChaineg + n + decachaine].link(ChAtome[carbChaineg + decachaine + decacarbo])
print(sourcechaine[listbranche[n]][1] + sourcechaine[listbranche[n]][0])
if ((lasti + 2 == i) and (decachaine == 0 ) or (lasti + 3 == i)):
decachaine += 1
lasti = i
if(i == 2):
decacarbo -= 1
if(i == 5 and (listbranche[n] == "trimethyl")):
decacarbo -= 1
#2-methyl-butan-e
#2-ethyl-butan-e
#2-trimethyl-butan
hydroChaineg = int(sourcechaine[chainegenerique[nbchainegen]][1])
print("yolo")
print(hydroChaineg)
print(len(ChAtome))
print(carbChaineg)
for n in range(carbChaineg):
hydroSurC = 4
for z in range(position):
try:
if(n == positionbranche[z]):
hydroSurC -= 1
except IndexError:
pass
if(((n == 0) or (n == carbChaineg - 1)) and (chainegenerique[nbchainegen] != "methan")):
hydroSurC -= 1
elif (chainegenerique[nbchainegen] != "methan"):
hydroSurC -= 2
print("Hydro sur Carb")
print(hydroSurC)
print(*ChAtome)
print("valeur de:",n)
for y in range(hydroSurC):
print("crab",n)
print(decacarbo)
print("hydro",decahydro + carbChaineg + decacarbo + decachaine)
ChAtome[n].link(ChAtome[n + decahydro + carbChaineg + decacarbo + decachaine])
print("liée")
print(*ChAtome)
decahydro += 1
decahydro -= 1
#molecule=Molecule(*ChAtome)
print(*ChAtome)
#print(molecule.add_atome)
#print(molecule)
#2-methyl-butan-e
| gpl-3.0 |
seann1/portfolio5 | .meteor/dev_bundle/python/Lib/lib-tk/Dialog.py | 187 | 1567 | # dialog.py -- Tkinter interface to the tk_dialog script.
from Tkinter import *
from Tkinter import _cnfmerge
if TkVersion <= 3.6:
DIALOG_ICON = 'warning'
else:
DIALOG_ICON = 'questhead'
class Dialog(Widget):
def __init__(self, master=None, cnf={}, **kw):
cnf = _cnfmerge((cnf, kw))
self.widgetName = '__dialog__'
Widget._setup(self, master, cnf)
self.num = self.tk.getint(
self.tk.call(
'tk_dialog', self._w,
cnf['title'], cnf['text'],
cnf['bitmap'], cnf['default'],
*cnf['strings']))
try: Widget.destroy(self)
except TclError: pass
def destroy(self): pass
def _test():
d = Dialog(None, {'title': 'File Modified',
'text':
'File "Python.h" has been modified'
' since the last time it was saved.'
' Do you want to save it before'
' exiting the application.',
'bitmap': DIALOG_ICON,
'default': 0,
'strings': ('Save File',
'Discard Changes',
'Return to Editor')})
print d.num
if __name__ == '__main__':
t = Button(None, {'text': 'Test',
'command': _test,
Pack: {}})
q = Button(None, {'text': 'Quit',
'command': t.quit,
Pack: {}})
t.mainloop()
| gpl-2.0 |
prathik/thrift | lib/py/setup.py | 46 | 3406 | #!/usr/bin/env python
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import sys
try:
from setuptools import setup, Extension
except ImportError:
from distutils.core import setup, Extension, Command
from distutils.command.build_ext import build_ext
from distutils.errors import CCompilerError, DistutilsExecError, DistutilsPlatformError
# Fix to build sdist under vagrant
import os
if 'vagrant' in str(os.environ):
del os.link
include_dirs = []
if sys.platform == 'win32':
include_dirs.append('compat/win32')
ext_errors = (CCompilerError, DistutilsExecError, DistutilsPlatformError, IOError)
else:
ext_errors = (CCompilerError, DistutilsExecError, DistutilsPlatformError)
class BuildFailed(Exception):
pass
class ve_build_ext(build_ext):
def run(self):
try:
build_ext.run(self)
except DistutilsPlatformError as x:
raise BuildFailed()
def build_extension(self, ext):
try:
build_ext.build_extension(self, ext)
except ext_errors as x:
raise BuildFailed()
def run_setup(with_binary):
if with_binary:
extensions = dict(
ext_modules = [
Extension('thrift.protocol.fastbinary',
sources = ['src/protocol/fastbinary.c'],
include_dirs = include_dirs,
)
],
cmdclass=dict(build_ext=ve_build_ext)
)
else:
extensions = dict()
setup(name = 'thrift',
version = '1.0.0-dev',
description = 'Python bindings for the Apache Thrift RPC system',
author = 'Thrift Developers',
author_email = '[email protected]',
url = 'http://thrift.apache.org',
license = 'Apache License 2.0',
packages = [
'thrift',
'thrift.protocol',
'thrift.transport',
'thrift.server',
],
package_dir = {'thrift' : 'src'},
classifiers = [
'Development Status :: 5 - Production/Stable',
'Environment :: Console',
'Intended Audience :: Developers',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Topic :: Software Development :: Libraries',
'Topic :: System :: Networking'
],
use_2to3 = True,
**extensions
)
try:
run_setup(True)
except BuildFailed:
print()
print('*' * 80)
print("An error occurred while trying to compile with the C extension enabled")
print("Attempting to build without the extension now")
print('*' * 80)
print()
run_setup(False)
| apache-2.0 |
jonashagstedt/swampdragon | chat_example/chat_example/settings.py | 13 | 2576 | import os
BASE_DIR = os.path.abspath(os.path.join(os.path.dirname(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.6/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '))%*y#of%4cnju5=$-1sab!ks(lq=60^rc3oyt-!69c19wl&r_'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'chat_example.chat',
'swampdragon',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'chat_example.urls'
WSGI_APPLICATION = 'chat_example.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.6/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': 'dev.sqlite3',
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.6/topics/i18n/
LANGUAGE_CODE = 'en-gb'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.6/howto/static-files/
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
MEDIA_URL = '/media/'
STATIC_ROOT = os.path.join(BASE_DIR, 'static_root')
STATIC_URL = '/static/'
# Additional locations of static files
STATICFILES_DIRS = [
os.path.join(BASE_DIR, 'static')
]
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
'compressor.finders.CompressorFinder',
)
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
)
TEMPLATE_DIRS = [
os.path.join(BASE_DIR, 'templates')
]
SWAMP_DRAGON_CONNECTION = ('chat_example.sockserver.DataConnection', '/data')
DRAGON_URL = 'http://localhost:9999/'
| bsd-3-clause |
Shao-Feng/crosswalk-test-suite | webapi/tct-csp-w3c-tests/csp-py/csp_object-src_cross-origin_multi_blocked_int-manual.py | 30 | 2479 | def main(request, response):
import simplejson as json
f = file('config.json')
source = f.read()
s = json.JSONDecoder().decode(source)
url1 = "http://" + s['host'] + ":" + str(s['ports']['http'][1])
url2 = "http://" + s['host'] + ":" + str(s['ports']['http'][0])
_CSP = "object-src " + url2 + " https://tizen.org"
response.headers.set("Content-Security-Policy", _CSP)
response.headers.set("X-Content-Security-Policy", _CSP)
response.headers.set("X-WebKit-CSP", _CSP)
return """<!DOCTYPE html>
<!--
Copyright (c) 2013 Intel Corporation.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
* Redistributions of works must retain the original copyright notice, this list
of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the original copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of Intel Corporation nor the names of its contributors
may be used to endorse or promote products derived from this work without
specific prior written permission.
THIS SOFTWARE IS PROVIDED BY INTEL CORPORATION "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL INTEL CORPORATION BE LIABLE FOR ANY DIRECT,
INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
Authors:
Hao, Yunfei <[email protected]>
-->
<html>
<head>
<title>CSP Test: csp_object-src_cross-origin_multi_blocked_int</title>
<link rel="author" title="Intel" href="http://www.intel.com"/>
<link rel="help" href="http://www.w3.org/TR/2012/CR-CSP-20121115/#object-src"/>
<meta name="flags" content=""/>
<meta charset="utf-8"/>
</head>
<body>
<p>Test passes if there is <strong>no red</strong>.</p>
<object data="support/red-100x100.png"/>
</body>
</html> """
| bsd-3-clause |
erudit/eruditorg | eruditorg/erudit/migrations/0089_thesisprovider.py | 1 | 1911 | # -*- coding: utf-8 -*-
# Generated by Django 1.10.8 on 2018-04-24 15:15
from __future__ import unicode_literals
from django.db import migrations, models
def populate_thesis_providers(apps, schema_editor):
ThesisProvider = apps.get_model("erudit", "ThesisProvider")
Collection = apps.get_model("erudit", "Collection")
Thesis = apps.get_model("erudit", "Thesis")
collection_ids = Thesis.objects.values_list("collection_id", flat=True)
collections = Collection.objects.filter(id__in=collection_ids)
for collection in collections.all():
tp = ThesisProvider.objects.create(
code=collection.code,
name=collection.name,
solr_name=collection.name,
logo=collection.logo,
)
class Migration(migrations.Migration):
dependencies = [
("erudit", "0088_remove_article_copyrights"),
]
operations = [
migrations.CreateModel(
name="ThesisProvider",
fields=[
(
"id",
models.AutoField(
auto_created=True, primary_key=True, serialize=False, verbose_name="ID"
),
),
("code", models.CharField(max_length=10, unique=True, verbose_name="Code")),
("name", models.CharField(max_length=200, verbose_name="Nom")),
(
"solr_name",
models.CharField(db_index=True, max_length=200, verbose_name="Nom dans Solr"),
),
("logo", models.ImageField(blank=True, verbose_name="Logo")),
],
options={
"verbose_name_plural": "Éditeurs de thèses",
"verbose_name": "Éditeur de thèses",
},
),
migrations.RunPython(populate_thesis_providers, reverse_code=migrations.RunPython.noop),
]
| gpl-3.0 |
117111302/PyGithub | github/tests/GitBlob.py | 39 | 2801 | # -*- coding: utf-8 -*-
# ########################## Copyrights and license ############################
# #
# Copyright 2012 Vincent Jacques <[email protected]> #
# Copyright 2012 Zearin <[email protected]> #
# Copyright 2013 Vincent Jacques <[email protected]> #
# #
# This file is part of PyGithub. http://jacquev6.github.com/PyGithub/ #
# #
# PyGithub is free software: you can redistribute it and/or modify it under #
# the terms of the GNU Lesser General Public License as published by the Free #
# Software Foundation, either version 3 of the License, or (at your option) #
# any later version. #
# #
# PyGithub is distributed in the hope that it will be useful, but WITHOUT ANY #
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS #
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more #
# details. #
# #
# You should have received a copy of the GNU Lesser General Public License #
# along with PyGithub. If not, see <http://www.gnu.org/licenses/>. #
# #
# ##############################################################################
import Framework
class GitBlob(Framework.TestCase):
def setUp(self):
Framework.TestCase.setUp(self)
self.blob = self.g.get_user().get_repo("PyGithub").get_git_blob("53bce9fa919b4544e67275089b3ec5b44be20667")
def testAttributes(self):
self.assertTrue(self.blob.content.startswith("IyEvdXNyL2Jpbi9lbnYgcHl0aG9uCgpmcm9tIGRpc3R1dGlscy5jb3JlIGlt\ncG9ydCBzZXR1cAppbXBvcnQgdGV4dHdyYXAKCnNldHVwKAogICAgbmFtZSA9\n"))
self.assertTrue(self.blob.content.endswith("Z3JhbW1pbmcgTGFuZ3VhZ2UgOjogUHl0aG9uIiwKICAgICAgICAiVG9waWMg\nOjogU29mdHdhcmUgRGV2ZWxvcG1lbnQiLAogICAgXSwKKQo=\n"))
self.assertEqual(len(self.blob.content), 1757)
self.assertEqual(self.blob.encoding, "base64")
self.assertEqual(self.blob.size, 1295)
self.assertEqual(self.blob.sha, "53bce9fa919b4544e67275089b3ec5b44be20667")
self.assertEqual(self.blob.url, "https://api.github.com/repos/jacquev6/PyGithub/git/blobs/53bce9fa919b4544e67275089b3ec5b44be20667")
| gpl-3.0 |
tbeadle/django | tests/forms_tests/widget_tests/test_checkboxselectmultiple.py | 13 | 4796 | from django.forms import CheckboxSelectMultiple
from .base import WidgetTest
class CheckboxSelectMultipleTest(WidgetTest):
widget = CheckboxSelectMultiple
def test_render_value(self):
self.check_html(self.widget(choices=self.beatles), 'beatles', ['J'], html=(
"""<ul>
<li><label><input checked="checked" type="checkbox" name="beatles" value="J" /> John</label></li>
<li><label><input type="checkbox" name="beatles" value="P" /> Paul</label></li>
<li><label><input type="checkbox" name="beatles" value="G" /> George</label></li>
<li><label><input type="checkbox" name="beatles" value="R" /> Ringo</label></li>
</ul>"""
))
def test_render_value_multiple(self):
self.check_html(self.widget(choices=self.beatles), 'beatles', ['J', 'P'], html=(
"""<ul>
<li><label><input checked="checked" type="checkbox" name="beatles" value="J" /> John</label></li>
<li><label><input checked="checked" type="checkbox" name="beatles" value="P" /> Paul</label></li>
<li><label><input type="checkbox" name="beatles" value="G" /> George</label></li>
<li><label><input type="checkbox" name="beatles" value="R" /> Ringo</label></li>
</ul>"""
))
def test_render_none(self):
"""
If the value is None, none of the options are selected.
"""
self.check_html(self.widget(choices=self.beatles), 'beatles', None, html=(
"""<ul>
<li><label><input type="checkbox" name="beatles" value="J" /> John</label></li>
<li><label><input type="checkbox" name="beatles" value="P" /> Paul</label></li>
<li><label><input type="checkbox" name="beatles" value="G" /> George</label></li>
<li><label><input type="checkbox" name="beatles" value="R" /> Ringo</label></li>
</ul>"""
))
def test_nested_choices(self):
nested_choices = (
('unknown', 'Unknown'),
('Audio', (('vinyl', 'Vinyl'), ('cd', 'CD'))),
('Video', (('vhs', 'VHS'), ('dvd', 'DVD'))),
)
html = """
<ul id="media">
<li>
<label for="media_0"><input id="media_0" name="nestchoice" type="checkbox" value="unknown" /> Unknown</label>
</li>
<li>Audio<ul id="media_1">
<li>
<label for="media_1_0">
<input checked="checked" id="media_1_0" name="nestchoice" type="checkbox" value="vinyl" /> Vinyl
</label>
</li>
<li>
<label for="media_1_1"><input id="media_1_1" name="nestchoice" type="checkbox" value="cd" /> CD</label>
</li>
</ul></li>
<li>Video<ul id="media_2">
<li>
<label for="media_2_0"><input id="media_2_0" name="nestchoice" type="checkbox" value="vhs" /> VHS</label>
</li>
<li>
<label for="media_2_1">
<input checked="checked" id="media_2_1" name="nestchoice" type="checkbox" value="dvd" /> DVD
</label>
</li>
</ul></li>
</ul>
"""
self.check_html(
self.widget(choices=nested_choices), 'nestchoice', ('vinyl', 'dvd'),
attrs={'id': 'media'}, html=html,
)
def test_separate_ids(self):
"""
Each input gets a separate ID.
"""
choices = [('a', 'A'), ('b', 'B'), ('c', 'C')]
html = """
<ul id="abc">
<li>
<label for="abc_0"><input checked="checked" type="checkbox" name="letters" value="a" id="abc_0" /> A</label>
</li>
<li><label for="abc_1"><input type="checkbox" name="letters" value="b" id="abc_1" /> B</label></li>
<li>
<label for="abc_2"><input checked="checked" type="checkbox" name="letters" value="c" id="abc_2" /> C</label>
</li>
</ul>
"""
self.check_html(self.widget(choices=choices), 'letters', ['a', 'c'], attrs={'id': 'abc'}, html=html)
def test_separate_ids_constructor(self):
"""
Each input gets a separate ID when the ID is passed to the constructor.
"""
widget = CheckboxSelectMultiple(attrs={'id': 'abc'}, choices=[('a', 'A'), ('b', 'B'), ('c', 'C')])
html = """
<ul id="abc">
<li>
<label for="abc_0"><input checked="checked" type="checkbox" name="letters" value="a" id="abc_0" /> A</label>
</li>
<li><label for="abc_1"><input type="checkbox" name="letters" value="b" id="abc_1" /> B</label></li>
<li>
<label for="abc_2"><input checked="checked" type="checkbox" name="letters" value="c" id="abc_2" /> C</label>
</li>
</ul>
"""
self.check_html(widget, 'letters', ['a', 'c'], html=html)
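    # Editor's note (sketch): the same <ul>/<li> markup is produced when the
    # widget is rendered through a form field, e.g.
    #   forms.MultipleChoiceField(choices=..., widget=CheckboxSelectMultiple)
    # rendered via str(form['letters']) (assumed from contemporary Django).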
| bsd-3-clause |
itsjeyd/edx-platform | openedx/core/djangoapps/api_admin/decorators.py | 27 | 1127 | """Decorators for API access management."""
from functools import wraps
from django.core.urlresolvers import reverse
from django.http import HttpResponseNotFound
from django.shortcuts import redirect
from openedx.core.djangoapps.api_admin.models import ApiAccessRequest, ApiAccessConfig
def api_access_enabled_or_404(view_func):
"""If API access management feature is not enabled, return a 404."""
@wraps(view_func)
def wrapped_view(view_obj, *args, **kwargs):
"""Wrapper for the view function."""
if ApiAccessConfig.current().enabled:
return view_func(view_obj, *args, **kwargs)
return HttpResponseNotFound()
return wrapped_view
def require_api_access(view_func):
"""If the requesting user does not have API access, bounce them to the request form."""
@wraps(view_func)
def wrapped_view(view_obj, *args, **kwargs):
"""Wrapper for the view function."""
if ApiAccessRequest.has_api_access(args[0].user):
return view_func(view_obj, *args, **kwargs)
return redirect(reverse('api_admin:api-request'))
return wrapped_view
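# Editor's usage sketch (hypothetical view, not from this module): both
# wrappers expect the request as the first positional argument after the view
# instance, which matches Django's class-based view dispatch:
#
#   class ApiTokenView(View):
#       @require_api_access
#       def get(self, request, *args, **kwargs):
#           ...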
| agpl-3.0 |
eduNEXT/edunext-platform | openedx/core/lib/gating/tests/test_api.py | 3 | 17438 | """
Tests for the gating API
"""
import unittest
import six
from completion.models import BlockCompletion
from ddt import data, ddt, unpack
from django.conf import settings
from milestones import api as milestones_api
from milestones.tests.utils import MilestonesTestCaseMixin
from mock import Mock, patch
from lms.djangoapps.gating import api as lms_gating_api
from lms.djangoapps.grades.constants import GradeOverrideFeatureEnum
from lms.djangoapps.grades.models import PersistentSubsectionGrade, PersistentSubsectionGradeOverride
from lms.djangoapps.grades.tests.base import GradeTestBase
from lms.djangoapps.grades.tests.utils import mock_get_score
from openedx.core.lib.gating import api as gating_api
from openedx.core.lib.gating.exceptions import GatingValidationError
from student.tests.factories import UserFactory
from xmodule.modulestore.tests.django_utils import TEST_DATA_SPLIT_MODULESTORE, ModuleStoreTestCase
from xmodule.modulestore.tests.factories import CourseFactory, ItemFactory
@ddt
class TestGatingApi(ModuleStoreTestCase, MilestonesTestCaseMixin):
"""
Tests for the gating API
"""
MODULESTORE = TEST_DATA_SPLIT_MODULESTORE
def setUp(self):
"""
Initial data setup
"""
super(TestGatingApi, self).setUp()
# create course
self.course = CourseFactory.create(
org='edX',
number='EDX101',
run='EDX101_RUN1',
display_name='edX 101'
)
self.course.enable_subsection_gating = True
self.course.save()
# create chapter
self.chapter1 = ItemFactory.create(
parent_location=self.course.location,
category='chapter',
display_name='untitled chapter 1'
)
# create sequentials
self.seq1 = ItemFactory.create(
parent_location=self.chapter1.location,
category='sequential',
display_name='untitled sequential 1'
)
self.seq2 = ItemFactory.create(
parent_location=self.chapter1.location,
category='sequential',
display_name='untitled sequential 2'
)
# create vertical
self.vertical = ItemFactory.create(
parent_location=self.seq1.location,
category='vertical',
display_name='untitled vertical 1'
)
self.generic_milestone = {
'name': 'Test generic milestone',
'namespace': six.text_type(self.seq1.location),
}
@patch('openedx.core.lib.gating.api.log.warning')
def test_get_prerequisite_milestone_returns_none(self, mock_log):
""" Test test_get_prerequisite_milestone_returns_none """
prereq = gating_api._get_prerequisite_milestone(self.seq1.location) # pylint: disable=protected-access
self.assertIsNone(prereq)
self.assertTrue(mock_log.called)
def test_get_prerequisite_milestone_returns_milestone(self):
""" Test test_get_prerequisite_milestone_returns_milestone """
gating_api.add_prerequisite(self.course.id, self.seq1.location)
prereq = gating_api._get_prerequisite_milestone(self.seq1.location) # pylint: disable=protected-access
self.assertIsNotNone(prereq)
@data('', '0', '50', '100')
def test_validate_min_score_is_valid(self, min_score):
""" Test test_validate_min_score_is_valid """
self.assertIsNone(gating_api._validate_min_score(min_score)) # pylint: disable=protected-access
@data('abc', '-10', '110')
def test_validate_min_score_raises(self, min_score):
""" Test test_validate_min_score_non_integer """
with self.assertRaises(GatingValidationError):
gating_api._validate_min_score(min_score) # pylint: disable=protected-access
def test_find_gating_milestones(self):
""" Test test_find_gating_milestones """
gating_api.add_prerequisite(self.course.id, self.seq1.location)
gating_api.set_required_content(self.course.id, self.seq2.location, self.seq1.location, 100)
milestone = milestones_api.add_milestone(self.generic_milestone)
milestones_api.add_course_content_milestone(self.course.id, self.seq1.location, 'fulfills', milestone)
self.assertEqual(len(gating_api.find_gating_milestones(self.course.id, self.seq1.location, 'fulfills')), 1)
self.assertEqual(len(gating_api.find_gating_milestones(self.course.id, self.seq1.location, 'requires')), 0)
self.assertEqual(len(gating_api.find_gating_milestones(self.course.id, self.seq2.location, 'fulfills')), 0)
self.assertEqual(len(gating_api.find_gating_milestones(self.course.id, self.seq2.location, 'requires')), 1)
def test_get_gating_milestone_not_none(self):
""" Test test_get_gating_milestone_not_none """
gating_api.add_prerequisite(self.course.id, self.seq1.location)
gating_api.set_required_content(self.course.id, self.seq2.location, self.seq1.location, 100)
self.assertIsNotNone(gating_api.get_gating_milestone(self.course.id, self.seq1.location, 'fulfills'))
self.assertIsNotNone(gating_api.get_gating_milestone(self.course.id, self.seq2.location, 'requires'))
def test_get_gating_milestone_is_none(self):
""" Test test_get_gating_milestone_is_none """
gating_api.add_prerequisite(self.course.id, self.seq1.location)
gating_api.set_required_content(self.course.id, self.seq2.location, self.seq1.location, 100)
self.assertIsNone(gating_api.get_gating_milestone(self.course.id, self.seq1.location, 'requires'))
self.assertIsNone(gating_api.get_gating_milestone(self.course.id, self.seq2.location, 'fulfills'))
def test_prerequisites(self):
""" Test test_prerequisites """
gating_api.add_prerequisite(self.course.id, self.seq1.location)
prereqs = gating_api.get_prerequisites(self.course.id)
self.assertEqual(len(prereqs), 1)
self.assertEqual(prereqs[0]['block_display_name'], self.seq1.display_name)
self.assertEqual(prereqs[0]['block_usage_key'], six.text_type(self.seq1.location))
self.assertTrue(gating_api.is_prerequisite(self.course.id, self.seq1.location))
gating_api.remove_prerequisite(self.seq1.location)
self.assertEqual(len(gating_api.get_prerequisites(self.course.id)), 0)
self.assertFalse(gating_api.is_prerequisite(self.course.id, self.seq1.location))
def test_required_content(self):
""" Test test_required_content """
gating_api.add_prerequisite(self.course.id, self.seq1.location)
gating_api.set_required_content(self.course.id, self.seq2.location, self.seq1.location, 100, 100)
prereq_content_key, min_score, min_completion = gating_api.get_required_content(
self.course.id, self.seq2.location
)
self.assertEqual(prereq_content_key, six.text_type(self.seq1.location))
self.assertEqual(min_score, 100)
self.assertEqual(min_completion, 100)
gating_api.set_required_content(self.course.id, self.seq2.location, None, None, None)
prereq_content_key, min_score, min_completion = gating_api.get_required_content(
self.course.id, self.seq2.location
)
self.assertIsNone(prereq_content_key)
self.assertIsNone(min_score)
self.assertIsNone(min_completion)
def test_get_gated_content(self):
"""
Verify staff bypasses gated content and student gets list of unfulfilled prerequisites.
"""
staff = UserFactory(is_staff=True)
student = UserFactory(is_staff=False)
self.assertEqual(gating_api.get_gated_content(self.course, staff), [])
self.assertEqual(gating_api.get_gated_content(self.course, student), [])
gating_api.add_prerequisite(self.course.id, self.seq1.location)
gating_api.set_required_content(self.course.id, self.seq2.location, self.seq1.location, 100)
milestone = milestones_api.get_course_content_milestones(self.course.id, self.seq2.location, 'requires')[0]
self.assertEqual(gating_api.get_gated_content(self.course, staff), [])
self.assertEqual(gating_api.get_gated_content(self.course, student), [six.text_type(self.seq2.location)])
milestones_api.add_user_milestone({'id': student.id}, milestone)
self.assertEqual(gating_api.get_gated_content(self.course, student), [])
@data(
(100, 0, 50, 0, False),
(100, 0, 100, 0, True),
(0, 100, 0, 50, False),
(0, 100, 0, 100, True),
(100, 100, 50, 100, False),
(100, 100, 100, 50, False),
(100, 100, 100, 100, True),
)
@unpack
def test_is_gate_fulfilled(self, min_score, min_completion, learner_score, learner_completion, is_gate_fulfilled):
"""
Test if prereq section has any unfulfilled milestones
"""
student = UserFactory(is_staff=False)
gating_api.add_prerequisite(self.course.id, self.seq1.location)
gating_api.set_required_content(
self.course.id, self.seq2.location, self.seq1.location, min_score, min_completion
)
milestone = milestones_api.add_milestone(self.generic_milestone)
milestones_api.add_course_content_milestone(self.course.id, self.seq1.location, 'fulfills', milestone)
self.assertFalse(gating_api.is_gate_fulfilled(self.course.id, self.seq1.location, student.id))
# complete the prerequisite to unlock the gated content
# this call triggers reevaluation of prerequisites fulfilled by the gating block.
with patch.object(gating_api, 'get_subsection_completion_percentage') as mock_grade:
mock_grade.return_value = learner_completion
lms_gating_api.evaluate_prerequisite(
self.course,
Mock(location=self.seq1.location, percent_graded=learner_score / 100.0),
student,
)
self.assertEqual(
gating_api.is_gate_fulfilled(self.course.id, self.seq1.location, student.id), is_gate_fulfilled
)
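    # Editor's note: ddt's @data/@unpack expand each tuple above into the
    # positional arguments (min_score, min_completion, learner_score,
    # learner_completion, is_gate_fulfilled), running the test once per row.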
@data(
(1, 1, 100),
(0, 0, 0),
(1, 0, 100),
(0, 1, 0),
)
@unpack
@unittest.skipUnless(settings.ROOT_URLCONF == 'lms.urls', 'Test only valid in lms')
def test_get_subsection_completion_percentage(self, user_problem_completion, user_html_completion,
expected_completion_percentage):
"""
        Test that gating_api.get_subsection_completion_percentage returns the expected completion percentage.
        Note:
            html blocks are ignored in the computation of completion_percentage, so they should not affect the result.
"""
student = UserFactory(is_staff=False)
problem_block = ItemFactory.create(
parent_location=self.vertical.location,
category='problem',
display_name='some problem'
)
html_block = ItemFactory.create(
parent_location=self.vertical.location,
category='html',
display_name='some html block'
)
with patch.object(BlockCompletion, 'get_learning_context_completions') as course_block_completions_mock:
course_block_completions_mock.return_value = {
problem_block.location: user_problem_completion,
html_block.location: user_html_completion,
}
completion_percentage = gating_api.get_subsection_completion_percentage(self.seq1.location, student)
self.assertEqual(completion_percentage, expected_completion_percentage)
@data(
('discussion', None, 100),
('html', None, 100),
('html', 1, 100),
('problem', 1, 100),
('problem', 0, 0),
('openassessment', 1, 100),
('openassessment', 0, 0),
)
@unpack
@unittest.skipUnless(settings.ROOT_URLCONF == 'lms.urls', 'Test only valid in lms')
def test_get_subsection_completion_percentage_single_component(
self,
component_type,
completed,
expected_completion_percentage
):
"""
        Test that gating_api.get_subsection_completion_percentage returns the expected completion percentage
        when a vertical/unit contains only a single component.
        Note:
            html and discussion blocks are ignored in the calculation, so such a subsection
            should always be reported 100% complete.
"""
student = UserFactory(is_staff=False)
component = ItemFactory.create(
parent_location=self.vertical.location,
category=component_type,
display_name=u'{} block'.format(component_type)
)
with patch.object(BlockCompletion, 'get_learning_context_completions') as course_block_completions_mock:
course_block_completions_mock.return_value = {
component.location: completed,
}
completion_percentage = gating_api.get_subsection_completion_percentage(self.seq1.location, student)
self.assertEqual(completion_percentage, expected_completion_percentage)
@unittest.skipUnless(settings.ROOT_URLCONF == 'lms.urls', 'Test only valid in lms')
def test_compute_is_prereq_met(self):
"""
Test if prereq has been met and force recompute
"""
student = UserFactory(is_staff=False)
gating_api.add_prerequisite(self.course.id, self.seq1.location)
gating_api.set_required_content(self.course.id, self.seq2.location, self.seq1.location, 100, 0)
# complete the prerequisite to unlock the gated content
# this call triggers reevaluation of prerequisites fulfilled by the gating block.
with patch.object(gating_api, 'get_subsection_grade_percentage') as mock_grade:
mock_grade.return_value = 75
# don't force recompute
prereq_met, prereq_meta_info = gating_api.compute_is_prereq_met(self.seq2.location, student.id, False)
self.assertFalse(prereq_met)
self.assertIsNone(prereq_meta_info['url'])
self.assertIsNone(prereq_meta_info['display_name'])
# force recompute
prereq_met, prereq_meta_info = gating_api.compute_is_prereq_met(self.seq2.location, student.id, True)
self.assertFalse(prereq_met)
self.assertIsNotNone(prereq_meta_info['url'])
self.assertIsNotNone(prereq_meta_info['display_name'])
# change to passing grade
mock_grade.return_value = 100
# don't force recompute
prereq_met, prereq_meta_info = gating_api.compute_is_prereq_met(self.seq2.location, student.id, False)
self.assertFalse(prereq_met)
self.assertIsNone(prereq_meta_info['url'])
self.assertIsNone(prereq_meta_info['display_name'])
# force recompute
prereq_met, prereq_meta_info = gating_api.compute_is_prereq_met(self.seq2.location, student.id, True)
self.assertTrue(prereq_met)
self.assertIsNotNone(prereq_meta_info['url'])
self.assertIsNotNone(prereq_meta_info['display_name'])
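    # Editor's note: the third argument to compute_is_prereq_met toggles
    # recomputation -- without it the cached milestone state is returned,
    # which is why the passing grade above is only seen after force recompute.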
class TestGatingGradesIntegration(GradeTestBase):
"""
Tests the integration between the gating API and our Persistent Grades framework.
"""
@unittest.skipUnless(settings.ROOT_URLCONF == 'lms.urls', 'Test only valid in lms')
def test_get_subsection_grade_percentage(self):
user = self.request.user
subsection_key = self.sequence.location
with mock_get_score(3, 3):
# this update() call creates a persistent grade
self.subsection_grade_factory.update(self.sequence)
# it's important that we stay in the mock_get_score() context here,
# since get_subsection_grade_percentage() creates its own SubsectionGradeFactory,
# which will in turn make calls to get_score().
grade_percentage = gating_api.get_subsection_grade_percentage(subsection_key, user)
assert 100.0 == grade_percentage
@unittest.skipUnless(settings.ROOT_URLCONF == 'lms.urls', 'Test only valid in lms')
def test_get_subsection_grade_percentage_with_override(self):
user = self.request.user
subsection_key = self.sequence.location
with mock_get_score(3, 3):
# this update() call creates a persistent grade
self.subsection_grade_factory.update(self.sequence)
# there should only be one persistent grade
persistent_grade = PersistentSubsectionGrade.objects.first()
PersistentSubsectionGradeOverride.update_or_create_override(
UserFactory(), # it doesn't matter to us who created the override
persistent_grade,
earned_graded_override=0,
earned_all_override=0,
possible_graded_override=3,
feature=GradeOverrideFeatureEnum.gradebook,
)
# it's important that we stay in the mock_get_score() context here,
# since get_subsection_grade_percentage() creates its own SubsectionGradeFactory,
# which will in turn make calls to get_score().
grade_percentage = gating_api.get_subsection_grade_percentage(subsection_key, user)
assert 0 == grade_percentage
| agpl-3.0 |
autosportlabs/podium-api | podium_api/types/venue.py | 1 | 2122 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
class PodiumVenue(object):
"""
Object that represents a Venue.
    **Attributes:**
        **venue_id** (int): Venue Id
        **uri** (string): URI for the Venue.
        **events_uri** (string): URI listing the Venue's events.
        **updated** (string): Last-update timestamp from the API.
        **created** (string): Creation timestamp from the API.
        **name** (string): The Venue's name.
        **centerpoint**, **country_code**, **configuration**, **track_map_array**,
        **start_finish**, **finish**, **sector_points**, **length**: optional
        venue details; None when absent from the API response.
"""
def __init__(self, venue_id, uri, events_uri, updated, created,
name,
centerpoint,
country_code,
configuration,
track_map_array,
start_finish,
finish,
sector_points,
length
):
self.venue_id = venue_id
self.uri = uri
self.events_uri = events_uri
self.updated = updated
self.created = created
self.name = name
self.centerpoint = centerpoint
self.country_code = country_code
self.configuration = configuration
self.track_map_array = track_map_array
self.start_finish = start_finish
self.finish = finish
self.sector_points = sector_points
self.length = length
def get_venue_from_json(json):
"""
Returns a PodiumVenue object from the json dict received from podium api.
Args:
json (dict): Dict of data from REST api
Return:
PodiumVenue: The PodiumVenue object for the data.
"""
return PodiumVenue(json['id'],
json['URI'],
json['events_uri'],
json['updated'],
json['created'],
json.get('name', None),
json.get('centerpoint', None),
json.get('country_code', None),
json.get('configuration', None),
json.get('track_map_array', None),
json.get('start_finish', None),
json.get('finish', None),
json.get('sector_points', None),
json.get('length', None)
)
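# Editor's usage sketch (hypothetical payload; keys follow the parser above,
# optional fields default to None):
def _example_venue():
    return get_venue_from_json({
        'id': 1,
        'URI': '/venues/1',
        'events_uri': '/venues/1/events',
        'updated': '2016-01-01T00:00:00Z',
        'created': '2016-01-01T00:00:00Z',
        'name': 'Example Raceway',
    })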
| mit |
vegetableman/phantomjs | src/qt/qtwebkit/Tools/Scripts/webkitpy/tool/steps/steps_unittest.py | 121 | 5674 | # Copyright (C) 2010 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import unittest2 as unittest
from webkitpy.common.system.outputcapture import OutputCapture
from webkitpy.common.config.ports import DeprecatedPort
from webkitpy.tool.mocktool import MockOptions, MockTool
from webkitpy.tool import steps
class StepsTest(unittest.TestCase):
def _step_options(self):
options = MockOptions()
options.non_interactive = True
options.port = 'MOCK port'
options.quiet = True
options.test = True
return options
def _run_step(self, step, tool=None, options=None, state=None):
if not tool:
tool = MockTool()
if not options:
options = self._step_options()
if not state:
state = {}
step(tool, options).run(state)
def test_update_step(self):
tool = MockTool()
options = self._step_options()
options.update = True
expected_logs = "Updating working directory\n"
OutputCapture().assert_outputs(self, self._run_step, [steps.Update, tool, options], expected_logs=expected_logs)
def test_prompt_for_bug_or_title_step(self):
tool = MockTool()
tool.user.prompt = lambda message: 50000
self._run_step(steps.PromptForBugOrTitle, tool=tool)
def _post_diff_options(self):
options = self._step_options()
options.git_commit = None
options.description = None
options.comment = None
options.review = True
options.request_commit = False
options.open_bug = True
return options
def _assert_step_output_with_bug(self, step, bug_id, expected_logs, options=None):
state = {'bug_id': bug_id}
OutputCapture().assert_outputs(self, self._run_step, [step, MockTool(), options, state], expected_logs=expected_logs)
def _assert_post_diff_output_for_bug(self, step, bug_id, expected_logs):
self._assert_step_output_with_bug(step, bug_id, expected_logs, self._post_diff_options())
def test_post_diff(self):
expected_logs = "MOCK add_patch_to_bug: bug_id=78, description=Patch, mark_for_review=True, mark_for_commit_queue=False, mark_for_landing=False\nMOCK: user.open_url: http://example.com/78\n"
self._assert_post_diff_output_for_bug(steps.PostDiff, 78, expected_logs)
def test_post_diff_for_commit(self):
expected_logs = "MOCK add_patch_to_bug: bug_id=78, description=Patch for landing, mark_for_review=False, mark_for_commit_queue=False, mark_for_landing=True\n"
self._assert_post_diff_output_for_bug(steps.PostDiffForCommit, 78, expected_logs)
def test_ensure_bug_is_open_and_assigned(self):
expected_logs = "MOCK reopen_bug 50004 with comment 'Reopening to attach new patch.'\n"
self._assert_step_output_with_bug(steps.EnsureBugIsOpenAndAssigned, 50004, expected_logs)
expected_logs = "MOCK reassign_bug: bug_id=50002, assignee=None\n"
self._assert_step_output_with_bug(steps.EnsureBugIsOpenAndAssigned, 50002, expected_logs)
def test_runtests_args(self):
mock_options = self._step_options()
mock_options.non_interactive = False
tool = MockTool(log_executive=True)
# FIXME: We shouldn't use a real port-object here, but there is too much to mock at the moment.
tool._deprecated_port = DeprecatedPort()
step = steps.RunTests(tool, mock_options)
expected_logs = """Running Python unit tests
MOCK run_and_throw_if_fail: ['Tools/Scripts/test-webkitpy'], cwd=/mock-checkout
Running Perl unit tests
MOCK run_and_throw_if_fail: ['Tools/Scripts/test-webkitperl'], cwd=/mock-checkout
Running JavaScriptCore tests
MOCK run_and_throw_if_fail: ['Tools/Scripts/run-javascriptcore-tests'], cwd=/mock-checkout
Running bindings generation tests
MOCK run_and_throw_if_fail: ['Tools/Scripts/run-bindings-tests'], cwd=/mock-checkout
Running run-webkit-tests
MOCK run_and_throw_if_fail: ['Tools/Scripts/run-webkit-tests', '--quiet'], cwd=/mock-checkout
"""
OutputCapture().assert_outputs(self, step.run, [{}], expected_logs=expected_logs)
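    # Editor's note: assert_outputs invokes the callable with the given
    # argument list and compares captured logging output against
    # expected_logs (signature assumed from the call sites in this file).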
| bsd-3-clause |
demon-ru/iml-crm | addons/report_webkit/company.py | 431 | 2562 | # -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (c) 2010 Camptocamp SA (http://www.camptocamp.com)
# All Right Reserved
#
# Author : Nicolas Bessi (Camptocamp)
#
# WARNING: This program as such is intended to be used by professional
# programmers who take the whole responsibility of assessing all potential
# consequences resulting from its possible inadequacies and bugs.
# End users who are looking for a ready-to-use solution with commercial
# guarantees and support are strongly advised to contract a Free Software
# Service Company
#
# This program is Free Software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
##############################################################################
from openerp.osv import fields, osv
class res_company(osv.osv):
"""Override company to add Header object link a company can have many header and logos"""
_inherit = "res.company"
_columns = {
'header_image' : fields.many2many(
'ir.header_img',
'company_img_rel',
'company_id',
'img_id',
'Available Images',
),
'header_webkit' : fields.many2many(
'ir.header_webkit',
'company_html_rel',
'company_id',
'html_id',
'Available html',
),
}
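    # Editor's note: the many2many fields above live in the company_img_rel /
    # company_html_rel relation tables, so a single company can reference
    # several header images and several webkit html headers.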
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
jkonecki/autorest | AutoRest/Generators/Python/Azure.Python.Tests/Expected/AcceptanceTests/StorageManagementClient/storagemanagementclient/models/custom_domain.py | 2 | 1213 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class CustomDomain(Model):
"""
The custom domain assigned to this storage account. This can be set via
Update.
:param name: Gets or sets the custom domain name. Name is the CNAME
source.
:type name: str
    :param use_sub_domain: Indicates whether indirect CNAME validation is
     enabled. Default value is false. This should only be set on updates.
:type use_sub_domain: bool
"""
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'use_sub_domain': {'key': 'useSubDomain', 'type': 'bool'},
}
def __init__(self, name=None, use_sub_domain=None, **kwargs):
self.name = name
self.use_sub_domain = use_sub_domain
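    # Editor's usage sketch: CustomDomain(name='www.contoso.com',
    # use_sub_domain=True) serializes to {'name': ..., 'useSubDomain': ...}
    # per the _attribute_map (msrest behavior, assumed).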
| mit |
pombreda/https-gitorious.org-appstream-software-center | softwarecenter/ui/gtk3/widgets/cellrenderers.py | 4 | 18064 | # Copyright (C) 2011 Canonical
#
# Authors:
# Matthew McGowan
# Michael Vogt
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation; version 3.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
from gi.repository import Gtk, Gdk, GObject, Pango
from softwarecenter.utils import utf8
from softwarecenter.ui.gtk3.em import EM
from softwarecenter.ui.gtk3.models.appstore2 import CategoryRowReference
from stars import StarRenderer, StarSize
class CellButtonIDs:
INFO = 0
ACTION = 1
# custom cell renderer to support dynamic grow
class CellRendererAppView(Gtk.CellRendererText):
# x, y offsets for the overlay icon
OVERLAY_XO = OVERLAY_YO = 2
# size of the install overlay icon
OVERLAY_SIZE = 16
# ratings
MAX_STARS = 5
STAR_SIZE = EM
# initialize declared properties (LP: #965937)
application = GObject.Property(
type=GObject.TYPE_PYOBJECT,
nick='document',
blurb='a xapian document containing pkg information',
flags=(GObject.PARAM_READWRITE | GObject.PARAM_CONSTRUCT),
default=None)
isactive = GObject.Property(
type=bool,
nick='isactive',
blurb='is cell active/selected',
flags=(GObject.PARAM_READWRITE | GObject.PARAM_CONSTRUCT),
default=False)
def __init__(self, icons, layout, show_ratings, overlay_icon_name):
Gtk.CellRendererText.__init__(self)
# the icon pixbuf to be displayed in the row
self.icon = None
# geometry-state values
self.pixbuf_width = 0
self.apptitle_width = 0
self.apptitle_height = 0
self.normal_height = 0
self.selected_height = 0
self.show_ratings = show_ratings
# button packing
self.button_spacing = 0
self._buttons = {
Gtk.PackType.START: [],
Gtk.PackType.END: []
}
self._all_buttons = {}
# cache a layout
self._layout = layout
# star painter, paints stars
self._stars = StarRenderer()
self._stars.size = StarSize.SMALL
# icon/overlay jazz
try:
self._installed = icons.load_icon(overlay_icon_name,
self.OVERLAY_SIZE, 0)
except GObject.GError:
# icon not present in theme, probably because running uninstalled
self._installed = icons.load_icon('emblem-system',
self.OVERLAY_SIZE, 0)
def _layout_get_pixel_width(self, layout):
return layout.get_size()[0] / Pango.SCALE
def _layout_get_pixel_height(self, layout):
return layout.get_size()[1] / Pango.SCALE
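    # (Pango layout sizes are returned in Pango units; dividing by
    # Pango.SCALE converts them to device pixels.)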
def _render_category(self,
context, cr, app, cell_area, layout, xpad, ypad, is_rtl):
layout.set_markup('<b>%s</b>' % app.display_name, -1)
# work out max allowable layout width
lw = self._layout_get_pixel_width(layout)
lh = self._layout_get_pixel_height(layout)
if not is_rtl:
x = cell_area.x
else:
x = cell_area.x + cell_area.width - lw
y = cell_area.y + (cell_area.height - lh) / 2
Gtk.render_layout(context, cr, x, y, layout)
def _render_price(self, context, cr, app, layout, cell_area, xpad, ypad,
is_rtl):
layout.set_markup("US$ %s" % self.model.get_price(app), -1)
if is_rtl:
x = cell_area.x + xpad
else:
x = (cell_area.x + cell_area.width - xpad -
self._layout_get_pixel_width(layout))
Gtk.render_layout(context, cr,
x, ypad + cell_area.y, layout)
def _render_icon(self, cr, app, cell_area, xpad, ypad, is_rtl):
# calc offsets so icon is nicely centered
self.icon = self.model.get_icon(app)
self.icon_x_offset = xpad + cell_area.x
self.icon_y_offset = ypad + cell_area.y
xo = (self.pixbuf_width - self.icon.get_width()) / 2
if not is_rtl:
x = cell_area.x + xo + xpad
else:
x = cell_area.x + cell_area.width + xo - self.pixbuf_width - xpad
y = cell_area.y + ypad
# draw appicon pixbuf
Gdk.cairo_set_source_pixbuf(cr, self.icon, x, y)
cr.paint()
# draw overlay if application is installed
if self.model.is_installed(app):
if not is_rtl:
x += (self.pixbuf_width - self.OVERLAY_SIZE + self.OVERLAY_XO)
else:
x -= self.OVERLAY_XO
y += (self.pixbuf_width - self.OVERLAY_SIZE + self.OVERLAY_YO)
Gdk.cairo_set_source_pixbuf(cr, self._installed, x, y)
cr.paint()
def _render_summary(self, context, cr, app,
cell_area, layout, xpad, ypad,
star_width, is_rtl):
layout.set_markup(self.model.get_markup(app), -1)
# work out max allowable layout width
layout.set_width(-1)
lw = self._layout_get_pixel_width(layout)
        max_layout_width = cell_area.width - self.pixbuf_width - 3 * xpad
stats = self.model.get_review_stats(app)
if self.show_ratings and stats:
max_layout_width -= star_width + 6 * xpad
if (self.props.isactive and
self.model.get_transaction_progress(app) > 0):
action_btn = self.get_button_by_name(CellButtonIDs.ACTION)
max_layout_width -= (xpad + action_btn.width)
if lw >= max_layout_width:
layout.set_width((max_layout_width) * Pango.SCALE)
layout.set_ellipsize(Pango.EllipsizeMode.END)
lw = max_layout_width
apptitle_extents = layout.get_line_readonly(0).get_pixel_extents()[1]
self.apptitle_width = apptitle_extents.width
self.apptitle_height = apptitle_extents.height
if not is_rtl:
x = cell_area.x + 2 * xpad + self.pixbuf_width
else:
x = (cell_area.x + cell_area.width - lw - self.pixbuf_width -
2 * xpad)
y = cell_area.y + ypad
Gtk.render_layout(context, cr, x, y, layout)
def _render_rating(self, context, cr, app,
cell_area, layout, xpad, ypad,
star_width, star_height, is_rtl):
stats = self.model.get_review_stats(app)
if not stats:
return
sr = self._stars
if not is_rtl:
x = (cell_area.x + 3 * xpad + self.pixbuf_width +
self.apptitle_width)
else:
x = (cell_area.x + cell_area.width
- 3 * xpad
- self.pixbuf_width
- self.apptitle_width
- star_width)
y = cell_area.y + ypad + (self.apptitle_height - self.STAR_SIZE) / 2
sr.rating = stats.ratings_average
sr.render_star(context, cr, x, y)
# and nr-reviews in parenthesis to the right of the title
nreviews = stats.ratings_total
s = "(%i)" % nreviews
layout.set_markup("<small>%s</small>" % s, -1)
if not is_rtl:
x += xpad + star_width
else:
x -= xpad + self._layout_get_pixel_width(layout)
context.save()
context.add_class("cellrenderer-avgrating-label")
Gtk.render_layout(context, cr, x, y, layout)
context.restore()
def _render_progress(self, context, cr, progress, cell_area, ypad, is_rtl):
percent = progress * 0.01
# per the spec, the progressbar should be the width of the action
# button
action_btn = self.get_button_by_name(CellButtonIDs.ACTION)
x, _, w, h = action_btn.allocation
# shift the bar to the top edge
y = cell_area.y + ypad
context.save()
context.add_class("trough")
Gtk.render_background(context, cr, x, y, w, h)
Gtk.render_frame(context, cr, x, y, w, h)
context.restore()
bar_size = w * percent
context.save()
context.add_class("progressbar")
if (bar_size > 0):
if is_rtl:
x += (w - bar_size)
Gtk.render_activity(context, cr, x, y, bar_size, h)
context.restore()
def _render_buttons(self, context, cr, cell_area, layout, xpad, ypad,
is_rtl):
# layout buttons and paint
y = cell_area.y + cell_area.height - ypad
spacing = self.button_spacing
if not is_rtl:
start = Gtk.PackType.START
end = Gtk.PackType.END
xs = cell_area.x + 2 * xpad + self.pixbuf_width
xb = cell_area.x + cell_area.width - xpad
else:
start = Gtk.PackType.END
end = Gtk.PackType.START
xs = cell_area.x + xpad
xb = cell_area.x + cell_area.width - 2 * xpad - self.pixbuf_width
for btn in self._buttons[start]:
btn.set_position(xs, y - btn.height)
btn.render(context, cr, layout)
xs += btn.width + spacing
for btn in self._buttons[end]:
xb -= btn.width
btn.set_position(xb, y - btn.height)
btn.render(context, cr, layout)
xb -= spacing
def set_pixbuf_width(self, w):
self.pixbuf_width = w
def set_button_spacing(self, spacing):
self.button_spacing = spacing
def get_button_by_name(self, name):
if name in self._all_buttons:
return self._all_buttons[name]
def get_buttons(self):
btns = ()
for k, v in self._buttons.items():
btns += tuple(v)
return btns
def button_pack(self, btn, pack_type=Gtk.PackType.START):
self._buttons[pack_type].append(btn)
self._all_buttons[btn.name] = btn
def button_pack_start(self, btn):
self.button_pack(btn, Gtk.PackType.START)
def button_pack_end(self, btn):
self.button_pack(btn, Gtk.PackType.END)
def do_set_property(self, pspec, value):
setattr(self, pspec.name, value)
def do_get_property(self, pspec):
return getattr(self, pspec.name)
def do_get_preferred_height_for_width(self, treeview, width):
if not self.get_properties("isactive")[0]:
return self.normal_height, self.normal_height
return self.selected_height, self.selected_height
def do_render(self, cr, widget, bg_area, cell_area, flags):
app = self.props.application
if not app:
return
self.model = widget.appmodel
context = widget.get_style_context()
xpad = self.get_property('xpad')
ypad = self.get_property('ypad')
star_width, star_height = self._stars.get_visible_size(context)
is_rtl = widget.get_direction() == Gtk.TextDirection.RTL
layout = self._layout
# important! ensures correct text rendering, esp. when using hicolor
# theme
#~ if (flags & Gtk.CellRendererState.SELECTED) != 0:
#~ # this follows the behaviour that gtk+ uses for states in
#~ # treeviews
#~ if widget.has_focus():
#~ state = Gtk.StateFlags.SELECTED
#~ else:
#~ state = Gtk.StateFlags.ACTIVE
#~ else:
#~ state = Gtk.StateFlags.NORMAL
context.save()
#~ context.set_state(state)
if isinstance(app, CategoryRowReference):
self._render_category(context, cr, app,
cell_area,
layout,
xpad, ypad,
is_rtl)
return
self._render_icon(cr, app,
cell_area,
xpad, ypad,
is_rtl)
self._render_summary(context, cr, app,
cell_area,
layout,
xpad, ypad,
star_width,
is_rtl)
# only show ratings if we have one
if self.show_ratings:
self._render_rating(context, cr, app,
cell_area,
layout,
xpad, ypad,
star_width,
star_height,
is_rtl)
progress = self.model.get_transaction_progress(app)
if progress > 0:
self._render_progress(context, cr, progress,
cell_area,
ypad,
is_rtl)
elif self.model.is_purchasable(app):
self._render_price(context, cr, app, layout,
cell_area, xpad, ypad, is_rtl)
# below is the stuff that is only done for the active cell
if not self.props.isactive:
return
self._render_buttons(context, cr,
cell_area,
layout,
xpad, ypad,
is_rtl)
context.restore()
class CellButtonRenderer(object):
def __init__(self, widget, name, use_max_variant_width=True):
        # use_max_variant_width is currently ignored; it is assumed to be True.
self.name = name
self.markup_variants = {}
self.current_variant = None
self.xpad = 12
self.ypad = 4
self.allocation = [0, 0, 1, 1]
self.state = Gtk.StateFlags.NORMAL
self.has_focus = False
self.visible = True
self.widget = widget
def _layout_reset(self, layout):
layout.set_width(-1)
layout.set_ellipsize(Pango.EllipsizeMode.NONE)
@property
def x(self):
return self.allocation[0]
@property
def y(self):
return self.allocation[1]
@property
def width(self):
return self.allocation[2]
@property
def height(self):
return self.allocation[3]
def configure_geometry(self, layout):
self._layout_reset(layout)
max_size = (0, 0)
for k, variant in self.markup_variants.items():
safe_markup = GObject.markup_escape_text(utf8(variant))
layout.set_markup(safe_markup, -1)
size = layout.get_size()
max_size = max(max_size, size)
w, h = max_size
w /= Pango.SCALE
h /= Pango.SCALE
self.set_size(w + 2 * self.xpad, h + 2 * self.ypad)
def point_in(self, px, py):
x, y, w, h = self.allocation
return (px >= x and px <= x + w and
py >= y and py <= y + h)
def get_size(self):
return self.allocation[2:]
def set_position(self, x, y):
self.allocation[:2] = int(x), int(y)
def set_size(self, w, h):
self.allocation[2:] = int(w), int(h)
def set_state(self, state):
if not isinstance(state, Gtk.StateFlags):
msg = ("state should be of type Gtk.StateFlags, got %s" %
type(state))
raise TypeError(msg)
elif state == self.state:
return
self.state = state
self.widget.queue_draw_area(*self.allocation)
def set_sensitive(self, is_sensitive):
if is_sensitive:
state = Gtk.StateFlags.PRELIGHT
else:
state = Gtk.StateFlags.INSENSITIVE
self.set_state(state)
def show(self):
self.visible = True
def hide(self):
self.visible = False
    def set_markup(self, markup):
        # store as a single-entry variants dict so render() can look it up
        self.set_markup_variants({0: markup})
def set_markup_variants(self, markup_variants):
if not isinstance(markup_variants, dict):
msg = type(markup_variants)
raise TypeError("Expects a dict object, got %s" % msg)
elif not markup_variants:
return
self.markup_variants = markup_variants
self.current_variant = markup_variants.keys()[0]
def set_variant(self, current_var):
self.current_variant = current_var
def is_sensitive(self):
return self.state is not Gtk.StateFlags.INSENSITIVE
def render(self, context, cr, layout):
if not self.visible:
return
x, y, width, height = self.allocation
context.save()
context.add_class("cellrenderer-button")
if self.has_focus:
context.set_state(self.state | Gtk.StateFlags.FOCUSED)
else:
context.set_state(self.state)
# render background and focal frame if has-focus
context.save()
context.add_class(Gtk.STYLE_CLASS_BUTTON)
Gtk.render_background(context, cr, x, y, width, height)
context.restore()
if self.has_focus:
Gtk.render_focus(context, cr,
x + 3, y + 3,
width - 6, height - 6)
# position and render layout markup
context.save()
context.add_class(Gtk.STYLE_CLASS_BUTTON)
layout.set_markup(self.markup_variants[self.current_variant], -1)
layout_width = layout.get_pixel_extents()[1].width
x = x + (width - layout_width) / 2
y += self.ypad
Gtk.render_layout(context, cr, x, y, layout)
context.restore()
context.restore()
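# Editor's usage sketch (hypothetical wiring; names assumed): the renderer is
# designed for a Gtk.TreeView column with per-row buttons, along the lines of
#   renderer = CellRendererAppView(icons, layout, True, "emblem-installed")
#   renderer.button_pack_end(CellButtonRenderer(view, CellButtonIDs.ACTION))
#   column = Gtk.TreeViewColumn("Applications", renderer, application=0)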
| gpl-3.0 |
upndwn4par/graviton_s4_kernel | tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/SchedGui.py | 12980 | 5411 | # SchedGui.py - Python extension for perf script, basic GUI code for
# traces drawing and overview.
#
# Copyright (C) 2010 by Frederic Weisbecker <[email protected]>
#
# This software is distributed under the terms of the GNU General
# Public License ("GPL") version 2 as published by the Free Software
# Foundation.
try:
import wx
except ImportError:
raise ImportError, "You need to install the wxpython lib for this script"
class RootFrame(wx.Frame):
Y_OFFSET = 100
RECT_HEIGHT = 100
RECT_SPACE = 50
EVENT_MARKING_WIDTH = 5
def __init__(self, sched_tracer, title, parent = None, id = -1):
wx.Frame.__init__(self, parent, id, title)
(self.screen_width, self.screen_height) = wx.GetDisplaySize()
self.screen_width -= 10
self.screen_height -= 10
self.zoom = 0.5
self.scroll_scale = 20
self.sched_tracer = sched_tracer
self.sched_tracer.set_root_win(self)
(self.ts_start, self.ts_end) = sched_tracer.interval()
self.update_width_virtual()
self.nr_rects = sched_tracer.nr_rectangles() + 1
self.height_virtual = RootFrame.Y_OFFSET + (self.nr_rects * (RootFrame.RECT_HEIGHT + RootFrame.RECT_SPACE))
# whole window panel
self.panel = wx.Panel(self, size=(self.screen_width, self.screen_height))
# scrollable container
self.scroll = wx.ScrolledWindow(self.panel)
self.scroll.SetScrollbars(self.scroll_scale, self.scroll_scale, self.width_virtual / self.scroll_scale, self.height_virtual / self.scroll_scale)
self.scroll.EnableScrolling(True, True)
self.scroll.SetFocus()
# scrollable drawing area
self.scroll_panel = wx.Panel(self.scroll, size=(self.screen_width - 15, self.screen_height / 2))
self.scroll_panel.Bind(wx.EVT_PAINT, self.on_paint)
self.scroll_panel.Bind(wx.EVT_KEY_DOWN, self.on_key_press)
self.scroll_panel.Bind(wx.EVT_LEFT_DOWN, self.on_mouse_down)
self.scroll.Bind(wx.EVT_PAINT, self.on_paint)
self.scroll.Bind(wx.EVT_KEY_DOWN, self.on_key_press)
self.scroll.Bind(wx.EVT_LEFT_DOWN, self.on_mouse_down)
self.scroll.Fit()
self.Fit()
self.scroll_panel.SetDimensions(-1, -1, self.width_virtual, self.height_virtual, wx.SIZE_USE_EXISTING)
self.txt = None
self.Show(True)
def us_to_px(self, val):
return val / (10 ** 3) * self.zoom
def px_to_us(self, val):
return (val / self.zoom) * (10 ** 3)
def scroll_start(self):
(x, y) = self.scroll.GetViewStart()
return (x * self.scroll_scale, y * self.scroll_scale)
def scroll_start_us(self):
(x, y) = self.scroll_start()
return self.px_to_us(x)
def paint_rectangle_zone(self, nr, color, top_color, start, end):
offset_px = self.us_to_px(start - self.ts_start)
width_px = self.us_to_px(end - self.ts_start)
offset_py = RootFrame.Y_OFFSET + (nr * (RootFrame.RECT_HEIGHT + RootFrame.RECT_SPACE))
width_py = RootFrame.RECT_HEIGHT
dc = self.dc
if top_color is not None:
(r, g, b) = top_color
top_color = wx.Colour(r, g, b)
brush = wx.Brush(top_color, wx.SOLID)
dc.SetBrush(brush)
dc.DrawRectangle(offset_px, offset_py, width_px, RootFrame.EVENT_MARKING_WIDTH)
width_py -= RootFrame.EVENT_MARKING_WIDTH
offset_py += RootFrame.EVENT_MARKING_WIDTH
(r ,g, b) = color
color = wx.Colour(r, g, b)
brush = wx.Brush(color, wx.SOLID)
dc.SetBrush(brush)
dc.DrawRectangle(offset_px, offset_py, width_px, width_py)
def update_rectangles(self, dc, start, end):
start += self.ts_start
end += self.ts_start
self.sched_tracer.fill_zone(start, end)
def on_paint(self, event):
dc = wx.PaintDC(self.scroll_panel)
self.dc = dc
width = min(self.width_virtual, self.screen_width)
(x, y) = self.scroll_start()
start = self.px_to_us(x)
end = self.px_to_us(x + width)
self.update_rectangles(dc, start, end)
def rect_from_ypixel(self, y):
y -= RootFrame.Y_OFFSET
rect = y / (RootFrame.RECT_HEIGHT + RootFrame.RECT_SPACE)
height = y % (RootFrame.RECT_HEIGHT + RootFrame.RECT_SPACE)
if rect < 0 or rect > self.nr_rects - 1 or height > RootFrame.RECT_HEIGHT:
return -1
return rect
def update_summary(self, txt):
if self.txt:
self.txt.Destroy()
self.txt = wx.StaticText(self.panel, -1, txt, (0, (self.screen_height / 2) + 50))
def on_mouse_down(self, event):
(x, y) = event.GetPositionTuple()
rect = self.rect_from_ypixel(y)
if rect == -1:
return
t = self.px_to_us(x) + self.ts_start
self.sched_tracer.mouse_down(rect, t)
def update_width_virtual(self):
self.width_virtual = self.us_to_px(self.ts_end - self.ts_start)
def __zoom(self, x):
self.update_width_virtual()
(xpos, ypos) = self.scroll.GetViewStart()
xpos = self.us_to_px(x) / self.scroll_scale
self.scroll.SetScrollbars(self.scroll_scale, self.scroll_scale, self.width_virtual / self.scroll_scale, self.height_virtual / self.scroll_scale, xpos, ypos)
self.Refresh()
def zoom_in(self):
x = self.scroll_start_us()
self.zoom *= 2
self.__zoom(x)
def zoom_out(self):
x = self.scroll_start_us()
self.zoom /= 2
self.__zoom(x)
def on_key_press(self, event):
key = event.GetRawKeyCode()
if key == ord("+"):
self.zoom_in()
return
if key == ord("-"):
self.zoom_out()
return
key = event.GetKeyCode()
(x, y) = self.scroll.GetViewStart()
if key == wx.WXK_RIGHT:
self.scroll.Scroll(x + 1, y)
elif key == wx.WXK_LEFT:
self.scroll.Scroll(x - 1, y)
elif key == wx.WXK_DOWN:
self.scroll.Scroll(x, y + 1)
elif key == wx.WXK_UP:
self.scroll.Scroll(x, y - 1)
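# Editor's note: "+" / "-" zoom in and out around the current scroll
# position (see __zoom); the arrow keys scroll one scroll_scale step.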
| gpl-2.0 |
tx137884746/IzayoiMiku | toughradius/tools/livecd.py | 4 | 4933 | #!/usr/bin/env python
#coding:utf-8
from toughradius.tools.secret import gen_secret
def echo_radiusd_cnf():
return '''[DEFAULT]
debug = 0
tz = CST-8
secret = %s
ssl = 1
privatekey = /var/toughradius/privkey.pem
certificate = /var/toughradius/cacert.pem
[database]
dbtype = mysql
dburl = mysql://radiusd:[email protected]/toughradius?charset=utf8
echo = false
pool_size = 120
pool_recycle = 300
[radiusd]
acctport = 1813
adminport = 1815
authport = 1812
cache_timeout = 600
logfile = /var/toughradius/log/radiusd.log
[admin]
port = 1816
logfile = /var/toughradius/log/admin.log
[customer]
port = 1817
logfile = /var/toughradius/log/customer.log
'''%gen_secret(32)
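# Editor's usage sketch (hypothetical path): each echo_* helper returns plain
# config text, e.g.
#   open('/etc/toughradius/radiusd.conf', 'w').write(echo_radiusd_cnf())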
def echo_privkey_pem():
return '''-----BEGIN RSA PRIVATE KEY-----
MIIBPAIBAAJBAK+a5EAeEZFJdpwmMdgexCvE/x5HpsSvkyx+CFt9MDI8Gx9sXTsQ
hn+Satm4bNKq9+0yarGL1MoVoXCmzMkv++0CAwEAAQJBAJel139XeCxTmM54XYsZ
5qc11Gs9zVMFnL9Lh8QadEisGBoLNVGRKspVuR21pf9yWK1APJYtxeY+ElxTeN6v
frECIQDlXCN0ZLF2IBOUbOAEBnBEzYA19cnpktaD1EyeD1bpOwIhAMQAY3R+suNO
JE1MvE/g6ICAQVCDeiSW0JBUHbpXT5z3AiBakZqygHyPD7WLm76N+Fjm4lspc6hK
oqAwqGmk1JvWNwIhAJicyNPLV1S/4mpB5pq3v7FWrASZ6wAUYh8PL/qIw1evAiEA
sS5pdElUCN0d7/EdoOPBmEAJL7RHs6SjYEihK5ds4TQ=
-----END RSA PRIVATE KEY-----'''
def echo_cacert_pem():
return '''-----BEGIN CERTIFICATE-----
MIIDTDCCAvagAwIBAgIJAMZsf8cd/CUeMA0GCSqGSIb3DQEBBQUAMIGiMQswCQYD
VQQGEwJDTjEOMAwGA1UECBMFSHVuYW4xETAPBgNVBAcTCENoYW5nc2hhMRgwFgYD
VQQKEw90b3VnaHJhZGl1cy5uZXQxFDASBgNVBAsTC3RvdWdocmFkaXVzMRgwFgYD
VQQDEw90b3VnaHJhZGl1cy5uZXQxJjAkBgkqhkiG9w0BCQEWF3N1cHBvcnRAdG91
Z2hyYWRpdXMubmV0MB4XDTE1MDMxODE2MTg1N1oXDTIwMTAyNTE2MTg1N1owgaIx
CzAJBgNVBAYTAkNOMQ4wDAYDVQQIEwVIdW5hbjERMA8GA1UEBxMIQ2hhbmdzaGEx
GDAWBgNVBAoTD3RvdWdocmFkaXVzLm5ldDEUMBIGA1UECxMLdG91Z2hyYWRpdXMx
GDAWBgNVBAMTD3RvdWdocmFkaXVzLm5ldDEmMCQGCSqGSIb3DQEJARYXc3VwcG9y
dEB0b3VnaHJhZGl1cy5uZXQwXDANBgkqhkiG9w0BAQEFAANLADBIAkEAr5rkQB4R
kUl2nCYx2B7EK8T/HkemxK+TLH4IW30wMjwbH2xdOxCGf5Jq2bhs0qr37TJqsYvU
yhWhcKbMyS/77QIDAQABo4IBCzCCAQcwHQYDVR0OBBYEFK9UjaxgsGyDZqfLEGUl
zYUhZqyzMIHXBgNVHSMEgc8wgcyAFK9UjaxgsGyDZqfLEGUlzYUhZqyzoYGopIGl
MIGiMQswCQYDVQQGEwJDTjEOMAwGA1UECBMFSHVuYW4xETAPBgNVBAcTCENoYW5n
c2hhMRgwFgYDVQQKEw90b3VnaHJhZGl1cy5uZXQxFDASBgNVBAsTC3RvdWdocmFk
aXVzMRgwFgYDVQQDEw90b3VnaHJhZGl1cy5uZXQxJjAkBgkqhkiG9w0BCQEWF3N1
cHBvcnRAdG91Z2hyYWRpdXMubmV0ggkAxmx/xx38JR4wDAYDVR0TBAUwAwEB/zAN
BgkqhkiG9w0BAQUFAANBAF2J27T8NnXptROTUx7IKU3MIBGvRqj6imtwjsus6fQU
GOLwDVfVEaqmv6YE6jg5ummEfeIcwUfkD5fLgrfRQ9s=
-----END CERTIFICATE-----'''
def echo_radiusd_script():
return '''#!/bin/sh
### BEGIN INIT INFO
# Provides: radiusd
# Required-Start: $all
# Required-Stop:
# Default-Start: 2 3 4 5
# Default-Stop: 0 1 6
# Short-Description: starts the radiusd daemon
# Description: starts toughradius using start-stop-daemon
### END INIT INFO
export PATH=$PATH:/usr/local/bin
set -e
set -u
usage ()
{
cat <<EOF
Usage: $0 [OPTIONS]
start start toughradius
stop stop toughradius
    restart restart toughradius
    upgrade update toughradius version and restart
All other options are passed to the toughradius program.
EOF
exit 1
}
start()
{
toughctl --start all
}
stop()
{
toughctl --stop all
}
restart()
{
toughctl --restart all
}
upgrade()
{
echo 'starting upgrade...'
pip install -U https://github.com/talkincode/ToughRADIUS/archive/stable.zip
echo 'upgrade done'
}
case "$1" in
help)
usage
;;
start)
start
;;
stop)
stop
;;
restart)
restart
;;
upgrade)
upgrade
;;
*)
usage
;;
esac
exit 0
'''
def echo_mysql_cnf():
return '''[client]
port = 3306
socket = /var/run/mysqld/mysqld.sock
[mysqld_safe]
socket = /var/run/mysqld/mysqld.sock
nice = 0
[mysqld]
user = mysql
pid-file = /var/run/mysqld/mysqld.pid
socket = /var/run/mysqld/mysqld.sock
port = 3306
basedir = /usr
datadir = /var/lib/mysql
tmpdir = /tmp
lc-messages-dir = /usr/share/mysql
skip-external-locking
bind-address = 127.0.0.1
key_buffer = 16M
max_allowed_packet = 16M
thread_stack = 192K
thread_cache_size = 8
myisam-recover = BACKUP
max_connections = 1000
table_cache = 512
#thread_concurrency = 8
#
# * Query Cache Configuration
#
query_cache_limit = 4M
query_cache_size = 64M
server-id = 1
log_bin = /var/log/mysql/mysql-bin.log
expire_logs_days = 10
max_binlog_size = 100M
#
# * InnoDB
#
innodb_buffer_pool_size = 256M
innodb_data_file_path = ibdata1:16M:autoextend
innodb_additional_mem_pool_size = 16M
innodb_thread_concurrency = 8
innodb_flush_log_at_trx_commit = 1
innodb_log_buffer_size = 8M
innodb_log_file_size = 128M
log-error=/var/log/mysqld.log
[mysqldump]
quick
quote-names
max_allowed_packet = 64M
[mysql]
#no-auto-rehash # faster start of mysql but no tab completion
[isamchk]
key_buffer = 16M
!includedir /etc/mysql/conf.d/
''' | agpl-3.0 |
shumik/skencil-c | Sketch/UI/gradientedit.py | 1 | 9796 | # Sketch - A Python-based interactive drawing program
# Copyright (C) 1998, 1999, 2000, 2002 by Bernhard Herzog
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Library General Public
# License as published by the Free Software Foundation; either
# version 2 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Library General Public License for more details.
#
# You should have received a copy of the GNU Library General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
import PIL.Image
import X, pax
from Sketch import _, Publisher, SketchError, _sketch
from Sketch import Blend, CreateRGBColor, MultiGradient
from Sketch.const import DROP_COLOR
from Sketch.warn import pdebug
from Sketch.Graphics import color
from Tkinter import Frame, Button
from Tkinter import BOTTOM, LEFT, RIGHT, BOTH
from tkext import PyWidget, MenuCommand, UpdatedMenu
import tkext
from colordlg import GetColor
from sketchdlg import SKModal
import skpixmaps
pixmaps = skpixmaps.PixmapTk
handle_height = 8
class GradientView(PyWidget, Publisher):
accept_drop = (DROP_COLOR,)
def __init__(self, master, width, height, gradient, **kw):
image = PIL.Image.new('RGB', (width, height))
self.orig_x = handle_height / 2
if not kw.has_key('width'):
kw["width"] = width + handle_height
if not kw.has_key('height'):
kw["height"] = height + handle_height
apply(PyWidget.__init__, (self, master), kw)
self.set_gradient(gradient)
self.update_pending = 0
self.dragging = 0
self.drag_idx = 0
self.drag_start = 0
self.drag_min = self.drag_max = 0.0
self.gc_initialized = 0
self.image = image
self.ximage = None
self.context_menu = None
self.bind('<ButtonPress-3>', self.PopupContextMenu)
self.bind('<ButtonPress>', self.ButtonPressEvent)
self.bind('<Motion>', self.PointerMotionEvent)
self.bind('<ButtonRelease>', self.ButtonReleaseEvent)
def __del__(self):
pdebug('__del__', '__del__', self)
def MapMethod(self):
if not self.gc_initialized:
self.init_gc()
self.tk.call(self._w, 'motionhints')
self.gc_initialized = 1
def DestroyMethod(self):
if self.context_menu is not None:
self.context_menu.clean_up()
self.context_menu = None
PyWidget.DestroyMethod(self)
def init_gc(self):
self.gc = self.tkwin.GetGC()
self.visual = color.skvisual
w = self.tkwin
width, height = self.image.size
depth = self.visual.depth
if depth > 16:
bpl = 4 * width
elif depth > 8:
bpl = ((2 * width + 3) / 4) * 4
elif depth == 8:
bpl = ((width + 3) / 4) * 4
else:
raise SketchError('unsupported depth for images')
self.ximage = w.CreateImage(depth, X.ZPixmap, 0, None, width, height,
32, bpl)
self.set_image(self.image)
def set_image(self, image):
self.image = image
if self.ximage:
ximage = self.ximage
_sketch.copy_image_to_ximage(self.visual, image.im, ximage,
0, 0, ximage.width, ximage.height)
self.UpdateWhenIdle()
def ResizedMethod(self, width, height):
pass
def set_gradient(self, gradient):
gradient = gradient.Colors()
self.gradient = []
for pos, color in gradient:
self.gradient.append((pos, tuple(color)))
def reverse(self):
for i in range(len(self.gradient)):
            self.gradient[i] = (1 - self.gradient[i][0], self.gradient[i][1])
self.gradient.reverse()
self.UpdateWhenIdle()
def x_to_idx(self, x):
width = self.ximage.width
w2 = handle_height / 2
orig_x = self.orig_x
for i in range(len(self.gradient)):
if abs(x - orig_x - self.gradient[i][0] * width) < w2:
return i
return -1
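    # (x_to_idx hit-tests the handle row: it returns the index of the stop
    # whose marker centre lies within half a handle height of event x,
    # or -1 when no handle is hit.)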
def ButtonPressEvent(self, event):
if not self.dragging:
self.drag_idx = self.x_to_idx(event.x)
if self.drag_idx < 0:
return
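            # Dragging an end handle first duplicates it, so the gradient
            # keeps a fixed stop at 0.0 (or 1.0) while the copy is moved.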
if self.drag_idx == 0:
self.gradient.insert(0, self.gradient[0])
self.drag_idx = self.drag_idx + 1
if self.drag_idx == len(self.gradient) - 1:
self.gradient.append(self.gradient[-1])
self.drag_start = event.x, self.gradient[self.drag_idx][0]
if self.drag_idx > 0:
self.drag_min = self.gradient[self.drag_idx - 1][0]
else:
self.drag_min = 0.0
if self.drag_idx < len(self.gradient) - 1:
self.drag_max = self.gradient[self.drag_idx + 1][0]
else:
self.drag_max = 1.0
self.dragging = self.dragging + 1
def ButtonReleaseEvent(self, event):
if self.dragging:
self.dragging = self.dragging - 1
self.move_to(event.x)
if self.drag_idx == 1 and \
self.gradient[0][0] == self.gradient[1][0]:
del self.gradient[0]
elif self.drag_idx == len(self.gradient) - 2 and \
self.gradient[-1][0] == self.gradient[-2][0]:
del self.gradient[-1]
def PointerMotionEvent(self, event):
if self.dragging:
x = self.tkwin.QueryPointer()[4]
self.move_to(x)
def move_to(self, x):
start_x, start_pos = self.drag_start
pos = x - start_x + start_pos * self.ximage.width
pos = float(pos) / self.ximage.width
if pos < self.drag_min:
pos = self.drag_min
if pos > self.drag_max:
pos = self.drag_max
color = self.gradient[self.drag_idx][-1]
self.gradient[self.drag_idx] = (pos, color)
self.UpdateWhenIdle()
def PopupContextMenu(self, event):
self.context_idx = self.x_to_idx(event.x)
self.context_pos = (event.x - self.orig_x) / float(self.ximage.width)
if self.context_menu is None:
items = [MenuCommand(_("Set Handle Color"), self.set_handle_color,
sensitivecb = self.can_set_handle_color),
MenuCommand(_("Delete Handle"), self.delete_handle,
sensitivecb = self.can_delete_handle),
MenuCommand(_("Insert Handle"), self.insert_handle,
sensitivecb = self.can_insert_handle)]
self.context_menu = UpdatedMenu(self, items)
self.context_menu.Popup(event.x_root, event.y_root)
def delete_handle(self):
if 0 < self.context_idx < len(self.gradient) - 1:
del self.gradient[self.context_idx]
self.UpdateWhenIdle()
def can_delete_handle(self):
return 0 < self.context_idx < len(self.gradient) - 1
def insert_handle(self):
gradient = self.gradient
pos = self.context_pos
if 0.0 <= pos <= 1.0:
for i in range(len(gradient) - 1):
if gradient[i][0] < pos < gradient[i + 1][0]:
p1, c1 = gradient[i]
p2, c2 = gradient[i + 1]
color = Blend(apply(CreateRGBColor, c2),
apply(CreateRGBColor, c1),
(pos - p1) / (p2 - p1))
gradient.insert(i + 1, (pos, tuple(color)))
self.UpdateWhenIdle()
break
def can_insert_handle(self):
return self.context_idx < 0 and 0.0 <= self.context_pos <= 1.0
def set_handle_color(self):
if self.context_idx >= 0:
pos, color = self.gradient[self.context_idx]
color = GetColor(self, apply(CreateRGBColor, color))
if color is not None:
self.gradient[self.context_idx] = (pos, tuple(color))
self.UpdateWhenIdle()
def can_set_handle_color(self):
return self.context_idx >= 0
def update_gradient(self):
_sketch.fill_axial_gradient(self.image.im, self.gradient,
0, 0, self.image.size[0] - 1, 0)
self.set_image(self.image)
def UpdateWhenIdle(self):
if not self.update_pending:
self.update_pending = 1
PyWidget.UpdateWhenIdle(self)
def RedrawMethod(self, region = None):
if self.update_pending:
self.update_gradient()
self.update_pending = 0
pixmap = self.tkwin.CreatePixmap()
width = self.ximage.width
height = self.ximage.height
startx = handle_height / 2
self.gc.SetDrawable(pixmap)
self.tkborder.Fill3DRectangle(pixmap, 0, 0,
self.tkwin.width, self.tkwin.height,
0, pax.TK_RELIEF_FLAT)
self.gc.PutImage(self.ximage, 0, 0, startx, 0, width, height)
border = self.tkborder
win = self.tkwin
w2 = handle_height / 2
bot = handle_height + height
for pos in self.gradient:
pos = pos[0]
x = int(pos * width) + startx
poly = [(x - w2, bot), (x, height), (x + w2, bot)]
border.Draw3DPolygon(pixmap, poly, -2, pax.TK_RELIEF_SUNKEN)
self.gc.SetDrawable(self.tkwin)
pixmap.CopyArea(self.tkwin, self.gc, 0, 0,
self.tkwin.width, self.tkwin.height, 0, 0)
def DropAt(self, x, y, what, data):
if what == DROP_COLOR:
idx = self.x_to_idx(x)
if idx >= 0:
pos, color = self.gradient[idx]
self.gradient[idx] = (pos, tuple(data))
self.UpdateWhenIdle()
def GetGradient(self):
result = []
for pos, color in self.gradient:
result.append((pos, apply(CreateRGBColor, color)))
return MultiGradient(result)
gradient_size = (200, 10)
class EditGradientDlg(SKModal):
title = _("Edit Gradient")
def __init__(self, master, gradient, **kw):
self.gradient = gradient
apply(SKModal.__init__, (self, master), kw)
def build_dlg(self):
top = self.top
frame = Frame(top)
frame.pack(side = BOTTOM, fill = BOTH, expand = 1)
button = Button(frame, text = _("Reverse"), command = self.reverse)
button.pack(side = LEFT, expand = 1)
button = Button(frame, text = _("OK"), command = self.ok)
button.pack(side = LEFT, expand = 1)
button = Button(frame, text = _("Cancel"), command = self.cancel)
button.pack(side = RIGHT, expand = 1)
view = GradientView(top, gradient_size[0], gradient_size[1],
self.gradient)
view.pack(side = LEFT)
self.gradient_view = view
def reverse(self, *args):
self.gradient_view.reverse()
def ok(self, *args):
self.close_dlg(self.gradient_view.GetGradient())
def EditGradient(master, gradient):
dlg = EditGradientDlg(master, gradient)
return dlg.RunDialog(grab = 0)
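# Hypothetical usage sketch (requires a live Sketch/Tk session; `master` and
# `style` are illustrative stand-ins for a Tk widget and a fill style that
# carries a MultiGradient):
#
#     gradient = EditGradient(master, style.Gradient())
#     if gradient is not None:        # RunDialog returns None when cancelled
#         style.SetGradient(gradient)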
| gpl-2.0 |
MrNuggles/HeyBoet-Telegram-Bot | temboo/Library/Utilities/DataConversions/XMLToXLS.py | 5 | 2894 | # -*- coding: utf-8 -*-
###############################################################################
#
# XMLToXLS
# Converts an XML file to a Base64 encoded Excel file.
#
# Python versions 2.6, 2.7, 3.x
#
# Copyright 2014, Temboo Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
#
#
###############################################################################
from temboo.core.choreography import Choreography
from temboo.core.choreography import InputSet
from temboo.core.choreography import ResultSet
from temboo.core.choreography import ChoreographyExecution
import json
class XMLToXLS(Choreography):
def __init__(self, temboo_session):
"""
Create a new instance of the XMLToXLS Choreo. A TembooSession object, containing a valid
set of Temboo credentials, must be supplied.
"""
super(XMLToXLS, self).__init__(temboo_session, '/Library/Utilities/DataConversions/XMLToXLS')
def new_input_set(self):
return XMLToXLSInputSet()
def _make_result_set(self, result, path):
return XMLToXLSResultSet(result, path)
def _make_execution(self, session, exec_id, path):
return XMLToXLSChoreographyExecution(session, exec_id, path)
class XMLToXLSInputSet(InputSet):
"""
An InputSet with methods appropriate for specifying the inputs to the XMLToXLS
Choreo. The InputSet object is used to specify input parameters when executing this Choreo.
"""
def set_XML(self, value):
"""
Set the value of the XML input for this Choreo. ((required, xml) The XML file you want to convert to XLS format. See documentation for information on the required XML schema.)
"""
super(XMLToXLSInputSet, self)._set_input('XML', value)
class XMLToXLSResultSet(ResultSet):
"""
A ResultSet with methods tailored to the values returned by the XMLToXLS Choreo.
The ResultSet object is used to retrieve the results of a Choreo execution.
"""
def getJSONFromString(self, str):
return json.loads(str)
def get_XLS(self):
"""
        Retrieve the value for the "XLS" output from this Choreo execution. (The Base64 encoded Excel data.)
"""
return self._output.get('XLS', None)
class XMLToXLSChoreographyExecution(ChoreographyExecution):
def _make_result_set(self, response, path):
return XMLToXLSResultSet(response, path)
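# Hypothetical usage sketch; a real TembooSession needs valid credentials, and
# the account/app-key strings below are placeholders (execute_with_results is
# assumed from the Temboo SDK's Choreography base class):
#
# from temboo.core.session import TembooSession
#
# session = TembooSession('ACCOUNT_NAME', 'APP_KEY_NAME', 'APP_KEY_VALUE')
# choreo = XMLToXLS(session)
# inputs = choreo.new_input_set()
# inputs.set_XML('<workbook>...</workbook>')
# results = choreo.execute_with_results(inputs)
# xls_base64 = results.get_XLS()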
| gpl-3.0 |
umitproject/openmonitor-aggregator | simplejson/tests/test_fail.py | 136 | 3555 | from unittest import TestCase
import simplejson as json
# Fri Dec 30 18:57:26 2005
JSONDOCS = [
# http://json.org/JSON_checker/test/fail1.json
'"A JSON payload should be an object or array, not a string."',
# http://json.org/JSON_checker/test/fail2.json
'["Unclosed array"',
# http://json.org/JSON_checker/test/fail3.json
'{unquoted_key: "keys must be quoted}',
# http://json.org/JSON_checker/test/fail4.json
'["extra comma",]',
# http://json.org/JSON_checker/test/fail5.json
'["double extra comma",,]',
# http://json.org/JSON_checker/test/fail6.json
'[ , "<-- missing value"]',
# http://json.org/JSON_checker/test/fail7.json
'["Comma after the close"],',
# http://json.org/JSON_checker/test/fail8.json
'["Extra close"]]',
# http://json.org/JSON_checker/test/fail9.json
'{"Extra comma": true,}',
# http://json.org/JSON_checker/test/fail10.json
'{"Extra value after close": true} "misplaced quoted value"',
# http://json.org/JSON_checker/test/fail11.json
'{"Illegal expression": 1 + 2}',
# http://json.org/JSON_checker/test/fail12.json
'{"Illegal invocation": alert()}',
# http://json.org/JSON_checker/test/fail13.json
'{"Numbers cannot have leading zeroes": 013}',
# http://json.org/JSON_checker/test/fail14.json
'{"Numbers cannot be hex": 0x14}',
# http://json.org/JSON_checker/test/fail15.json
'["Illegal backslash escape: \\x15"]',
# http://json.org/JSON_checker/test/fail16.json
'["Illegal backslash escape: \\\'"]',
# http://json.org/JSON_checker/test/fail17.json
'["Illegal backslash escape: \\017"]',
# http://json.org/JSON_checker/test/fail18.json
'[[[[[[[[[[[[[[[[[[[["Too deep"]]]]]]]]]]]]]]]]]]]]',
# http://json.org/JSON_checker/test/fail19.json
'{"Missing colon" null}',
# http://json.org/JSON_checker/test/fail20.json
'{"Double colon":: null}',
# http://json.org/JSON_checker/test/fail21.json
'{"Comma instead of colon", null}',
# http://json.org/JSON_checker/test/fail22.json
'["Colon instead of comma": false]',
# http://json.org/JSON_checker/test/fail23.json
'["Bad value", truth]',
# http://json.org/JSON_checker/test/fail24.json
"['single quote']",
# http://code.google.com/p/simplejson/issues/detail?id=3
u'["A\u001FZ control characters in string"]',
]
SKIPS = {
1: "why not have a string payload?",
18: "spec doesn't specify any nesting limitations",
}
class TestFail(TestCase):
def test_failures(self):
for idx, doc in enumerate(JSONDOCS):
idx = idx + 1
if idx in SKIPS:
json.loads(doc)
continue
try:
json.loads(doc)
except json.JSONDecodeError:
pass
else:
#self.fail("Expected failure for fail{0}.json: {1!r}".format(idx, doc))
self.fail("Expected failure for fail%d.json: %r" % (idx, doc))
def test_array_decoder_issue46(self):
# http://code.google.com/p/simplejson/issues/detail?id=46
for doc in [u'[,]', '[,]']:
try:
json.loads(doc)
except json.JSONDecodeError, e:
self.assertEquals(e.pos, 1)
self.assertEquals(e.lineno, 1)
self.assertEquals(e.colno, 1)
except Exception, e:
self.fail("Unexpected exception raised %r %s" % (e, e))
else:
self.fail("Unexpected success parsing '[,]'") | agpl-3.0 |
sadmansk/servo | components/script/dom/bindings/codegen/parser/tests/test_constructor.py | 23 | 9627 | import WebIDL
def WebIDLTest(parser, harness):
def checkArgument(argument, QName, name, type, optional, variadic):
harness.ok(isinstance(argument, WebIDL.IDLArgument),
"Should be an IDLArgument")
harness.check(argument.identifier.QName(), QName, "Argument has the right QName")
harness.check(argument.identifier.name, name, "Argument has the right name")
harness.check(str(argument.type), type, "Argument has the right return type")
harness.check(argument.optional, optional, "Argument has the right optional value")
harness.check(argument.variadic, variadic, "Argument has the right variadic value")
def checkMethod(method, QName, name, signatures,
static=True, getter=False, setter=False,
deleter=False, legacycaller=False, stringifier=False,
chromeOnly=False, htmlConstructor=False):
harness.ok(isinstance(method, WebIDL.IDLMethod),
"Should be an IDLMethod")
harness.ok(method.isMethod(), "Method is a method")
harness.ok(not method.isAttr(), "Method is not an attr")
harness.ok(not method.isConst(), "Method is not a const")
harness.check(method.identifier.QName(), QName, "Method has the right QName")
harness.check(method.identifier.name, name, "Method has the right name")
harness.check(method.isStatic(), static, "Method has the correct static value")
harness.check(method.isGetter(), getter, "Method has the correct getter value")
harness.check(method.isSetter(), setter, "Method has the correct setter value")
harness.check(method.isDeleter(), deleter, "Method has the correct deleter value")
harness.check(method.isLegacycaller(), legacycaller, "Method has the correct legacycaller value")
harness.check(method.isStringifier(), stringifier, "Method has the correct stringifier value")
harness.check(method.getExtendedAttribute("ChromeOnly") is not None, chromeOnly, "Method has the correct value for ChromeOnly")
harness.check(method.isHTMLConstructor(), htmlConstructor, "Method has the correct htmlConstructor value")
harness.check(len(method.signatures()), len(signatures), "Method has the correct number of signatures")
sigpairs = zip(method.signatures(), signatures)
for (gotSignature, expectedSignature) in sigpairs:
(gotRetType, gotArgs) = gotSignature
(expectedRetType, expectedArgs) = expectedSignature
harness.check(str(gotRetType), expectedRetType,
"Method has the expected return type.")
for i in range(0, len(gotArgs)):
(QName, name, type, optional, variadic) = expectedArgs[i]
checkArgument(gotArgs[i], QName, name, type, optional, variadic)
parser.parse("""
[Constructor]
interface TestConstructorNoArgs {
};
[Constructor(DOMString name)]
interface TestConstructorWithArgs {
};
[Constructor(object foo), Constructor(boolean bar)]
interface TestConstructorOverloads {
};
""")
results = parser.finish()
harness.check(len(results), 3, "Should be three productions")
harness.ok(isinstance(results[0], WebIDL.IDLInterface),
"Should be an IDLInterface")
harness.ok(isinstance(results[1], WebIDL.IDLInterface),
"Should be an IDLInterface")
harness.ok(isinstance(results[2], WebIDL.IDLInterface),
"Should be an IDLInterface")
checkMethod(results[0].ctor(), "::TestConstructorNoArgs::constructor",
"constructor", [("TestConstructorNoArgs (Wrapper)", [])])
checkMethod(results[1].ctor(), "::TestConstructorWithArgs::constructor",
"constructor",
[("TestConstructorWithArgs (Wrapper)",
[("::TestConstructorWithArgs::constructor::name", "name", "String", False, False)])])
checkMethod(results[2].ctor(), "::TestConstructorOverloads::constructor",
"constructor",
[("TestConstructorOverloads (Wrapper)",
[("::TestConstructorOverloads::constructor::foo", "foo", "Object", False, False)]),
("TestConstructorOverloads (Wrapper)",
[("::TestConstructorOverloads::constructor::bar", "bar", "Boolean", False, False)])])
parser = parser.reset()
parser.parse("""
[ChromeConstructor()]
interface TestChromeConstructor {
};
""")
results = parser.finish()
harness.check(len(results), 1, "Should be one production")
harness.ok(isinstance(results[0], WebIDL.IDLInterface),
"Should be an IDLInterface")
checkMethod(results[0].ctor(), "::TestChromeConstructor::constructor",
"constructor", [("TestChromeConstructor (Wrapper)", [])],
chromeOnly=True)
parser = parser.reset()
parser.parse("""
[HTMLConstructor]
interface TestHTMLConstructor {
};
""")
results = parser.finish()
harness.check(len(results), 1, "Should be one production")
harness.ok(isinstance(results[0], WebIDL.IDLInterface),
"Should be an IDLInterface")
checkMethod(results[0].ctor(), "::TestHTMLConstructor::constructor",
"constructor", [("TestHTMLConstructor (Wrapper)", [])],
htmlConstructor=True)
parser = parser.reset()
threw = False
try:
parser.parse("""
[Constructor(),
ChromeConstructor(DOMString a)]
interface TestChromeConstructor {
};
""")
results = parser.finish()
except:
threw = True
harness.ok(threw, "Can't have both a Constructor and a ChromeConstructor")
# Test HTMLConstructor with argument
parser = parser.reset()
threw = False
try:
parser.parse("""
[HTMLConstructor(DOMString a)]
interface TestHTMLConstructorWithArgs {
};
""")
results = parser.finish()
except:
threw = True
harness.ok(threw, "HTMLConstructor should take no argument")
# Test HTMLConstructor on a callback interface
parser = parser.reset()
threw = False
try:
parser.parse("""
[HTMLConstructor]
callback interface TestHTMLConstructorOnCallbackInterface {
};
""")
results = parser.finish()
except:
threw = True
harness.ok(threw, "HTMLConstructor can't be used on a callback interface")
# Test HTMLConstructor and Constructor
parser = parser.reset()
threw = False
try:
parser.parse("""
[Constructor,
HTMLConstructor]
interface TestHTMLConstructorAndConstructor {
};
""")
results = parser.finish()
except:
threw = True
harness.ok(threw, "Can't have both a Constructor and a HTMLConstructor")
parser = parser.reset()
threw = False
try:
parser.parse("""
[HTMLConstructor,
Constructor]
interface TestHTMLConstructorAndConstructor {
};
""")
results = parser.finish()
except:
threw = True
harness.ok(threw, "Can't have both a HTMLConstructor and a Constructor")
parser = parser.reset()
threw = False
try:
parser.parse("""
[HTMLConstructor,
Constructor(DOMString a)]
interface TestHTMLConstructorAndConstructor {
};
""")
except:
threw = True
harness.ok(threw, "Can't have both a HTMLConstructor and a Constructor")
parser = parser.reset()
threw = False
try:
parser.parse("""
[Constructor(DOMString a),
HTMLConstructor]
interface TestHTMLConstructorAndConstructor {
};
""")
except:
threw = True
harness.ok(threw, "Can't have both a HTMLConstructor and a Constructor")
# Test HTMLConstructor and ChromeConstructor
parser = parser.reset()
threw = False
try:
parser.parse("""
[ChromeConstructor,
HTMLConstructor]
interface TestHTMLConstructorAndChromeConstructor {
};
""")
results = parser.finish()
except:
threw = True
harness.ok(threw, "Can't have both a HTMLConstructor and a ChromeConstructor")
parser = parser.reset()
threw = False
try:
parser.parse("""
[HTMLConstructor,
ChromeConstructor]
interface TestHTMLConstructorAndChromeConstructor {
};
""")
results = parser.finish()
except:
threw = True
harness.ok(threw, "Can't have both a HTMLConstructor and a ChromeConstructor")
parser = parser.reset()
threw = False
try:
parser.parse("""
[ChromeConstructor(DOMString a),
HTMLConstructor]
interface TestHTMLConstructorAndChromeConstructor {
};
""")
        results = parser.finish()
    except:
        threw = True
    harness.ok(threw, "Can't have both a HTMLConstructor and a ChromeConstructor")
parser = parser.reset()
threw = False
try:
parser.parse("""
[HTMLConstructor,
ChromeConstructor(DOMString a)]
interface TestHTMLConstructorAndChromeConstructor {
};
""")
results = parser.finish()
except:
threw = True
harness.ok(threw, "Can't have both a HTMLConstructor and a ChromeConstructor")
| mpl-2.0 |
taktik/account-invoicing | account_invoice_shipping_address/tests/test_invoice_shipping_test.py | 30 | 2274 | # -*- coding: utf-8 -*-
##############################################################################
# This file is part of account_invoice_shipping_address, an Odoo module.
#
# Copyright (c) 2015 ACSONE SA/NV (<http://acsone.eu>)
#
# account_invoice_shipping_address is free software: you can redistribute it
# and/or modify it under the terms of the GNU Affero General Public License
# as published by the Free Software Foundation, either version 3 of
# the License, or (at your option) any later version.
#
# account_invoice_shipping_address is distributed in the hope that it will
# be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the
# GNU Affero General Public License
# along with account_invoice_shipping_address.
# If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import openerp.tests.common as common
class TestAccountInvoiceShippement(common.TransactionCase):
def setUp(self):
super(TestAccountInvoiceShippement, self).setUp()
self.inv_model = self.env['account.invoice']
self.stock_model = self.env['stock.picking']
self.partner_2 = self.ref('base.res_partner_2')
self.partner_address_3 = self.ref('base.res_partner_address_3')
self.shipment4 = self.ref('stock.incomming_shipment4')
self.account_journal = self.ref('account.check_journal')
def test_create_invoice_from_stock(self):
stock = self.stock_model.browse(self.shipment4)
stock.invoice_state = '2binvoiced'
stock.partner_id = self.partner_address_3
stock.move_lines[0].partner_id = self.partner_2
res = stock.action_invoice_create(journal_id=self.account_journal)
self.assertEqual(len(res), 1)
inv_id = res[0]
created_invoice = self.inv_model.browse(inv_id)
self.assertEqual(created_invoice.partner_id.id,
self.partner_address_3)
self.assertEqual(created_invoice.address_shipping_id.id,
self.partner_2)
| agpl-3.0 |
josiah-wolf-oberholtzer/supriya | tests/commands/test_commands_SynthNewRequest.py | 1 | 3544 | import pytest
import uqbar.strings
import supriya
def test_do_not_coerce_arguments():
synth = supriya.realtime.Synth()
group = supriya.realtime.Group()
assert synth.node_id is None
assert group.node_id is None
request = supriya.commands.SynthNewRequest(
node_id=synth, synthdef=synth.synthdef, target_node_id=group
)
assert request.node_id is synth
assert request.target_node_id is group
assert synth.node_id is None
assert group.node_id is None
with pytest.raises(TypeError):
request.to_osc()
def test_allocate_ids_before_remote_application(server):
"""
Local application allocates the synth's ID before we generate the OSC
message.
"""
synth = supriya.realtime.Synth()
group = supriya.realtime.Group().allocate()
assert synth.node_id is None
assert group.node_id == 1000
request = supriya.commands.SynthNewRequest(
node_id=synth, synthdef=synth.synthdef, target_node_id=group
)
assert request.node_id is synth
assert request.target_node_id is group
with server.osc_protocol.capture() as transcript:
request.communicate()
assert [(_.label, _.message) for _ in transcript] == [
("S", supriya.osc.OscMessage("/s_new", "default", 1001, 0, 1000)),
("R", supriya.osc.OscMessage("/n_go", 1001, 1000, -1, -1, 0)),
]
assert synth.node_id == 1001
assert synth.parent is group
assert synth.is_allocated
def test_no_preexisting_synth_object(server):
"""
Communicating without a pre-existing synth creates that synth during local
application.
"""
synthdef = supriya.assets.synthdefs.test.allocate()
group = supriya.realtime.Group().allocate()
request = supriya.commands.SynthNewRequest(
node_id=666, synthdef=synthdef, target_node_id=group
)
assert request.node_id == 666
with server.osc_protocol.capture() as transcript:
request.communicate()
assert [(_.label, _.message) for _ in transcript] == [
("S", supriya.osc.OscMessage("/s_new", "test", 666, 0, 1000)),
("R", supriya.osc.OscMessage("/n_go", 666, 1000, -1, -1, 0)),
]
synth = server[666]
assert synth.parent is group
assert synth.synthdef is synthdef
def test_bus_symbol_mapping(server):
synthdef = supriya.assets.synthdefs.test.allocate()
group = supriya.realtime.Group().allocate()
request = supriya.commands.SynthNewRequest(
node_id=666,
synthdef=synthdef,
target_node_id=group,
amplitude="c0",
frequency="a1",
)
with server.osc_protocol.capture() as transcript:
request.communicate()
assert [(_.label, _.message) for _ in transcript] == [
(
"S",
supriya.osc.OscMessage(
"/s_new", "test", 666, 0, 1000, "amplitude", "c0", "frequency", "a1"
),
),
("R", supriya.osc.OscMessage("/n_go", 666, 1000, -1, -1, 0)),
]
synth = server[666]
assert synth.parent is group
assert synth.synthdef is synthdef
assert str(synth.controls["amplitude"].value) == "c0"
assert str(synth.controls["frequency"].value) == "a1"
server_state = str(server.query_remote_nodes(True))
assert server_state == uqbar.strings.normalize(
"""
NODE TREE 0 group
1 group
1000 group
666 test
amplitude: c0, frequency: a1
"""
)
assert str(server.query_local_nodes(True)) == server_state
| mit |
zsoltdudas/lis-tempest | tempest/services/object_storage/object_client.py | 4 | 9809 | # Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import six
from six.moves import http_client as httplib
from six.moves.urllib import parse as urlparse
from tempest.lib.common import rest_client
class ObjectClient(rest_client.RestClient):
def create_object(self, container, object_name, data,
params=None, metadata=None, headers=None):
"""Create storage object."""
if headers is None:
headers = self.get_headers()
if not data:
headers['content-length'] = '0'
if metadata:
for key in metadata:
headers[str(key)] = metadata[key]
url = "%s/%s" % (str(container), str(object_name))
if params:
url += '?%s' % urlparse.urlencode(params)
resp, body = self.put(url, data, headers)
self.expected_success(201, resp.status)
return resp, body
def update_object(self, container, object_name, data):
"""Upload data to replace current storage object."""
resp, body = self.create_object(container, object_name, data)
self.expected_success(201, resp.status)
return resp, body
def delete_object(self, container, object_name, params=None):
"""Delete storage object."""
url = "%s/%s" % (str(container), str(object_name))
if params:
url += '?%s' % urlparse.urlencode(params)
resp, body = self.delete(url, headers={})
self.expected_success([200, 204], resp.status)
return resp, body
def update_object_metadata(self, container, object_name, metadata,
metadata_prefix='X-Object-Meta-'):
"""Add, remove, or change X-Object-Meta metadata for storage object."""
headers = {}
for key in metadata:
headers["%s%s" % (str(metadata_prefix), str(key))] = metadata[key]
url = "%s/%s" % (str(container), str(object_name))
resp, body = self.post(url, None, headers=headers)
self.expected_success(202, resp.status)
return resp, body
def list_object_metadata(self, container, object_name):
"""List all storage object X-Object-Meta- metadata."""
url = "%s/%s" % (str(container), str(object_name))
resp, body = self.head(url)
self.expected_success(200, resp.status)
return resp, body
def get_object(self, container, object_name, metadata=None):
"""Retrieve object's data."""
headers = {}
if metadata:
for key in metadata:
headers[str(key)] = metadata[key]
url = "{0}/{1}".format(container, object_name)
resp, body = self.get(url, headers=headers)
self.expected_success([200, 206], resp.status)
return resp, body
def copy_object_in_same_container(self, container, src_object_name,
dest_object_name, metadata=None):
"""Copy storage object's data to the new object using PUT."""
url = "{0}/{1}".format(container, dest_object_name)
headers = {}
headers['X-Copy-From'] = "%s/%s" % (str(container),
str(src_object_name))
headers['content-length'] = '0'
if metadata:
for key in metadata:
headers[str(key)] = metadata[key]
resp, body = self.put(url, None, headers=headers)
self.expected_success(201, resp.status)
return resp, body
def copy_object_across_containers(self, src_container, src_object_name,
dst_container, dst_object_name,
metadata=None):
"""Copy storage object's data to the new object using PUT."""
url = "{0}/{1}".format(dst_container, dst_object_name)
headers = {}
headers['X-Copy-From'] = "%s/%s" % (str(src_container),
str(src_object_name))
headers['content-length'] = '0'
if metadata:
for key in metadata:
headers[str(key)] = metadata[key]
resp, body = self.put(url, None, headers=headers)
self.expected_success(201, resp.status)
return resp, body
def copy_object_2d_way(self, container, src_object_name, dest_object_name,
metadata=None):
"""Copy storage object's data to the new object using COPY."""
url = "{0}/{1}".format(container, src_object_name)
headers = {}
headers['Destination'] = "%s/%s" % (str(container),
str(dest_object_name))
if metadata:
for key in metadata:
headers[str(key)] = metadata[key]
resp, body = self.copy(url, headers=headers)
self.expected_success(201, resp.status)
return resp, body
def create_object_segments(self, container, object_name, segment, data):
"""Creates object segments."""
url = "{0}/{1}/{2}".format(container, object_name, segment)
resp, body = self.put(url, data)
self.expected_success(201, resp.status)
return resp, body
def put_object_with_chunk(self, container, name, contents, chunk_size):
"""Put an object with Transfer-Encoding header"""
if self.base_url is None:
self._set_auth()
headers = {'Transfer-Encoding': 'chunked'}
if self.token:
headers['X-Auth-Token'] = self.token
conn = put_object_connection(self.base_url, container, name, contents,
chunk_size, headers)
resp = conn.getresponse()
body = resp.read()
resp_headers = {}
for header, value in resp.getheaders():
resp_headers[header.lower()] = value
self._error_checker('PUT', None, headers, contents, resp, body)
self.expected_success(201, resp.status)
return resp.status, resp.reason, resp_headers
def create_object_continue(self, container, object_name,
data, metadata=None):
"""Create storage object."""
headers = {}
if metadata:
for key in metadata:
headers[str(key)] = metadata[key]
if not data:
headers['content-length'] = '0'
if self.base_url is None:
self._set_auth()
headers['X-Auth-Token'] = self.token
conn = put_object_connection(self.base_url, str(container),
str(object_name), data, None, headers)
response = conn.response_class(conn.sock,
strict=conn.strict,
method=conn._method)
version, status, reason = response._read_status()
resp = {'version': version,
'status': str(status),
'reason': reason}
return resp
def put_object_connection(base_url, container, name, contents=None,
chunk_size=65536, headers=None, query_string=None):
"""Helper function to make connection to put object with httplib
:param base_url: base_url of an object client
:param container: container name that the object is in
:param name: object name to put
:param contents: a string or a file like object to read object data
from; if None, a zero-byte put will be done
:param chunk_size: chunk size of data to write; it defaults to 65536;
used only if the contents object has a 'read'
                       method, e.g. file-like objects; ignored otherwise
:param headers: additional headers to include in the request, if any
:param query_string: if set will be appended with '?' to generated path
"""
parsed = urlparse.urlparse(base_url)
if parsed.scheme == 'https':
conn = httplib.HTTPSConnection(parsed.netloc)
else:
conn = httplib.HTTPConnection(parsed.netloc)
path = str(parsed.path) + "/"
path += "%s/%s" % (str(container), str(name))
if query_string:
path += '?' + query_string
if headers:
headers = dict(headers)
else:
headers = {}
if hasattr(contents, 'read'):
conn.putrequest('PUT', path)
for header, value in six.iteritems(headers):
conn.putheader(header, value)
if 'Content-Length' not in headers:
if 'Transfer-Encoding' not in headers:
conn.putheader('Transfer-Encoding', 'chunked')
conn.endheaders()
chunk = contents.read(chunk_size)
while chunk:
conn.send('%x\r\n%s\r\n' % (len(chunk), chunk))
chunk = contents.read(chunk_size)
conn.send('0\r\n\r\n')
else:
conn.endheaders()
left = headers['Content-Length']
while left > 0:
size = chunk_size
if size > left:
size = left
chunk = contents.read(size)
conn.send(chunk)
left -= len(chunk)
else:
conn.request('PUT', path, contents, headers)
return conn
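# Hypothetical usage sketch for the helper above; the endpoint URL, container,
# object name and token are placeholders, and a reachable Swift proxy is
# assumed:
#
# conn = put_object_connection(
#     'http://swift.example.com:8080/v1/AUTH_test',
#     'my-container', 'my-object',
#     contents=open('/tmp/payload.bin', 'rb'),
#     chunk_size=65536,
#     headers={'X-Auth-Token': 'secret-token'})
# resp = conn.getresponse()
# assert resp.status == 201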
| apache-2.0 |
adamncasey/servo | tests/wpt/css-tests/tools/html5lib/html5lib/tests/test_parser.py | 451 | 3612 | from __future__ import absolute_import, division, unicode_literals
import os
import sys
import traceback
import warnings
import re
warnings.simplefilter("error")
from .support import get_data_files
from .support import TestData, convert, convertExpected, treeTypes
from html5lib import html5parser, constants
# Run the parse error checks
checkParseErrors = False
# XXX - There should just be one function here but for some reason the testcase
# format differs from the treedump format by a single space character
def convertTreeDump(data):
return "\n".join(convert(3)(data).split("\n")[1:])
namespaceExpected = re.compile(r"^(\s*)<(\S+)>", re.M).sub
def runParserTest(innerHTML, input, expected, errors, treeClass,
namespaceHTMLElements):
with warnings.catch_warnings(record=True) as caughtWarnings:
warnings.simplefilter("always")
p = html5parser.HTMLParser(tree=treeClass,
namespaceHTMLElements=namespaceHTMLElements)
try:
if innerHTML:
document = p.parseFragment(input, innerHTML)
else:
document = p.parse(input)
except:
errorMsg = "\n".join(["\n\nInput:", input, "\nExpected:", expected,
"\nTraceback:", traceback.format_exc()])
assert False, errorMsg
otherWarnings = [x for x in caughtWarnings
if not issubclass(x.category, constants.DataLossWarning)]
assert len(otherWarnings) == 0, [(x.category, x.message) for x in otherWarnings]
if len(caughtWarnings):
return
output = convertTreeDump(p.tree.testSerializer(document))
expected = convertExpected(expected)
if namespaceHTMLElements:
expected = namespaceExpected(r"\1<html \2>", expected)
errorMsg = "\n".join(["\n\nInput:", input, "\nExpected:", expected,
"\nReceived:", output])
assert expected == output, errorMsg
errStr = []
for (line, col), errorcode, datavars in p.errors:
assert isinstance(datavars, dict), "%s, %s" % (errorcode, repr(datavars))
errStr.append("Line: %i Col: %i %s" % (line, col,
constants.E[errorcode] % datavars))
errorMsg2 = "\n".join(["\n\nInput:", input,
"\nExpected errors (" + str(len(errors)) + "):\n" + "\n".join(errors),
"\nActual errors (" + str(len(p.errors)) + "):\n" + "\n".join(errStr)])
if checkParseErrors:
assert len(p.errors) == len(errors), errorMsg2
def test_parser():
sys.stderr.write('Testing tree builders ' + " ".join(list(treeTypes.keys())) + "\n")
files = get_data_files('tree-construction')
for filename in files:
testName = os.path.basename(filename).replace(".dat", "")
if testName in ("template",):
continue
tests = TestData(filename, "data")
for index, test in enumerate(tests):
input, errors, innerHTML, expected = [test[key] for key in
('data', 'errors',
'document-fragment',
'document')]
if errors:
errors = errors.split("\n")
for treeName, treeCls in treeTypes.items():
for namespaceHTMLElements in (True, False):
yield (runParserTest, innerHTML, input, expected, errors, treeCls,
namespaceHTMLElements)
| mpl-2.0 |
chrishokamp/fuel | fuel/transformers/image.py | 6 | 12409 | from __future__ import division
from io import BytesIO
import math
import numpy
from PIL import Image
from six import PY3
try:
from ._image import window_batch_bchw
window_batch_bchw_available = True
except ImportError:
window_batch_bchw_available = False
from . import ExpectsAxisLabels, SourcewiseTransformer
from .. import config
class ImagesFromBytes(SourcewiseTransformer):
"""Load from a stream of bytes objects representing encoded images.
Parameters
----------
data_stream : instance of :class:`AbstractDataStream`
The wrapped data stream. The individual examples returned by
this should be the bytes (in a `bytes` container on Python 3
or a `str` on Python 2) comprising an image in a format readable
by PIL, such as PNG, JPEG, etc.
color_mode : str, optional
Mode to pass to PIL for color space conversion. Default is RGB.
If `None`, no coercion is performed.
Notes
-----
Images are returned as NumPy arrays converted from PIL objects.
If there is more than one color channel, then the array is transposed
from the `(height, width, channel)` dimension layout native to PIL to
the `(channel, height, width)` layout that is pervasive in the world
of convolutional networks. If there is only one color channel, as for
monochrome or binary images, a leading axis with length 1 is added for
the sake of uniformity/predictability.
This SourcewiseTransformer supports streams returning single examples
as `bytes` objects (`str` on Python 2.x) as well as streams that
return iterables containing such objects. In the case of an
iterable, a list of loaded images is returned.
"""
def __init__(self, data_stream, color_mode='RGB', **kwargs):
kwargs.setdefault('produces_examples', data_stream.produces_examples)
# Acrobatics currently required to correctly set axis labels.
which_sources = kwargs.get('which_sources', data_stream.sources)
axis_labels = self._make_axis_labels(data_stream, which_sources,
kwargs['produces_examples'])
kwargs.setdefault('axis_labels', axis_labels)
super(ImagesFromBytes, self).__init__(data_stream, **kwargs)
self.color_mode = color_mode
def transform_source_example(self, example, source_name):
if PY3:
bytes_type = bytes
else:
bytes_type = str
if not isinstance(example, bytes_type):
raise TypeError("expected {} object".format(bytes_type.__name__))
pil_image = Image.open(BytesIO(example))
if self.color_mode is not None:
pil_image = pil_image.convert(self.color_mode)
image = numpy.array(pil_image)
if image.ndim == 3:
# Transpose to `(channels, height, width)` layout.
return image.transpose(2, 0, 1)
elif image.ndim == 2:
# Add a channels axis of length 1.
image = image[numpy.newaxis]
else:
raise ValueError('unexpected number of axes')
return image
def transform_source_batch(self, batch, source_name):
return [self.transform_source_example(im, source_name) for im in batch]
def _make_axis_labels(self, data_stream, which_sources, produces_examples):
# This is ugly and probably deserves a refactoring of how we handle
# axis labels. It would be simpler to use memoized read-only
# properties, but the AbstractDataStream constructor tries to set
# self.axis_labels currently. We can't use self.which_sources or
# self.produces_examples here, because this *computes* things that
# need to be passed into the superclass constructor, necessarily
# meaning that the superclass constructor hasn't been called.
# Cooperative inheritance is hard, etc.
labels = {}
for source in data_stream.sources:
if source in which_sources:
if produces_examples:
labels[source] = ('channel', 'height', 'width')
else:
labels[source] = ('batch', 'channel', 'height', 'width')
else:
labels[source] = (data_stream.axis_labels[source]
if source in data_stream.axis_labels
else None)
return labels
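# Minimal sketch of ImagesFromBytes in a pipeline (assumes Fuel's
# IterableDataset and DataStream are importable; the encode_png helper and
# source name are illustrative, used only to show the (channel, height, width)
# output layout):
#
# from fuel.datasets import IterableDataset
# from fuel.streams import DataStream
#
# def encode_png(array):  # illustrative helper: ndarray -> PNG bytes
#     buf = BytesIO()
#     Image.fromarray(array).save(buf, format='PNG')
#     return buf.getvalue()
#
# raw = [encode_png(numpy.zeros((32, 32, 3), dtype='uint8'))
#        for _ in range(4)]
# dataset = IterableDataset({'images': raw})
# stream = ImagesFromBytes(DataStream(dataset), which_sources=('images',))
# image, = next(stream.get_epoch_iterator())  # ndarray, shape (3, 32, 32)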
class MinimumImageDimensions(SourcewiseTransformer, ExpectsAxisLabels):
"""Resize (lists of) images to minimum dimensions.
Parameters
----------
data_stream : instance of :class:`AbstractDataStream`
The data stream to wrap.
minimum_shape : 2-tuple
The minimum `(height, width)` dimensions every image must have.
Images whose height and width are larger than these dimensions
are passed through as-is.
resample : str, optional
Resampling filter for PIL to use to upsample any images requiring
it. Options include 'nearest' (default), 'bilinear', and 'bicubic'.
See the PIL documentation for more detailed information.
Notes
-----
This transformer expects stream sources returning individual images,
represented as 2- or 3-dimensional arrays, or lists of the same.
The format of the stream is unaltered.
"""
def __init__(self, data_stream, minimum_shape, resample='nearest',
**kwargs):
self.minimum_shape = minimum_shape
try:
self.resample = getattr(Image, resample.upper())
except AttributeError:
raise ValueError("unknown resampling filter '{}'".format(resample))
kwargs.setdefault('produces_examples', data_stream.produces_examples)
kwargs.setdefault('axis_labels', data_stream.axis_labels)
super(MinimumImageDimensions, self).__init__(data_stream, **kwargs)
def transform_source_batch(self, batch, source_name):
self.verify_axis_labels(('batch', 'channel', 'height', 'width'),
self.data_stream.axis_labels[source_name],
source_name)
return [self._example_transform(im, source_name) for im in batch]
def transform_source_example(self, example, source_name):
self.verify_axis_labels(('channel', 'height', 'width'),
self.data_stream.axis_labels[source_name],
source_name)
return self._example_transform(example, source_name)
def _example_transform(self, example, _):
if example.ndim > 3 or example.ndim < 2:
raise NotImplementedError
min_height, min_width = self.minimum_shape
original_height, original_width = example.shape[-2:]
if original_height < min_height or original_width < min_width:
dt = example.dtype
# If we're dealing with a colour image, swap around the axes
# to be in the format that PIL needs.
if example.ndim == 3:
im = example.transpose(1, 2, 0)
else:
im = example
im = Image.fromarray(im)
width, height = im.size
multiplier = max(1, min_width / width, min_height / height)
width = int(math.ceil(width * multiplier))
height = int(math.ceil(height * multiplier))
im = numpy.array(im.resize((width, height))).astype(dt)
# If necessary, undo the axis swap from earlier.
if im.ndim == 3:
example = im.transpose(2, 0, 1)
else:
example = im
return example
class RandomFixedSizeCrop(SourcewiseTransformer, ExpectsAxisLabels):
"""Randomly crop images to a fixed window size.
Parameters
----------
data_stream : :class:`AbstractDataStream`
The data stream to wrap.
window_shape : tuple
The `(height, width)` tuple representing the size of the output
window.
Notes
-----
This transformer expects to act on stream sources which provide one of
* Single images represented as 3-dimensional ndarrays, with layout
`(channel, height, width)`.
* Batches of images represented as lists of 3-dimensional ndarrays,
possibly of different shapes (i.e. images of differing
heights/widths).
* Batches of images represented as 4-dimensional ndarrays, with
layout `(batch, channel, height, width)`.
The format of the stream will be un-altered, i.e. if lists are
yielded by `data_stream` then lists will be yielded by this
transformer.
"""
def __init__(self, data_stream, window_shape, **kwargs):
if not window_batch_bchw_available:
raise ImportError('window_batch_bchw not compiled')
self.window_shape = window_shape
self.rng = kwargs.pop('rng', None)
self.warned_axis_labels = False
if self.rng is None:
self.rng = numpy.random.RandomState(config.default_seed)
kwargs.setdefault('produces_examples', data_stream.produces_examples)
kwargs.setdefault('axis_labels', data_stream.axis_labels)
super(RandomFixedSizeCrop, self).__init__(data_stream, **kwargs)
def transform_source_batch(self, source, source_name):
self.verify_axis_labels(('batch', 'channel', 'height', 'width'),
self.data_stream.axis_labels[source_name],
source_name)
windowed_height, windowed_width = self.window_shape
if isinstance(source, list) and all(isinstance(b, numpy.ndarray) and
b.ndim == 3 for b in source):
return [self.transform_source_example(im, source_name)
for im in source]
elif isinstance(source, numpy.ndarray) and source.ndim == 4:
# Hardcoded assumption of (batch, channels, height, width).
# This is what the fast Cython code supports.
out = numpy.empty(source.shape[:2] + self.window_shape,
dtype=source.dtype)
batch_size = source.shape[0]
image_height, image_width = source.shape[2:]
max_h_off = image_height - windowed_height
max_w_off = image_width - windowed_width
if max_h_off < 0 or max_w_off < 0:
raise ValueError("Got ndarray batch with image dimensions {} "
"but requested window shape of {}".format(
source.shape[2:], self.window_shape))
offsets_w = self.rng.random_integers(0, max_w_off, size=batch_size)
offsets_h = self.rng.random_integers(0, max_h_off, size=batch_size)
window_batch_bchw(source, offsets_h, offsets_w, out)
return out
else:
raise ValueError("uninterpretable batch format; expected a list "
"of arrays with ndim = 3, or an array with "
"ndim = 4")
def transform_source_example(self, example, source_name):
self.verify_axis_labels(('channel', 'height', 'width'),
self.data_stream.axis_labels[source_name],
source_name)
windowed_height, windowed_width = self.window_shape
if not isinstance(example, numpy.ndarray) or example.ndim != 3:
raise ValueError("uninterpretable example format; expected "
"ndarray with ndim = 3")
image_height, image_width = example.shape[1:]
if image_height < windowed_height or image_width < windowed_width:
raise ValueError("can't obtain ({}, {}) window from image "
"dimensions ({}, {})".format(
windowed_height, windowed_width,
image_height, image_width))
if image_height - windowed_height > 0:
off_h = self.rng.random_integers(0, image_height - windowed_height)
else:
off_h = 0
if image_width - windowed_width > 0:
off_w = self.rng.random_integers(0, image_width - windowed_width)
else:
off_w = 0
return example[:, off_h:off_h + windowed_height,
off_w:off_w + windowed_width]
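# Hedged sketch chaining the two transformers above over a batch stream whose
# axis labels are ('batch', 'channel', 'height', 'width'); `stream` and the
# 'features' source name are illustrative:
#
# stream = MinimumImageDimensions(stream, (128, 128), resample='bilinear',
#                                 which_sources=('features',))
# stream = RandomFixedSizeCrop(stream, (96, 96), which_sources=('features',))
# for batch, in stream.get_epoch_iterator():
#     assert all(im.shape[-2:] == (96, 96) for im in batch)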
| mit |
jandom/rdkit | rdkit/Chem/FeatMaps/FeatMapParser.py | 12 | 5307 | # $Id$
#
# Copyright (C) 2006 Greg Landrum
#
# @@ All Rights Reserved @@
# This file is part of the RDKit.
# The contents are covered by the terms of the BSD license
# which is included in the file license.txt, found at the root
# of the RDKit source tree.
#
from rdkit import Geometry
from rdkit.Chem.FeatMaps import FeatMaps, FeatMapPoint
import re
"""
ScoreMode=All
DirScoreMode=Ignore
BeginParams
family=Aromatic radius=2.5 width=1.0 profile=Gaussian
family=Acceptor radius=1.5
EndParams
# optional
BeginPoints
family=Acceptor pos=(1.0, 0.0, 5.0) weight=1.25 dir=(1, 1, 0)
family=Aromatic pos=(0.0,1.0,0.0) weight=2.0 dir=(0,0,1) dir=(0,0,-1)
family=Acceptor pos=(1.0,1.0,2.0) weight=1.25
EndPoints
"""
class FeatMapParseError(ValueError):
pass
class FeatMapParser(object):
data = None
def __init__(self, file=None, data=None):
if file:
self.data = file.readlines()
elif data:
self.SetData(data)
self._lineNum = 0
def SetData(self, data):
if isinstance(data, str):
self.data = data.split('\n')
else:
self.data = data
self._lineNum = 0
def _NextLine(self):
txt = ''
while 1:
try:
l = self.data[self._lineNum].split('#')[0].strip()
except IndexError:
break
self._lineNum += 1
if l:
txt += l
if l[-1] != '\\':
break
return txt
def Parse(self, featMap=None):
if featMap is None:
featMap = FeatMaps.FeatMap()
l = self._NextLine().strip()
while l:
splitL = l.split('=')
if len(splitL) == 1:
keyword = splitL[0].strip().lower()
if keyword == 'beginpoints':
pts = self.ParseFeatPointBlock()
for pt in pts:
featMap.AddFeatPoint(pt)
elif keyword == 'beginparams':
featMap.params = self.ParseParamBlock()
else:
raise FeatMapParseError('Unrecognized keyword %s on line %d' % (keyword, self._lineNum))
else:
keyword = splitL[0].strip().lower()
val = splitL[1].strip()
if keyword == 'scoremode':
try:
featMap.scoreMode = getattr(FeatMaps.FeatMapScoreMode, val)
except AttributeError:
raise FeatMapParseError('ScoreMode %s not recognized on line %d' % (val, self._lineNum))
elif keyword == 'dirscoremode':
try:
featMap.dirScoreMode = getattr(FeatMaps.FeatDirScoreMode, val)
except AttributeError:
raise FeatMapParseError('DirScoreMode %s not recognized on line %d' %
(val, self._lineNum))
else:
raise FeatMapParseError('Unrecognized keyword %s on line %d' % (keyword, self._lineNum))
l = self._NextLine().strip()
return featMap
def ParseParamBlock(self):
paramLineSplitter = re.compile(r'([a-zA-Z]+) *= *(\S+)')
params = {}
l = self._NextLine()
while l and l != 'EndParams':
param = FeatMaps.FeatMapParams()
vals = paramLineSplitter.findall(l)
for name, val in vals:
name = name.lower()
if name == 'family':
family = val
elif name == 'radius':
param.radius = float(val)
elif name == 'width':
param.width = float(val)
elif name == 'profile':
try:
param.featProfile = getattr(param.FeatProfile, val)
except AttributeError:
raise FeatMapParseError('Profile %s not recognized on line %d' % (val, self._lineNum))
else:
raise FeatMapParseError('FeatMapParam option %s not recognized on line %d' %
(name, self._lineNum))
params[family] = param
l = self._NextLine()
if l != 'EndParams':
raise FeatMapParseError('EndParams line not found')
return params
def _parsePoint(self, txt):
txt = txt.strip()
startP = 0
endP = len(txt)
if txt[0] == '(':
startP += 1
if txt[-1] == ')':
endP -= 1
txt = txt[startP:endP]
splitL = txt.split(',')
if len(splitL) != 3:
raise ValueError('Bad location string')
vs = [float(x) for x in splitL]
pt = Geometry.Point3D(vs[0], vs[1], vs[2])
return pt
def ParseFeatPointBlock(self):
featLineSplitter = re.compile(r'([a-zA-Z]+) *= *')
feats = []
l = self._NextLine()
while l and l != 'EndPoints':
vals = featLineSplitter.split(l)
while vals.count(''):
vals.remove('')
p = FeatMapPoint.FeatMapPoint()
i = 0
while i < len(vals):
name = vals[i].lower()
if name == 'family':
i += 1
val = vals[i].strip()
p.SetFamily(val)
elif name == 'weight':
i += 1
val = float(vals[i])
p.weight = val
elif name == 'pos':
i += 1
val = vals[i]
pos = self._parsePoint(val)
p.SetPos(pos)
elif name == 'dir':
i += 1
val = vals[i]
pos = self._parsePoint(val)
p.featDirs.append(pos)
else:
raise FeatMapParseError('FeatPoint option %s not recognized on line %d' %
(name, self._lineNum))
i += 1
feats.append(p)
l = self._NextLine()
return feats
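# Hedged usage sketch: parsing a block in the format documented at the top of
# this module (the GetNumFeatures call assumes the FeatMaps.FeatMap API):
#
# data = """
# BeginParams
#   family=Acceptor radius=1.5
# EndParams
# BeginPoints
#   family=Acceptor pos=(1.0, 0.0, 5.0) weight=1.25
# EndPoints
# """
# fmap = FeatMapParser(data=data).Parse()
# print(fmap.GetNumFeatures())  # -> 1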
| bsd-3-clause |
xingyepei/edx-platform | common/lib/capa/capa/safe_exec/lazymod.py | 193 | 1200 | """A module proxy for delayed importing of modules.
From http://barnesc.blogspot.com/2006/06/automatic-python-imports-with-autoimp.html,
in the public domain.
"""
import sys
class LazyModule(object):
"""A lazy module proxy."""
def __init__(self, modname):
self.__dict__['__name__'] = modname
self._set_mod(None)
def _set_mod(self, mod):
if mod is not None:
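            # Alias the proxy's namespace to the real module's __dict__ so
            # subsequent attribute lookups resolve on the module directly.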
self.__dict__ = mod.__dict__
self.__dict__['_lazymod_mod'] = mod
def _load_mod(self):
__import__(self.__name__)
self._set_mod(sys.modules[self.__name__])
def __getattr__(self, name):
if self.__dict__['_lazymod_mod'] is None:
self._load_mod()
mod = self.__dict__['_lazymod_mod']
if hasattr(mod, name):
return getattr(mod, name)
else:
try:
subname = '%s.%s' % (self.__name__, name)
__import__(subname)
submod = getattr(mod, name)
except ImportError:
raise AttributeError("'module' object has no attribute %r" % name)
self.__dict__[name] = LazyModule(subname)
return self.__dict__[name]
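# Minimal runnable sketch: the proxy defers the real import until the first
# attribute access.
if __name__ == '__main__':
    os = LazyModule('os')          # nothing imported yet
    print(os.path.join('a', 'b'))  # first access triggers __import__('os')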
| agpl-3.0 |
nycholas/ask-undrgz | src/ask-undrgz/django/contrib/gis/tests/geoapp/feeds.py | 326 | 1856 | from django.contrib.gis import feeds
from django.contrib.gis.tests.utils import mysql
from models import City, Country
class TestGeoRSS1(feeds.Feed):
link = '/city/'
title = 'Test GeoDjango Cities'
def items(self):
return City.objects.all()
def item_link(self, item):
return '/city/%s/' % item.pk
def item_geometry(self, item):
return item.point
class TestGeoRSS2(TestGeoRSS1):
def geometry(self, obj):
# This should attach a <georss:box> element for the extent of
# of the cities in the database. This tuple came from
# calling `City.objects.extent()` -- we can't do that call here
# because `extent` is not implemented for MySQL/Oracle.
return (-123.30, -41.32, 174.78, 48.46)
def item_geometry(self, item):
# Returning a simple tuple for the geometry.
return item.point.x, item.point.y
class TestGeoAtom1(TestGeoRSS1):
feed_type = feeds.GeoAtom1Feed
class TestGeoAtom2(TestGeoRSS2):
feed_type = feeds.GeoAtom1Feed
def geometry(self, obj):
# This time we'll use a 2-tuple of coordinates for the box.
return ((-123.30, -41.32), (174.78, 48.46))
class TestW3CGeo1(TestGeoRSS1):
feed_type = feeds.W3CGeoFeed
# The following feeds are invalid, and will raise exceptions.
class TestW3CGeo2(TestGeoRSS2):
feed_type = feeds.W3CGeoFeed
class TestW3CGeo3(TestGeoRSS1):
feed_type = feeds.W3CGeoFeed
def item_geometry(self, item):
from django.contrib.gis.geos import Polygon
return Polygon(((0, 0), (0, 1), (1, 1), (1, 0), (0, 0)))
# The feed dictionary to use for URLs.
feed_dict = {
'rss1' : TestGeoRSS1,
'rss2' : TestGeoRSS2,
'atom1' : TestGeoAtom1,
'atom2' : TestGeoAtom2,
'w3cgeo1' : TestW3CGeo1,
'w3cgeo2' : TestW3CGeo2,
'w3cgeo3' : TestW3CGeo3,
}
| bsd-3-clause |
ammiranda/python_koans | python3/libs/colorama/win32.py | 86 | 2730 |
# from winbase.h
STDOUT = -11
STDERR = -12
try:
from ctypes import windll
except ImportError:
windll = None
SetConsoleTextAttribute = lambda *_: None
else:
from ctypes import (
byref, Structure, c_char, c_short, c_uint32, c_ushort
)
handles = {
STDOUT: windll.kernel32.GetStdHandle(STDOUT),
STDERR: windll.kernel32.GetStdHandle(STDERR),
}
SHORT = c_short
WORD = c_ushort
DWORD = c_uint32
TCHAR = c_char
class COORD(Structure):
"""struct in wincon.h"""
_fields_ = [
('X', SHORT),
('Y', SHORT),
]
class SMALL_RECT(Structure):
"""struct in wincon.h."""
_fields_ = [
("Left", SHORT),
("Top", SHORT),
("Right", SHORT),
("Bottom", SHORT),
]
class CONSOLE_SCREEN_BUFFER_INFO(Structure):
"""struct in wincon.h."""
_fields_ = [
("dwSize", COORD),
("dwCursorPosition", COORD),
("wAttributes", WORD),
("srWindow", SMALL_RECT),
("dwMaximumWindowSize", COORD),
]
def GetConsoleScreenBufferInfo(stream_id):
handle = handles[stream_id]
csbi = CONSOLE_SCREEN_BUFFER_INFO()
success = windll.kernel32.GetConsoleScreenBufferInfo(
handle, byref(csbi))
        # This fails when imported via setup.py when installing using 'pip';
        # presumably the fix is that running setup.py should not trigger all
        # this activity.
# assert success
return csbi
def SetConsoleTextAttribute(stream_id, attrs):
handle = handles[stream_id]
success = windll.kernel32.SetConsoleTextAttribute(handle, attrs)
assert success
def SetConsoleCursorPosition(stream_id, position):
handle = handles[stream_id]
position = COORD(*position)
success = windll.kernel32.SetConsoleCursorPosition(handle, position)
assert success
def FillConsoleOutputCharacter(stream_id, char, length, start):
handle = handles[stream_id]
char = TCHAR(char)
length = DWORD(length)
start = COORD(*start)
num_written = DWORD(0)
# AttributeError: function 'FillConsoleOutputCharacter' not found
# could it just be that my types are wrong?
success = windll.kernel32.FillConsoleOutputCharacter(
handle, char, length, start, byref(num_written))
assert success
return num_written.value
if __name__=='__main__':
x = GetConsoleScreenBufferInfo(STDOUT)
print(x.dwSize)
print(x.dwCursorPosition)
print(x.wAttributes)
print(x.srWindow)
print(x.dwMaximumWindowSize)
| mit |
DimensionDataCBUSydney/libcloud | libcloud/test/test_file_fixtures.py | 50 | 1229 | # Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import unittest
from libcloud.test.file_fixtures import ComputeFileFixtures
class FileFixturesTests(unittest.TestCase):
def test_success(self):
f = ComputeFileFixtures('meta')
self.assertEqual("Hello, World!", f.load('helloworld.txt'))
def test_failure(self):
f = ComputeFileFixtures('meta')
self.assertRaises(IOError, f.load, 'nil')
if __name__ == '__main__':
sys.exit(unittest.main())
| apache-2.0 |
jazkarta/edx-platform-for-isc | cms/djangoapps/contentstore/management/commands/git_export.py | 18 | 2804 | """
This command exports a course from CMS to a git repository.
It takes as arguments the course id to export (e.g. MITx/999/2020) and
the repository to commit to. It takes a username as an option for identifying
the commit, as well as a directory path to place the git repository.
By default it will use settings.GIT_REPO_EXPORT_DIR/repo_name as the cloned
directory. It is branch aware, but will reset all local changes to the
repository before attempting to export the XML, add, and commit changes if
any have taken place.
This functionality is also available as an export view in Studio if the giturl
attribute is set and FEATURES['ENABLE_EXPORT_GIT'] is set.
"""
import logging
from optparse import make_option
from django.core.management.base import BaseCommand, CommandError
from django.utils.translation import ugettext as _
import contentstore.git_export_utils as git_export_utils
from opaque_keys.edx.locations import SlashSeparatedCourseKey
from opaque_keys import InvalidKeyError
from contentstore.git_export_utils import GitExportError
from opaque_keys.edx.keys import CourseKey
log = logging.getLogger(__name__)
class Command(BaseCommand):
"""
Take a course from studio and export it to a git repository.
"""
option_list = BaseCommand.option_list + (
make_option('--username', '-u', dest='user',
help=('Specify a username from LMS/Studio to be used '
'as the commit author.')),
make_option('--repo_dir', '-r', dest='repo',
help='Specify existing git repo directory.'),
)
help = _('Take the specified course and attempt to '
'export it to a git repository.\nCourse directory '
'must already be a git repository. Usage: '
' git_export <course_loc> <git_url>')
def handle(self, *args, **options):
"""
Checks arguments and runs export function if they are good
"""
if len(args) != 2:
raise CommandError('This script requires exactly two arguments: '
'course_loc and git_url')
# Rethrow GitExportError as CommandError for SystemExit
try:
course_key = CourseKey.from_string(args[0])
except InvalidKeyError:
try:
course_key = SlashSeparatedCourseKey.from_deprecated_string(args[0])
except InvalidKeyError:
raise CommandError(_(GitExportError.BAD_COURSE))
try:
git_export_utils.export_to_git(
course_key,
args[1],
options.get('user', ''),
options.get('repo', None)  # matches dest='repo' declared above; 'rdir' was never set
)
except git_export_utils.GitExportError as ex:
raise CommandError(_(ex.message))
| agpl-3.0 |
mrshelly/openerp71313 | openerp/addons/mail/tests/test_mail_message.py | 1 | 23446 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Business Applications
# Copyright (c) 2012-TODAY OpenERP S.A. <http://openerp.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.addons.mail.tests.test_mail_base import TestMailBase
from openerp.osv.orm import except_orm
from openerp.tools import mute_logger
class test_mail_access_rights(TestMailBase):
def setUp(self):
super(test_mail_access_rights, self).setUp()
cr, uid = self.cr, self.uid
# Test mail.group: public to provide access to everyone
self.group_jobs_id = self.mail_group.create(cr, uid, {'name': 'Jobs', 'public': 'public'})
# Test mail.group: private to restrict access
self.group_priv_id = self.mail_group.create(cr, uid, {'name': 'Private', 'public': 'private'})
@mute_logger('openerp.addons.base.ir.ir_model', 'openerp.osv.orm')
def test_00_mail_group_access_rights(self):
""" Testing mail_group access rights and basic mail_thread features """
cr, uid, user_bert_id, user_raoul_id = self.cr, self.uid, self.user_bert_id, self.user_raoul_id
# Do: Bert reads Jobs -> ok, public
self.mail_group.read(cr, user_bert_id, [self.group_jobs_id])
# Do: Bert reads Pigs -> ko, restricted to employees
self.assertRaises(except_orm, self.mail_group.read,
cr, user_bert_id, [self.group_pigs_id])
# Do: Raoul read Pigs -> ok, belong to employees
self.mail_group.read(cr, user_raoul_id, [self.group_pigs_id])
# Do: Bert creates a group -> ko, no access rights
self.assertRaises(except_orm, self.mail_group.create,
cr, user_bert_id, {'name': 'Test'})
# Do: Raoul creates a restricted group -> ok
new_group_id = self.mail_group.create(cr, user_raoul_id, {'name': 'Test'})
# Do: Bert added to followers, read -> ok
self.mail_group.message_subscribe_users(cr, uid, [new_group_id], [user_bert_id])
self.mail_group.read(cr, user_bert_id, [new_group_id])
# Do: Raoul reads Priv -> ko, private
self.assertRaises(except_orm, self.mail_group.read,
cr, user_raoul_id, [self.group_priv_id])
# Do: Raoul added to followers, read -> ok
self.mail_group.message_subscribe_users(cr, uid, [self.group_priv_id], [user_raoul_id])
self.mail_group.read(cr, user_raoul_id, [self.group_priv_id])
# Do: Raoul writes on Private -> ok, he is now a follower
self.mail_group.write(cr, user_raoul_id, [self.group_priv_id], {'name': 'modified'})
# Do: Bert cannot write on Private -> ko (read but no write)
self.assertRaises(except_orm, self.mail_group.write,
cr, user_bert_id, [self.group_priv_id], {'name': 're-modified'})
# Test: Bert cannot unlink the group
self.assertRaises(except_orm,
self.mail_group.unlink,
cr, user_bert_id, [self.group_priv_id])
# Do: Raoul unlinks the group, there are no followers and messages left
self.mail_group.unlink(cr, user_raoul_id, [self.group_priv_id])
fol_ids = self.mail_followers.search(cr, uid, [('res_model', '=', 'mail.group'), ('res_id', '=', self.group_priv_id)])
self.assertFalse(fol_ids, 'unlinked document should not have any followers left')
msg_ids = self.mail_message.search(cr, uid, [('model', '=', 'mail.group'), ('res_id', '=', self.group_priv_id)])
self.assertFalse(msg_ids, 'unlinked document should not have any messages left')
@mute_logger('openerp.addons.base.ir.ir_model', 'openerp.osv.orm')
def test_10_mail_message_search_access_rights(self):
""" Testing mail_message.search() using specific _search implementation """
cr, uid, group_pigs_id = self.cr, self.uid, self.group_pigs_id
# Data: comment subtype for mail.message creation
ref = self.registry('ir.model.data').get_object_reference(cr, uid, 'mail', 'mt_comment')
subtype_id = ref and ref[1] or False
# Data: Birds group, private
group_birds_id = self.mail_group.create(self.cr, self.uid, {'name': 'Birds', 'public': 'private'})
# Data: Raoul is member of Pigs
self.mail_group.message_subscribe(cr, uid, [group_pigs_id], [self.partner_raoul_id])
# Data: various author_ids, partner_ids, documents
msg_id1 = self.mail_message.create(cr, uid, {'subject': '_Test', 'body': 'A', 'subtype_id': subtype_id})
msg_id2 = self.mail_message.create(cr, uid, {'subject': '_Test', 'body': 'A+B', 'partner_ids': [(6, 0, [self.partner_bert_id])], 'subtype_id': subtype_id})
msg_id3 = self.mail_message.create(cr, uid, {'subject': '_Test', 'body': 'A Pigs', 'model': 'mail.group', 'res_id': group_pigs_id, 'subtype_id': subtype_id})
msg_id4 = self.mail_message.create(cr, uid, {'subject': '_Test', 'body': 'A+B Pigs', 'model': 'mail.group', 'res_id': group_pigs_id, 'partner_ids': [(6, 0, [self.partner_bert_id])], 'subtype_id': subtype_id})
msg_id5 = self.mail_message.create(cr, uid, {'subject': '_Test', 'body': 'A+R Pigs', 'model': 'mail.group', 'res_id': group_pigs_id, 'partner_ids': [(6, 0, [self.partner_raoul_id])], 'subtype_id': subtype_id})
msg_id6 = self.mail_message.create(cr, uid, {'subject': '_Test', 'body': 'A Birds', 'model': 'mail.group', 'res_id': group_birds_id, 'subtype_id': subtype_id})
msg_id7 = self.mail_message.create(cr, self.user_raoul_id, {'subject': '_Test', 'body': 'B', 'subtype_id': subtype_id})
msg_id8 = self.mail_message.create(cr, self.user_raoul_id, {'subject': '_Test', 'body': 'B+R', 'partner_ids': [(6, 0, [self.partner_raoul_id])], 'subtype_id': subtype_id})
# Test: Bert: 2 messages that have Bert in partner_ids
msg_ids = self.mail_message.search(cr, self.user_bert_id, [('subject', 'like', '_Test')])
self.assertEqual(set([msg_id2, msg_id4]), set(msg_ids), 'mail_message search failed')
# Test: Raoul: 3 messages on Pigs Raoul can read (employee can read group with default values), 0 on Birds (private group)
msg_ids = self.mail_message.search(cr, self.user_raoul_id, [('subject', 'like', '_Test'), ('body', 'like', 'A')])
self.assertEqual(set([msg_id3, msg_id4, msg_id5]), set(msg_ids), 'mail_message search failed')
# Test: Raoul: 3 messages on Pigs Raoul can read (employee can read group with default values), 0 on Birds (private group) + 2 messages as author
msg_ids = self.mail_message.search(cr, self.user_raoul_id, [('subject', 'like', '_Test')])
self.assertEqual(set([msg_id3, msg_id4, msg_id5, msg_id7, msg_id8]), set(msg_ids), 'mail_message search failed')
# Test: Admin: all messages
msg_ids = self.mail_message.search(cr, uid, [('subject', 'like', '_Test')])
self.assertEqual(set([msg_id1, msg_id2, msg_id3, msg_id4, msg_id5, msg_id6, msg_id7, msg_id8]), set(msg_ids), 'mail_message search failed')
@mute_logger('openerp.addons.base.ir.ir_model', 'openerp.osv.orm')
def test_15_mail_message_check_access_rule(self):
""" Testing mail_message.check_access_rule() """
cr, uid = self.cr, self.uid
partner_bert_id, partner_raoul_id = self.partner_bert_id, self.partner_raoul_id
user_bert_id, user_raoul_id = self.user_bert_id, self.user_raoul_id
# Prepare messages: one on Pigs (employee group), one on Private (restricted group)
pigs_msg_id = self.mail_group.message_post(cr, uid, self.group_pigs_id, body='Message')
priv_msg_id = self.mail_group.message_post(cr, uid, self.group_priv_id, body='Message')
# prepare an attachment
attachment_id = self.ir_attachment.create(cr, uid, {'datas': 'My attachment'.encode('base64'), 'name': 'doc.txt', 'datas_fname': 'doc.txt'})
# ----------------------------------------
# CASE1: read
# ----------------------------------------
# Do: create a new mail.message
message_id = self.mail_message.create(cr, uid, {'body': 'My Body', 'attachment_ids': [(4, attachment_id)]})
# Test: Bert reads the message, crash because not notification/not in doc followers/not read on doc
self.assertRaises(except_orm, self.mail_message.read,
cr, user_bert_id, message_id)
# Do: message is pushed to Bert
notif_id = self.mail_notification.create(cr, uid, {'message_id': message_id, 'partner_id': partner_bert_id})
# Test: Bert reads the message, ok because notification pushed
self.mail_message.read(cr, user_bert_id, message_id)
# Test: Bert downloads attachment, ok because he can read message
self.mail_message.download_attachment(cr, user_bert_id, message_id, attachment_id)
# Do: remove notification
self.mail_notification.unlink(cr, uid, notif_id)
# Test: Bert reads the message, crash because not notification/not in doc followers/not read on doc
self.assertRaises(except_orm, self.mail_message.read,
cr, self.user_bert_id, message_id)
# Test: Bert downloads attachment, crash because he can't read message
self.assertRaises(except_orm, self.mail_message.download_attachment,
cr, user_bert_id, message_id, attachment_id)
# Do: Bert is now the author
self.mail_message.write(cr, uid, [message_id], {'author_id': partner_bert_id})
# Test: Bert reads the message, ok because Bert is the author
self.mail_message.read(cr, user_bert_id, message_id)
# Do: Bert is not the author anymore
self.mail_message.write(cr, uid, [message_id], {'author_id': partner_raoul_id})
# Test: Bert reads the message, crash because not notification/not in doc followers/not read on doc
self.assertRaises(except_orm, self.mail_message.read,
cr, user_bert_id, message_id)
# Do: message is attached to a document Bert can read, Jobs
self.mail_message.write(cr, uid, [message_id], {'model': 'mail.group', 'res_id': self.group_jobs_id})
# Test: Bert reads the message, ok because linked to a doc he is allowed to read
self.mail_message.read(cr, user_bert_id, message_id)
# Do: message is attached to a document Bert cannot read, Pigs
self.mail_message.write(cr, uid, [message_id], {'model': 'mail.group', 'res_id': self.group_pigs_id})
# Test: Bert reads the message, crash because not notification/not in doc followers/not read on doc
self.assertRaises(except_orm, self.mail_message.read,
cr, user_bert_id, message_id)
# ----------------------------------------
# CASE2: create
# ----------------------------------------
# Do: Bert creates a message on Pigs -> ko, no creation rights
self.assertRaises(except_orm, self.mail_message.create,
cr, user_bert_id, {'model': 'mail.group', 'res_id': self.group_pigs_id, 'body': 'Test'})
# Do: Bert creates a message on Jobs -> ko, no creation rights
self.assertRaises(except_orm, self.mail_message.create,
cr, user_bert_id, {'model': 'mail.group', 'res_id': self.group_jobs_id, 'body': 'Test'})
# Do: Bert creates a private message -> ko, no creation rights
self.assertRaises(except_orm, self.mail_message.create,
cr, user_bert_id, {'body': 'Test'})
# Do: Raoul creates a message on Jobs -> ok, write access to the related document
self.mail_message.create(cr, user_raoul_id, {'model': 'mail.group', 'res_id': self.group_jobs_id, 'body': 'Test'})
# Do: Raoul creates a message on Priv -> ko, no write access to the related document
self.assertRaises(except_orm, self.mail_message.create,
cr, user_raoul_id, {'model': 'mail.group', 'res_id': self.group_priv_id, 'body': 'Test'})
# Do: Raoul creates a private message -> ok
self.mail_message.create(cr, user_raoul_id, {'body': 'Test'})
# Do: Raoul creates a reply to a message on Priv -> ko
self.assertRaises(except_orm, self.mail_message.create,
cr, user_raoul_id, {'model': 'mail.group', 'res_id': self.group_priv_id, 'body': 'Test', 'parent_id': priv_msg_id})
# Do: Raoul creates a reply to a message on Priv -> ok if he has received the parent
self.mail_notification.create(cr, uid, {'message_id': priv_msg_id, 'partner_id': self.partner_raoul_id})
self.mail_message.create(cr, user_raoul_id, {'model': 'mail.group', 'res_id': self.group_priv_id, 'body': 'Test', 'parent_id': priv_msg_id})
def test_20_message_set_star(self):
""" Tests for starring messages and its related access rights """
cr, uid = self.cr, self.uid
# Data: post a message on Pigs
msg_id = self.group_pigs.message_post(body='My Body', subject='1')
msg = self.mail_message.browse(cr, uid, msg_id)
msg_raoul = self.mail_message.browse(cr, self.user_raoul_id, msg_id)
# Do: Admin stars msg
self.mail_message.set_message_starred(cr, uid, [msg.id], True)
msg.refresh()
# Test: notification exists
notif_ids = self.mail_notification.search(cr, uid, [('partner_id', '=', self.partner_admin_id), ('message_id', '=', msg.id)])
self.assertEqual(len(notif_ids), 1, 'mail_message set_message_starred: more than one notification created')
# Test: notification starred
notif = self.mail_notification.browse(cr, uid, notif_ids[0])
self.assertTrue(notif.starred, 'mail_notification starred failed')
self.assertTrue(msg.starred, 'mail_message starred failed')
# Do: Raoul stars msg
self.mail_message.set_message_starred(cr, self.user_raoul_id, [msg.id], True)
msg_raoul.refresh()
# Test: notification exists
notif_ids = self.mail_notification.search(cr, uid, [('partner_id', '=', self.partner_raoul_id), ('message_id', '=', msg.id)])
self.assertEqual(len(notif_ids), 1, 'mail_message set_message_starred: more than one notification created')
# Test: notification starred
notif = self.mail_notification.browse(cr, uid, notif_ids[0])
self.assertTrue(notif.starred, 'mail_notification starred failed')
self.assertTrue(msg_raoul.starred, 'mail_message starred failed')
# Do: Admin unstars msg
self.mail_message.set_message_starred(cr, uid, [msg.id], False)
msg.refresh()
msg_raoul.refresh()
# Test: msg unstarred for Admin, starred for Raoul
self.assertFalse(msg.starred, 'mail_message starred failed')
self.assertTrue(msg_raoul.starred, 'mail_message starred failed')
def test_30_message_set_read(self):
""" Tests for reading messages and its related access rights """
cr, uid = self.cr, self.uid
# Data: post a message on Pigs
msg_id = self.group_pigs.message_post(body='My Body', subject='1')
msg = self.mail_message.browse(cr, uid, msg_id)
msg_raoul = self.mail_message.browse(cr, self.user_raoul_id, msg_id)
# Do: Admin reads msg
self.mail_message.set_message_read(cr, uid, [msg.id], True)
msg.refresh()
# Test: notification exists
notif_ids = self.mail_notification.search(cr, uid, [('partner_id', '=', self.partner_admin_id), ('message_id', '=', msg.id)])
self.assertEqual(len(notif_ids), 1, 'mail_message set_message_read: more than one notification created')
# Test: notification read
notif = self.mail_notification.browse(cr, uid, notif_ids[0])
self.assertTrue(notif.read, 'mail_notification read failed')
self.assertFalse(msg.to_read, 'mail_message read failed')
# Do: Raoul reads msg
self.mail_message.set_message_read(cr, self.user_raoul_id, [msg.id], True)
msg_raoul.refresh()
# Test: notification exists
notif_ids = self.mail_notification.search(cr, uid, [('partner_id', '=', self.partner_raoul_id), ('message_id', '=', msg.id)])
self.assertEqual(len(notif_ids), 1, 'mail_message set_message_read: more than one notification created')
# Test: notification read
notif = self.mail_notification.browse(cr, uid, notif_ids[0])
self.assertTrue(notif.read, 'mail_notification read failed')
self.assertFalse(msg_raoul.to_read, 'mail_message read failed')
# Do: Admin unreads msg
self.mail_message.set_message_read(cr, uid, [msg.id], False)
msg.refresh()
msg_raoul.refresh()
# Test: msg unread for Admin, read for Raoul
self.assertTrue(msg.to_read, 'mail_message read failed')
self.assertFalse(msg_raoul.to_read, 'mail_message read failed')
def test_40_message_vote(self):
""" Test designed for the vote/unvote feature. """
cr, uid = self.cr, self.uid
# Data: post a message on Pigs
msg_id = self.group_pigs.message_post(body='My Body', subject='1')
msg = self.mail_message.browse(cr, uid, msg_id)
msg_raoul = self.mail_message.browse(cr, self.user_raoul_id, msg_id)
# Do: Admin vote for msg
self.mail_message.vote_toggle(cr, uid, [msg.id])
msg.refresh()
# Test: msg has Admin as voter
self.assertEqual(set(msg.vote_user_ids), set([self.user_admin]), 'mail_message vote: after voting, Admin should be among the voters')
# Do: Raoul votes for msg
self.mail_message.vote_toggle(cr, self.user_raoul_id, [msg.id])
msg_raoul.refresh()
# Test: msg has Admin and Raoul as voters
self.assertEqual(set(msg_raoul.vote_user_ids), set([self.user_admin, self.user_raoul]), 'mail_message vote: after voting, Admin and Raoul should be among the voters')
# Do: Admin unvote for msg
self.mail_message.vote_toggle(cr, uid, [msg.id])
msg.refresh()
msg_raoul.refresh()
# Test: msg has Raoul as its only voter
self.assertEqual(set(msg.vote_user_ids), set([self.user_raoul]), 'mail_message vote: after unvoting, Raoul should remain the only voter')
self.assertEqual(set(msg_raoul.vote_user_ids), set([self.user_raoul]), 'mail_message vote: after unvoting, Raoul should remain the only voter')
@mute_logger('openerp.addons.base.ir.ir_model', 'openerp.osv.orm')
def test_50_mail_flow_access_rights(self):
""" Test a Chatter-looks alike flow to test access rights """
cr, uid = self.cr, self.uid
mail_compose = self.registry('mail.compose.message')
partner_bert_id, partner_raoul_id = self.partner_bert_id, self.partner_raoul_id
user_bert_id, user_raoul_id = self.user_bert_id, self.user_raoul_id
# Prepare groups: Pigs (employee), Jobs (public)
pigs_msg_id = self.mail_group.message_post(cr, uid, self.group_pigs_id, body='Message', partner_ids=[(4, self.partner_admin_id)])
jobs_msg_id = self.mail_group.message_post(cr, uid, self.group_jobs_id, body='Message', partner_ids=[(4, self.partner_admin_id)])
# ----------------------------------------
# CASE1: Bert, without groups
# ----------------------------------------
# Do: Bert reads Jobs basic fields, ok because public = read access on the group
self.mail_group.read(cr, user_bert_id, self.group_jobs_id, ['name', 'description'])
# Do: Bert reads Jobs messages, ok because read access on the group => read access on its messages
jobs_message_ids = self.mail_group.read(cr, user_bert_id, self.group_jobs_id, ['message_ids'])['message_ids']
self.mail_message.read(cr, user_bert_id, jobs_message_ids)
# Do: Bert browses Jobs, ok (no direct browse of partners), ok for messages, ko for followers (accessible to employees or partner manager)
bert_jobs = self.mail_group.browse(cr, user_bert_id, self.group_jobs_id)
trigger_read = bert_jobs.name
for message in bert_jobs.message_ids:
trigger_read = message.subject
for partner in bert_jobs.message_follower_ids:
with self.assertRaises(except_orm):
trigger_read = partner.name
# Do: Bert comments Jobs, ko because no creation right
self.assertRaises(except_orm,
self.mail_group.message_post,
cr, user_bert_id, self.group_jobs_id, body='I love Pigs')
# Do: Bert writes on his own profile, ko because no message create access
with self.assertRaises(except_orm):
self.res_users.message_post(cr, user_bert_id, user_bert_id, body='I love Bert')
self.res_partner.message_post(cr, user_bert_id, partner_bert_id, body='I love Bert')
# ----------------------------------------
# CASE2: Raoul, employee
# ----------------------------------------
# Do: Raoul browses Jobs -> ok, ok for message_ids, ok for message_follower_ids
raoul_jobs = self.mail_group.browse(cr, user_raoul_id, self.group_jobs_id)
trigger_read = raoul_jobs.name
for message in raoul_jobs.message_ids:
trigger_read = message.subject
for partner in raoul_jobs.message_follower_ids:
trigger_read = partner.name
# Do: Raoul comments Jobs, ok
self.mail_group.message_post(cr, user_raoul_id, self.group_jobs_id, body='I love Pigs')
# Do: Raoul creates a mail.compose.message record on Jobs, because he uses the wizard
compose_id = mail_compose.create(cr, user_raoul_id,
{'subject': 'Subject', 'body': 'Body text', 'partner_ids': []},
{'default_composition_mode': 'comment', 'default_model': 'mail.group', 'default_res_id': self.group_jobs_id})
mail_compose.send_mail(cr, user_raoul_id, [compose_id])
# Do: Raoul replies to a Jobs message using the composer
compose_id = mail_compose.create(cr, user_raoul_id,
{'subject': 'Subject', 'body': 'Body text'},
{'default_composition_mode': 'reply', 'default_parent_id': pigs_msg_id})
mail_compose.send_mail(cr, user_raoul_id, [compose_id])
# Do: Raoul writes on his own profile, ok because he follows his own partner
self.res_users.message_post(cr, user_raoul_id, user_raoul_id, body='I love Raoul')
self.res_partner.message_post(cr, user_raoul_id, partner_raoul_id, body='I love Raoul')
compose_id = mail_compose.create(cr, user_raoul_id,
{'subject': 'Subject', 'body': 'Body text', 'partner_ids': []},
{'default_composition_mode': 'comment', 'default_model': 'res.users', 'default_res_id': self.user_raoul_id})
mail_compose.send_mail(cr, user_raoul_id, [compose_id])
| agpl-3.0 |
inflector/singnet | agent/sn_agent/api/poller.py | 6 | 1373 | import asyncio
import datetime
import logging
from contextlib import suppress
from aiohttp import web
logger = logging.getLogger(__file__)
class Periodic:
def __init__(self, func, time):
self.func = func
self.time = time
self.is_started = False
self._task = None
async def start(self):
logger.debug('Starting periodic task')
if not self.is_started:
self.is_started = True
# Start task to call func periodically:
self._task = asyncio.ensure_future(self._run())
async def stop(self):
logger.debug('Stopping periodic task')
if self.is_started:
self.is_started = False
# Stop task and await it stopped:
self._task.cancel()
with suppress(asyncio.CancelledError):
await self._task
async def _run(self):
while True:
await asyncio.sleep(self.time)
self.func()
def task_to_run():
print('Periodic Task: %s' % datetime.datetime.now())
async def startup(app: web.Application):
poller = Periodic(task_to_run, 5)
await poller.start()
app['eth_client_poller'] = poller
async def cleanup(app: web.Application):
await app['eth_client_poller'].stop()
def setup_poller(app):
app.on_startup.append(startup)
app.on_cleanup.append(cleanup)
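# Minimal wiring sketch (hypothetical entry point, using the stock aiohttp
# runner):
#
#   app = web.Application()
#   setup_poller(app)
#   web.run_app(app)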
| mit |
echodaemon/Empire | lib/common/messages.py | 8 | 16901 | """
Common terminal messages used across Empire.
Titles, agent displays, listener displays, etc.
"""
import os
import time
import textwrap
# Empire imports
import helpers
###############################################################
#
# Messages
#
###############################################################
def title(version):
"""
Print the tool title, with version.
"""
os.system('clear')
print "================================================================"
# print ' [Empire] PowerShell/Python post-exploitation framework'
print " [Empire] Post-Exploitation Framework"
print '================================================================'
print " [Version] %s | [Web] https://github.com/empireProject/Empire" % (version)
print '================================================================'
print """
_______ .___ ___. .______ __ .______ _______
| ____|| \/ | | _ \ | | | _ \ | ____|
| |__ | \ / | | |_) | | | | |_) | | |__
| __| | |\/| | | ___/ | | | / | __|
| |____ | | | | | | | | | |\ \----.| |____
|_______||__| |__| | _| |__| | _| `._____||_______|
"""
def loading():
"""
Print an ASCII loading screen.
"""
print """
`````````
``````.--::///+
````-+sydmmmNNNNNNN
``./ymmNNNNNNNNNNNNNN
``-ymmNNNNNNNNNNNNNNNNN
```ommmmNNNNNNNNNNNNNNNNN
``.ydmNNNNNNNNNNNNNNNNNNNN
```odmmNNNNNNNNNNNNNNNNNNNN
```/hmmmNNNNNNNNNNNNNNNNMNNN
````+hmmmNNNNNNNNNNNNNNNNNMMN
````..ymmmNNNNNNNNNNNNNNNNNNNN
````:.+so+//:---.......----::-
`````.`````````....----:///++++
``````.-/osy+////:::---...-dNNNN
````:sdyyydy` ```:mNNNNM
````-hmmdhdmm:` ``.+hNNNNNNM
```.odNNmdmmNNo````.:+yNNNNNNNNNN
```-sNNNmdh/dNNhhdNNNNNNNNNNNNNNN
```-hNNNmNo::mNNNNNNNNNNNNNNNNNNN
```-hNNmdNo--/dNNNNNNNNNNNNNNNNNN
````:dNmmdmd-:+NNNNNNNNNNNNNNNNNNm
```/hNNmmddmd+mNNNNNNNNNNNNNNds++o
``/dNNNNNmmmmmmmNNNNNNNNNNNmdoosydd
`sNNNNdyydNNNNmmmmmmNNNNNmyoymNNNNN
:NNmmmdso++dNNNNmmNNNNNdhymNNNNNNNN
-NmdmmNNdsyohNNNNmmNNNNNNNNNNNNNNNN
`sdhmmNNNNdyhdNNNNNNNNNNNNNNNNNNNNN
/yhmNNmmNNNNNNNNNNNNNNNNNNNNNNmhh
`+yhmmNNNNNNNNNNNNNNNNNNNNNNmh+:
`./dmmmmNNNNNNNNNNNNNNNNmmd.
`ommmmmNNNNNNNmNmNNNNmmd:
:dmmmmNNNNNmh../oyhhhy:
`sdmmmmNNNmmh/++-.+oh.
`/dmmmmmmmmdo-:/ossd:
`/ohhdmmmmmmdddddmh/
`-/osyhdddddhyo:
``.----.`
Welcome to the Empire"""
time.sleep(3)
os.system('clear')
def wrap_string(data, width=40, indent=32, indentAll=False, followingHeader=None):
"""
Print an option description message as a nicely
wrapped and formatted paragraph.
followingHeader -> text that also goes on the first line
"""
data = str(data)
if len(data) > width:
lines = textwrap.wrap(textwrap.dedent(data).strip(), width=width)
if indentAll:
returnString = ' ' * indent + lines[0]
if followingHeader:
returnString += " " + followingHeader
else:
returnString = lines[0]
if followingHeader:
returnString += " " + followingHeader
i = 1
while i < len(lines):
returnString += "\n" + ' ' * indent + (lines[i]).strip()
i += 1
return returnString
else:
return data.strip()
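# Illustrative call (made-up text):
#
#   wrap_string('a long option description that will not fit on one line',
#               width=20, indent=8)
#
# returns the first wrapped line as-is and prefixes every following line
# with a newline plus 8 spaces.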
def wrap_columns(col1, col2, width1=24, width2=40, indent=31):
"""
Takes two strings of text and turns them into nicely formatted column output.
Used by display_module()
"""
lines1 = textwrap.wrap(textwrap.dedent(col1).strip(), width=width1)
lines2 = textwrap.wrap(textwrap.dedent(col2).strip(), width=width2)
result = ''
limit = max(len(lines1), len(lines2))
for x in xrange(limit):
if x < len(lines1):
if x != 0:
result += ' ' * indent
result += '{line: <0{width}s}'.format(width=width1, line=lines1[x])
else:
if x == 0:
result += ' ' * width1
else:
result += ' ' * (indent + width1)
if x < len(lines2):
result += ' ' + '{line: <0{width}s}'.format(width=width2, line=lines2[x])
if x != limit-1:
result += "\n"
return result
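# Illustrative call (made-up values):
#
#   wrap_columns('short name', 'a much longer description that wraps',
#                width1=16, width2=20)
#
# pads and wraps the two texts into side-by-side columns, indenting every
# row after the first by `indent` spaces.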
def display_options(options, color=True):
"""
Take a dictionary and display it nicely.
"""
for key in options:
if color:
print "\t%s\t%s" % (helpers.color('{0: <16}'.format(key), "green"), wrap_string(options[key]))
else:
print "\t%s\t%s" % ('{0: <16}'.format(key), wrap_string(options[key]))
def display_agents(agents):
"""
Take a dictionary of agents and build the display for the main menu.
"""
if len(agents) > 0:
print ''
print helpers.color("[*] Active agents:\n")
print " Name Lang Internal IP Machine Name Username Process Delay Last Seen"
print " --------- ---- ----------- ------------ --------- ------- ----- --------------------"
for agent in agents:
if str(agent['high_integrity']) == '1':
# add a * to the username if it's high integrity
agent['username'] = '*' + str(agent['username'])
if not agent['language'] or agent['language'] == '':
agent['language'] = 'X'
elif agent['language'].lower() == 'powershell':
agent['language'] = 'ps'
elif agent['language'].lower() == 'python':
agent['language'] = 'py'
else:
agent['language'] = 'X'
print " %.16s%.6s%.16s%.16s%.20s%.20s%.9s%.20s" % ('{0: <16}'.format(agent['name']), '{0: <6}'.format(agent['language']), '{0: <16}'.format(agent['internal_ip']), '{0: <16}'.format(agent['hostname']), '{0: <20}'.format(agent['username']), '{0: <20}'.format(str(agent['process_name']) + "/" + str(agent['process_id'])), '{0: <9}'.format(str(agent['delay']) + "/" +str(agent['jitter'])), agent['lastseen_time'])
print ''
else:
print helpers.color('[!] No agents currently registered')
def display_agent(agent, returnAsString=False):
"""
Display an agent all nice-like.
Takes in the tuple of the raw agent database results.
"""
if returnAsString:
agentString = "\n[*] Agent info:\n"
for key, value in agent.iteritems():
if key != 'functions' and key != 'takings' and key != 'results':
agentString += " %s\t%s\n" % ('{0: <16}'.format(key), wrap_string(value, width=70))
return agentString + '\n'
else:
print helpers.color("\n[*] Agent info:\n")
for key, value in agent.iteritems():
if key != 'functions' and key != 'takings' and key != 'results':
print "\t%s\t%s" % (helpers.color('{0: <16}'.format(key), "blue"), wrap_string(value, width=70))
print ''
def display_active_listeners(listeners):
"""
Take an active listeners list and display everything nicely.
"""
if len(listeners) > 0:
print ''
print helpers.color("[*] Active listeners:\n")
print " Name Module Host Delay/Jitter KillDate"
print " ---- ------ ---- ------------ --------"
for listenerName, listener in listeners.iteritems():
moduleName = listener['moduleName']
if 'Host' in listener['options']:
host = listener['options']['Host']['Value']
else:
host = ''
if 'DefaultDelay' in listener['options']:
defaultDelay = listener['options']['DefaultDelay']['Value']
else:
defaultDelay = 'n/a'
if 'DefaultJitter' in listener['options']:
defaultJitter = listener['options']['DefaultJitter']['Value']
else:
defaultJitter = ''
if defaultDelay == 'n/a':
connectInterval = 'n/a'
else:
connectInterval = "%s/%s" % (defaultDelay, defaultJitter)
if 'KillDate' in listener['options']:
killDate = listener['options']['KillDate']['Value']
else:
killDate = 'n/a'
print " %s%s%s%s%s" % ('{0: <18}'.format(listenerName), '{0: <16}'.format(moduleName), '{0: <37}'.format(host), '{0: <15}'.format(connectInterval), '{0: <12}'.format(killDate))
print ''
else:
print helpers.color("[!] No listeners currently active ")
def display_active_listener(listener):
"""
Displays an active listener's information structure.
"""
print "\n%s Options:\n" % (listener['options']['Name']['Value'])
print " Name Required Value Description"
print " ---- -------- ------- -----------"
for option, values in listener['options'].iteritems():
# if there's a long value length, wrap it
if len(str(values['Value'])) > 33:
print " %s%s%s" % ('{0: <18}'.format(option), '{0: <12}'.format(("True" if values['Required'] else "False")), '{0: <33}'.format(wrap_string(values['Value'], width=32, indent=32, followingHeader=values['Description'])))
else:
print " %s%s%s%s" % ('{0: <18}'.format(option), '{0: <12}'.format(("True" if values['Required'] else "False")), '{0: <33}'.format(values['Value']), values['Description'])
print "\n"
def display_listener_module(listener):
"""
Displays a listener module's information structure.
"""
print '\n{0: >10}'.format("Name: ") + str(listener.info['Name'])
print '{0: >10}'.format("Category: ") + str(listener.info['Category'])
print "\nAuthors:"
for author in listener.info['Author']:
print " " +author
print "\nDescription:"
desc = wrap_string(listener.info['Description'], width=60, indent=2, indentAll=True)
if len(desc.splitlines()) == 1:
print " " + str(desc)
else:
print desc
if 'Comments' in listener.info:
comments = listener.info['Comments']
if isinstance(comments, list):
comments = ' '.join(comments)
if comments.strip() != '':
print "\nComments:"
if isinstance(comments, list):
comments = ' '.join(comments)
comment = wrap_string(comments, width=60, indent=2, indentAll=True)
if len(comment.splitlines()) == 1:
print " " + str(comment)
else:
print comment
print "\n%s Options:\n" % (listener.info['Name'])
print " Name Required Value Description"
print " ---- -------- ------- -----------"
for option, values in listener.options.iteritems():
# if there's a long value length, wrap it
if len(str(values['Value'])) > 33:
print " %s%s%s" % ('{0: <18}'.format(option), '{0: <12}'.format(("True" if values['Required'] else "False")), '{0: <33}'.format(wrap_string(values['Value'], width=32, indent=32, followingHeader=values['Description'])))
else:
print " %s%s%s%s" % ('{0: <18}'.format(option), '{0: <12}'.format(("True" if values['Required'] else "False")), '{0: <33}'.format(values['Value']), values['Description'])
print "\n"
def display_stager(stager):
"""
Displays a stager's information structure.
"""
print "\nName: " + stager.info['Name']
print "\nDescription:"
desc = wrap_string(stager.info['Description'], width=50, indent=2, indentAll=True)
if len(desc.splitlines()) == 1:
print " " + str(desc)
else:
print desc
# print out any options, if present
if stager.options:
print "\nOptions:\n"
print " Name Required Value Description"
print " ---- -------- ------- -----------"
for option, values in stager.options.iteritems():
print " %s%s%s%s" % ('{0: <17}'.format(option), '{0: <12}'.format(("True" if values['Required'] else "False")), '{0: <18}'.format(values['Value']), wrap_string(values['Description'], indent=49))
print "\n"
def display_module(moduleName, module):
"""
Displays a module's information structure.
"""
print '\n{0: >20}'.format("Name: ") + str(module.info['Name'])
print '{0: >20}'.format("Module: ") + str(moduleName)
if 'NeedsAdmin' in module.info:
print '{0: >20}'.format("NeedsAdmin: ") + ("True" if module.info['NeedsAdmin'] else "False")
if 'OpsecSafe' in module.info:
print '{0: >20}'.format("OpsecSafe: ") + ("True" if module.info['OpsecSafe'] else "False")
if 'Language' in module.info:
print '{0: >20}'.format("Language: ") + str(module.info['Language'])
if 'MinLanguageVersion' in module.info:
print '{0: >20}'.format("MinLanguageVersion: ") + str(module.info['MinLanguageVersion'])
if 'Background' in module.info:
print '{0: >20}'.format("Background: ") + ("True" if module.info['Background'] else "False")
if 'OutputExtension' in module.info:
print '{0: >20}'.format("OutputExtension: ") + (str(module.info['OutputExtension']) if module.info['OutputExtension'] else "None")
print "\nAuthors:"
for author in module.info['Author']:
print " " +author
print "\nDescription:"
desc = wrap_string(module.info['Description'], width=60, indent=2, indentAll=True)
if len(desc.splitlines()) == 1:
print " " + str(desc)
else:
print desc
if 'Comments' in module.info:
comments = module.info['Comments']
if isinstance(comments, list):
comments = ' '.join(comments)
if comments.strip() != '':
print "\nComments:"
if isinstance(comments, list):
comments = ' '.join(comments)
comment = wrap_string(comments, width=60, indent=2, indentAll=True)
if len(comment.splitlines()) == 1:
print " " + str(comment)
else:
print comment
# print out any options, if present
if module.options:
# get the size for the first column
maxNameLen = len(max(module.options.keys(), key=len))
print "\nOptions:\n"
print " %sRequired Value Description" %('{:<{}s}'.format("Name", maxNameLen+1))
print " %s-------- ------- -----------" %('{:<{}s}'.format("----", maxNameLen+1))
for option, values in module.options.iteritems():
print " %s%s%s" % ('{:<{}s}'.format(str(option), maxNameLen+1), '{0: <12}'.format(("True" if values['Required'] else "False")), wrap_columns(str(values['Value']), str(values['Description']), indent=(31 + (maxNameLen-16))))
print ''
def display_module_search(moduleName, module):
"""
Displays the name/description of a module for search results.
"""
# Suffix modules requiring elevated context with '*'
if module.info['NeedsAdmin']:
print " %s*\n" % (helpers.color(moduleName, 'blue'))
else:
print " %s\n" % (helpers.color(moduleName, 'blue'))
lines = textwrap.wrap(textwrap.dedent(module.info['Description']).strip(), width=70)
for line in lines:
print "\t" + line
print "\n"
def display_credentials(creds):
"""
Take a credential array and display everything nicely.
"""
print helpers.color("\nCredentials:\n", "blue")
print " CredID CredType Domain UserName Host Password"
print " ------ -------- ------ -------- ---- --------"
for cred in creds:
# (id, credtype, domain, username, password, host, notes, sid)
credID = cred[0]
credType = cred[1]
domain = cred[2]
username = cred[3]
password = cred[4]
host = cred[5]
print " %s%s%s%s%s%s" % ('{0: <8}'.format(credID), '{0: <11}'.format(credType), '{0: <25}'.format(domain), '{0: <17}'.format(username), '{0: <17}'.format(host), password)
print ''
| bsd-3-clause |
augustozuniga/arisgames | zxing-master/cpp/scons/scons-local-2.0.0.final.0/SCons/compat/_scons_collections.py | 34 | 1869 | #
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__doc__ = """
collections compatibility module for older (pre-2.4) Python versions
This does NOT (repeat, *NOT*) provide complete collections
functionality. It only wraps the portions of collections functionality
used by SCons, in an interface that looks enough like collections for
our purposes.
"""
__revision__ = "src/engine/SCons/compat/_scons_collections.py 5023 2010/06/14 22:05:46 scons"
# Use exec to hide old names from fixers.
exec("""if True:
from UserDict import UserDict
from UserList import UserList
from UserString import UserString""")
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| mit |
ychfan/tensorflow | tensorflow/python/profiler/profile_context_test.py | 32 | 4282 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from tensorflow.python.client import session
from tensorflow.python.framework import ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import gfile
from tensorflow.python.platform import test
from tensorflow.python.profiler import option_builder
# pylint: disable=g-bad-import-order
from tensorflow.python.profiler import profile_context
from tensorflow.python.profiler.internal import model_analyzer_testlib as lib
builder = option_builder.ProfileOptionBuilder
class ProfilerContextTest(test.TestCase):
def testBasics(self):
ops.reset_default_graph()
outfile = os.path.join(test.get_temp_dir(), "dump")
opts = builder(builder.time_and_memory()
).with_file_output(outfile).build()
x = lib.BuildFullModel()
profile_str = None
profile_step100 = os.path.join(test.get_temp_dir(), "profile_100")
with profile_context.ProfileContext(test.get_temp_dir()) as pctx:
pctx.add_auto_profiling("op", options=opts, profile_steps=[15, 50, 100])
with session.Session() as sess:
sess.run(variables.global_variables_initializer())
total_steps = 101
for i in range(total_steps):
sess.run(x)
if i == 14 or i == 49:
self.assertTrue(gfile.Exists(outfile))
gfile.Remove(outfile)
if i == 99:
self.assertTrue(gfile.Exists(profile_step100))
with gfile.Open(outfile, "r") as f:
profile_str = f.read()
gfile.Remove(outfile)
with lib.ProfilerFromFile(
os.path.join(test.get_temp_dir(), "profile_100")) as profiler:
profiler.profile_operations(options=opts)
with gfile.Open(outfile, "r") as f:
self.assertEqual(profile_str, f.read())
def testAutoTracingInDebugMode(self):
ops.reset_default_graph()
x = lib.BuildFullModel()
with profile_context.ProfileContext(test.get_temp_dir(), debug=True):
with session.Session() as sess:
sess.run(variables.global_variables_initializer())
for _ in range(10):
sess.run(x)
for f in gfile.ListDirectory(test.get_temp_dir()):
# Warm up, no tracing.
self.assertFalse("run_meta" in f)
sess.run(x)
self.assertTrue(
gfile.Exists(os.path.join(test.get_temp_dir(), "run_meta_11")))
gfile.Remove(os.path.join(test.get_temp_dir(), "run_meta_11"))
# fetched already.
sess.run(x)
for f in gfile.ListDirectory(test.get_temp_dir()):
self.assertFalse("run_meta" in f)
def testDisabled(self):
ops.reset_default_graph()
x = lib.BuildFullModel()
with profile_context.ProfileContext(test.get_temp_dir(),
enabled=False) as pctx:
with session.Session() as sess:
sess.run(variables.global_variables_initializer())
for _ in range(10):
sess.run(x)
self.assertTrue(pctx.profiler is None)
self.assertTrue(
getattr(session.BaseSession, "profile_context", None) is None)
with profile_context.ProfileContext(test.get_temp_dir()) as pctx:
with session.Session() as sess:
sess.run(variables.global_variables_initializer())
for _ in range(10):
sess.run(x)
self.assertFalse(pctx.profiler is None)
self.assertFalse(
getattr(session.BaseSession, "profile_context", None) is None)
if __name__ == "__main__":
test.main()
| apache-2.0 |
pjurik2/pykarma | feeds/rss.py | 1 | 3205 | import os, sys
import random
import time
import feedparser
import itertools
import HTMLParser
from feed import Feed
if os.getcwd().rstrip(os.sep).endswith('feeds'):
os.chdir('..')
sys.path.insert(0, os.getcwd())
from gui_client import new_rpc
import web
import reddit
class RSSFeed(Feed):
def __init__(self):
self.title = 'RSS Feed'
self.streams = []
self.wait_range = (60, 70)
self.max_error_wait = 600
self.max_subs = 0
self.urls = set()
def configure(self):
pass
def watch(self, new_streams=None):
self.configure()
self.web = web.Web()
try:
self.rpc = new_rpc(self.title)
except:
self.rpc = None
print 'Warning: Running without RPC'
if new_streams is None:
new_streams = []
streams = self.streams + new_streams
for url in itertools.cycle(streams):
print url
self.check_feed(url)
time.sleep(random.randint(*self.wait_range))
def check_feed(self, url):
for fail_count in itertools.count():
try:
datad = feedparser.parse(url)
except:
print 'Parse error for', url
time.sleep(min(2 ** fail_count, self.max_error_wait))
else:
break
try:
posts = datad['items']
except:
print 'No items field for', url
posts = []
for post in posts:
self.check_post(post)
def check_post(self, post):
if ('link' not in post):
return False
url = self.url_pre_filter(post['link'])
try:
req = self.web.get(url)
url = req.geturl()
except:
print 'URL retrieval error for ', url
return False
url = self.url_post_filter(url)
if (url in self.urls) or not url.startswith('http://'):  # note: https and other schemes are skipped
return False
self.urls.add(url)
feed_title = self.default_title_filter(post.get('title', ''))
page_title = self.default_title_filter(self.web.title(req))
title = self.title_filter(page_title, feed_title)
if self.rpc is not None:
subreddit = self.rpc.get_title_subreddit(title)
keywords = self.rpc.get_title_keywords(title)
if self.rpc.get_link_posted_count(url, title) <= self.max_subs:
stats = self.rpc.get_learned_stats(title, keywords)
self.rpc.gui_link_add(self.title, title, url, subreddit, keywords, **stats)
try:
req.close()
except:
pass
print title
print url
def url_pre_filter(self, url):
return url
def url_post_filter(self, url):
return url
def default_title_filter(self, title):
h = HTMLParser.HTMLParser()
return h.unescape(title)
def title_filter(self, page_title, feed_title):
return page_title
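# Sketch of a site-specific feed (hypothetical subclass; shows the filter
# hooks subclasses are expected to override):
#
#   class PhysOrgFeed(RSSFeed):
#       def url_post_filter(self, url):
#           return url.split('?', 1)[0]  # drop tracking query strings
#       def title_filter(self, page_title, feed_title):
#           return feed_title or page_title  # prefer the feed's own title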
if __name__ == '__main__':
f = RSSFeed()
f.watch(['http://www.physorg.com/rss-feed/'])
| mit |
aman-iitj/scipy | scipy/linalg/tests/test_special_matrices.py | 36 | 22800 | """Tests for functions in special_matrices.py."""
from __future__ import division, print_function, absolute_import
import numpy as np
from numpy import arange, add, array, eye, copy, sqrt
from numpy.testing import (TestCase, run_module_suite, assert_raises,
assert_equal, assert_array_equal, assert_array_almost_equal,
assert_allclose)
from scipy._lib.six import xrange
from scipy.special import comb
from scipy.linalg import (toeplitz, hankel, circulant, hadamard, leslie,
companion, tri, triu, tril, kron, block_diag,
helmert, hilbert, invhilbert, pascal, invpascal, dft)
from scipy.fftpack import fft
from numpy.linalg import cond
def get_mat(n):
data = arange(n)
data = add.outer(data,data)
return data
class TestTri(TestCase):
def test_basic(self):
assert_equal(tri(4),array([[1,0,0,0],
[1,1,0,0],
[1,1,1,0],
[1,1,1,1]]))
assert_equal(tri(4,dtype='f'),array([[1,0,0,0],
[1,1,0,0],
[1,1,1,0],
[1,1,1,1]],'f'))
def test_diag(self):
assert_equal(tri(4,k=1),array([[1,1,0,0],
[1,1,1,0],
[1,1,1,1],
[1,1,1,1]]))
assert_equal(tri(4,k=-1),array([[0,0,0,0],
[1,0,0,0],
[1,1,0,0],
[1,1,1,0]]))
def test_2d(self):
assert_equal(tri(4,3),array([[1,0,0],
[1,1,0],
[1,1,1],
[1,1,1]]))
assert_equal(tri(3,4),array([[1,0,0,0],
[1,1,0,0],
[1,1,1,0]]))
def test_diag2d(self):
assert_equal(tri(3,4,k=2),array([[1,1,1,0],
[1,1,1,1],
[1,1,1,1]]))
assert_equal(tri(4,3,k=-2),array([[0,0,0],
[0,0,0],
[1,0,0],
[1,1,0]]))
class TestTril(TestCase):
def test_basic(self):
a = (100*get_mat(5)).astype('l')
b = a.copy()
for k in range(5):
for l in range(k+1,5):
b[k,l] = 0
assert_equal(tril(a),b)
def test_diag(self):
a = (100*get_mat(5)).astype('f')
b = a.copy()
for k in range(5):
for l in range(k+3,5):
b[k,l] = 0
assert_equal(tril(a,k=2),b)
b = a.copy()
for k in range(5):
for l in range(max((k-1,0)),5):
b[k,l] = 0
assert_equal(tril(a,k=-2),b)
class TestTriu(TestCase):
def test_basic(self):
a = (100*get_mat(5)).astype('l')
b = a.copy()
for k in range(5):
for l in range(k+1,5):
b[l,k] = 0
assert_equal(triu(a),b)
def test_diag(self):
a = (100*get_mat(5)).astype('f')
b = a.copy()
for k in range(5):
for l in range(max((k-1,0)),5):
b[l,k] = 0
assert_equal(triu(a,k=2),b)
b = a.copy()
for k in range(5):
for l in range(k+3,5):
b[l,k] = 0
assert_equal(triu(a,k=-2),b)
class TestToeplitz(TestCase):
def test_basic(self):
y = toeplitz([1,2,3])
assert_array_equal(y,[[1,2,3],[2,1,2],[3,2,1]])
y = toeplitz([1,2,3],[1,4,5])
assert_array_equal(y,[[1,4,5],[2,1,4],[3,2,1]])
def test_complex_01(self):
data = (1.0 + arange(3.0)) * (1.0 + 1.0j)
x = copy(data)
t = toeplitz(x)
# Calling toeplitz should not change x.
assert_array_equal(x, data)
# According to the docstring, x should be the first column of t.
col0 = t[:,0]
assert_array_equal(col0, data)
assert_array_equal(t[0,1:], data[1:].conj())
def test_scalar_00(self):
"""Scalar arguments still produce a 2D array."""
t = toeplitz(10)
assert_array_equal(t, [[10]])
t = toeplitz(10, 20)
assert_array_equal(t, [[10]])
def test_scalar_01(self):
c = array([1,2,3])
t = toeplitz(c, 1)
assert_array_equal(t, [[1],[2],[3]])
def test_scalar_02(self):
c = array([1,2,3])
t = toeplitz(c, array(1))
assert_array_equal(t, [[1],[2],[3]])
def test_scalar_03(self):
c = array([1,2,3])
t = toeplitz(c, array([1]))
assert_array_equal(t, [[1],[2],[3]])
def test_scalar_04(self):
r = array([10,2,3])
t = toeplitz(1, r)
assert_array_equal(t, [[1,2,3]])
class TestHankel(TestCase):
def test_basic(self):
y = hankel([1,2,3])
assert_array_equal(y, [[1,2,3], [2,3,0], [3,0,0]])
y = hankel([1,2,3], [3,4,5])
assert_array_equal(y, [[1,2,3], [2,3,4], [3,4,5]])
class TestCirculant(TestCase):
def test_basic(self):
y = circulant([1,2,3])
assert_array_equal(y, [[1,3,2], [2,1,3], [3,2,1]])
class TestHadamard(TestCase):
def test_basic(self):
y = hadamard(1)
assert_array_equal(y, [[1]])
y = hadamard(2, dtype=float)
assert_array_equal(y, [[1.0, 1.0], [1.0, -1.0]])
y = hadamard(4)
assert_array_equal(y, [[1,1,1,1], [1,-1,1,-1], [1,1,-1,-1], [1,-1,-1,1]])
assert_raises(ValueError, hadamard, 0)
assert_raises(ValueError, hadamard, 5)
class TestLeslie(TestCase):
def test_bad_shapes(self):
assert_raises(ValueError, leslie, [[1,1],[2,2]], [3,4,5])
assert_raises(ValueError, leslie, [3,4,5], [[1,1],[2,2]])
assert_raises(ValueError, leslie, [1,2], [1,2])
assert_raises(ValueError, leslie, [1], [])
def test_basic(self):
a = leslie([1, 2, 3], [0.25, 0.5])
expected = array([
[1.0, 2.0, 3.0],
[0.25, 0.0, 0.0],
[0.0, 0.5, 0.0]])
assert_array_equal(a, expected)
class TestCompanion(TestCase):
def test_bad_shapes(self):
assert_raises(ValueError, companion, [[1,1],[2,2]])
assert_raises(ValueError, companion, [0,4,5])
assert_raises(ValueError, companion, [1])
assert_raises(ValueError, companion, [])
def test_basic(self):
c = companion([1, 2, 3])
expected = array([
[-2.0, -3.0],
[1.0, 0.0]])
assert_array_equal(c, expected)
c = companion([2.0, 5.0, -10.0])
expected = array([
[-2.5, 5.0],
[1.0, 0.0]])
assert_array_equal(c, expected)
class TestBlockDiag:
def test_basic(self):
x = block_diag(eye(2), [[1,2], [3,4], [5,6]], [[1, 2, 3]])
assert_array_equal(x, [[1, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0],
[0, 0, 1, 2, 0, 0, 0],
[0, 0, 3, 4, 0, 0, 0],
[0, 0, 5, 6, 0, 0, 0],
[0, 0, 0, 0, 1, 2, 3]])
def test_dtype(self):
x = block_diag([[1.5]])
assert_equal(x.dtype, float)
x = block_diag([[True]])
assert_equal(x.dtype, bool)
def test_mixed_dtypes(self):
actual = block_diag([[1]], [[1j]])
desired = np.array([[1, 0], [0, 1j]])
assert_array_equal(actual, desired)
def test_scalar_and_1d_args(self):
a = block_diag(1)
assert_equal(a.shape, (1,1))
assert_array_equal(a, [[1]])
a = block_diag([2,3], 4)
assert_array_equal(a, [[2, 3, 0], [0, 0, 4]])
def test_bad_arg(self):
assert_raises(ValueError, block_diag, [[[1]]])
def test_no_args(self):
a = block_diag()
assert_equal(a.ndim, 2)
assert_equal(a.nbytes, 0)
def test_empty_matrix_arg(self):
# regression test for gh-4596: check the shape of the result for empty matrix inputs
a = block_diag([[1, 0], [0, 1]],
[],
[[2, 3], [4, 5], [6, 7]])
assert_array_equal(a, [[1, 0, 0, 0],
[0, 1, 0, 0],
[0, 0, 2, 3],
[0, 0, 4, 5],
[0, 0, 6, 7]])
class TestKron:
def test_basic(self):
a = kron(array([[1, 2], [3, 4]]), array([[1, 1, 1]]))
assert_array_equal(a, array([[1, 1, 1, 2, 2, 2],
[3, 3, 3, 4, 4, 4]]))
m1 = array([[1, 2], [3, 4]])
m2 = array([[10], [11]])
a = kron(m1, m2)
expected = array([[10, 20],
[11, 22],
[30, 40],
[33, 44]])
assert_array_equal(a, expected)
class TestHelmert(TestCase):
def test_orthogonality(self):
for n in range(1, 7):
H = helmert(n, full=True)
I = np.eye(n)
assert_allclose(H.dot(H.T), I, atol=1e-12)
assert_allclose(H.T.dot(H), I, atol=1e-12)
def test_subspace(self):
for n in range(2, 7):
H_full = helmert(n, full=True)
H_partial = helmert(n)
for U in H_full[1:, :].T, H_partial.T:
C = np.eye(n) - np.ones((n, n)) / n
assert_allclose(U.dot(U.T), C)
assert_allclose(U.T.dot(U), np.eye(n-1), atol=1e-12)
class TestHilbert(TestCase):
def test_basic(self):
h3 = array([[1.0, 1/2., 1/3.],
[1/2., 1/3., 1/4.],
[1/3., 1/4., 1/5.]])
assert_array_almost_equal(hilbert(3), h3)
assert_array_equal(hilbert(1), [[1.0]])
h0 = hilbert(0)
assert_equal(h0.shape, (0,0))
class TestInvHilbert(TestCase):
def test_basic(self):
invh1 = array([[1]])
assert_array_equal(invhilbert(1, exact=True), invh1)
assert_array_equal(invhilbert(1), invh1)
invh2 = array([[4, -6],
[-6, 12]])
assert_array_equal(invhilbert(2, exact=True), invh2)
assert_array_almost_equal(invhilbert(2), invh2)
invh3 = array([[9, -36, 30],
[-36, 192, -180],
[30, -180, 180]])
assert_array_equal(invhilbert(3, exact=True), invh3)
assert_array_almost_equal(invhilbert(3), invh3)
invh4 = array([[16, -120, 240, -140],
[-120, 1200, -2700, 1680],
[240, -2700, 6480, -4200],
[-140, 1680, -4200, 2800]])
assert_array_equal(invhilbert(4, exact=True), invh4)
assert_array_almost_equal(invhilbert(4), invh4)
invh5 = array([[25, -300, 1050, -1400, 630],
[-300, 4800, -18900, 26880, -12600],
[1050, -18900, 79380, -117600, 56700],
[-1400, 26880, -117600, 179200, -88200],
[630, -12600, 56700, -88200, 44100]])
assert_array_equal(invhilbert(5, exact=True), invh5)
assert_array_almost_equal(invhilbert(5), invh5)
invh17 = array([
[289, -41616, 1976760, -46124400, 629598060, -5540462928,
33374693352, -143034400080, 446982500250, -1033026222800,
1774926873720, -2258997839280, 2099709530100, -1384423866000,
613101997800, -163493866080, 19835652870],
[-41616, 7990272, -426980160, 10627061760, -151103534400, 1367702848512,
-8410422724704, 36616806420480, -115857864064800, 270465047424000,
-468580694662080, 600545887119360, -561522320049600, 372133135180800,
-165537539406000, 44316454993920, -5395297580640],
[1976760, -426980160, 24337869120, -630981792000, 9228108708000,
-85267724461920, 532660105897920, -2348052711713280, 7504429831470000,
-17664748409880000, 30818191841236800, -39732544853164800,
37341234283298400, -24857330514030000, 11100752642520000,
-2982128117299200, 364182586693200],
[-46124400, 10627061760, -630981792000, 16826181120000,
-251209625940000, 2358021022156800, -14914482965141760,
66409571644416000, -214015221119700000, 507295338950400000,
-890303319857952000, 1153715376477081600, -1089119333262870000,
727848632044800000, -326170262829600000, 87894302404608000,
-10763618673376800],
[629598060, -151103534400, 9228108708000,
-251209625940000, 3810012660090000, -36210360321495360,
231343968720664800, -1038687206500944000, 3370739732635275000,
-8037460526495400000, 14178080368737885600, -18454939322943942000,
17489975175339030000, -11728977435138600000, 5272370630081100000,
-1424711708039692800, 174908803442373000],
[-5540462928, 1367702848512, -85267724461920, 2358021022156800,
-36210360321495360, 347619459086355456, -2239409617216035264,
10124803292907663360, -33052510749726468000, 79217210949138662400,
-140362995650505067440, 183420385176741672960, -174433352415381259200,
117339159519533952000, -52892422160973595200, 14328529177999196160,
-1763080738699119840],
[33374693352, -8410422724704, 532660105897920,
-14914482965141760, 231343968720664800, -2239409617216035264,
14527452132196331328, -66072377044391477760, 216799987176909536400,
-521925895055522958000, 928414062734059661760, -1217424500995626443520,
1161358898976091015200, -783401860847777371200, 354015418167362952000,
-96120549902411274240, 11851820521255194480],
[-143034400080, 36616806420480, -2348052711713280, 66409571644416000,
-1038687206500944000, 10124803292907663360, -66072377044391477760,
302045152202932469760, -995510145200094810000, 2405996923185123840000,
-4294704507885446054400, 5649058909023744614400,
-5403874060541811254400, 3654352703663101440000,
-1655137020003255360000, 450325202737117593600, -55630994283442749600],
[446982500250, -115857864064800, 7504429831470000, -214015221119700000,
3370739732635275000, -33052510749726468000, 216799987176909536400,
-995510145200094810000, 3293967392206196062500,
-7988661659013106500000, 14303908928401362270000,
-18866974090684772052000, 18093328327706957325000,
-12263364009096700500000, 5565847995255512250000,
-1517208935002984080000, 187754605706619279900],
[-1033026222800, 270465047424000, -17664748409880000,
507295338950400000, -8037460526495400000, 79217210949138662400,
-521925895055522958000, 2405996923185123840000,
-7988661659013106500000, 19434404971634224000000,
-34894474126569249192000, 46141453390504792320000,
-44349976506971935800000, 30121928988527376000000,
-13697025107665828500000, 3740200989399948902400,
-463591619028689580000],
[1774926873720, -468580694662080,
30818191841236800, -890303319857952000, 14178080368737885600,
-140362995650505067440, 928414062734059661760, -4294704507885446054400,
14303908928401362270000, -34894474126569249192000,
62810053427824648545600, -83243376594051600326400,
80177044485212743068000, -54558343880470209780000,
24851882355348879230400, -6797096028813368678400, 843736746632215035600],
[-2258997839280, 600545887119360, -39732544853164800,
1153715376477081600, -18454939322943942000, 183420385176741672960,
-1217424500995626443520, 5649058909023744614400,
-18866974090684772052000, 46141453390504792320000,
-83243376594051600326400, 110552468520163390156800,
-106681852579497947388000, 72720410752415168870400,
-33177973900974346080000, 9087761081682520473600,
-1129631016152221783200],
[2099709530100, -561522320049600, 37341234283298400,
-1089119333262870000, 17489975175339030000, -174433352415381259200,
1161358898976091015200, -5403874060541811254400,
18093328327706957325000, -44349976506971935800000,
80177044485212743068000, -106681852579497947388000,
103125790826848015808400, -70409051543137015800000,
32171029219823375700000, -8824053728865840192000,
1098252376814660067000],
[-1384423866000, 372133135180800,
-24857330514030000, 727848632044800000, -11728977435138600000,
117339159519533952000, -783401860847777371200, 3654352703663101440000,
-12263364009096700500000, 30121928988527376000000,
-54558343880470209780000, 72720410752415168870400,
-70409051543137015800000, 48142941226076592000000,
-22027500987368499000000, 6049545098753157120000,
-753830033789944188000],
[613101997800, -165537539406000,
11100752642520000, -326170262829600000, 5272370630081100000,
-52892422160973595200, 354015418167362952000, -1655137020003255360000,
5565847995255512250000, -13697025107665828500000,
24851882355348879230400, -33177973900974346080000,
32171029219823375700000, -22027500987368499000000,
10091416708498869000000, -2774765838662800128000, 346146444087219270000],
[-163493866080, 44316454993920, -2982128117299200, 87894302404608000,
-1424711708039692800, 14328529177999196160, -96120549902411274240,
450325202737117593600, -1517208935002984080000, 3740200989399948902400,
-6797096028813368678400, 9087761081682520473600,
-8824053728865840192000, 6049545098753157120000,
-2774765838662800128000, 763806510427609497600, -95382575704033754400],
[19835652870, -5395297580640, 364182586693200, -10763618673376800,
174908803442373000, -1763080738699119840, 11851820521255194480,
-55630994283442749600, 187754605706619279900, -463591619028689580000,
843736746632215035600, -1129631016152221783200, 1098252376814660067000,
-753830033789944188000, 346146444087219270000, -95382575704033754400,
11922821963004219300]
])
assert_array_equal(invhilbert(17, exact=True), invh17)
assert_allclose(invhilbert(17), invh17.astype(float), rtol=1e-12)
def test_inverse(self):
for n in xrange(1, 10):
a = hilbert(n)
b = invhilbert(n)
# The Hilbert matrix is increasingly badly conditioned,
# so take that into account in the test
c = cond(a)
assert_allclose(a.dot(b), eye(n), atol=1e-15*c, rtol=1e-15*c)
class TestPascal(TestCase):
cases = [
(1, array([[1]]), array([[1]])),
(2, array([[1, 1],
[1, 2]]),
array([[1, 0],
[1, 1]])),
(3, array([[1, 1, 1],
[1, 2, 3],
[1, 3, 6]]),
array([[1, 0, 0],
[1, 1, 0],
[1, 2, 1]])),
(4, array([[1, 1, 1, 1],
[1, 2, 3, 4],
[1, 3, 6, 10],
[1, 4, 10, 20]]),
array([[1, 0, 0, 0],
[1, 1, 0, 0],
[1, 2, 1, 0],
[1, 3, 3, 1]])),
]
def check_case(self, n, sym, low):
assert_array_equal(pascal(n), sym)
assert_array_equal(pascal(n, kind='lower'), low)
assert_array_equal(pascal(n, kind='upper'), low.T)
assert_array_almost_equal(pascal(n, exact=False), sym)
assert_array_almost_equal(pascal(n, exact=False, kind='lower'), low)
assert_array_almost_equal(pascal(n, exact=False, kind='upper'), low.T)
def test_cases(self):
for n, sym, low in self.cases:
self.check_case(n, sym, low)
def test_big(self):
p = pascal(50)
assert_equal(p[-1, -1], comb(98, 49, exact=True))
def test_threshold(self):
# Regression test. An early version of `pascal` returned an
# array of type np.uint64 for n=35, but that data type is too small
# to hold p[-1, -1]. The second assert_equal below would fail
# because p[-1, -1] overflowed.
p = pascal(34)
assert_equal(2*p.item(-1, -2), p.item(-1, -1), err_msg="n = 34")
p = pascal(35)
assert_equal(2*p.item(-1, -2), p.item(-1, -1), err_msg="n = 35")
def test_invpascal():
def check_invpascal(n, kind, exact):
ip = invpascal(n, kind=kind, exact=exact)
p = pascal(n, kind=kind, exact=exact)
# Matrix-multiply ip and p, and check that we get the identity matrix.
# We can't use the simple expression e = ip.dot(p), because when
# n < 35 and exact is True, p.dtype is np.uint64 and ip.dtype is
# np.int64. The product of those dtypes is np.float64, which loses
# precision when n is greater than 18. Instead we'll cast both to
# object arrays, and then multiply.
e = ip.astype(object).dot(p.astype(object))
assert_array_equal(e, eye(n), err_msg="n=%d kind=%r exact=%r" %
(n, kind, exact))
kinds = ['symmetric', 'lower', 'upper']
ns = [1, 2, 5, 18]
for n in ns:
for kind in kinds:
for exact in [True, False]:
yield check_invpascal, n, kind, exact
ns = [19, 34, 35, 50]
for n in ns:
for kind in kinds:
yield check_invpascal, n, kind, True
def test_dft():
m = dft(2)
expected = array([[1.0, 1.0], [1.0, -1.0]])
yield (assert_array_almost_equal, m, expected)
m = dft(2, scale='n')
yield (assert_array_almost_equal, m, expected/2.0)
m = dft(2, scale='sqrtn')
yield (assert_array_almost_equal, m, expected/sqrt(2.0))
x = array([0, 1, 2, 3, 4, 5, 0, 1])
m = dft(8)
mx = m.dot(x)
fx = fft(x)
yield (assert_array_almost_equal, mx, fx)
if __name__ == "__main__":
run_module_suite()
| bsd-3-clause |
leedm777/ansible-modules-core | files/acl.py | 47 | 9778 | #!/usr/bin/python
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: acl
version_added: "1.4"
short_description: Sets and retrieves file ACL information.
description:
- Sets and retrieves file ACL information.
options:
name:
required: true
default: null
description:
- The full path of the file or object.
aliases: ['path']
state:
required: false
default: query
choices: [ 'query', 'present', 'absent' ]
description:
- defines whether the ACL should be present or not. The C(query) state gets the current acl without changing it, for use in 'register' operations.
follow:
required: false
default: yes
choices: [ 'yes', 'no' ]
description:
- whether to follow symlinks on the path if a symlink is encountered.
default:
version_added: "1.5"
required: false
default: no
choices: [ 'yes', 'no' ]
description:
- if the target is a directory, setting this to yes will make it the default acl for entities created inside the directory. It causes an error if name is a file.
entity:
version_added: "1.5"
required: false
description:
- the actual user or group that the ACL applies to when entity type user or group is selected.
etype:
version_added: "1.5"
required: false
default: null
choices: [ 'user', 'group', 'mask', 'other' ]
description:
- the entity type of the ACL to apply, see setfacl documentation for more info.
permissions:
version_added: "1.5"
required: false
default: null
description:
- Permissions to apply/remove can be any combination of r, w and x (read, write and execute respectively)
entry:
required: false
default: null
description:
- DEPRECATED. The acl to set or remove. This must always be quoted in the form of '<etype>:<qualifier>:<perms>'. The qualifier may be empty for some types, but the type and perms are always required. '-' can be used as a placeholder when you do not care about permissions. This is now superseded by entity, type and permissions fields.
author: "Brian Coca (@bcoca)"
notes:
- The "acl" module requires that acls are enabled on the target filesystem and that the setfacl and getfacl binaries are installed.
'''
EXAMPLES = '''
# Grant user Joe read access to a file
- acl: name=/etc/foo.conf entity=joe etype=user permissions="r" state=present
# Removes the acl for Joe on a specific file
- acl: name=/etc/foo.conf entity=joe etype=user state=absent
# Sets default acl for joe on foo.d
- acl: name=/etc/foo.d entity=joe etype=user permissions=rw default=yes state=present
# Same as previous but using entry shorthand
- acl: name=/etc/foo.d entry="default:user:joe:rw-" state=present
# Obtain the acl for a specific file
- acl: name=/etc/foo.conf
register: acl_info
'''
RETURN = '''
acl:
description: Current acl on provided path (after changes, if any)
returned: success
type: list
sample: [ "user::rwx", "group::rwx", "other::rwx" ]
'''
def normalize_permissions(p):
perms = ['-','-','-']
for char in p:
if char == 'r':
perms[0] = 'r'
if char == 'w':
perms[1] = 'w'
if char == 'x':
perms[2] = 'x'
if char == 'X':
if perms[2] != 'x': # 'x' is more permissive
perms[2] = 'X'
return ''.join(perms)
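# Illustrative examples (not from the original module): normalize_permissions
# canonicalizes any combination of r/w/x/X into fixed 'rwx' slots, e.g.
#   normalize_permissions('wXr') -> 'rwX'
#   normalize_permissions('Xx')  -> '--x'  ('x' wins over the weaker 'X')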
def split_entry(entry):
''' splits entry and ensures normalized return'''
a = entry.split(':')
a.reverse()
if len(a) == 3:
a.append(False)
try:
p,e,t,d = a
except ValueError, e:
print "failed to split ACL entry '%s' into fields: %s" % (entry, a)
raise e
if d:
d = True
if t.startswith("u"):
t = "user"
elif t.startswith("g"):
t = "group"
elif t.startswith("m"):
t = "mask"
elif t.startswith("o"):
t = "other"
else:
t = None
p = normalize_permissions(p)
return [d,t,e,p]
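# Illustrative examples (not from the original module): split_entry returns
# [default, etype, entity, permissions], e.g.
#   split_entry('default:user:joe:rw-') -> [True, 'user', 'joe', 'rw-']
#   split_entry('user:joe:r')           -> [False, 'user', 'joe', 'r--']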
def get_acls(module,path,follow):
cmd = [ module.get_bin_path('getfacl', True) ]
if not follow:
cmd.append('-h')
# prevents absolute path warnings and removes headers
cmd.append('--omit-header')
cmd.append('--absolute-names')
cmd.append(path)
return _run_acl(module,cmd)
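# With --omit-header, getfacl typically prints one entry per line, e.g.
# 'user::rwx', 'user:joe:r--', 'group::r-x', 'other::r--', plus a trailing
# blank line that _run_acl trims below.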
def set_acl(module,path,entry,follow,default):
cmd = [ module.get_bin_path('setfacl', True) ]
if not follow:
cmd.append('-h')
if default:
cmd.append('-d')
cmd.append('-m "%s"' % entry)
cmd.append(path)
return _run_acl(module,cmd)
def rm_acl(module,path,entry,follow,default):
cmd = [ module.get_bin_path('setfacl', True) ]
if not follow:
cmd.append('-h')
if default:
cmd.append('-k')
entry = entry[0:entry.rfind(':')]
cmd.append('-x "%s"' % entry)
cmd.append(path)
return _run_acl(module,cmd,False)
def _run_acl(module,cmd,check_rc=True):
try:
(rc, out, err) = module.run_command(' '.join(cmd), check_rc=check_rc)
except Exception, e:
module.fail_json(msg=e.strerror)
# trim last line as it is always empty
ret = out.splitlines()
return ret[0:len(ret)-1]
def main():
module = AnsibleModule(
argument_spec = dict(
name = dict(required=True,aliases=['path'], type='str'),
entry = dict(required=False, type='str'),
entity = dict(required=False, type='str', default=''),
etype = dict(required=False, choices=['other', 'user', 'group', 'mask'], type='str'),
permissions = dict(required=False, type='str'),
state = dict(required=False, default='query', choices=[ 'query', 'present', 'absent' ], type='str'),
follow = dict(required=False, type='bool', default=True),
default= dict(required=False, type='bool', default=False),
),
supports_check_mode=True,
)
path = os.path.expanduser(module.params.get('name'))
entry = module.params.get('entry')
entity = module.params.get('entity')
etype = module.params.get('etype')
permissions = module.params.get('permissions')
state = module.params.get('state')
follow = module.params.get('follow')
default = module.params.get('default')
if permissions:
permissions = normalize_permissions(permissions)
if not os.path.exists(path):
module.fail_json(msg="path not found or not accessible!")
if state in ['present','absent']:
if not entry and not etype:
module.fail_json(msg="%s requires either etype and permissions or just entry be set" % state)
if entry:
if etype or entity or permissions:
module.fail_json(msg="entry and another incompatible field (entity, etype or permissions) are also set")
if entry.count(":") not in [2,3]:
module.fail_json(msg="Invalid entry: '%s', it requires 3 or 4 sections divided by ':'" % entry)
default, etype, entity, permissions = split_entry(entry)
changed=False
msg = ""
currentacls = get_acls(module,path,follow)
if (state == 'present'):
matched = False
for oldentry in currentacls:
if oldentry.count(":") == 0:
continue
old_default, old_type, old_entity, old_permissions = split_entry(oldentry)
if old_default == default:
if old_type == etype:
if etype in ['user', 'group']:
if old_entity == entity:
matched = True
if not old_permissions == permissions:
changed = True
break
else:
matched = True
if not old_permissions == permissions:
changed = True
break
if not matched:
changed=True
if changed and not module.check_mode:
set_acl(module,path,':'.join([etype, str(entity), permissions]),follow,default)
msg="%s is present" % ':'.join([etype, str(entity), permissions])
elif state == 'absent':
for oldentry in currentacls:
if oldentry.count(":") == 0:
continue
old_default, old_type, old_entity, old_permissions = split_entry(oldentry)
if old_default == default:
if old_type == etype:
if etype in ['user', 'group']:
if old_entity == entity:
changed=True
break
else:
changed=True
break
if changed and not module.check_mode:
rm_acl(module,path,':'.join([etype, entity, '---']),follow,default)
msg="%s is absent" % ':'.join([etype, entity, '---'])
else:
msg="current acl"
if changed:
currentacls = get_acls(module,path,follow)
module.exit_json(changed=changed, msg=msg, acl=currentacls)
# import module snippets
from ansible.module_utils.basic import *
main()
| gpl-3.0 |
tashaxe/Red-DiscordBot | lib/discord/ext/commands/bot.py | 17 | 27409 | # -*- coding: utf-8 -*-
"""
The MIT License (MIT)
Copyright (c) 2015-2016 Rapptz
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
"""
import asyncio
import discord
import inspect
import importlib
import sys
import traceback
import re
from .core import GroupMixin, Command, command
from .view import StringView
from .context import Context
from .errors import CommandNotFound, CommandError
from .formatter import HelpFormatter
def _get_variable(name):
stack = inspect.stack()
try:
for frames in stack:
try:
frame = frames[0]
current_locals = frame.f_locals
if name in current_locals:
return current_locals[name]
finally:
del frame
finally:
del stack
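# Illustrative note: _get_variable walks the caller's stack frames looking
# for a local with the given name. process_commands() below defines locals
# named _internal_channel and _internal_author, which is how say(),
# whisper() and reply() resolve their destinations without an explicit
# context argument.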
def when_mentioned(bot, msg):
"""A callable that implements a command prefix equivalent
to being mentioned, e.g. ``@bot ``."""
server = msg.server
if server is not None:
return '{0.me.mention} '.format(server)
return '{0.user.mention} '.format(bot)
def when_mentioned_or(*prefixes):
"""A callable that implements when mentioned or other prefixes provided.
Example
--------
.. code-block:: python
bot = commands.Bot(command_prefix=commands.when_mentioned_or('!'))
See Also
----------
:func:`when_mentioned`
"""
def inner(bot, msg):
r = list(prefixes)
r.append(when_mentioned(bot, msg))
return r
return inner
_mentions_transforms = {
'@everyone': '@\u200beveryone',
'@here': '@\u200bhere'
}
_mention_pattern = re.compile('|'.join(_mentions_transforms.keys()))
@asyncio.coroutine
def _default_help_command(ctx, *commands : str):
"""Shows this message."""
bot = ctx.bot
destination = ctx.message.author if bot.pm_help else ctx.message.channel
def repl(obj):
return _mentions_transforms.get(obj.group(0), '')
# help by itself just lists our own commands.
if len(commands) == 0:
pages = bot.formatter.format_help_for(ctx, bot)
elif len(commands) == 1:
# try to see if it is a cog name
name = _mention_pattern.sub(repl, commands[0])
command = None
if name in bot.cogs:
command = bot.cogs[name]
else:
command = bot.commands.get(name)
if command is None:
yield from bot.send_message(destination, bot.command_not_found.format(name))
return
pages = bot.formatter.format_help_for(ctx, command)
else:
name = _mention_pattern.sub(repl, commands[0])
command = bot.commands.get(name)
if command is None:
yield from bot.send_message(destination, bot.command_not_found.format(name))
return
for key in commands[1:]:
try:
key = _mention_pattern.sub(repl, key)
command = command.commands.get(key)
if command is None:
yield from bot.send_message(destination, bot.command_not_found.format(key))
return
except AttributeError:
yield from bot.send_message(destination, bot.command_has_no_subcommands.format(command, key))
return
pages = bot.formatter.format_help_for(ctx, command)
if bot.pm_help is None:
characters = sum(map(lambda l: len(l), pages))
# modify destination based on length of pages.
if characters > 1000:
destination = ctx.message.author
for page in pages:
yield from bot.send_message(destination, page)
class Bot(GroupMixin, discord.Client):
"""Represents a discord bot.
This class is a subclass of :class:`discord.Client` and as a result
anything that you can do with a :class:`discord.Client` you can do with
this bot.
This class also subclasses :class:`GroupMixin` to provide the functionality
to manage commands.
Attributes
-----------
command_prefix
The command prefix is what the message content must contain initially
to have a command invoked. This prefix could either be a string to
indicate what the prefix should be, or a callable that takes in the bot
as its first parameter and :class:`discord.Message` as its second
parameter and returns the prefix. This is to facilitate "dynamic"
command prefixes. This callable can be either a regular function or
a coroutine.
The command prefix could also be a list or a tuple indicating that
multiple checks for the prefix should be used and the first one to
match will be the invocation prefix. You can get this prefix via
:attr:`Context.prefix`.
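A minimal sketch of a callable prefix (the private-message check is
purely illustrative):
.. code-block:: python
def get_prefix(bot, message):
# use '?' in private messages, '!' in servers
return '?' if message.server is None else '!'
bot = commands.Bot(command_prefix=get_prefix)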
description : str
The content prefixed into the default help message.
self_bot : bool
If ``True``, the bot will only listen to commands invoked by itself rather
than ignoring itself. If ``False`` (the default) then the bot will ignore
itself. This cannot be changed once initialised.
formatter : :class:`HelpFormatter`
The formatter used to format the help message. By default, it uses
the :class:`HelpFormatter`. Check it for more info on how to override it.
If you want to change the help command completely (add aliases, etc) then
a call to :meth:`remove_command` with 'help' as the argument would do the
trick.
pm_help : Optional[bool]
A tribool that indicates if the help command should PM the user instead of
sending it to the channel it received it from. If the boolean is set to
``True``, then all help output is PM'd. If ``False``, none of the help
output is PM'd. If ``None``, then the bot will only PM when the help
message becomes too long (dictated by more than 1000 characters).
Defaults to ``False``.
help_attrs : dict
A dictionary of options to pass in for the construction of the help command.
This allows you to change the command behaviour without actually changing
the implementation of the command. The attributes will be the same as the
ones passed in the :class:`Command` constructor. Note that ``pass_context``
will always be set to ``True`` regardless of what you pass in.
command_not_found : str
The format string used when the help command is invoked with a command that
is not found. Useful for i18n. Defaults to ``"No command called {} found."``.
The only format argument is the name of the command passed.
command_has_no_subcommands : str
The format string used when the help command is invoked with requests for a
subcommand but the command does not have any subcommands. Defaults to
``"Command {0.name} has no subcommands."``. The first format argument is the
:class:`Command` attempted to get a subcommand and the second is the name.
"""
def __init__(self, command_prefix, formatter=None, description=None, pm_help=False, **options):
super().__init__(**options)
self.command_prefix = command_prefix
self.extra_events = {}
self.cogs = {}
self.extensions = {}
self._checks = []
self.description = inspect.cleandoc(description) if description else ''
self.pm_help = pm_help
self.command_not_found = options.pop('command_not_found', 'No command called "{}" found.')
self.command_has_no_subcommands = options.pop('command_has_no_subcommands', 'Command {0.name} has no subcommands.')
self._skip_check = discord.User.__ne__ if options.pop('self_bot', False) else discord.User.__eq__
self.help_attrs = options.pop('help_attrs', {})
self.help_attrs['pass_context'] = True
if 'name' not in self.help_attrs:
self.help_attrs['name'] = 'help'
if formatter is not None:
if not isinstance(formatter, HelpFormatter):
raise discord.ClientException('Formatter must be a subclass of HelpFormatter')
self.formatter = formatter
else:
self.formatter = HelpFormatter()
# pay no mind to this ugliness.
self.command(**self.help_attrs)(_default_help_command)
# internal helpers
@asyncio.coroutine
def _get_prefix(self, message):
prefix = self.command_prefix
if callable(prefix):
ret = prefix(self, message)
if asyncio.iscoroutine(ret):
ret = yield from ret
return ret
else:
return prefix
@asyncio.coroutine
def _run_extra(self, coro, event_name, *args, **kwargs):
try:
yield from coro(*args, **kwargs)
except asyncio.CancelledError:
pass
except Exception:
try:
yield from self.on_error(event_name, *args, **kwargs)
except asyncio.CancelledError:
pass
def dispatch(self, event_name, *args, **kwargs):
super().dispatch(event_name, *args, **kwargs)
ev = 'on_' + event_name
if ev in self.extra_events:
for event in self.extra_events[ev]:
coro = self._run_extra(event, event_name, *args, **kwargs)
discord.compat.create_task(coro, loop=self.loop)
@asyncio.coroutine
def close(self):
for extension in tuple(self.extensions):
try:
self.unload_extension(extension)
except:
pass
for cog in tuple(self.cogs):
try:
self.remove_cog(cog)
except:
pass
yield from super().close()
@asyncio.coroutine
def on_command_error(self, exception, context):
"""|coro|
The default command error handler provided by the bot.
By default this prints to ``sys.stderr`` however it could be
overridden to have a different implementation.
This only fires if you do not specify any listeners for command error.
"""
if self.extra_events.get('on_command_error', None):
return
if hasattr(context.command, "on_error"):
return
print('Ignoring exception in command {}'.format(context.command), file=sys.stderr)
traceback.print_exception(type(exception), exception, exception.__traceback__, file=sys.stderr)
# utility "send_*" functions
@asyncio.coroutine
def _augmented_msg(self, coro, **kwargs):
msg = yield from coro
delete_after = kwargs.get('delete_after')
if delete_after is not None:
@asyncio.coroutine
def delete():
yield from asyncio.sleep(delete_after, loop=self.loop)
yield from self.delete_message(msg)
discord.compat.create_task(delete(), loop=self.loop)
return msg
def say(self, *args, **kwargs):
"""|coro|
A helper function that is equivalent to doing
.. code-block:: python
self.send_message(message.channel, *args, **kwargs)
The following keyword arguments are "extensions" that augment the
behaviour of the standard wrapped call.
Parameters
------------
delete_after: float
Number of seconds to wait before automatically deleting the
message.
See Also
---------
:meth:`Client.send_message`
"""
destination = _get_variable('_internal_channel')
extensions = ('delete_after',)
params = {
k: kwargs.pop(k, None) for k in extensions
}
coro = self.send_message(destination, *args, **kwargs)
return self._augmented_msg(coro, **params)
def whisper(self, *args, **kwargs):
"""|coro|
A helper function that is equivalent to doing
.. code-block:: python
self.send_message(message.author, *args, **kwargs)
The following keyword arguments are "extensions" that augment the
behaviour of the standard wrapped call.
Parameters
------------
delete_after: float
Number of seconds to wait before automatically deleting the
message.
See Also
---------
:meth:`Client.send_message`
"""
destination = _get_variable('_internal_author')
extensions = ('delete_after',)
params = {
k: kwargs.pop(k, None) for k in extensions
}
coro = self.send_message(destination, *args, **kwargs)
return self._augmented_msg(coro, **params)
def reply(self, content, *args, **kwargs):
"""|coro|
A helper function that is equivalent to doing
.. code-block:: python
msg = '{0.mention}, {1}'.format(message.author, content)
self.send_message(message.channel, msg, *args, **kwargs)
The following keyword arguments are "extensions" that augment the
behaviour of the standard wrapped call.
Parameters
------------
delete_after: float
Number of seconds to wait before automatically deleting the
message.
See Also
---------
:meth:`Client.send_message`
"""
author = _get_variable('_internal_author')
destination = _get_variable('_internal_channel')
fmt = '{0.mention}, {1}'.format(author, str(content))
extensions = ('delete_after',)
params = {
k: kwargs.pop(k, None) for k in extensions
}
coro = self.send_message(destination, fmt, *args, **kwargs)
return self._augmented_msg(coro, **params)
def upload(self, *args, **kwargs):
"""|coro|
A helper function that is equivalent to doing
.. code-block:: python
self.send_file(message.channel, *args, **kwargs)
The following keyword arguments are "extensions" that augment the
behaviour of the standard wrapped call.
Parameters
------------
delete_after: float
Number of seconds to wait before automatically deleting the
message.
See Also
---------
:meth:`Client.send_file`
"""
destination = _get_variable('_internal_channel')
extensions = ('delete_after',)
params = {
k: kwargs.pop(k, None) for k in extensions
}
coro = self.send_file(destination, *args, **kwargs)
return self._augmented_msg(coro, **params)
def type(self):
"""|coro|
A helper function that is equivalent to doing
.. code-block:: python
self.send_typing(message.channel)
See Also
---------
The :meth:`Client.send_typing` function.
"""
destination = _get_variable('_internal_channel')
return self.send_typing(destination)
# global check registration
def check(self, func):
"""A decorator that adds a global check to the bot.
A global check is similar to a :func:`check` that is applied
on a per command basis except it is run before any command checks
have been verified and applies to every command the bot has.
.. warning::
This function must be a *regular* function and not a coroutine.
Similar to a command :func:`check`\, this takes a single parameter
of type :class:`Context` and can only raise exceptions derived from
:exc:`CommandError`.
Example
---------
.. code-block:: python
@bot.check
def whitelist(ctx):
return ctx.message.author.id in my_whitelist
"""
self.add_check(func)
return func
def add_check(self, func):
"""Adds a global check to the bot.
This is the non-decorator interface to :meth:`check`.
Parameters
-----------
func
The function that was used as a global check.
"""
self._checks.append(func)
def remove_check(self, func):
"""Removes a global check from the bot.
This function is idempotent and will not raise an exception
if the function is not in the global checks.
Parameters
-----------
func
The function to remove from the global checks.
"""
try:
self._checks.remove(func)
except ValueError:
pass
def can_run(self, ctx):
return all(f(ctx) for f in self._checks)
# listener registration
def add_listener(self, func, name=None):
"""The non decorator alternative to :meth:`listen`.
Parameters
-----------
func : coroutine
The extra event to listen to.
name : Optional[str]
The name of the command to use. Defaults to ``func.__name__``.
Example
--------
.. code-block:: python
async def on_ready(): pass
async def my_message(message): pass
bot.add_listener(on_ready)
bot.add_listener(my_message, 'on_message')
"""
name = func.__name__ if name is None else name
if not asyncio.iscoroutinefunction(func):
raise discord.ClientException('Listeners must be coroutines')
if name in self.extra_events:
self.extra_events[name].append(func)
else:
self.extra_events[name] = [func]
def remove_listener(self, func, name=None):
"""Removes a listener from the pool of listeners.
Parameters
-----------
func
The function that was used as a listener to remove.
name
The name of the event we want to remove. Defaults to
``func.__name__``.
"""
name = func.__name__ if name is None else name
if name in self.extra_events:
try:
self.extra_events[name].remove(func)
except ValueError:
pass
def listen(self, name=None):
"""A decorator that registers another function as an external
event listener. Basically this allows you to listen to multiple
events from different places, e.g. :func:`discord.on_ready`.
The functions being listened to must be coroutines.
Example
--------
.. code-block:: python
@bot.listen()
async def on_message(message):
print('one')
# in some other file...
@bot.listen('on_message')
async def my_message(message):
print('two')
Would print one and two in an unspecified order.
Raises
-------
discord.ClientException
The function being listened to is not a coroutine.
"""
def decorator(func):
self.add_listener(func, name)
return func
return decorator
# cogs
def add_cog(self, cog):
"""Adds a "cog" to the bot.
A cog is a class that has its own event listeners and commands.
They are meant as a way to organize multiple relevant commands
into a singular class that shares some state or no state at all.
The cog can also have a ``__check`` member function that allows
you to define a global check. See :meth:`check` for more info.
More information will be documented soon.
Parameters
-----------
cog
The cog to register to the bot.
"""
self.cogs[type(cog).__name__] = cog
try:
check = getattr(cog, '_{.__class__.__name__}__check'.format(cog))
except AttributeError:
pass
else:
self.add_check(check)
members = inspect.getmembers(cog)
for name, member in members:
# register commands the cog has
if isinstance(member, Command):
if member.parent is None:
self.add_command(member)
continue
# register event listeners the cog has
if name.startswith('on_'):
self.add_listener(member)
def get_cog(self, name):
"""Gets the cog instance requested.
If the cog is not found, ``None`` is returned instead.
Parameters
-----------
name : str
The name of the cog you are requesting.
"""
return self.cogs.get(name)
def remove_cog(self, name):
"""Removes a cog from the bot.
All registered commands and event listeners that the
cog has registered will be removed as well.
If no cog is found then ``None`` is returned, otherwise
the cog instance that is being removed is returned.
If the cog defines a special member function named ``__unload``
then it is called when removal has completed. This function
**cannot** be a coroutine. It must be a regular function.
Parameters
-----------
name : str
The name of the cog to remove.
"""
cog = self.cogs.pop(name, None)
if cog is None:
return cog
members = inspect.getmembers(cog)
for name, member in members:
# remove commands the cog has
if isinstance(member, Command):
if member.parent is None:
self.remove_command(member.name)
continue
# remove event listeners the cog has
if name.startswith('on_'):
self.remove_listener(member)
try:
check = getattr(cog, '_{0.__class__.__name__}__check'.format(cog))
except AttributeError:
pass
else:
self.remove_check(check)
unloader_name = '_{0.__class__.__name__}__unload'.format(cog)
try:
unloader = getattr(cog, unloader_name)
except AttributeError:
pass
else:
unloader()
del cog
# extensions
def load_extension(self, name):
if name in self.extensions:
return
lib = importlib.import_module(name)
if not hasattr(lib, 'setup'):
del lib
del sys.modules[name]
raise discord.ClientException('extension does not have a setup function')
lib.setup(self)
self.extensions[name] = lib
def unload_extension(self, name):
lib = self.extensions.get(name)
if lib is None:
return
# find all references to the module
# remove the cogs registered from the module
for cogname, cog in self.cogs.copy().items():
if inspect.getmodule(cog) is lib:
self.remove_cog(cogname)
# first remove all the commands from the module
for command in self.commands.copy().values():
if command.module is lib:
command.module = None
if isinstance(command, GroupMixin):
command.recursively_remove_all_commands()
self.remove_command(command.name)
# then remove all the listeners from the module
for event_list in self.extra_events.copy().values():
remove = []
for index, event in enumerate(event_list):
if inspect.getmodule(event) is lib:
remove.append(index)
for index in reversed(remove):
del event_list[index]
try:
func = getattr(lib, 'teardown')
except AttributeError:
pass
else:
try:
func(self)
except:
pass
finally:
# finally remove the import..
del lib
del self.extensions[name]
del sys.modules[name]
# command processing
@asyncio.coroutine
def process_commands(self, message):
"""|coro|
This function processes the commands that have been registered
to the bot and other groups. Without this coroutine, none of the
commands will be triggered.
By default, this coroutine is called inside the :func:`on_message`
event. If you choose to override the :func:`on_message` event, then
you should invoke this coroutine as well.
Warning
--------
This function is necessary for :meth:`say`, :meth:`whisper`,
:meth:`type`, :meth:`reply`, and :meth:`upload` to work due to the
way they are written. It is also required for the :func:`on_command`
and :func:`on_command_completion` events.
Parameters
-----------
message : discord.Message
The message to process commands for.
"""
_internal_channel = message.channel
_internal_author = message.author
view = StringView(message.content)
if self._skip_check(message.author, self.user):
return
prefix = yield from self._get_prefix(message)
invoked_prefix = prefix
if not isinstance(prefix, (tuple, list)):
if not view.skip_string(prefix):
return
else:
invoked_prefix = discord.utils.find(view.skip_string, prefix)
if invoked_prefix is None:
return
invoker = view.get_word()
tmp = {
'bot': self,
'invoked_with': invoker,
'message': message,
'view': view,
'prefix': invoked_prefix
}
ctx = Context(**tmp)
del tmp
if invoker in self.commands:
command = self.commands[invoker]
self.dispatch('command', command, ctx)
try:
yield from command.invoke(ctx)
except CommandError as e:
ctx.command.dispatch_error(e, ctx)
else:
self.dispatch('command_completion', command, ctx)
elif invoker:
exc = CommandNotFound('Command "{}" is not found'.format(invoker))
self.dispatch('command_error', exc, ctx)
@asyncio.coroutine
def on_message(self, message):
yield from self.process_commands(message)
| gpl-3.0 |
slank/ansible | lib/ansible/modules/system/hostname.py | 12 | 23676 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2013, Hiroaki Nakamura <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'committer',
'version': '1.0'}
DOCUMENTATION = '''
---
module: hostname
author:
- "Adrian Likins (@alikins)"
- "Hideki Saito (@saito-hideki)"
version_added: "1.4"
short_description: Manage hostname
requirements: [ hostname ]
description:
- Set system's hostname.
- Currently implemented on Debian, Ubuntu, Fedora, RedHat, openSUSE, Linaro, ScientificLinux, Arch, CentOS, AMI, Alpine Linux.
- Any distribution that uses systemd as its init system.
- Note, this module does *NOT* modify /etc/hosts. You need to modify it yourself using other modules like template or replace.
options:
name:
required: true
description:
- Name of the host
'''
EXAMPLES = '''
- hostname:
name: web01
'''
import socket
from distutils.version import LooseVersion
# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.facts import *
from ansible.module_utils._text import to_bytes, to_native
class UnimplementedStrategy(object):
def __init__(self, module):
self.module = module
def update_current_and_permanent_hostname(self):
self.unimplemented_error()
def update_current_hostname(self):
self.unimplemented_error()
def update_permanent_hostname(self):
self.unimplemented_error()
def get_current_hostname(self):
self.unimplemented_error()
def set_current_hostname(self, name):
self.unimplemented_error()
def get_permanent_hostname(self):
self.unimplemented_error()
def set_permanent_hostname(self, name):
self.unimplemented_error()
def unimplemented_error(self):
platform = get_platform()
distribution = get_distribution()
if distribution is not None:
msg_platform = '%s (%s)' % (platform, distribution)
else:
msg_platform = platform
self.module.fail_json(
msg='hostname module cannot be used on platform %s' % msg_platform)
class Hostname(object):
"""
This is a generic Hostname manipulation class that is subclassed
based on platform.
A subclass may wish to set different strategy instance to self.strategy.
All subclasses MUST define platform and distribution (which may be None).
"""
platform = 'Generic'
distribution = None
strategy_class = UnimplementedStrategy
def __new__(cls, *args, **kwargs):
return load_platform_subclass(Hostname, args, kwargs)
def __init__(self, module):
self.module = module
self.name = module.params['name']
if self.platform == 'Linux' and Facts(module).is_systemd_managed():
self.strategy = SystemdStrategy(module)
else:
self.strategy = self.strategy_class(module)
def update_current_and_permanent_hostname(self):
return self.strategy.update_current_and_permanent_hostname()
def get_current_hostname(self):
return self.strategy.get_current_hostname()
def set_current_hostname(self, name):
self.strategy.set_current_hostname(name)
def get_permanent_hostname(self):
return self.strategy.get_permanent_hostname()
def set_permanent_hostname(self, name):
self.strategy.set_permanent_hostname(name)
class GenericStrategy(object):
"""
This is a generic Hostname manipulation strategy class.
A subclass may wish to override some or all of these methods.
- get_current_hostname()
- get_permanent_hostname()
- set_current_hostname(name)
- set_permanent_hostname(name)
"""
def __init__(self, module):
self.module = module
self.hostname_cmd = self.module.get_bin_path('hostname', True)
self.changed = False
def update_current_and_permanent_hostname(self):
self.update_current_hostname()
self.update_permanent_hostname()
return self.changed
def update_current_hostname(self):
name = self.module.params['name']
current_name = self.get_current_hostname()
if current_name != name:
self.set_current_hostname(name)
self.changed = True
def update_permanent_hostname(self):
name = self.module.params['name']
permanent_name = self.get_permanent_hostname()
if permanent_name != name:
self.set_permanent_hostname(name)
self.changed = True
def get_current_hostname(self):
cmd = [self.hostname_cmd]
rc, out, err = self.module.run_command(cmd)
if rc != 0:
self.module.fail_json(msg="Command failed rc=%d, out=%s, err=%s" %
(rc, out, err))
return to_native(out).strip()
def set_current_hostname(self, name):
cmd = [self.hostname_cmd, name]
rc, out, err = self.module.run_command(cmd)
if rc != 0:
self.module.fail_json(msg="Command failed rc=%d, out=%s, err=%s" %
(rc, out, err))
def get_permanent_hostname(self):
return None
def set_permanent_hostname(self, name):
pass
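# A minimal sketch (illustrative, not part of the module) of a custom
# strategy: subclasses usually only override the permanent-hostname pair
# and inherit the `hostname` command handling for the current name.
#
# class MyFileStrategy(GenericStrategy):
#     HOSTNAME_FILE = '/etc/my-hostname'  # hypothetical path
#     def get_permanent_hostname(self):
#         f = open(self.HOSTNAME_FILE)
#         try:
#             return f.read().strip()
#         finally:
#             f.close()
#     def set_permanent_hostname(self, name):
#         f = open(self.HOSTNAME_FILE, 'w')
#         try:
#             f.write("%s\n" % name)
#         finally:
#             f.close()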
# ===========================================
class DebianStrategy(GenericStrategy):
"""
This is a Debian family Hostname manipulation strategy class - it edits
the /etc/hostname file.
"""
HOSTNAME_FILE = '/etc/hostname'
def get_permanent_hostname(self):
if not os.path.isfile(self.HOSTNAME_FILE):
try:
open(self.HOSTNAME_FILE, "a").write("")
except IOError:
err = get_exception()
self.module.fail_json(msg="failed to write file: %s" %
str(err))
try:
f = open(self.HOSTNAME_FILE)
try:
return f.read().strip()
finally:
f.close()
except Exception:
err = get_exception()
self.module.fail_json(msg="failed to read hostname: %s" %
str(err))
def set_permanent_hostname(self, name):
try:
f = open(self.HOSTNAME_FILE, 'w+')
try:
f.write("%s\n" % name)
finally:
f.close()
except Exception:
err = get_exception()
self.module.fail_json(msg="failed to update hostname: %s" %
str(err))
# ===========================================
class SLESStrategy(GenericStrategy):
"""
This is a SLES Hostname strategy class - it edits the
/etc/HOSTNAME file.
"""
HOSTNAME_FILE = '/etc/HOSTNAME'
def get_permanent_hostname(self):
if not os.path.isfile(self.HOSTNAME_FILE):
try:
open(self.HOSTNAME_FILE, "a").write("")
except IOError:
err = get_exception()
self.module.fail_json(msg="failed to write file: %s" %
str(err))
try:
f = open(self.HOSTNAME_FILE)
try:
return f.read().strip()
finally:
f.close()
except Exception:
err = get_exception()
self.module.fail_json(msg="failed to read hostname: %s" %
str(err))
def set_permanent_hostname(self, name):
try:
f = open(self.HOSTNAME_FILE, 'w+')
try:
f.write("%s\n" % name)
finally:
f.close()
except Exception:
err = get_exception()
self.module.fail_json(msg="failed to update hostname: %s" %
str(err))
# ===========================================
class RedHatStrategy(GenericStrategy):
"""
This is a Redhat Hostname strategy class - it edits the
/etc/sysconfig/network file.
"""
NETWORK_FILE = '/etc/sysconfig/network'
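# /etc/sysconfig/network is a shell-style KEY=value file; the relevant
# line typically looks like: HOSTNAME=web01.example.com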
def get_permanent_hostname(self):
try:
f = open(self.NETWORK_FILE, 'rb')
try:
for line in f.readlines():
if line.startswith('HOSTNAME'):
k, v = line.split('=')
return v.strip()
finally:
f.close()
except Exception:
err = get_exception()
self.module.fail_json(msg="failed to read hostname: %s" %
str(err))
def set_permanent_hostname(self, name):
try:
lines = []
found = False
f = open(self.NETWORK_FILE, 'rb')
try:
for line in f.readlines():
if line.startswith('HOSTNAME'):
lines.append("HOSTNAME=%s\n" % name)
found = True
else:
lines.append(line)
finally:
f.close()
if not found:
lines.append("HOSTNAME=%s\n" % name)
f = open(self.NETWORK_FILE, 'w+')
try:
f.writelines(lines)
finally:
f.close()
except Exception:
err = get_exception()
self.module.fail_json(msg="failed to update hostname: %s" %
str(err))
# ===========================================
class AlpineStrategy(GenericStrategy):
"""
This is an Alpine Linux Hostname manipulation strategy class - it edits
the /etc/hostname file, then runs hostname -F /etc/hostname.
"""
HOSTNAME_FILE = '/etc/hostname'
def update_current_and_permanent_hostname(self):
self.update_permanent_hostname()
self.update_current_hostname()
return self.changed
def get_permanent_hostname(self):
if not os.path.isfile(self.HOSTNAME_FILE):
try:
open(self.HOSTNAME_FILE, "a").write("")
except IOError:
err = get_exception()
self.module.fail_json(msg="failed to write file: %s" %
str(err))
try:
f = open(self.HOSTNAME_FILE)
try:
return f.read().strip()
finally:
f.close()
except Exception:
err = get_exception()
self.module.fail_json(msg="failed to read hostname: %s" %
str(err))
def set_permanent_hostname(self, name):
try:
f = open(self.HOSTNAME_FILE, 'w+')
try:
f.write("%s\n" % name)
finally:
f.close()
except Exception:
err = get_exception()
self.module.fail_json(msg="failed to update hostname: %s" %
str(err))
def set_current_hostname(self, name):
cmd = [self.hostname_cmd, '-F', self.HOSTNAME_FILE]
rc, out, err = self.module.run_command(cmd)
if rc != 0:
self.module.fail_json(msg="Command failed rc=%d, out=%s, err=%s" %
(rc, out, err))
# ===========================================
class SystemdStrategy(GenericStrategy):
"""
This is a Systemd hostname manipulation strategy class - it uses
the hostnamectl command.
"""
def get_current_hostname(self):
cmd = ['hostname']
rc, out, err = self.module.run_command(cmd)
if rc != 0:
self.module.fail_json(msg="Command failed rc=%d, out=%s, err=%s" %
(rc, out, err))
return to_native(out).strip()
def set_current_hostname(self, name):
if len(name) > 64:
self.module.fail_json(msg="name cannot be longer than 64 characters on systemd servers, try a shorter name")
cmd = ['hostnamectl', '--transient', 'set-hostname', name]
rc, out, err = self.module.run_command(cmd)
if rc != 0:
self.module.fail_json(msg="Command failed rc=%d, out=%s, err=%s" %
(rc, out, err))
def get_permanent_hostname(self):
cmd = ['hostnamectl', '--static', 'status']
rc, out, err = self.module.run_command(cmd)
if rc != 0:
self.module.fail_json(msg="Command failed rc=%d, out=%s, err=%s" %
(rc, out, err))
return to_native(out).strip()
def set_permanent_hostname(self, name):
if len(name) > 64:
self.module.fail_json(msg="name cannot be longer than 64 characters on systemd servers, try a shorter name")
cmd = ['hostnamectl', '--pretty', 'set-hostname', name]
rc, out, err = self.module.run_command(cmd)
if rc != 0:
self.module.fail_json(msg="Command failed rc=%d, out=%s, err=%s" %
(rc, out, err))
cmd = ['hostnamectl', '--static', 'set-hostname', name]
rc, out, err = self.module.run_command(cmd)
if rc != 0:
self.module.fail_json(msg="Command failed rc=%d, out=%s, err=%s" %
(rc, out, err))
# ===========================================
class OpenRCStrategy(GenericStrategy):
"""
This is a Gentoo (OpenRC) Hostname manipulation strategy class - it edits
the /etc/conf.d/hostname file.
"""
HOSTNAME_FILE = '/etc/conf.d/hostname'
def get_permanent_hostname(self):
try:
try:
f = open(self.HOSTNAME_FILE, 'r')
for line in f:
line = line.strip()
if line.startswith('hostname='):
return line[10:].strip('"')
except Exception:
err = get_exception()
self.module.fail_json(msg="failed to read hostname: %s" % str(err))
finally:
f.close()
return None
def set_permanent_hostname(self, name):
try:
try:
f = open(self.HOSTNAME_FILE, 'r')
lines = [x.strip() for x in f]
for i, line in enumerate(lines):
if line.startswith('hostname='):
lines[i] = 'hostname="%s"' % name
break
f.close()
f = open(self.HOSTNAME_FILE, 'w')
f.write('\n'.join(lines) + '\n')
except Exception:
err = get_exception()
self.module.fail_json(msg="failed to update hostname: %s" % str(err))
finally:
f.close()
# ===========================================
class OpenBSDStrategy(GenericStrategy):
"""
This is an OpenBSD family Hostname manipulation strategy class - it edits
the /etc/myname file.
"""
HOSTNAME_FILE = '/etc/myname'
def get_permanent_hostname(self):
if not os.path.isfile(self.HOSTNAME_FILE):
try:
open(self.HOSTNAME_FILE, "a").write("")
except IOError:
err = get_exception()
self.module.fail_json(msg="failed to write file: %s" %
str(err))
try:
f = open(self.HOSTNAME_FILE)
try:
return f.read().strip()
finally:
f.close()
except Exception:
err = get_exception()
self.module.fail_json(msg="failed to read hostname: %s" %
str(err))
def set_permanent_hostname(self, name):
try:
f = open(self.HOSTNAME_FILE, 'w+')
try:
f.write("%s\n" % name)
finally:
f.close()
except Exception:
err = get_exception()
self.module.fail_json(msg="failed to update hostname: %s" %
str(err))
# ===========================================
class SolarisStrategy(GenericStrategy):
"""
This is a Solaris 11 or later Hostname manipulation strategy class - it
executes the hostname command.
"""
def set_current_hostname(self, name):
cmd_option = '-t'
cmd = [self.hostname_cmd, cmd_option, name]
rc, out, err = self.module.run_command(cmd)
if rc != 0:
self.module.fail_json(msg="Command failed rc=%d, out=%s, err=%s" %
(rc, out, err))
def get_permanent_hostname(self):
fmri = 'svc:/system/identity:node'
pattern = 'config/nodename'
cmd = '/usr/sbin/svccfg -s %s listprop -o value %s' % (fmri, pattern)
rc, out, err = self.module.run_command(cmd, use_unsafe_shell=True)
if rc != 0:
self.module.fail_json(msg="Command failed rc=%d, out=%s, err=%s" %
(rc, out, err))
return to_native(out).strip()
def set_permanent_hostname(self, name):
cmd = [self.hostname_cmd, name]
rc, out, err = self.module.run_command(cmd)
if rc != 0:
self.module.fail_json(msg="Command failed rc=%d, out=%s, err=%s" %
(rc, out, err))
# ===========================================
class FreeBSDStrategy(GenericStrategy):
"""
This is a FreeBSD hostname manipulation strategy class - it edits
the /etc/rc.conf.d/hostname file.
"""
HOSTNAME_FILE = '/etc/rc.conf.d/hostname'
def get_permanent_hostname(self):
if not os.path.isfile(self.HOSTNAME_FILE):
try:
open(self.HOSTNAME_FILE, "a").write("hostname=temporarystub\n")
except IOError:
err = get_exception()
self.module.fail_json(msg="failed to write file: %s" %
str(err))
try:
try:
f = open(self.HOSTNAME_FILE, 'r')
for line in f:
line = line.strip()
if line.startswith('hostname='):
return line[10:].strip('"')
except Exception:
err = get_exception()
self.module.fail_json(msg="failed to read hostname: %s" % str(err))
finally:
f.close()
return None
def set_permanent_hostname(self, name):
try:
try:
f = open(self.HOSTNAME_FILE, 'r')
lines = [x.strip() for x in f]
for i, line in enumerate(lines):
if line.startswith('hostname='):
lines[i] = 'hostname="%s"' % name
break
f.close()
f = open(self.HOSTNAME_FILE, 'w')
f.write('\n'.join(lines) + '\n')
except Exception:
err = get_exception()
self.module.fail_json(msg="failed to update hostname: %s" % str(err))
finally:
f.close()
# ===========================================
class FedoraHostname(Hostname):
platform = 'Linux'
distribution = 'Fedora'
strategy_class = SystemdStrategy
class SLESHostname(Hostname):
platform = 'Linux'
distribution = 'Suse linux enterprise server '
distribution_version = get_distribution_version()
if distribution_version and LooseVersion("10") <= LooseVersion(distribution_version) <= LooseVersion("12"):
strategy_class = SLESStrategy
else:
strategy_class = UnimplementedStrategy
class OpenSUSEHostname(Hostname):
platform = 'Linux'
distribution = 'Opensuse '
strategy_class = SystemdStrategy
class ArchHostname(Hostname):
platform = 'Linux'
distribution = 'Arch'
strategy_class = SystemdStrategy
class RedHat5Hostname(Hostname):
platform = 'Linux'
distribution = 'Redhat'
strategy_class = RedHatStrategy
class RedHatServerHostname(Hostname):
platform = 'Linux'
distribution = 'Red hat enterprise linux server'
strategy_class = RedHatStrategy
class RedHatWorkstationHostname(Hostname):
platform = 'Linux'
distribution = 'Red hat enterprise linux workstation'
strategy_class = RedHatStrategy
class CentOSHostname(Hostname):
platform = 'Linux'
distribution = 'Centos'
strategy_class = RedHatStrategy
class CentOSLinuxHostname(Hostname):
platform = 'Linux'
distribution = 'Centos linux'
strategy_class = RedHatStrategy
class ScientificHostname(Hostname):
platform = 'Linux'
distribution = 'Scientific'
strategy_class = RedHatStrategy
class ScientificLinuxHostname(Hostname):
platform = 'Linux'
distribution = 'Scientific linux'
strategy_class = RedHatStrategy
class ScientificLinuxCERNHostname(Hostname):
platform = 'Linux'
distribution = 'Scientific linux cern slc'
strategy_class = RedHatStrategy
class OracleLinuxHostname(Hostname):
platform = 'Linux'
distribution = 'Oracle linux server'
strategy_class = RedHatStrategy
class AmazonLinuxHostname(Hostname):
platform = 'Linux'
distribution = 'Amazon'
strategy_class = RedHatStrategy
class DebianHostname(Hostname):
platform = 'Linux'
distribution = 'Debian'
strategy_class = DebianStrategy
class KaliHostname(Hostname):
platform = 'Linux'
distribution = 'Kali'
strategy_class = DebianStrategy
class UbuntuHostname(Hostname):
platform = 'Linux'
distribution = 'Ubuntu'
strategy_class = DebianStrategy
class LinuxmintHostname(Hostname):
platform = 'Linux'
distribution = 'Linuxmint'
strategy_class = DebianStrategy
class LinaroHostname(Hostname):
platform = 'Linux'
distribution = 'Linaro'
strategy_class = DebianStrategy
class GentooHostname(Hostname):
platform = 'Linux'
distribution = 'Gentoo base system'
strategy_class = OpenRCStrategy
class ALTLinuxHostname(Hostname):
platform = 'Linux'
distribution = 'Altlinux'
strategy_class = RedHatStrategy
class AlpineLinuxHostname(Hostname):
platform = 'Linux'
distribution = 'Alpine'
strategy_class = AlpineStrategy
class OpenBSDHostname(Hostname):
platform = 'OpenBSD'
distribution = None
strategy_class = OpenBSDStrategy
class SolarisHostname(Hostname):
platform = 'SunOS'
distribution = None
strategy_class = SolarisStrategy
class FreeBSDHostname(Hostname):
platform = 'FreeBSD'
distribution = None
strategy_class = FreeBSDStrategy
class NetBSDHostname(Hostname):
platform = 'NetBSD'
distribution = None
strategy_class = FreeBSDStrategy
# ===========================================
def main():
module = AnsibleModule(
argument_spec = dict(
name=dict(required=True)
)
)
hostname = Hostname(module)
name = module.params['name']
changed = hostname.update_current_and_permanent_hostname()
module.exit_json(changed=changed, name=name,
ansible_facts=dict(ansible_hostname=name.split('.')[0],
ansible_nodename=name,
ansible_fqdn=socket.getfqdn(),
ansible_domain='.'.join(socket.getfqdn().split('.')[1:])))
if __name__ == '__main__':
main()
| gpl-3.0 |
qtekfun/htcDesire820Kernel | external/chromium_org/chrome/test/mini_installer/verifier.py | 85 | 2063 | # Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
class Verifier:
"""Verifies that the current machine states match the expectation."""
def VerifyInput(self, verifier_input, variable_expander):
"""Verifies that the current machine states match |verifier_input|.
Args:
verifier_input: An input to the verifier. It is a dictionary where each
key is an expectation name and the associated value is an expectation
dictionary. The expectation dictionary may contain an optional
'condition' property, a string that determines whether the expectation
should be verified. Each subclass can specify a different expectation
name and expectation dictionary.
variable_expander: A VariableExpander object.
"""
for expectation_name, expectation in verifier_input.iteritems():
if 'condition' in expectation:
condition = variable_expander.Expand(expectation['condition'])
if not self._EvaluateCondition(condition):
continue
self._VerifyExpectation(expectation_name, expectation, variable_expander)
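# Illustrative shape of a verifier input (keys and variables here are
# hypothetical, not from the test framework):
# verifier_input = {
#     '$PROGRAM_FILES\\App\\app.exe': {
#         'condition': "'$BRAND' == 'App'",
#         'exists': True,
#     },
# }
# A subclass's _VerifyExpectation then checks each entry whose expanded
# condition evaluates to True.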
def _VerifyExpectation(self, expectation_name, expectation,
variable_expander):
"""Verifies that the current machine states match |verifier_input|.
This is an abstract method for subclasses to override.
Args:
expectation_name: An expectation name. Each subclass can specify a
different expectation name format.
expectation: An expectation dictionary. Each subclass can specify a
different expectation dictionary format.
variable_expander: A VariableExpander object.
"""
raise NotImplementedError()
def _EvaluateCondition(self, condition):
"""Evaluates |condition| using eval().
Args:
condition: A condition string.
Returns:
The result of the evaluated condition.
"""
return eval(condition, {'__builtins__': None}, None)
| gpl-2.0 |
jpike88/crosswalk | tools/reflection_generator/java_method.py | 3 | 29922 | #!/usr/bin/env python
# Copyright (c) 2014 Intel Corporation. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import re
from collections import OrderedDict
from string import Template
def ConvertClassExpressionToClassType(class_name):
""" Turn "final HashMap<String>" to HashMap.class. """
return '%s.class' % class_name.split()[-1].split('<')[0]
def ConvertPrimitiveTypeToObject(class_name):
primitive_map = {
'byte': 'Byte',
'short': 'Short',
'int': 'Integer',
'long': 'Long',
'float': 'Float',
'double': 'Double',
'char': 'Character',
'boolean': 'Boolean',
}
return primitive_map.get(class_name, class_name)
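# e.g. ConvertPrimitiveTypeToObject('int') -> 'Integer'; non-primitive
# names pass through unchanged: 'String' -> 'String'.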
class ParamType(object):
"""Internal representation of the type of a parameter of a method."""
def __init__(self, expression, class_loader):
self._expression = expression
self._modifier = ''
self._generic_type = ''
self._generic_type_parameters = []
self._contains_internal_class = False
self.ParseType(class_loader)
self._contains_internal_class = self._contains_internal_class or\
class_loader.IsInternalClass(self._generic_type)
def ParseType(self, class_loader):
param_type_re = re.compile('(?P<modifier>(\w+ )*)'
'(?P<generic>(\w+))(?P<type_params>(<.*>)?)')
for match in re.finditer(param_type_re, self._expression):
self._modifier = match.group('modifier')
self._generic_type = match.group('generic')
type_params = match.group('type_params')
if len(type_params) > 1:
type_params = type_params[1:-1]
self._generic_type_parameters = [ParamType(param.strip(),
class_loader) for param in type_params.split(',')]
for type_param in self._generic_type_parameters:
if self.generic_type == 'ValueCallback':
print 'value callback with %s' % type_param.generic_type
if type_param.contains_internal_class:
self._contains_internal_class = True
break
@property
def expression(self):
return self._expression
@property
def modifier(self):
return self._modifier
@property
def generic_type(self):
return self._generic_type
@property
def generic_type_parameters(self):
return self._generic_type_parameters
@property
def contains_internal_class(self):
return self._contains_internal_class
class ParamStringType(object):
INTERNAL_DECLARE = 1
BRIDGE_DECLARE = 2
BRIDGE_DECLARE_FOR_WRAPPER = 3
BRIDGE_PASS_TO_SUPER = 4
BRIDGE_PASS_TO_WRAPPER = 5
INTERNAL_PASS_TO_BRIDGE = 6
BRIDGE_OVERRIDE_CONDITION = 7
WRAPPER_DECLARE = 8
WRAPPER_DECLARE_FOR_BRIDGE = 9
WRAPPER_PASS_TO_BRIDGE = 10
class MethodStringType(object):
BRIDGE_CONSTRUCTOR = 1
BRIDGE_STATIC = 2
BRIDGE_SUPER = 3
BRIDGE_OVERRIDE = 4
BRIDGE_WRAPPER = 5
WRAPPER_CONSTRUCTOR = 6
WRAPPER_STATIC = 7
WRAPPER_BRIDGE = 8
WRAPPER_INTERFACE = 9
class Method(object):
"""Internal representaion of a method."""
ANNOTATION_PRE_WRAPLINE = 'preWrapperLines'
ANNOTATION_POST_WRAPLINE = 'postWrapperLines'
def __init__(self, class_name, class_loader,
is_constructor, is_static, is_abstract,
method_name, method_return, params, annotation, doc=''):
self._class_name = class_name
self._class_loader = class_loader
self._is_constructor = is_constructor
self._is_static = is_static
self._is_abstract = is_abstract
self._method_name = method_name
self._method_return = method_return
self._params = OrderedDict() # Use OrderedDict to avoid parameter misorder.
self._typed_params = OrderedDict()
self._method_annotations = {}
self._method_doc = doc
self._class_java_data = ''
self._method_declare_name = ''
self._internal_params_declare = ''
self._bridge_params_declare = ''
self._bridge_params_declare_for_wrapper = ''
self._bridge_params_pass_to_super = ''
self._bridge_params_pass_to_wrapper = ''
self._internal_params_pass_to_bridge = ''
self._bridge_override_condition = ''
self._wrapper_params_declare = ''
self._wrapper_params_declare_for_bridge = ''
self._wrapper_params_pass_to_bridge = ''
self._is_reservable = False
self.ParseMethodParams(params)
self.ParseMethodAnnotation(annotation)
def IsInternalClass(self, clazz):
return self._class_loader.IsInternalClass(clazz)
def GetJavaData(self, clazz):
return self._class_loader.GetJavaData(clazz)
def GenerateDoc(self, doc):
return self._class_loader.GenerateDoc(doc)
@property
def is_constructor(self):
return self._is_constructor
@property
def is_static(self):
return self._is_static
@property
def is_abstract(self):
return self._is_abstract
@property
def is_reservable(self):
return self._is_reservable
@property
def method_name(self):
return self._method_name
@property
def method_return(self):
return self._method_return
@property
def params(self):
return self._params
@property
def typed_params(self):
return self._typed_params
@property
def method_annotations(self):
return self._method_annotations
@property
def method_doc(self):
return self._method_doc
def ParseMethodParams(self, params):
# TODO(shouqun): Currently, generic parameters are not supported.
# The support of generic types should be added if such cases happen.
    if not params:
return
for param in params.split(','):
param = param.strip()
param_list = param.split()
param_type = ' '.join(param_list[:-1]) # To handle modifiers
param_name = param_list[-1]
self._params[param_name] = param_type
self._typed_params[param_name] = ParamType(param_type, self._class_loader)
def ParseMethodAnnotation(self, annotation):
if annotation.find('reservable = true') >= 0:
self._is_reservable = True
pre_wrapline_re = re.compile('preWrapperLines\s*=\s*\{\s*('
'?P<pre_wrapline>(".*")(,\s*".*")*)\s*\}')
for match in re.finditer(pre_wrapline_re, annotation):
pre_wrapline = self.FormatWrapperLine(match.group('pre_wrapline'))
self._method_annotations[self.ANNOTATION_PRE_WRAPLINE] = pre_wrapline
post_wrapline_re = re.compile('postWrapperLines\s*=\s*\{\s*('
'?P<post_wrapline>(".*")(,\s*".*")*)\s*\}')
for match in re.finditer(post_wrapline_re, annotation):
post_wrapline = self.FormatWrapperLine(match.group('post_wrapline'))
self._method_annotations[self.ANNOTATION_POST_WRAPLINE] = post_wrapline
def FormatWrapperLine(self, annotation_value):
""" annotaion_value is a java string array which each element is an
individual line. Probably like: ' "line1",\n "line2"'
This method is turnning it to ' line1\n line2'
"""
lines = []
exec('lines = [%s]' % annotation_value.replace('\n', ''))
template = Template('\n'.join(lines))
values = {}
for arg in range(1, len(self.params.keys())+1):
values['param%d' % arg] = self.params.keys()[arg-1]
return template.substitute(values)
def PrepareStrings(self):
self._class_java_data = self.GetJavaData(self._class_name)
self._method_declare_name = self.GenerateMethodDeclareName()
self._internal_params_declare = ', '.join(
self.GetFormattedParamArray(ParamStringType.INTERNAL_DECLARE))
self._bridge_params_declare = ', '.join(
self.GetFormattedParamArray(ParamStringType.BRIDGE_DECLARE))
self._bridge_params_declare_for_wrapper = ', '.join(
self.GetFormattedParamArray(
ParamStringType.BRIDGE_DECLARE_FOR_WRAPPER, insert_empty=True))
self._bridge_params_pass_to_super = ', '.join(
self.GetFormattedParamArray(ParamStringType.BRIDGE_PASS_TO_SUPER))
self._bridge_params_pass_to_wrapper = ', '.join(
self.GetFormattedParamArray(ParamStringType.BRIDGE_PASS_TO_WRAPPER))
self._internal_params_pass_to_bridge = ', '.join(
self.GetFormattedParamArray(ParamStringType.INTERNAL_PASS_TO_BRIDGE))
self._bridge_override_condition = ' && '.join(
self.GetFormattedParamArray(ParamStringType.BRIDGE_OVERRIDE_CONDITION))
self._wrapper_params_declare = ', '.join(
self.GetFormattedParamArray(ParamStringType.WRAPPER_DECLARE))
self._wrapper_params_declare_for_bridge = ', '.join(
self.GetFormattedParamArray(
ParamStringType.WRAPPER_DECLARE_FOR_BRIDGE, insert_empty=True))
self._wrapper_params_pass_to_bridge = ', '.join(
self.GetFormattedParamArray(ParamStringType.WRAPPER_PASS_TO_BRIDGE))
def GetFormattedParamArray(self, param_string_type,
append_empty=False, insert_empty=False):
""" Return the array of params with specified format.
append or insert an empty string on demand for cases
that need extra splitter when using the array.
"""
formatted_params = []
for param_name in self._params:
param_type = self._params[param_name]
formatted_param = self.FormatSingleParam(
param_type, param_name, param_string_type)
if formatted_param:
formatted_params.append(formatted_param)
if append_empty:
formatted_params.append('')
if insert_empty:
formatted_params.insert(0, '')
return formatted_params
def FormatSingleParam(self, param_type, param_name, param_string_type):
is_internal_class = self.IsInternalClass(param_type)
if is_internal_class:
java_data = self.GetJavaData(param_type)
typed_param = self._typed_params[param_name]
if param_string_type == ParamStringType.INTERNAL_DECLARE:
# the way internal declares its params, will be used in bridge's override
# call.
# XWalkViewInternal view => XWalkViewInternal view
return '%s %s' % (param_type, param_name)
elif param_string_type == ParamStringType.BRIDGE_DECLARE:
# the way bridge declares its params, will be used in bridge's wrapper
# call and super call.
# XWalkViewInternal view => XWalkViewBridge view
if is_internal_class:
return '%s %s'% (java_data.GetBridgeName(), param_name)
else:
return '%s %s' % (param_type, param_name)
elif param_string_type == ParamStringType.BRIDGE_DECLARE_FOR_WRAPPER:
# the way bridge declares its params for wrapper, will turn the param
# type to class<?> value for reflection to use.
# XWalkViewInternal view => coreBridge.getWrapperClass("XWalkView")
# DirectionInternal direnction =>
# coreBridge.getWrapperClass("XWalkView$Direction")
# String name => String.class
if is_internal_class:
return 'coreBridge.getWrapperClass("%s")' % java_data.GetWrapperName()
else:
# TODO(wang16): Here only detects enum declared in the same class as
# the method itself. Using enum across class is not supported.
if param_type in self._class_java_data.enums:
return ('coreBridge.getWrapperClass("%s")' %
self._class_java_data.GetWrapperName(param_type))
else:
return ConvertClassExpressionToClassType(param_type)
elif param_string_type == ParamStringType.BRIDGE_PASS_TO_SUPER:
# the way bridge passes the param to super
# XWalkViewInternal view => view
if is_internal_class:
return java_data.UseAsInstanceInBridgeSuperCall(param_name)
else:
return param_name
elif param_string_type == ParamStringType.BRIDGE_PASS_TO_WRAPPER:
# the way bridge passes the param to wrapper
# XWalkViewInternal view => view.getWrapper()
# DirectionInternal direction => ConvertDirectionInternal(direction)
if is_internal_class:
return java_data.UseAsInstanceInBridgeCall(param_name)
elif (typed_param.generic_type == 'ValueCallback' and
typed_param.contains_internal_class):
assert len(typed_param.generic_type_parameters) == 1
internal_generic_type_param = typed_param.generic_type_parameters[0]
internal_generic_type_class = self.GetJavaData(
internal_generic_type_param.generic_type)
return ('new ValueCallback<Object>() {\n' +
' @Override\n' +
' public void onReceiveValue(Object value) {\n' +
' %sFinal.onReceiveValue((%s) ' % (
param_name, internal_generic_type_class.bridge_name) +
'coreBridge.getBridgeObject(value));\n' +
' }\n' +
' }')
else:
# TODO(wang16): Here only detects enum declared in the same class as
# the method itself. Using enum across class is not supported.
if param_type in self._class_java_data.enums:
return 'Convert%s(%s)' % (param_type, param_name)
else:
return param_name
elif param_string_type == ParamStringType.INTERNAL_PASS_TO_BRIDGE:
# the way bridge accepts param from internal
# XWalkViewInternal view => (XWalkViewBridge) view
if is_internal_class:
return java_data.UseAsInstanceInBridgeOverrideCall(param_name)
else:
return param_name
elif param_string_type == ParamStringType.BRIDGE_OVERRIDE_CONDITION:
# the way bridge uses as the condition for whether call super or
# call wrapper in override call
# XWalkViewInternal view => (view instanceof XWalkViewBridge)
if (is_internal_class and
not java_data.HasInstanceCreateInternallyAnnotation()):
        return '(%s instanceof %s)' % (param_name, java_data.GetBridgeName())
else:
return None
elif param_string_type == ParamStringType.WRAPPER_DECLARE:
# the way wrapper declare the param
# XWalkViewInternal view => XWalkView view
# DirectionInternal direction => Direction direction
if is_internal_class:
return '%s %s' % (java_data.UseAsTypeInWrapperCall(), param_name)
elif param_type in self._class_java_data.enums:
# TODO(wang16): Here only detects enum declared in the same class as
# the method itself. Using enum across class is not supported.
return '%s %s' % (param_type.replace('Internal', ''), param_name)
else:
return '%s %s' % (param_type, param_name)
elif param_string_type == ParamStringType.WRAPPER_DECLARE_FOR_BRIDGE:
# the way wrapper declares its params for bridge, will turn the param
# type to class<?> value for reflection to use.
# XWalkViewInternal view =>
# coreWrapper.getBridgeClass("XWalkViewBridge")
# DirectionInternal direction => enumDirectionClass
# String name => String.class
# TODO(wang16): Currently there is no internal classes for static method.
# Need to support it in future.
if is_internal_class:
return 'coreWrapper.getBridgeClass("%s")' % java_data.GetBridgeName()
else:
# TODO(wang16): Here only detects enum declared in the same class as
# the method itself. Using enum across class is not supported.
enums = self._class_java_data.enums
if param_type in enums:
return ('coreWrapper.getBridgeClass("%s")' %
self._class_java_data.GetBridgeName(param_type))
else:
return ConvertClassExpressionToClassType(param_type)
elif param_string_type == ParamStringType.WRAPPER_PASS_TO_BRIDGE:
# the way wrapper passes param to bridge
# XWalkViewInternal view => view.getBridge()
# DirectionInternal direction => ConvertDirection(direction)
if is_internal_class:
return java_data.UseAsInstanceInWrapperCall(param_name)
elif param_type in self._class_java_data.enums:
# TODO(wang16): Here only detects enum declared in the same class as
# the method itself. Using enum across class is not supported.
return 'Convert%s(%s)' % (param_type.replace('Internal', ''),
param_name)
else:
return param_name
else:
pass
def GenerateMethodDeclareName(self):
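    # Builds the ReflectMethod field name from the method name plus each
    # parameter's bare class name, e.g. (illustrative)
    # 'loadUrl(String url)' -> 'loadUrlStringMethod'; constructors get a
    # 'Constructor' suffix instead.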
name = self.method_name
for param_name in self.params:
# Remove modifier and generic type.
name += ConvertClassExpressionToClassType(
self.params[param_name]).replace('.class', '')
if self._is_constructor:
return '%sConstructor' % name
else:
return '%sMethod' % name
def GenerateBridgeConstructor(self):
template = Template("""\
public ${NAME}(${PARAMS}, Object wrapper) {
super(${PARAMS_PASSING});
this.wrapper = wrapper;
reflectionInit();
}
""")
value = {'NAME': self._class_java_data.bridge_name,
'PARAMS': self._bridge_params_declare,
'PARAMS_PASSING': self._bridge_params_pass_to_super}
return template.substitute(value)
def GenerateBridgeStaticMethod(self):
template = Template("""\
public static ${RETURN_TYPE} ${NAME}($PARAMS) {
${RETURN}${CLASS_NAME}.${NAME}(${PARAMS_PASSING});
}
""")
value = {'RETURN_TYPE': self.method_return,
'NAME': self.method_name,
'PARAMS': self._bridge_params_declare,
'RETURN': '' if self._method_return == 'void' else 'return ',
'CLASS_NAME': self._class_name,
'PARAMS_PASSING': self._bridge_params_pass_to_super}
return template.substitute(value)
def GenerateBridgeOverrideMethod(self):
if not self._bridge_override_condition:
return ' @Override'
template = Template("""\
@Override
public ${RETURN_TYPE} ${NAME}(${PARAMS}) {
if (${IF_CONDITION}) {
${RETURN}${NAME}(${BRIDGE_PARAMS_PASSING});
} else {
${RETURN}super.${NAME}(${PARAMS_PASSING});
}
}
""")
value = {'NAME': self.method_name,
'RETURN_TYPE': self.method_return,
'PARAMS': self._internal_params_declare,
'RETURN': '' if self._method_return == 'void' else 'return ',
'IF_CONDITION': self._bridge_override_condition,
'PARAMS_PASSING': self._bridge_params_pass_to_super,
'BRIDGE_PARAMS_PASSING': self._internal_params_pass_to_bridge}
return template.substitute(value)
def GenerateBridgeWrapperMethod(self):
return_is_internal = self.IsInternalClass(self._method_return)
if return_is_internal:
return_type_java_data = self.GetJavaData(self._method_return)
if return_is_internal:
template = Template("""\
public ${RETURN_TYPE} ${NAME}(${PARAMS}) {
${GENERIC_TYPE_DECLARE}${RETURN}coreBridge.getBridgeObject(\
${METHOD_DECLARE_NAME}.invoke(${PARAMS_PASSING}));
}
""")
    else:
template = Template("""\
public ${RETURN_TYPE} ${NAME}(${PARAMS}) {
${GENERIC_TYPE_DECLARE}${RETURN}${METHOD_DECLARE_NAME}.invoke(\
${PARAMS_PASSING});
}
""")
if self._method_return == 'void':
return_statement = ''
elif return_is_internal:
return_statement = 'return (%s)' % return_type_java_data.bridge_name
else:
return_statement = ('return (%s)' %
ConvertPrimitiveTypeToObject(self.method_return))
# Handling generic types, current only ValueCallback will be handled.
generic_type_declare = ''
for param_name in self._typed_params:
typed_param = self._typed_params[param_name]
if typed_param.generic_type != 'ValueCallback':
continue
if typed_param.contains_internal_class:
generic_type_declare += 'final %s %sFinal = %s;\n ' % (
typed_param.expression, param_name, param_name)
value = {'RETURN_TYPE': self.method_return,
'NAME': self.method_name,
'METHOD_DECLARE_NAME': self._method_declare_name,
'PARAMS': self._bridge_params_declare,
'RETURN': return_statement,
'GENERIC_TYPE_DECLARE': generic_type_declare,
'PARAMS_PASSING': self._bridge_params_pass_to_wrapper}
return template.substitute(value)
def GenerateBridgeSuperMethod(self):
no_return_value = self._method_return == 'void'
return_is_internal = self.IsInternalClass(self._method_return)
if return_is_internal:
return_type_java_data = self.GetJavaData(self._method_return)
if self._is_abstract:
return ''
if self._class_java_data.HasCreateInternallyAnnotation():
if no_return_value:
template = Template("""\
public void ${NAME}Super(${PARAMS}) {
if (internal == null) {
super.${NAME}(${PARAM_PASSING});
} else {
internal.${NAME}(${PARAM_PASSING});
}
}
""")
else:
template = Template("""\
public ${RETURN_TYPE} ${NAME}Super(${PARAMS}) {
${INTERNAL_RETURN_TYPE} ret;
if (internal == null) {
ret = super.${NAME}(${PARAM_PASSING});
} else {
ret = internal.${NAME}(${PARAM_PASSING});
}
${IF_NULL_RETURN_NULL}
return ${RETURN_VALUE};
}
""")
else:
if no_return_value:
template = Template("""\
public void ${NAME}Super(${PARAMS}) {
super.${NAME}(${PARAM_PASSING});
}
""")
else:
template = Template("""\
public ${RETURN_TYPE} ${NAME}Super(${PARAMS}) {
${INTERNAL_RETURN_TYPE} ret;
ret = super.${NAME}(${PARAM_PASSING});
${IF_NULL_RETURN_NULL}
return ${RETURN_VALUE};
}
""")
if return_is_internal:
return_value = return_type_java_data.UseAsReturnInBridgeSuperCall('ret')
method_return = return_type_java_data.bridge_name
else:
return_value = 'ret'
method_return = self._method_return
if ConvertPrimitiveTypeToObject(method_return) != method_return:
      # it's returning a primitive type, so it can't be null.
if_null_return_null = ''
else:
if_null_return_null = 'if (ret == null) return null;'
value = {
'RETURN_TYPE': method_return,
'INTERNAL_RETURN_TYPE': self.method_return,
'NAME': self.method_name,
'PARAM_PASSING': self._bridge_params_pass_to_super,
'PARAMS': self._bridge_params_declare,
'IF_NULL_RETURN_NULL': if_null_return_null,
'RETURN_VALUE': return_value
}
return template.substitute(value)
def GenerateWrapperConstructor(self):
# TODO(wang16): Currently, only support pre/post wrapper lines for
# Constructors.
template = Template("""\
${DOC}
public ${CLASS_NAME}(${PARAMS}) {
${PRE_WRAP_LINES}
reflectionInit();
}
""")
pre_wrap_string = self._method_annotations.get(
self.ANNOTATION_PRE_WRAPLINE, '')
post_wrap_string = self._method_annotations.get(
self.ANNOTATION_POST_WRAPLINE, '')
if (pre_wrap_string != ''):
pre_wrap_string += "\n\n"
pre_wrap_string += " constructorTypes = new ArrayList<Object>();\n"
for param_type in self._wrapper_params_declare_for_bridge.split(', '):
if (param_type != ''):
param_type = param_type.replace('coreWrapper.getBridgeClass(', '')
param_type = param_type.replace(')', '')
pre_wrap_string += (" constructorTypes.add(%s);\n" % param_type)
pre_wrap_string += "\n"
pre_wrap_string += " constructorParams = new ArrayList<Object>();\n"
for param_name in self._wrapper_params_pass_to_bridge.split(', '):
param_name = param_name.replace('.getBridge()', '')
pre_wrap_string += " constructorParams.add(%s);\n" % param_name
if (post_wrap_string != ''):
pre_wrap_string += ("""
postWrapperMethod = new ReflectMethod(this,
\"post%s\");\n""" % self._method_declare_name)
value = {'DOC': self.GenerateDoc(self.method_doc),
'CLASS_NAME': self._class_java_data.wrapper_name,
'PARAMS': self._wrapper_params_declare,
'PRE_WRAP_LINES': pre_wrap_string}
ret = template.substitute(value)
if (post_wrap_string != ''):
template = Template("""\
public void post${POST_WRAP_METHOD}() {
${POST_WRAP_LINES}
}
""")
value = {'POST_WRAP_METHOD': self._method_declare_name,
'POST_WRAP_LINES': post_wrap_string}
ret += template.substitute(value)
return ret
def GenerateWrapperStaticMethod(self):
if self.is_reservable:
template = Template("""\
${DOC}
public static ${RETURN_TYPE} ${NAME}(${PARAMS}) {
reflectionInit();
if (${METHOD_DECLARE_NAME}.isNull()) {
${METHOD_DECLARE_NAME}.setArguments(${PARAMS_PASSING});
XWalkCoreWrapper.reserveReflectMethod(${METHOD_DECLARE_NAME});
return;
}
${RETURN}${METHOD_DECLARE_NAME}.invoke(${PARAMS_PASSING});
}
""")
else:
template = Template("""\
${DOC}
public static ${RETURN_TYPE} ${NAME}(${PARAMS}) {
reflectionInit();
${RETURN}${METHOD_DECLARE_NAME}.invoke(${PARAMS_PASSING});
}
""")
return_type = ConvertPrimitiveTypeToObject(self.method_return)
if self._method_return == 'void':
return_state = ''
else:
return_state = 'return (%s) ' % return_type
value = {'RETURN_TYPE': self.method_return,
'RETURN': return_state,
'DOC': self.GenerateDoc(self.method_doc),
'NAME': self.method_name,
'PARAMS': self._wrapper_params_declare,
'METHOD_DECLARE_NAME': self._method_declare_name,
'PARAMS_PASSING': self._wrapper_params_pass_to_bridge}
return template.substitute(value)
def GenerateWrapperBridgeMethod(self):
no_return_value = self._method_return == 'void'
return_is_internal = self.IsInternalClass(self._method_return)
if return_is_internal:
return_type_java_data = self.GetJavaData(self._method_return)
if self.is_abstract:
template = Template(
'${DOC}\n' +
' public abstract ${RETURN_TYPE} ${NAME}(${PARAMS});\n\n')
elif return_is_internal:
template = Template("""\
${DOC}
public ${RETURN_TYPE} ${NAME}(${PARAMS}) {
return (${RETURN_TYPE}) coreWrapper.getWrapperObject(\
${METHOD_DECLARE_NAME}.invoke(${PARAMS_PASSING}));
}
""")
elif self.is_reservable:
template = Template("""\
${DOC}
public ${RETURN_TYPE} ${NAME}(${PARAMS}) {
if (${METHOD_DECLARE_NAME}.isNull()) {
${METHOD_DECLARE_NAME}.setArguments(${PARAMS_RESERVING});
XWalkCoreWrapper.reserveReflectMethod(${METHOD_DECLARE_NAME});
return;
}
${RETURN}${METHOD_DECLARE_NAME}.invoke(${PARAMS_PASSING});
}
""")
else:
template = Template("""\
${DOC}
public ${RETURN_TYPE} ${NAME}(${PARAMS}) {
${RETURN}${METHOD_DECLARE_NAME}.invoke(${PARAMS_PASSING});
}
""")
if return_is_internal:
return_type = return_type_java_data.wrapper_name
else:
return_type = self.method_return
if no_return_value:
return_state = ''
else:
return_state = 'return (%s)' % ConvertPrimitiveTypeToObject(return_type)
params_reserving = []
for param in self._wrapper_params_pass_to_bridge.split(', '):
if (param.find("getBridge()") > 0):
param = param.replace('.getBridge()', '')
params_reserving.append(
'new ReflectMethod(%s, "getBridge")' % param)
else:
params_reserving.append(param)
value = {'RETURN_TYPE': return_type,
'RETURN': return_state,
'DOC': self.GenerateDoc(self.method_doc),
'NAME': self.method_name,
'PARAMS': re.sub(r'ValueCallback<([A-Za-z]+)Internal>',
                 r'ValueCallback<\1>', self._wrapper_params_declare),
'METHOD_DECLARE_NAME': self._method_declare_name,
'PARAMS_RESERVING': ', '.join(params_reserving),
'PARAMS_PASSING': self._wrapper_params_pass_to_bridge}
return template.substitute(value)
def GenerateWrapperInterface(self):
return_is_internal = self.IsInternalClass(self._method_return)
if return_is_internal:
return_type_java_data = self.GetJavaData(self._method_return)
template = Template(
'${DOC}\n' +
' public ${RETURN_TYPE} ${NAME}(${PARAMS});\n\n')
if return_is_internal:
return_type = return_type_java_data.wrapper_name
else:
return_type = self.method_return
value = {'RETURN_TYPE': return_type,
'DOC': self.GenerateDoc(self.method_doc),
'NAME': self.method_name,
'PARAMS': self._wrapper_params_declare}
return template.substitute(value)
def GenerateMethodsStringForBridge(self):
if self._is_constructor:
return self.GenerateBridgeConstructor()
elif self._is_static:
return self.GenerateBridgeStaticMethod()
else:
return '%s\n%s\n%s\n%s\n' % (
self.GenerateBridgeOverrideMethod(),
self.GenerateBridgeWrapperMethod(),
self.GenerateBridgeSuperMethod(),
' private ReflectMethod %s = new ReflectMethod(null, "%s");\n' %
(self._method_declare_name, self._method_name))
def GenerateMethodsStringForWrapper(self):
if self._is_constructor:
return self.GenerateWrapperConstructor()
elif self._is_static:
return '%s\n%s\n' % (
self.GenerateWrapperStaticMethod(), """\
private static ReflectMethod %s = new ReflectMethod(null, "%s");\n""" %
(self._method_declare_name, self._method_name))
elif self._is_abstract:
return self.GenerateWrapperBridgeMethod()
else:
return '%s\n%s\n' % (
self.GenerateWrapperBridgeMethod(),
' private ReflectMethod %s = new ReflectMethod(null, "%s");\n' %
(self._method_declare_name, self._method_name))
def GenerateMethodsStringForInterface(self):
return self.GenerateWrapperInterface()
| bsd-3-clause |
polypmer/obligarcy | obligarcy/migrations/0040_action.py | 1 | 1184 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
from django.conf import settings
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('contenttypes', '0002_remove_content_type_name'),
('obligarcy', '0039_auto_20160406_2124'),
]
operations = [
migrations.CreateModel(
name='Action',
fields=[
('id', models.AutoField(serialize=False, auto_created=True, verbose_name='ID', primary_key=True)),
('verb', models.CharField(max_length=255)),
('target_id', models.PositiveIntegerField(blank=True, null=True, db_index=True)),
('created', models.DateTimeField(db_index=True, auto_now_add=True)),
('actor', models.ForeignKey(to=settings.AUTH_USER_MODEL, related_name='actions')),
('target_ct', models.ForeignKey(to='contenttypes.ContentType', related_name='target_obj', blank=True, null=True)),
],
options={
'ordering': ('-created',),
},
),
]
| gpl-3.0 |
pombredanne/disco | tests/test_util.py | 10 | 1840 | import os
from datetime import datetime
from disco.test import TestCase
from disco.util import flatten, iterify, urlsplit
def function(x):
return x + 0
sequence = 0, [1, [2, 3], [[4, [5, [6]]]]]
class UtilTestCase(TestCase):
def test_flatten(self):
self.assertEquals(list(range(7)), list(flatten(sequence)))
def test_iterify(self):
self.assertEquals([5], list(iterify(5)))
self.assertEquals([5], list(iterify([5])))
def test_urlsplit(self):
port = self.settings['DISCO_PORT']
ddfs = self.settings['DDFS_DATA']
data = self.settings['DISCO_DATA']
self.assertEquals(urlsplit('http://host/path'),
('http', ('host', ''), 'path'))
self.assertEquals(urlsplit('http://host:port/path'),
('http', ('host', 'port'), 'path'))
self.assertEquals(urlsplit('disco://master/long/path'),
('http', ('master', '{0}'.format(port)), 'long/path'))
self.assertEquals(urlsplit('disco://localhost/ddfs/path',
localhost='localhost',
ddfs_data=ddfs),
('file', ('', ''), os.path.join(ddfs, 'path')))
self.assertEquals(urlsplit('disco://localhost/data/path',
localhost='localhost',
disco_data=data),
('file', ('', ''), os.path.join(data, 'path')))
self.assertEquals(urlsplit('tag://tag', ''),
('tag', ('', ''), 'tag'))
self.assertEquals(urlsplit('tag://host/tag', ''),
('tag', ('host', ''), 'tag'))
self.assertEquals(urlsplit('tag://host:port/tag', ''),
('tag', ('host', 'port'), 'tag'))
| bsd-3-clause |
napsternxg/twitter_nlp | hbc/python/Vocab.py | 10 | 1046 | class Vocab:
def __init__(self, vocabFile=None):
self.nextId = 1
self.word2id = {}
self.id2word = {}
if vocabFile:
for line in open(vocabFile):
line = line.rstrip('\n')
(word, wid) = line.split('\t')
self.word2id[word] = int(wid)
				self.id2word[int(wid)] = word  # int key, matching the ids handed out by GetID
self.nextId = max(self.nextId, int(wid) + 1)
	def GetID(self, word):
		if not self.word2id.has_key(word):
			self.word2id[word] = self.nextId
			self.id2word[self.nextId] = word  # keep the reverse map in sync
			self.nextId += 1
		return self.word2id[word]
def HasWord(self, word):
return self.word2id.has_key(word)
def HasId(self, wid):
return self.id2word.has_key(wid)
def GetWord(self, wid):
return self.id2word[wid]
def SaveVocab(self, vocabFile):
fOut = open(vocabFile, 'w')
for word in self.word2id.keys():
fOut.write("%s\t%s\n" % (word, self.word2id[word]))
def GetVocabSize(self):
return self.nextId-1
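# Illustrative usage (assumed workflow, not part of the original file):
#   vocab = Vocab()
#   wid = vocab.GetID('hello')      # first new word gets id 1
#   vocab.HasWord('hello')          # -> True
#   vocab.SaveVocab('vocab.txt')    # writes 'hello\t1'
#   vocab.GetVocabSize()            # -> 1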
| gpl-3.0 |
michaelWagner/oppia | extensions/triggers/trigger_classes.py | 19 | 2484 | # coding: utf-8
#
# Copyright 2014 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Base class for defining triggers.
Although this module is in extensions/, it is not provided as an extension
framework for third-party developers. This is because reacting to triggers
involves changes to core code.
"""
from extensions import domain
class BaseTrigger(object):
"""Base trigger definition class.
This class is not meant to be user-editable. The only methods on it should
be get()-type methods.
"""
# Customization arg specifications for the trigger, including their
# descriptions, schemas and default values. Overridden in subclasses.
_customization_arg_specs = []
@classmethod
def get_trigger_type(cls):
return cls.__name__
@property
def customization_arg_specs(self):
return [
domain.CustomizationArgSpec(**cas)
for cas in self._customization_arg_specs]
class NthResubmission(BaseTrigger):
"""This trigger is invoked when an answer is submitted to the same state
for the nth time in succession, and the destination that would result due
to normal evaluation would cause a further loop-around to the same state.
"""
_customization_arg_specs = [{
'name': 'num_submits',
'description': (
'The number of submissions after which to react, if the last '
'submission would result in a further loop-around'),
'schema': {
'type': 'int'
},
'default_value': 3,
}]
class ClickButton(BaseTrigger):
"""The presence of this trigger adds a button to the UI. The trigger is
invoked when the learner clicks this button.
"""
_customization_arg_specs = [{
'name': 'button_text',
'description': 'The text of the button',
'schema': {
'type': 'unicode',
},
'default_value': 'Help, I\'m stuck',
}]
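# Illustrative usage (assumed, not part of the original file):
#   ClickButton.get_trigger_type()          # -> 'ClickButton'
#   ClickButton().customization_arg_specs   # -> [domain.CustomizationArgSpec
#                                           #     for the 'button_text' arg]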
| apache-2.0 |
jermowery/xos | xos/tosca/tests/computetest.py | 4 | 4688 | from basetest import BaseToscaTest
from core.models import Instance, Slice
class ComputeTest(BaseToscaTest):
tests = [ # "create_compute_m1_tiny", XXX m1.tiny does not exist on cloudlab
"create_compute_m1_small",
"create_compute_m1_large_8192MB",
"create_compute_m1_large_8GB",
"destroy_compute",
"create_compute_scalable",
"destroy_compute_scalable",
]
def cleanup(self):
self.try_to_delete(Instance, name="test_compute1")
self.try_to_delete(Instance, name="test_compute1-0")
self.try_to_delete(Instance, name="test_compute1-1")
self.try_to_delete(Instance, name="test_compute1-2")
self.try_to_delete(Instance, name="test_compute1-3")
self.try_to_delete(Slice, name="testsite_slice1")
def get_base_templates(self):
return self.make_nodetemplate("testsite", "tosca.nodes.Site") + \
self.make_nodetemplate("testsite_slice1", "tosca.nodes.Slice", reqs=[("testsite", "tosca.relationships.MemberOfSite")])
def create_compute_m1_tiny(self):
self.assert_noobj(Instance, "test_compute1")
self.execute(self.get_base_templates() +
self.make_compute("testsite_slice1", "test_compute1", disk_size="1 GB", mem_size="500 MB"))
instance = self.assert_obj(Instance, "test_compute1")
assert(instance.flavor.name == "m1.tiny")
def create_compute_m1_small(self):
self.assert_noobj(Instance, "test_compute1")
self.execute(self.get_base_templates() +
self.make_compute("testsite_slice1", "test_compute1", disk_size="1 GB", mem_size="513 MB"))
instance = self.assert_obj(Instance, "test_compute1")
assert(instance.flavor.name == "m1.small")
def create_compute_m1_large_8192MB(self):
self.assert_noobj(Instance, "test_compute1")
self.execute(self.get_base_templates() +
self.make_compute("testsite_slice1", "test_compute1", mem_size="8192 MB"))
instance = self.assert_obj(Instance, "test_compute1")
assert(instance.flavor.name == "m1.large")
def create_compute_m1_large_8GB(self):
self.assert_noobj(Instance, "test_compute1")
self.execute(self.get_base_templates() +
self.make_compute("testsite_slice1", "test_compute1", mem_size="8 GB"))
instance = self.assert_obj(Instance, "test_compute1")
assert(instance.flavor.name == "m1.large")
def destroy_compute(self):
self.execute(self.get_base_templates() +
self.make_compute("testsite_slice1", "test_compute1"))
self.assert_obj(Instance, "test_compute1")
self.destroy(self.get_base_templates() +
self.make_compute("testsite_slice1", "test_compute1"))
self.assert_noobj(Instance, "test_compute1")
def create_compute_scalable(self):
self.assert_noobj(Instance, "test_compute1-1")
self.assert_noobj(Instance, "test_compute1-2")
self.assert_noobj(Instance, "test_compute1-3")
self.execute(self.get_base_templates() +
self.make_compute("testsite_slice1", "test_compute1", mem_size="8 GB",
caps={"scalable": {"min_instances": 2, "max_instances": 3, "default_instances": 2}}))
# there should be two instances
instance0 = self.assert_obj(Instance, "test_compute1-0")
instance1 = self.assert_obj(Instance, "test_compute1-1")
self.assert_noobj(Instance, "test_compute1-2")
def destroy_compute_scalable(self):
self.assert_noobj(Instance, "test_compute1-1")
self.assert_noobj(Instance, "test_compute1-2")
self.assert_noobj(Instance, "test_compute1-3")
self.execute(self.get_base_templates() +
self.make_compute("testsite_slice1", "test_compute1", mem_size="8 GB",
caps={"scalable": {"min_instances": 2, "max_instances": 3, "default_instances": 2}}))
# there should be two instances
instance0 = self.assert_obj(Instance, "test_compute1-0")
instance1 = self.assert_obj(Instance, "test_compute1-1")
self.destroy(self.get_base_templates() +
self.make_compute("testsite_slice1", "test_compute1", mem_size="8 GB",
caps={"scalable": {"min_instances": 2, "max_instances": 3, "default_instances": 2}}))
self.assert_noobj(Instance, "test_compute1-0")
self.assert_noobj(Instance, "test_compute1-1")
if __name__ == "__main__":
ComputeTest()
| apache-2.0 |
yampiopl/Yamcoin | contrib/bitrpc/bitrpc.py | 2348 | 7835 | from jsonrpc import ServiceProxy
import sys
import string
# ===== BEGIN USER SETTINGS =====
# if you do not set these you will be prompted for a password for every command
rpcuser = ""
rpcpass = ""
# ====== END USER SETTINGS ======
if rpcpass == "":
access = ServiceProxy("http://127.0.0.1:8332")
else:
access = ServiceProxy("http://"+rpcuser+":"+rpcpass+"@127.0.0.1:8332")
cmd = sys.argv[1].lower()
if cmd == "backupwallet":
try:
path = raw_input("Enter destination path/filename: ")
print access.backupwallet(path)
except:
print "\n---An error occurred---\n"
elif cmd == "getaccount":
try:
addr = raw_input("Enter a Bitcoin address: ")
print access.getaccount(addr)
except:
print "\n---An error occurred---\n"
elif cmd == "getaccountaddress":
try:
acct = raw_input("Enter an account name: ")
print access.getaccountaddress(acct)
except:
print "\n---An error occurred---\n"
elif cmd == "getaddressesbyaccount":
try:
acct = raw_input("Enter an account name: ")
print access.getaddressesbyaccount(acct)
except:
print "\n---An error occurred---\n"
elif cmd == "getbalance":
try:
acct = raw_input("Enter an account (optional): ")
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.getbalance(acct, mc)
except:
print access.getbalance()
except:
print "\n---An error occurred---\n"
elif cmd == "getblockbycount":
try:
height = raw_input("Height: ")
print access.getblockbycount(height)
except:
print "\n---An error occurred---\n"
elif cmd == "getblockcount":
try:
print access.getblockcount()
except:
print "\n---An error occurred---\n"
elif cmd == "getblocknumber":
try:
print access.getblocknumber()
except:
print "\n---An error occurred---\n"
elif cmd == "getconnectioncount":
try:
print access.getconnectioncount()
except:
print "\n---An error occurred---\n"
elif cmd == "getdifficulty":
try:
print access.getdifficulty()
except:
print "\n---An error occurred---\n"
elif cmd == "getgenerate":
try:
print access.getgenerate()
except:
print "\n---An error occurred---\n"
elif cmd == "gethashespersec":
try:
print access.gethashespersec()
except:
print "\n---An error occurred---\n"
elif cmd == "getinfo":
try:
print access.getinfo()
except:
print "\n---An error occurred---\n"
elif cmd == "getnewaddress":
try:
acct = raw_input("Enter an account name: ")
try:
print access.getnewaddress(acct)
except:
print access.getnewaddress()
except:
print "\n---An error occurred---\n"
elif cmd == "getreceivedbyaccount":
try:
acct = raw_input("Enter an account (optional): ")
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.getreceivedbyaccount(acct, mc)
except:
print access.getreceivedbyaccount()
except:
print "\n---An error occurred---\n"
elif cmd == "getreceivedbyaddress":
try:
addr = raw_input("Enter a Bitcoin address (optional): ")
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.getreceivedbyaddress(addr, mc)
except:
print access.getreceivedbyaddress()
except:
print "\n---An error occurred---\n"
elif cmd == "gettransaction":
try:
txid = raw_input("Enter a transaction ID: ")
print access.gettransaction(txid)
except:
print "\n---An error occurred---\n"
elif cmd == "getwork":
try:
data = raw_input("Data (optional): ")
try:
			print access.getwork(data)
except:
			print access.getwork()
except:
print "\n---An error occurred---\n"
elif cmd == "help":
try:
cmd = raw_input("Command (optional): ")
try:
print access.help(cmd)
except:
print access.help()
except:
print "\n---An error occurred---\n"
elif cmd == "listaccounts":
try:
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.listaccounts(mc)
except:
print access.listaccounts()
except:
print "\n---An error occurred---\n"
elif cmd == "listreceivedbyaccount":
try:
mc = raw_input("Minimum confirmations (optional): ")
incemp = raw_input("Include empty? (true/false, optional): ")
try:
print access.listreceivedbyaccount(mc, incemp)
except:
print access.listreceivedbyaccount()
except:
print "\n---An error occurred---\n"
elif cmd == "listreceivedbyaddress":
try:
mc = raw_input("Minimum confirmations (optional): ")
incemp = raw_input("Include empty? (true/false, optional): ")
try:
print access.listreceivedbyaddress(mc, incemp)
except:
print access.listreceivedbyaddress()
except:
print "\n---An error occurred---\n"
elif cmd == "listtransactions":
try:
acct = raw_input("Account (optional): ")
count = raw_input("Number of transactions (optional): ")
frm = raw_input("Skip (optional):")
try:
print access.listtransactions(acct, count, frm)
except:
print access.listtransactions()
except:
print "\n---An error occurred---\n"
elif cmd == "move":
try:
frm = raw_input("From: ")
to = raw_input("To: ")
amt = raw_input("Amount:")
mc = raw_input("Minimum confirmations (optional): ")
comment = raw_input("Comment (optional): ")
try:
print access.move(frm, to, amt, mc, comment)
except:
print access.move(frm, to, amt)
except:
print "\n---An error occurred---\n"
elif cmd == "sendfrom":
try:
frm = raw_input("From: ")
to = raw_input("To: ")
amt = raw_input("Amount:")
mc = raw_input("Minimum confirmations (optional): ")
comment = raw_input("Comment (optional): ")
commentto = raw_input("Comment-to (optional): ")
try:
print access.sendfrom(frm, to, amt, mc, comment, commentto)
except:
print access.sendfrom(frm, to, amt)
except:
print "\n---An error occurred---\n"
elif cmd == "sendmany":
try:
frm = raw_input("From: ")
to = raw_input("To (in format address1:amount1,address2:amount2,...): ")
mc = raw_input("Minimum confirmations (optional): ")
comment = raw_input("Comment (optional): ")
try:
print access.sendmany(frm,to,mc,comment)
except:
print access.sendmany(frm,to)
except:
print "\n---An error occurred---\n"
elif cmd == "sendtoaddress":
try:
to = raw_input("To (in format address1:amount1,address2:amount2,...): ")
amt = raw_input("Amount:")
comment = raw_input("Comment (optional): ")
commentto = raw_input("Comment-to (optional): ")
try:
print access.sendtoaddress(to,amt,comment,commentto)
except:
print access.sendtoaddress(to,amt)
except:
print "\n---An error occurred---\n"
elif cmd == "setaccount":
try:
addr = raw_input("Address: ")
acct = raw_input("Account:")
print access.setaccount(addr,acct)
except:
print "\n---An error occurred---\n"
elif cmd == "setgenerate":
try:
	gen = raw_input("Generate? (true/false): ")
cpus = raw_input("Max processors/cores (-1 for unlimited, optional):")
try:
print access.setgenerate(gen, cpus)
except:
print access.setgenerate(gen)
except:
print "\n---An error occurred---\n"
elif cmd == "settxfee":
try:
amt = raw_input("Amount:")
print access.settxfee(amt)
except:
print "\n---An error occurred---\n"
elif cmd == "stop":
try:
print access.stop()
except:
print "\n---An error occurred---\n"
elif cmd == "validateaddress":
try:
addr = raw_input("Address: ")
print access.validateaddress(addr)
except:
print "\n---An error occurred---\n"
elif cmd == "walletpassphrase":
try:
pwd = raw_input("Enter wallet passphrase: ")
access.walletpassphrase(pwd, 60)
print "\n---Wallet unlocked---\n"
except:
print "\n---An error occurred---\n"
elif cmd == "walletpassphrasechange":
try:
pwd = raw_input("Enter old wallet passphrase: ")
pwd2 = raw_input("Enter new wallet passphrase: ")
access.walletpassphrasechange(pwd, pwd2)
print
print "\n---Passphrase changed---\n"
except:
print
print "\n---An error occurred---\n"
print
else:
print "Command not found or not supported" | mit |
ashray/VTK-EVM | ThirdParty/Twisted/twisted/web/test/test_static.py | 28 | 56293 | # Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Tests for L{twisted.web.static}.
"""
import inspect
import mimetypes
import os
import re
import StringIO
from zope.interface.verify import verifyObject
from twisted.internet import abstract, interfaces
from twisted.python.runtime import platform
from twisted.python.filepath import FilePath
from twisted.python import log
from twisted.trial.unittest import TestCase
from twisted.web import static, http, script, resource
from twisted.web.server import UnsupportedMethod
from twisted.web.test.test_web import DummyRequest
from twisted.web.test._util import _render
class StaticDataTests(TestCase):
"""
Tests for L{Data}.
"""
def test_headRequest(self):
"""
L{Data.render} returns an empty response body for a I{HEAD} request.
"""
data = static.Data("foo", "bar")
request = DummyRequest([''])
request.method = 'HEAD'
d = _render(data, request)
def cbRendered(ignored):
self.assertEqual(''.join(request.written), "")
d.addCallback(cbRendered)
return d
def test_invalidMethod(self):
"""
L{Data.render} raises L{UnsupportedMethod} in response to a non-I{GET},
non-I{HEAD} request.
"""
data = static.Data("foo", "bar")
request = DummyRequest([''])
request.method = 'POST'
self.assertRaises(UnsupportedMethod, data.render, request)
class StaticFileTests(TestCase):
"""
Tests for the basic behavior of L{File}.
"""
def _render(self, resource, request):
return _render(resource, request)
def test_invalidMethod(self):
"""
L{File.render} raises L{UnsupportedMethod} in response to a non-I{GET},
non-I{HEAD} request.
"""
request = DummyRequest([''])
request.method = 'POST'
path = FilePath(self.mktemp())
path.setContent("foo")
file = static.File(path.path)
self.assertRaises(UnsupportedMethod, file.render, request)
def test_notFound(self):
"""
If a request is made which encounters a L{File} before a final segment
which does not correspond to any file in the path the L{File} was
created with, a not found response is sent.
"""
base = FilePath(self.mktemp())
base.makedirs()
file = static.File(base.path)
request = DummyRequest(['foobar'])
child = resource.getChildForRequest(file, request)
d = self._render(child, request)
def cbRendered(ignored):
self.assertEqual(request.responseCode, 404)
d.addCallback(cbRendered)
return d
def test_emptyChild(self):
"""
The C{''} child of a L{File} which corresponds to a directory in the
filesystem is a L{DirectoryLister}.
"""
base = FilePath(self.mktemp())
base.makedirs()
file = static.File(base.path)
request = DummyRequest([''])
child = resource.getChildForRequest(file, request)
self.assertIsInstance(child, static.DirectoryLister)
self.assertEqual(child.path, base.path)
def test_securityViolationNotFound(self):
"""
If a request is made which encounters a L{File} before a final segment
which cannot be looked up in the filesystem due to security
considerations, a not found response is sent.
"""
base = FilePath(self.mktemp())
base.makedirs()
file = static.File(base.path)
request = DummyRequest(['..'])
child = resource.getChildForRequest(file, request)
d = self._render(child, request)
def cbRendered(ignored):
self.assertEqual(request.responseCode, 404)
d.addCallback(cbRendered)
return d
def test_forbiddenResource(self):
"""
If the file in the filesystem which would satisfy a request cannot be
read, L{File.render} sets the HTTP response code to I{FORBIDDEN}.
"""
base = FilePath(self.mktemp())
base.setContent('')
# Make sure we can delete the file later.
self.addCleanup(base.chmod, 0700)
# Get rid of our own read permission.
base.chmod(0)
file = static.File(base.path)
request = DummyRequest([''])
d = self._render(file, request)
def cbRendered(ignored):
self.assertEqual(request.responseCode, 403)
d.addCallback(cbRendered)
return d
if platform.isWindows():
test_forbiddenResource.skip = "Cannot remove read permission on Windows"
def test_indexNames(self):
"""
If a request is made which encounters a L{File} before a final empty
segment, a file in the L{File} instance's C{indexNames} list which
exists in the path the L{File} was created with is served as the
response to the request.
"""
base = FilePath(self.mktemp())
base.makedirs()
base.child("foo.bar").setContent("baz")
file = static.File(base.path)
file.indexNames = ['foo.bar']
request = DummyRequest([''])
child = resource.getChildForRequest(file, request)
d = self._render(child, request)
def cbRendered(ignored):
self.assertEqual(''.join(request.written), 'baz')
self.assertEqual(request.outgoingHeaders['content-length'], '3')
d.addCallback(cbRendered)
return d
def test_staticFile(self):
"""
If a request is made which encounters a L{File} before a final segment
which names a file in the path the L{File} was created with, that file
is served as the response to the request.
"""
base = FilePath(self.mktemp())
base.makedirs()
base.child("foo.bar").setContent("baz")
file = static.File(base.path)
request = DummyRequest(['foo.bar'])
child = resource.getChildForRequest(file, request)
d = self._render(child, request)
def cbRendered(ignored):
self.assertEqual(''.join(request.written), 'baz')
self.assertEqual(request.outgoingHeaders['content-length'], '3')
d.addCallback(cbRendered)
return d
def test_staticFileDeletedGetChild(self):
"""
A L{static.File} created for a directory which does not exist should
return childNotFound from L{static.File.getChild}.
"""
staticFile = static.File(self.mktemp())
request = DummyRequest(['foo.bar'])
child = staticFile.getChild("foo.bar", request)
self.assertEqual(child, staticFile.childNotFound)
def test_staticFileDeletedRender(self):
"""
A L{static.File} created for a file which does not exist should render
its C{childNotFound} page.
"""
staticFile = static.File(self.mktemp())
request = DummyRequest(['foo.bar'])
request2 = DummyRequest(['foo.bar'])
d = self._render(staticFile, request)
d2 = self._render(staticFile.childNotFound, request2)
def cbRendered2(ignored):
def cbRendered(ignored):
self.assertEqual(''.join(request.written),
''.join(request2.written))
d.addCallback(cbRendered)
return d
d2.addCallback(cbRendered2)
return d2
def test_headRequest(self):
"""
L{static.File.render} returns an empty response body for I{HEAD}
requests.
"""
path = FilePath(self.mktemp())
path.setContent("foo")
file = static.File(path.path)
request = DummyRequest([''])
request.method = 'HEAD'
d = _render(file, request)
def cbRendered(ignored):
self.assertEqual("".join(request.written), "")
d.addCallback(cbRendered)
return d
def test_processors(self):
"""
If a request is made which encounters a L{File} before a final segment
which names a file with an extension which is in the L{File}'s
C{processors} mapping, the processor associated with that extension is
used to serve the response to the request.
"""
base = FilePath(self.mktemp())
base.makedirs()
base.child("foo.bar").setContent(
"from twisted.web.static import Data\n"
"resource = Data('dynamic world','text/plain')\n")
file = static.File(base.path)
file.processors = {'.bar': script.ResourceScript}
request = DummyRequest(["foo.bar"])
child = resource.getChildForRequest(file, request)
d = self._render(child, request)
def cbRendered(ignored):
self.assertEqual(''.join(request.written), 'dynamic world')
self.assertEqual(request.outgoingHeaders['content-length'], '13')
d.addCallback(cbRendered)
return d
def test_ignoreExt(self):
"""
The list of ignored extensions can be set by passing a value to
L{File.__init__} or by calling L{File.ignoreExt} later.
"""
file = static.File(".")
self.assertEqual(file.ignoredExts, [])
file.ignoreExt(".foo")
file.ignoreExt(".bar")
self.assertEqual(file.ignoredExts, [".foo", ".bar"])
file = static.File(".", ignoredExts=(".bar", ".baz"))
self.assertEqual(file.ignoredExts, [".bar", ".baz"])
def test_ignoredExtensionsIgnored(self):
"""
A request for the I{base} child of a L{File} succeeds with a resource
for the I{base<extension>} file in the path the L{File} was created
with if such a file exists and the L{File} has been configured to
ignore the I{<extension>} extension.
"""
base = FilePath(self.mktemp())
base.makedirs()
base.child('foo.bar').setContent('baz')
base.child('foo.quux').setContent('foobar')
file = static.File(base.path, ignoredExts=(".bar",))
request = DummyRequest(["foo"])
child = resource.getChildForRequest(file, request)
d = self._render(child, request)
def cbRendered(ignored):
self.assertEqual(''.join(request.written), 'baz')
d.addCallback(cbRendered)
return d
class StaticMakeProducerTests(TestCase):
"""
Tests for L{File.makeProducer}.
"""
def makeResourceWithContent(self, content, type=None, encoding=None):
"""
Make a L{static.File} resource that has C{content} for its content.
@param content: The bytes to use as the contents of the resource.
        @param type: Optional value for the content type of the resource.
        @param encoding: Optional value for the content encoding of the
            resource.
"""
fileName = self.mktemp()
fileObject = open(fileName, 'w')
fileObject.write(content)
fileObject.close()
resource = static.File(fileName)
resource.encoding = encoding
resource.type = type
return resource
def contentHeaders(self, request):
"""
Extract the content-* headers from the L{DummyRequest} C{request}.
This returns the subset of C{request.outgoingHeaders} of headers that
start with 'content-'.
"""
contentHeaders = {}
for k, v in request.outgoingHeaders.iteritems():
if k.startswith('content-'):
contentHeaders[k] = v
return contentHeaders
def test_noRangeHeaderGivesNoRangeStaticProducer(self):
"""
makeProducer when no Range header is set returns an instance of
NoRangeStaticProducer.
"""
resource = self.makeResourceWithContent('')
request = DummyRequest([])
producer = resource.makeProducer(request, resource.openForReading())
self.assertIsInstance(producer, static.NoRangeStaticProducer)
def test_noRangeHeaderSets200OK(self):
"""
makeProducer when no Range header is set sets the responseCode on the
request to 'OK'.
"""
resource = self.makeResourceWithContent('')
request = DummyRequest([])
resource.makeProducer(request, resource.openForReading())
self.assertEqual(http.OK, request.responseCode)
def test_noRangeHeaderSetsContentHeaders(self):
"""
makeProducer when no Range header is set sets the Content-* headers
for the response.
"""
length = 123
contentType = "text/plain"
contentEncoding = 'gzip'
resource = self.makeResourceWithContent(
'a'*length, type=contentType, encoding=contentEncoding)
request = DummyRequest([])
resource.makeProducer(request, resource.openForReading())
self.assertEqual(
{'content-type': contentType, 'content-length': str(length),
'content-encoding': contentEncoding},
self.contentHeaders(request))
def test_singleRangeGivesSingleRangeStaticProducer(self):
"""
makeProducer when the Range header requests a single byte range
returns an instance of SingleRangeStaticProducer.
"""
request = DummyRequest([])
request.headers['range'] = 'bytes=1-3'
resource = self.makeResourceWithContent('abcdef')
producer = resource.makeProducer(request, resource.openForReading())
self.assertIsInstance(producer, static.SingleRangeStaticProducer)
def test_singleRangeSets206PartialContent(self):
"""
makeProducer when the Range header requests a single, satisfiable byte
range sets the response code on the request to 'Partial Content'.
"""
request = DummyRequest([])
request.headers['range'] = 'bytes=1-3'
resource = self.makeResourceWithContent('abcdef')
resource.makeProducer(request, resource.openForReading())
self.assertEqual(
http.PARTIAL_CONTENT, request.responseCode)
def test_singleRangeSetsContentHeaders(self):
"""
makeProducer when the Range header requests a single, satisfiable byte
range sets the Content-* headers appropriately.
"""
request = DummyRequest([])
request.headers['range'] = 'bytes=1-3'
contentType = "text/plain"
contentEncoding = 'gzip'
        resource = self.makeResourceWithContent(
            'abcdef', type=contentType, encoding=contentEncoding)
resource.makeProducer(request, resource.openForReading())
self.assertEqual(
{'content-type': contentType, 'content-encoding': contentEncoding,
'content-range': 'bytes 1-3/6', 'content-length': '3'},
self.contentHeaders(request))
def test_singleUnsatisfiableRangeReturnsSingleRangeStaticProducer(self):
"""
makeProducer still returns an instance of L{SingleRangeStaticProducer}
when the Range header requests a single unsatisfiable byte range.
"""
request = DummyRequest([])
request.headers['range'] = 'bytes=4-10'
resource = self.makeResourceWithContent('abc')
producer = resource.makeProducer(request, resource.openForReading())
self.assertIsInstance(producer, static.SingleRangeStaticProducer)
def test_singleUnsatisfiableRangeSets416ReqestedRangeNotSatisfiable(self):
"""
makeProducer sets the response code of the request to of 'Requested
Range Not Satisfiable' when the Range header requests a single
unsatisfiable byte range.
"""
request = DummyRequest([])
request.headers['range'] = 'bytes=4-10'
resource = self.makeResourceWithContent('abc')
resource.makeProducer(request, resource.openForReading())
self.assertEqual(
http.REQUESTED_RANGE_NOT_SATISFIABLE, request.responseCode)
def test_singleUnsatisfiableRangeSetsContentHeaders(self):
"""
makeProducer when the Range header requests a single, unsatisfiable
byte range sets the Content-* headers appropriately.
"""
request = DummyRequest([])
request.headers['range'] = 'bytes=4-10'
contentType = "text/plain"
resource = self.makeResourceWithContent('abc', type=contentType)
resource.makeProducer(request, resource.openForReading())
self.assertEqual(
{'content-type': 'text/plain', 'content-length': '0',
'content-range': 'bytes */3'},
self.contentHeaders(request))
def test_singlePartiallyOverlappingRangeSetsContentHeaders(self):
"""
makeProducer when the Range header requests a single byte range that
partly overlaps the resource sets the Content-* headers appropriately.
"""
request = DummyRequest([])
request.headers['range'] = 'bytes=2-10'
contentType = "text/plain"
resource = self.makeResourceWithContent('abc', type=contentType)
resource.makeProducer(request, resource.openForReading())
self.assertEqual(
{'content-type': 'text/plain', 'content-length': '1',
'content-range': 'bytes 2-2/3'},
self.contentHeaders(request))
def test_multipleRangeGivesMultipleRangeStaticProducer(self):
"""
makeProducer when the Range header requests a single byte range
returns an instance of MultipleRangeStaticProducer.
"""
request = DummyRequest([])
request.headers['range'] = 'bytes=1-3,5-6'
resource = self.makeResourceWithContent('abcdef')
producer = resource.makeProducer(request, resource.openForReading())
self.assertIsInstance(producer, static.MultipleRangeStaticProducer)
def test_multipleRangeSets206PartialContent(self):
"""
makeProducer when the Range header requests a multiple satisfiable
byte ranges sets the response code on the request to 'Partial
Content'.
"""
request = DummyRequest([])
request.headers['range'] = 'bytes=1-3,5-6'
resource = self.makeResourceWithContent('abcdef')
resource.makeProducer(request, resource.openForReading())
self.assertEqual(
http.PARTIAL_CONTENT, request.responseCode)
def test_mutipleRangeSetsContentHeaders(self):
"""
makeProducer when the Range header requests a single, satisfiable byte
range sets the Content-* headers appropriately.
"""
request = DummyRequest([])
request.headers['range'] = 'bytes=1-3,5-6'
resource = self.makeResourceWithContent(
'abcdefghijkl', encoding='gzip')
producer = resource.makeProducer(request, resource.openForReading())
contentHeaders = self.contentHeaders(request)
# The only content-* headers set are content-type and content-length.
self.assertEqual(
set(['content-length', 'content-type']),
set(contentHeaders.keys()))
# The content-length depends on the boundary used in the response.
expectedLength = 5
for boundary, offset, size in producer.rangeInfo:
expectedLength += len(boundary)
self.assertEqual(expectedLength, contentHeaders['content-length'])
# Content-type should be set to a value indicating a multipart
# response and the boundary used to separate the parts.
self.assertIn('content-type', contentHeaders)
contentType = contentHeaders['content-type']
self.assertNotIdentical(
None, re.match(
'multipart/byteranges; boundary="[^"]*"\Z', contentType))
# Content-encoding is not set in the response to a multiple range
# response, which is a bit wussy but works well enough with the way
# static.File does content-encodings...
self.assertNotIn('content-encoding', contentHeaders)
def test_multipleUnsatisfiableRangesReturnsMultipleRangeStaticProducer(self):
"""
        makeProducer still returns an instance of L{MultipleRangeStaticProducer}
when the Range header requests multiple ranges, none of which are
satisfiable.
"""
request = DummyRequest([])
request.headers['range'] = 'bytes=10-12,15-20'
resource = self.makeResourceWithContent('abc')
producer = resource.makeProducer(request, resource.openForReading())
self.assertIsInstance(producer, static.MultipleRangeStaticProducer)
    def test_multipleUnsatisfiableRangesSets416RequestedRangeNotSatisfiable(self):
        """
        makeProducer sets the response code of the request to 'Requested
Range Not Satisfiable' when the Range header requests multiple ranges,
none of which are satisfiable.
"""
request = DummyRequest([])
request.headers['range'] = 'bytes=10-12,15-20'
resource = self.makeResourceWithContent('abc')
resource.makeProducer(request, resource.openForReading())
self.assertEqual(
http.REQUESTED_RANGE_NOT_SATISFIABLE, request.responseCode)
def test_multipleUnsatisfiableRangeSetsContentHeaders(self):
"""
makeProducer when the Range header requests multiple ranges, none of
which are satisfiable, sets the Content-* headers appropriately.
"""
request = DummyRequest([])
contentType = "text/plain"
request.headers['range'] = 'bytes=10-12,15-20'
resource = self.makeResourceWithContent('abc', type=contentType)
resource.makeProducer(request, resource.openForReading())
self.assertEqual(
{'content-length': '0', 'content-range': 'bytes */3'},
self.contentHeaders(request))
def test_oneSatisfiableRangeIsEnough(self):
"""
makeProducer when the Range header requests multiple ranges, at least
one of which matches, sets the response code to 'Partial Content'.
"""
request = DummyRequest([])
request.headers['range'] = 'bytes=1-3,100-200'
resource = self.makeResourceWithContent('abcdef')
resource.makeProducer(request, resource.openForReading())
self.assertEqual(
http.PARTIAL_CONTENT, request.responseCode)
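# Editorial summary (not part of the original suite): taken together, the
# tests above pin down how makeProducer dispatches on the Range header --
# a single satisfiable range yields a SingleRangeStaticProducer and a 206
# response, several ranges yield a MultipleRangeStaticProducer (206 as soon
# as one range is satisfiable), and when nothing is satisfiable the response
# code is 416 with an empty body and a "bytes */<size>" Content-Range.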
class StaticProducerTests(TestCase):
"""
Tests for the abstract L{StaticProducer}.
"""
def test_stopProducingClosesFile(self):
"""
L{StaticProducer.stopProducing} closes the file object the producer is
producing data from.
"""
fileObject = StringIO.StringIO()
producer = static.StaticProducer(None, fileObject)
producer.stopProducing()
self.assertTrue(fileObject.closed)
def test_stopProducingSetsRequestToNone(self):
"""
L{StaticProducer.stopProducing} sets the request instance variable to
None, which indicates to subclasses' resumeProducing methods that no
more data should be produced.
"""
fileObject = StringIO.StringIO()
producer = static.StaticProducer(DummyRequest([]), fileObject)
producer.stopProducing()
self.assertIdentical(None, producer.request)
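# Orientation note (editorial, not from the original module): the producer
# classes exercised below follow Twisted's pull-producer protocol.  Calling
# producer.start() registers the producer with the request; the request then
# calls resumeProducing() whenever it wants more data, each call writes a
# bounded chunk, and once the underlying file is exhausted the producer calls
# request.finish() -- which is why these tests watch notifyFinish().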
class NoRangeStaticProducerTests(TestCase):
"""
Tests for L{NoRangeStaticProducer}.
"""
def test_implementsIPullProducer(self):
"""
L{NoRangeStaticProducer} implements L{IPullProducer}.
"""
verifyObject(
interfaces.IPullProducer,
static.NoRangeStaticProducer(None, None))
def test_resumeProducingProducesContent(self):
"""
L{NoRangeStaticProducer.resumeProducing} writes content from the
resource to the request.
"""
request = DummyRequest([])
content = 'abcdef'
producer = static.NoRangeStaticProducer(
request, StringIO.StringIO(content))
# start calls registerProducer on the DummyRequest, which pulls all
# output from the producer and so we just need this one call.
producer.start()
self.assertEqual(content, ''.join(request.written))
def test_resumeProducingBuffersOutput(self):
"""
L{NoRangeStaticProducer.start} writes at most
C{abstract.FileDescriptor.bufferSize} bytes of content from the
resource to the request at once.
"""
request = DummyRequest([])
bufferSize = abstract.FileDescriptor.bufferSize
content = 'a' * (2*bufferSize + 1)
producer = static.NoRangeStaticProducer(
request, StringIO.StringIO(content))
# start calls registerProducer on the DummyRequest, which pulls all
# output from the producer and so we just need this one call.
producer.start()
expected = [
content[0:bufferSize],
content[bufferSize:2*bufferSize],
content[2*bufferSize:]
]
self.assertEqual(expected, request.written)
def test_finishCalledWhenDone(self):
"""
L{NoRangeStaticProducer.resumeProducing} calls finish() on the request
after it is done producing content.
"""
request = DummyRequest([])
finishDeferred = request.notifyFinish()
callbackList = []
finishDeferred.addCallback(callbackList.append)
producer = static.NoRangeStaticProducer(
request, StringIO.StringIO('abcdef'))
# start calls registerProducer on the DummyRequest, which pulls all
# output from the producer and so we just need this one call.
producer.start()
self.assertEqual([None], callbackList)
class SingleRangeStaticProducerTests(TestCase):
"""
Tests for L{SingleRangeStaticProducer}.
"""
def test_implementsIPullProducer(self):
"""
L{SingleRangeStaticProducer} implements L{IPullProducer}.
"""
verifyObject(
interfaces.IPullProducer,
static.SingleRangeStaticProducer(None, None, None, None))
def test_resumeProducingProducesContent(self):
"""
L{SingleRangeStaticProducer.resumeProducing} writes the given amount
of content, starting at the given offset, from the resource to the
request.
"""
request = DummyRequest([])
content = 'abcdef'
producer = static.SingleRangeStaticProducer(
request, StringIO.StringIO(content), 1, 3)
# DummyRequest.registerProducer pulls all output from the producer, so
# we just need to call start.
producer.start()
self.assertEqual(content[1:4], ''.join(request.written))
def test_resumeProducingBuffersOutput(self):
"""
L{SingleRangeStaticProducer.start} writes at most
C{abstract.FileDescriptor.bufferSize} bytes of content from the
resource to the request at once.
"""
request = DummyRequest([])
bufferSize = abstract.FileDescriptor.bufferSize
content = 'abc' * bufferSize
producer = static.SingleRangeStaticProducer(
request, StringIO.StringIO(content), 1, bufferSize+10)
# DummyRequest.registerProducer pulls all output from the producer, so
# we just need to call start.
producer.start()
expected = [
content[1:bufferSize+1],
content[bufferSize+1:bufferSize+11],
]
self.assertEqual(expected, request.written)
def test_finishCalledWhenDone(self):
"""
L{SingleRangeStaticProducer.resumeProducing} calls finish() on the
request after it is done producing content.
"""
request = DummyRequest([])
finishDeferred = request.notifyFinish()
callbackList = []
finishDeferred.addCallback(callbackList.append)
producer = static.SingleRangeStaticProducer(
request, StringIO.StringIO('abcdef'), 1, 1)
# start calls registerProducer on the DummyRequest, which pulls all
# output from the producer and so we just need this one call.
producer.start()
self.assertEqual([None], callbackList)
class MultipleRangeStaticProducerTests(TestCase):
"""
Tests for L{MultipleRangeStaticProducer}.
"""
def test_implementsIPullProducer(self):
"""
L{MultipleRangeStaticProducer} implements L{IPullProducer}.
"""
verifyObject(
interfaces.IPullProducer,
static.MultipleRangeStaticProducer(None, None, None))
def test_resumeProducingProducesContent(self):
"""
L{MultipleRangeStaticProducer.resumeProducing} writes the requested
chunks of content from the resource to the request, with the supplied
boundaries in between each chunk.
"""
request = DummyRequest([])
content = 'abcdef'
producer = static.MultipleRangeStaticProducer(
request, StringIO.StringIO(content), [('1', 1, 3), ('2', 5, 1)])
# DummyRequest.registerProducer pulls all output from the producer, so
# we just need to call start.
producer.start()
self.assertEqual('1bcd2f', ''.join(request.written))
def test_resumeProducingBuffersOutput(self):
"""
L{MultipleRangeStaticProducer.start} writes about
C{abstract.FileDescriptor.bufferSize} bytes of content from the
resource to the request at once.
To be specific about the 'about' above: it can write slightly more,
for example in the case where the first boundary plus the first chunk
is less than C{bufferSize} but first boundary plus the first chunk
plus the second boundary is more, but this is unimportant as in
        practice the boundaries are fairly small. On the other hand, it is
important for performance to bundle up several small chunks into one
call to request.write.
"""
request = DummyRequest([])
content = '0123456789' * 2
producer = static.MultipleRangeStaticProducer(
request, StringIO.StringIO(content),
[('a', 0, 2), ('b', 5, 10), ('c', 0, 0)])
producer.bufferSize = 10
# DummyRequest.registerProducer pulls all output from the producer, so
# we just need to call start.
producer.start()
expected = [
'a' + content[0:2] + 'b' + content[5:11],
content[11:15] + 'c',
]
self.assertEqual(expected, request.written)
def test_finishCalledWhenDone(self):
"""
L{MultipleRangeStaticProducer.resumeProducing} calls finish() on the
request after it is done producing content.
"""
request = DummyRequest([])
finishDeferred = request.notifyFinish()
callbackList = []
finishDeferred.addCallback(callbackList.append)
producer = static.MultipleRangeStaticProducer(
request, StringIO.StringIO('abcdef'), [('', 1, 2)])
# start calls registerProducer on the DummyRequest, which pulls all
# output from the producer and so we just need this one call.
producer.start()
self.assertEqual([None], callbackList)
class RangeTests(TestCase):
"""
Tests for I{Range-Header} support in L{twisted.web.static.File}.
@type file: L{file}
@ivar file: Temporary (binary) file containing the content to be served.
@type resource: L{static.File}
@ivar resource: A leaf web resource using C{file} as content.
@type request: L{DummyRequest}
@ivar request: A fake request, requesting C{resource}.
@type catcher: L{list}
@ivar catcher: List which gathers all log information.
"""
def setUp(self):
"""
Create a temporary file with a fixed payload of 64 bytes. Create a
resource for that file and create a request which will be for that
resource. Each test can set a different range header to test different
aspects of the implementation.
"""
path = FilePath(self.mktemp())
# This is just a jumble of random stuff. It's supposed to be a good
# set of data for this test, particularly in order to avoid
# accidentally seeing the right result by having a byte sequence
# repeated at different locations or by having byte values which are
# somehow correlated with their position in the string.
self.payload = ('\xf8u\xf3E\x8c7\xce\x00\x9e\xb6a0y0S\xf0\xef\xac\xb7'
'\xbe\xb5\x17M\x1e\x136k{\x1e\xbe\x0c\x07\x07\t\xd0'
'\xbckY\xf5I\x0b\xb8\x88oZ\x1d\x85b\x1a\xcdk\xf2\x1d'
'&\xfd%\xdd\x82q/A\x10Y\x8b')
path.setContent(self.payload)
self.file = path.open()
self.resource = static.File(self.file.name)
self.resource.isLeaf = 1
self.request = DummyRequest([''])
self.request.uri = self.file.name
self.catcher = []
log.addObserver(self.catcher.append)
def tearDown(self):
"""
Clean up the resource file and the log observer.
"""
self.file.close()
log.removeObserver(self.catcher.append)
def _assertLogged(self, expected):
"""
Asserts that a given log message occurred with an expected message.
"""
logItem = self.catcher.pop()
self.assertEqual(logItem["message"][0], expected)
self.assertEqual(
self.catcher, [], "An additional log occured: %r" % (logItem,))
def test_invalidRanges(self):
"""
L{File._parseRangeHeader} raises L{ValueError} when passed
syntactically invalid byte ranges.
"""
f = self.resource._parseRangeHeader
# there's no =
self.assertRaises(ValueError, f, 'bytes')
# unknown isn't a valid Bytes-Unit
self.assertRaises(ValueError, f, 'unknown=1-2')
# there's no - in =stuff
self.assertRaises(ValueError, f, 'bytes=3')
# both start and end are empty
self.assertRaises(ValueError, f, 'bytes=-')
# start isn't an integer
self.assertRaises(ValueError, f, 'bytes=foo-')
# end isn't an integer
self.assertRaises(ValueError, f, 'bytes=-foo')
# end isn't equal to or greater than start
self.assertRaises(ValueError, f, 'bytes=5-4')
def test_rangeMissingStop(self):
"""
A single bytes range without an explicit stop position is parsed into a
two-tuple giving the start position and C{None}.
"""
self.assertEqual(
self.resource._parseRangeHeader('bytes=0-'), [(0, None)])
def test_rangeMissingStart(self):
"""
A single bytes range without an explicit start position is parsed into
a two-tuple of C{None} and the end position.
"""
self.assertEqual(
self.resource._parseRangeHeader('bytes=-3'), [(None, 3)])
def test_range(self):
"""
A single bytes range with explicit start and stop positions is parsed
into a two-tuple of those positions.
"""
self.assertEqual(
self.resource._parseRangeHeader('bytes=2-5'), [(2, 5)])
def test_rangeWithSpace(self):
"""
A single bytes range with whitespace in allowed places is parsed in
the same way as it would be without the whitespace.
"""
self.assertEqual(
self.resource._parseRangeHeader(' bytes=1-2 '), [(1, 2)])
self.assertEqual(
self.resource._parseRangeHeader('bytes =1-2 '), [(1, 2)])
self.assertEqual(
self.resource._parseRangeHeader('bytes= 1-2'), [(1, 2)])
self.assertEqual(
self.resource._parseRangeHeader('bytes=1 -2'), [(1, 2)])
self.assertEqual(
self.resource._parseRangeHeader('bytes=1- 2'), [(1, 2)])
self.assertEqual(
self.resource._parseRangeHeader('bytes=1-2 '), [(1, 2)])
def test_nullRangeElements(self):
"""
If there are multiple byte ranges but only one is non-null, the
non-null range is parsed and its start and stop returned.
"""
self.assertEqual(
self.resource._parseRangeHeader('bytes=1-2,\r\n, ,\t'), [(1, 2)])
def test_multipleRanges(self):
"""
If multiple byte ranges are specified their starts and stops are
returned.
"""
self.assertEqual(
self.resource._parseRangeHeader('bytes=1-2,3-4'),
[(1, 2), (3, 4)])
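    # Illustrative recap (editorial, not part of the original tests): per the
    # assertions above, _parseRangeHeader normalises a Range header value to
    # a list of (start, stop) tuples, with None standing in for an omitted
    # position, e.g.:
    #
    #   self.resource._parseRangeHeader('bytes=0-')       # [(0, None)]
    #   self.resource._parseRangeHeader('bytes=-3')       # [(None, 3)]
    #   self.resource._parseRangeHeader('bytes=1-2,3-4')  # [(1, 2), (3, 4)]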
def test_bodyLength(self):
"""
A correct response to a range request is as long as the length of the
requested range.
"""
self.request.headers['range'] = 'bytes=0-43'
self.resource.render(self.request)
self.assertEqual(len(''.join(self.request.written)), 44)
def test_invalidRangeRequest(self):
"""
        An incorrect range request (RFC 2616 defines a correct range request
        as a Bytes-Unit followed by a '=' character followed by a specific
        range; only 'bytes' is a defined Bytes-Unit) results in the range
        header value being logged and a normal 200 response being sent.
"""
self.request.headers['range'] = range = 'foobar=0-43'
self.resource.render(self.request)
expected = "Ignoring malformed Range header %r" % (range,)
self._assertLogged(expected)
self.assertEqual(''.join(self.request.written), self.payload)
self.assertEqual(self.request.responseCode, http.OK)
self.assertEqual(
self.request.outgoingHeaders['content-length'],
str(len(self.payload)))
def parseMultipartBody(self, body, boundary):
"""
Parse C{body} as a multipart MIME response separated by C{boundary}.
        Note that this will fail the calling test on certain syntactic
problems.
"""
sep = "\r\n--" + boundary
parts = ''.join(body).split(sep)
self.assertEqual('', parts[0])
self.assertEqual('--\r\n', parts[-1])
parsed_parts = []
for part in parts[1:-1]:
before, header1, header2, blank, partBody = part.split('\r\n', 4)
headers = header1 + '\n' + header2
self.assertEqual('', before)
self.assertEqual('', blank)
partContentTypeValue = re.search(
'^content-type: (.*)$', headers, re.I|re.M).group(1)
start, end, size = re.search(
'^content-range: bytes ([0-9]+)-([0-9]+)/([0-9]+)$',
headers, re.I|re.M).groups()
parsed_parts.append(
{'contentType': partContentTypeValue,
'contentRange': (start, end, size),
'body': partBody})
return parsed_parts
def test_multipleRangeRequest(self):
"""
        The response to a request for multiple bytes ranges is a MIME-ish
multipart response.
"""
startEnds = [(0, 2), (20, 30), (40, 50)]
rangeHeaderValue = ','.join(["%s-%s"%(s,e) for (s, e) in startEnds])
self.request.headers['range'] = 'bytes=' + rangeHeaderValue
self.resource.render(self.request)
self.assertEqual(self.request.responseCode, http.PARTIAL_CONTENT)
boundary = re.match(
'^multipart/byteranges; boundary="(.*)"$',
self.request.outgoingHeaders['content-type']).group(1)
parts = self.parseMultipartBody(''.join(self.request.written), boundary)
self.assertEqual(len(startEnds), len(parts))
for part, (s, e) in zip(parts, startEnds):
self.assertEqual(self.resource.type, part['contentType'])
start, end, size = part['contentRange']
self.assertEqual(int(start), s)
self.assertEqual(int(end), e)
self.assertEqual(int(size), self.resource.getFileSize())
self.assertEqual(self.payload[s:e+1], part['body'])
def test_multipleRangeRequestWithRangeOverlappingEnd(self):
"""
        The response to a request for multiple bytes ranges is a MIME-ish
        multipart response, even when one of the ranges falls off the end of
the resource.
"""
startEnds = [(0, 2), (40, len(self.payload) + 10)]
rangeHeaderValue = ','.join(["%s-%s"%(s,e) for (s, e) in startEnds])
self.request.headers['range'] = 'bytes=' + rangeHeaderValue
self.resource.render(self.request)
self.assertEqual(self.request.responseCode, http.PARTIAL_CONTENT)
boundary = re.match(
'^multipart/byteranges; boundary="(.*)"$',
self.request.outgoingHeaders['content-type']).group(1)
parts = self.parseMultipartBody(''.join(self.request.written), boundary)
self.assertEqual(len(startEnds), len(parts))
for part, (s, e) in zip(parts, startEnds):
self.assertEqual(self.resource.type, part['contentType'])
start, end, size = part['contentRange']
self.assertEqual(int(start), s)
self.assertEqual(int(end), min(e, self.resource.getFileSize()-1))
self.assertEqual(int(size), self.resource.getFileSize())
self.assertEqual(self.payload[s:e+1], part['body'])
def test_implicitEnd(self):
"""
        If the end byte position is omitted, then the range is treated as
        extending through the last byte of the resource.
"""
self.request.headers['range'] = 'bytes=23-'
self.resource.render(self.request)
self.assertEqual(''.join(self.request.written), self.payload[23:])
self.assertEqual(len(''.join(self.request.written)), 41)
self.assertEqual(self.request.responseCode, http.PARTIAL_CONTENT)
self.assertEqual(
self.request.outgoingHeaders['content-range'], 'bytes 23-63/64')
self.assertEqual(self.request.outgoingHeaders['content-length'], '41')
def test_implicitStart(self):
"""
If the start byte position is omitted but the end byte position is
supplied, then the range is treated as requesting the last -N bytes of
the resource, where N is the end byte position.
"""
self.request.headers['range'] = 'bytes=-17'
self.resource.render(self.request)
self.assertEqual(''.join(self.request.written), self.payload[-17:])
self.assertEqual(len(''.join(self.request.written)), 17)
self.assertEqual(self.request.responseCode, http.PARTIAL_CONTENT)
self.assertEqual(
self.request.outgoingHeaders['content-range'], 'bytes 47-63/64')
self.assertEqual(self.request.outgoingHeaders['content-length'], '17')
def test_explicitRange(self):
"""
A correct response to a bytes range header request from A to B starts
        with the A'th byte and ends with (and includes) the B'th byte. The first
byte of a page is numbered with 0.
"""
self.request.headers['range'] = 'bytes=3-43'
self.resource.render(self.request)
written = ''.join(self.request.written)
self.assertEqual(written, self.payload[3:44])
self.assertEqual(self.request.responseCode, http.PARTIAL_CONTENT)
self.assertEqual(
self.request.outgoingHeaders['content-range'], 'bytes 3-43/64')
self.assertEqual(
str(len(written)), self.request.outgoingHeaders['content-length'])
def test_explicitRangeOverlappingEnd(self):
"""
A correct response to a bytes range header request from A to B when B
is past the end of the resource starts with the A'th byte and ends
with the last byte of the resource. The first byte of a page is
numbered with 0.
"""
self.request.headers['range'] = 'bytes=40-100'
self.resource.render(self.request)
written = ''.join(self.request.written)
self.assertEqual(written, self.payload[40:])
self.assertEqual(self.request.responseCode, http.PARTIAL_CONTENT)
self.assertEqual(
self.request.outgoingHeaders['content-range'], 'bytes 40-63/64')
self.assertEqual(
str(len(written)), self.request.outgoingHeaders['content-length'])
def test_statusCodeRequestedRangeNotSatisfiable(self):
"""
If a range is syntactically invalid due to the start being greater than
the end, the range header is ignored (the request is responded to as if
it were not present).
"""
self.request.headers['range'] = 'bytes=20-13'
self.resource.render(self.request)
self.assertEqual(self.request.responseCode, http.OK)
self.assertEqual(''.join(self.request.written), self.payload)
self.assertEqual(
self.request.outgoingHeaders['content-length'],
str(len(self.payload)))
def test_invalidStartBytePos(self):
"""
If a range is unsatisfiable due to the start not being less than the
length of the resource, the response is 416 (Requested range not
satisfiable) and no data is written to the response body (RFC 2616,
section 14.35.1).
"""
self.request.headers['range'] = 'bytes=67-108'
self.resource.render(self.request)
self.assertEqual(
self.request.responseCode, http.REQUESTED_RANGE_NOT_SATISFIABLE)
self.assertEqual(''.join(self.request.written), '')
self.assertEqual(self.request.outgoingHeaders['content-length'], '0')
# Sections 10.4.17 and 14.16
self.assertEqual(
self.request.outgoingHeaders['content-range'],
'bytes */%d' % (len(self.payload),))
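# Editorial sketch (not part of the original module): the Content-Range
# arithmetic asserted throughout RangeTests can be summarised by a small
# helper; the name below is hypothetical and exists only for illustration.
def _exampleContentRange(start, end, size):
    """
    Build the Content-Range value a 206 response carries for a satisfiable
    bytes range, e.g. _exampleContentRange(3, 43, 64) == 'bytes 3-43/64'.
    An unsatisfiable request instead gets 'bytes */%d' % (size,).
    """
    return 'bytes %d-%d/%d' % (start, end, size)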
class DirectoryListerTest(TestCase):
"""
Tests for L{static.DirectoryLister}.
"""
def _request(self, uri):
request = DummyRequest([''])
request.uri = uri
return request
def test_renderHeader(self):
"""
        L{static.DirectoryLister} prints the request uri as the header of the
rendered content.
"""
path = FilePath(self.mktemp())
path.makedirs()
lister = static.DirectoryLister(path.path)
data = lister.render(self._request('foo'))
self.assertIn("<h1>Directory listing for foo</h1>", data)
self.assertIn("<title>Directory listing for foo</title>", data)
def test_renderUnquoteHeader(self):
"""
        L{static.DirectoryLister} unquotes the request uri before printing it.
"""
path = FilePath(self.mktemp())
path.makedirs()
lister = static.DirectoryLister(path.path)
data = lister.render(self._request('foo%20bar'))
self.assertIn("<h1>Directory listing for foo bar</h1>", data)
self.assertIn("<title>Directory listing for foo bar</title>", data)
def test_escapeHeader(self):
"""
L{static.DirectoryLister} escape "&", "<" and ">" after unquoting the
request uri.
"""
path = FilePath(self.mktemp())
path.makedirs()
lister = static.DirectoryLister(path.path)
data = lister.render(self._request('foo%26bar'))
self.assertIn("<h1>Directory listing for foo&bar</h1>", data)
self.assertIn("<title>Directory listing for foo&bar</title>", data)
def test_renderFiles(self):
"""
L{static.DirectoryLister} is able to list all the files inside a
directory.
"""
path = FilePath(self.mktemp())
path.makedirs()
path.child('file1').setContent("content1")
path.child('file2').setContent("content2" * 1000)
lister = static.DirectoryLister(path.path)
data = lister.render(self._request('foo'))
body = """<tr class="odd">
<td><a href="file1">file1</a></td>
<td>8B</td>
<td>[text/html]</td>
<td></td>
</tr>
<tr class="even">
<td><a href="file2">file2</a></td>
<td>7K</td>
<td>[text/html]</td>
<td></td>
</tr>"""
self.assertIn(body, data)
def test_renderDirectories(self):
"""
L{static.DirectoryLister} is able to list all the directories inside
a directory.
"""
path = FilePath(self.mktemp())
path.makedirs()
path.child('dir1').makedirs()
path.child('dir2 & 3').makedirs()
lister = static.DirectoryLister(path.path)
data = lister.render(self._request('foo'))
body = """<tr class="odd">
<td><a href="dir1/">dir1/</a></td>
<td></td>
<td>[Directory]</td>
<td></td>
</tr>
<tr class="even">
<td><a href="dir2%20%26%203/">dir2 & 3/</a></td>
<td></td>
<td>[Directory]</td>
<td></td>
</tr>"""
self.assertIn(body, data)
def test_renderFiltered(self):
"""
        L{static.DirectoryLister} takes an optional C{dirs} argument that
        filters the list of directories and files printed.
"""
path = FilePath(self.mktemp())
path.makedirs()
path.child('dir1').makedirs()
path.child('dir2').makedirs()
path.child('dir3').makedirs()
lister = static.DirectoryLister(path.path, dirs=["dir1", "dir3"])
data = lister.render(self._request('foo'))
body = """<tr class="odd">
<td><a href="dir1/">dir1/</a></td>
<td></td>
<td>[Directory]</td>
<td></td>
</tr>
<tr class="even">
<td><a href="dir3/">dir3/</a></td>
<td></td>
<td>[Directory]</td>
<td></td>
</tr>"""
self.assertIn(body, data)
def test_oddAndEven(self):
"""
        L{static.DirectoryLister} alternates the CSS class between odd and
        even rows of the table.
"""
lister = static.DirectoryLister(None)
elements = [{"href": "", "text": "", "size": "", "type": "",
"encoding": ""} for i in xrange(5)]
content = lister._buildTableContent(elements)
self.assertEqual(len(content), 5)
self.assertTrue(content[0].startswith('<tr class="odd">'))
self.assertTrue(content[1].startswith('<tr class="even">'))
self.assertTrue(content[2].startswith('<tr class="odd">'))
self.assertTrue(content[3].startswith('<tr class="even">'))
self.assertTrue(content[4].startswith('<tr class="odd">'))
def test_contentType(self):
"""
L{static.DirectoryLister} produces a MIME-type that indicates that it is
HTML, and includes its charset (UTF-8).
"""
path = FilePath(self.mktemp())
path.makedirs()
lister = static.DirectoryLister(path.path)
req = self._request('')
lister.render(req)
self.assertEqual(req.outgoingHeaders['content-type'],
"text/html; charset=utf-8")
def test_mimeTypeAndEncodings(self):
"""
L{static.DirectoryLister} is able to detect mimetype and encoding of
listed files.
"""
path = FilePath(self.mktemp())
path.makedirs()
path.child('file1.txt').setContent("file1")
path.child('file2.py').setContent("python")
path.child('file3.conf.gz').setContent("conf compressed")
path.child('file4.diff.bz2').setContent("diff compressed")
directory = os.listdir(path.path)
directory.sort()
contentTypes = {
".txt": "text/plain",
".py": "text/python",
".conf": "text/configuration",
".diff": "text/diff"
}
lister = static.DirectoryLister(path.path, contentTypes=contentTypes)
dirs, files = lister._getFilesAndDirectories(directory)
self.assertEqual(dirs, [])
self.assertEqual(files, [
{'encoding': '',
'href': 'file1.txt',
'size': '5B',
'text': 'file1.txt',
'type': '[text/plain]'},
{'encoding': '',
'href': 'file2.py',
'size': '6B',
'text': 'file2.py',
'type': '[text/python]'},
{'encoding': '[gzip]',
'href': 'file3.conf.gz',
'size': '15B',
'text': 'file3.conf.gz',
'type': '[text/configuration]'},
{'encoding': '[bzip2]',
'href': 'file4.diff.bz2',
'size': '15B',
'text': 'file4.diff.bz2',
'type': '[text/diff]'}])
def test_brokenSymlink(self):
"""
        If one of the files in the listing points to a broken symlink, it should not
be returned by L{static.DirectoryLister._getFilesAndDirectories}.
"""
path = FilePath(self.mktemp())
path.makedirs()
file1 = path.child('file1')
file1.setContent("file1")
file1.linkTo(path.child("file2"))
file1.remove()
lister = static.DirectoryLister(path.path)
directory = os.listdir(path.path)
directory.sort()
dirs, files = lister._getFilesAndDirectories(directory)
self.assertEqual(dirs, [])
self.assertEqual(files, [])
if getattr(os, "symlink", None) is None:
test_brokenSymlink.skip = "No symlink support"
def test_childrenNotFound(self):
"""
Any child resource of L{static.DirectoryLister} renders an HTTP
I{NOT FOUND} response code.
"""
path = FilePath(self.mktemp())
path.makedirs()
lister = static.DirectoryLister(path.path)
request = self._request('')
child = resource.getChildForRequest(lister, request)
result = _render(child, request)
def cbRendered(ignored):
self.assertEqual(request.responseCode, http.NOT_FOUND)
result.addCallback(cbRendered)
return result
def test_repr(self):
"""
L{static.DirectoryLister.__repr__} gives the path of the lister.
"""
path = FilePath(self.mktemp())
lister = static.DirectoryLister(path.path)
self.assertEqual(repr(lister),
"<DirectoryLister of %r>" % (path.path,))
self.assertEqual(str(lister),
"<DirectoryLister of %r>" % (path.path,))
def test_formatFileSize(self):
"""
        L{static.formatFileSize} formats an amount of bytes into a more
        readable form.
"""
self.assertEqual(static.formatFileSize(0), "0B")
self.assertEqual(static.formatFileSize(123), "123B")
self.assertEqual(static.formatFileSize(4567), "4K")
self.assertEqual(static.formatFileSize(8900000), "8M")
self.assertEqual(static.formatFileSize(1234000000), "1G")
self.assertEqual(static.formatFileSize(1234567890000), "1149G")
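# A hedged note on the expectations above: they are consistent with
# formatFileSize stepping through units by integer division by 1024, e.g.
# 4567 // 1024 == 4 ("4K"), 8900000 // 1024 ** 2 == 8 ("8M"), and
# 1234567890000 // 1024 ** 3 == 1149 ("1149G"), with no unit above G.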
class LoadMimeTypesTests(TestCase):
"""
Tests for the MIME type loading routine.
@cvar UNSET: A sentinel to signify that C{self.paths} has not been set by
the mock init.
"""
UNSET = object()
def setUp(self):
self.paths = self.UNSET
def _fakeInit(self, paths):
"""
A mock L{mimetypes.init} that records the value of the passed C{paths}
argument.
@param paths: The paths that will be recorded.
"""
self.paths = paths
def test_defaultArgumentIsNone(self):
"""
By default, C{None} is passed to C{mimetypes.init}.
"""
static.loadMimeTypes(init=self._fakeInit)
self.assertIdentical(self.paths, None)
def test_extraLocationsWork(self):
"""
Passed MIME type files are passed to C{mimetypes.init}.
"""
paths = ["x", "y", "z"]
static.loadMimeTypes(paths, init=self._fakeInit)
self.assertIdentical(self.paths, paths)
def test_usesGlobalInitFunction(self):
"""
By default, C{mimetypes.init} is called.
"""
# Checking mimetypes.inited doesn't always work, because
# something, somewhere, calls mimetypes.init. Yay global
# mutable state :)
args, _, _, defaults = inspect.getargspec(static.loadMimeTypes)
defaultInit = defaults[args.index("init")]
self.assertIdentical(defaultInit, mimetypes.init)
| bsd-3-clause |
liukaijv/XlsxWriter | xlsxwriter/test/worksheet/test_worksheet03.py | 8 | 4143 | ###############################################################################
#
# Tests for XlsxWriter.
#
# Copyright (c), 2013-2015, John McNamara, [email protected]
#
import unittest
from ...compatibility import StringIO
from ..helperfunctions import _xml_to_list
from ...worksheet import Worksheet
from ...format import Format
class TestAssembleWorksheet(unittest.TestCase):
"""
Test assembling a complete Worksheet file.
"""
def test_assemble_xml_file(self):
"""Test writing a worksheet with column formatting set."""
self.maxDiff = None
fh = StringIO()
worksheet = Worksheet()
worksheet._set_filehandle(fh)
cell_format = Format({'xf_index': 1})
worksheet.set_column(1, 3, 5)
worksheet.set_column(5, 5, 8, None, {'hidden': True})
worksheet.set_column(7, 7, None, cell_format)
worksheet.set_column(9, 9, 2)
worksheet.set_column(11, 11, None, None, {'hidden': True})
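        # Editorial note: set_column(first_col, last_col, width, cell_format,
        # options) takes zero-based column indices, so the (1, 3) range above
        # is expected to surface as the one-based min="2" max="4" <col>
        # element in the XML asserted below.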
worksheet.select()
worksheet._assemble_xml_file()
exp = _xml_to_list("""
<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
<worksheet xmlns="http://schemas.openxmlformats.org/spreadsheetml/2006/main" xmlns:r="http://schemas.openxmlformats.org/officeDocument/2006/relationships">
<dimension ref="F1:H1"/>
<sheetViews>
<sheetView tabSelected="1" workbookViewId="0"/>
</sheetViews>
<sheetFormatPr defaultRowHeight="15"/>
<cols>
<col min="2" max="4" width="5.7109375" customWidth="1"/>
<col min="6" max="6" width="8.7109375" hidden="1" customWidth="1"/>
<col min="8" max="8" width="9.140625" style="1"/>
<col min="10" max="10" width="2.7109375" customWidth="1"/>
<col min="12" max="12" width="0" hidden="1" customWidth="1"/>
</cols>
<sheetData/>
<pageMargins left="0.7" right="0.7" top="0.75" bottom="0.75" header="0.3" footer="0.3"/>
</worksheet>
""")
got = _xml_to_list(fh.getvalue())
self.assertEqual(got, exp)
def test_assemble_xml_file_A1(self):
"""
Test writing a worksheet with column formatting set using
A1 Notation.
"""
self.maxDiff = None
fh = StringIO()
worksheet = Worksheet()
worksheet._set_filehandle(fh)
cell_format = Format({'xf_index': 1})
worksheet.set_column('B:D', 5)
worksheet.set_column('F:F', 8, None, {'hidden': True})
worksheet.set_column('H:H', None, cell_format)
worksheet.set_column('J:J', 2)
worksheet.set_column('L:L', None, None, {'hidden': True})
worksheet.select()
worksheet._assemble_xml_file()
exp = _xml_to_list("""
<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
<worksheet xmlns="http://schemas.openxmlformats.org/spreadsheetml/2006/main" xmlns:r="http://schemas.openxmlformats.org/officeDocument/2006/relationships">
<dimension ref="F1:H1"/>
<sheetViews>
<sheetView tabSelected="1" workbookViewId="0"/>
</sheetViews>
<sheetFormatPr defaultRowHeight="15"/>
<cols>
<col min="2" max="4" width="5.7109375" customWidth="1"/>
<col min="6" max="6" width="8.7109375" hidden="1" customWidth="1"/>
<col min="8" max="8" width="9.140625" style="1"/>
<col min="10" max="10" width="2.7109375" customWidth="1"/>
<col min="12" max="12" width="0" hidden="1" customWidth="1"/>
</cols>
<sheetData/>
<pageMargins left="0.7" right="0.7" top="0.75" bottom="0.75" header="0.3" footer="0.3"/>
</worksheet>
""")
got = _xml_to_list(fh.getvalue())
self.assertEqual(got, exp)
| bsd-2-clause |
petertodd/bitcoin | contrib/devtools/update-translations.py | 54 | 2334 | #!/usr/bin/python
# Copyright (c) 2014 Wladimir J. van der Laan
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
'''
Run this script from the root of the repository to update all translations from
transifex.
It will do the following automatically:
- fetch all translations using the tx tool
- post-process them into valid and committable format
- remove invalid control characters
- remove location tags (makes diffs less noisy)
TODO:
- auto-add new translations to the build system according to the translation process
- remove 'unfinished' translation items
'''
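# Usage sketch (an assumption based on the docstring above, not an official
# recipe): with the transifex client installed and configured, run from the
# repository root, e.g.
#
#   python contrib/devtools/update-translations.py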
from __future__ import division, print_function
import subprocess
import re
import sys
import os
# Name of transifex tool
TX = 'tx'
# Name of source language file
SOURCE_LANG = 'bitcoin_en.ts'
# Directory with locale files
LOCALE_DIR = 'src/qt/locale'
def check_at_repository_root():
if not os.path.exists('.git'):
print('No .git directory found')
print('Execute this script at the root of the repository', file=sys.stderr)
exit(1)
def fetch_all_translations():
if subprocess.call([TX, 'pull', '-f']):
print('Error while fetching translations', file=sys.stderr)
exit(1)
def postprocess_translations():
print('Postprocessing...')
for filename in os.listdir(LOCALE_DIR):
# process only language files, and do not process source language
if not filename.endswith('.ts') or filename == SOURCE_LANG:
continue
filepath = os.path.join(LOCALE_DIR, filename)
with open(filepath, 'rb') as f:
data = f.read()
# remove non-allowed control characters
data = re.sub('[\x00-\x09\x0b\x0c\x0e-\x1f]', '', data)
data = data.split('\n')
# strip locations from non-origin translation
# location tags are used to guide translators, they are not necessary for compilation
# TODO: actually process XML instead of relying on Transifex's one-tag-per-line output format
data = [line for line in data if not '<location' in line]
with open(filepath, 'wb') as f:
f.write('\n'.join(data))
if __name__ == '__main__':
check_at_repository_root()
fetch_all_translations()
postprocess_translations()
| mit |
traveloka/ansible | lib/ansible/modules/network/eos/eos_config.py | 25 | 12504 | #!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'core',
'version': '1.0'}
DOCUMENTATION = """
---
module: eos_config
version_added: "2.1"
author: "Peter Sprygada (@privateip)"
short_description: Manage Arista EOS configuration sections
description:
- Arista EOS configurations use a simple block indent file syntax
for segmenting configuration into sections. This module provides
an implementation for working with eos configuration sections in
a deterministic way. This module works with either CLI or eAPI
transports.
extends_documentation_fragment: eos
options:
lines:
description:
- The ordered set of commands that should be configured in the
section. The commands must be the exact same commands as found
in the device running-config. Be sure to note the configuration
command syntax as some commands are automatically modified by the
device config parser.
required: false
default: null
parents:
description:
- The ordered set of parents that uniquely identify the section
the commands should be checked against. If the parents argument
is omitted, the commands are checked against the set of top
level or global commands.
required: false
default: null
src:
description:
- The I(src) argument provides a path to the configuration file
to load into the remote system. The path can either be a full
system path to the configuration file if the value starts with /
or relative to the root of the implemented role or playbook.
This argument is mutually exclusive with the I(lines) and
I(parents) arguments.
required: false
default: null
version_added: "2.2"
before:
description:
- The ordered set of commands to push on to the command stack if
a change needs to be made. This allows the playbook designer
the opportunity to perform configuration commands prior to pushing
any changes without affecting how the set of commands are matched
against the system.
required: false
default: null
after:
description:
- The ordered set of commands to append to the end of the command
stack if a change needs to be made. Just like with I(before) this
allows the playbook designer to append a set of commands to be
executed after the command set.
required: false
default: null
match:
description:
- Instructs the module on the way to perform the matching of
the set of commands against the current device config. If
match is set to I(line), commands are matched line by line. If
match is set to I(strict), command lines are matched with respect
to position. If match is set to I(exact), command lines
must be an equal match. Finally, if match is set to I(none), the
module will not attempt to compare the source configuration with
the running configuration on the remote device.
required: false
default: line
choices: ['line', 'strict', 'exact', 'none']
replace:
description:
- Instructs the module on the way to perform the configuration
on the device. If the replace argument is set to I(line) then
the modified lines are pushed to the device in configuration
mode. If the replace argument is set to I(block) then the entire
command block is pushed to the device in configuration mode if any
line is not correct.
required: false
default: line
choices: ['line', 'block', 'config']
force:
description:
- The force argument instructs the module to not consider the
current devices running-config. When set to true, this will
cause the module to push the contents of I(src) into the device
without first checking if already configured.
- Note this argument should be considered deprecated. To achieve
the equivalent, set the C(match=none) which is idempotent. This argument
will be removed in a future release.
required: false
default: false
choices: ['yes', 'no']
backup:
description:
- This argument will cause the module to create a full backup of
the current C(running-config) from the remote device before any
changes are made. The backup file is written to the C(backup)
folder in the playbook root directory. If the directory does not
exist, it is created.
required: false
default: no
choices: ['yes', 'no']
version_added: "2.2"
config:
description:
- The module, by default, will connect to the remote device and
retrieve the current running-config to use as a base for comparing
against the contents of source. There are times when it is not
desirable to have the task get the current running-config for
every task in a playbook. The I(config) argument allows the
implementer to pass in the configuration to use as the base
config for comparison.
required: false
default: null
defaults:
description:
- The I(defaults) argument will influence how the running-config
is collected from the device. When the value is set to true,
the command used to collect the running-config is append with
the all keyword. When the value is set to false, the command
is issued without the all keyword
required: false
default: false
version_added: "2.2"
save:
description:
- The C(save) argument instructs the module to save the
running-config to startup-config. This operation is performed
after any changes are made to the current running config. If
no changes are made, the configuration is still saved to the
startup config. This option will always cause the module to
return changed.
required: false
default: false
version_added: "2.2"
"""
EXAMPLES = """
# Note: examples below use the following provider dict to handle
# transport and authentication to the node.
vars:
cli:
host: "{{ inventory_hostname }}"
username: admin
password: admin
transport: cli
- eos_config:
lines: hostname {{ inventory_hostname }}
provider: "{{ cli }}"
- eos_config:
lines:
- 10 permit ip 1.1.1.1/32 any log
- 20 permit ip 2.2.2.2/32 any log
- 30 permit ip 3.3.3.3/32 any log
- 40 permit ip 4.4.4.4/32 any log
- 50 permit ip 5.5.5.5/32 any log
parents: ip access-list test
before: no ip access-list test
match: exact
provider: "{{ cli }}"
- eos_config:
lines:
- 10 permit ip 1.1.1.1/32 any log
- 20 permit ip 2.2.2.2/32 any log
- 30 permit ip 3.3.3.3/32 any log
- 40 permit ip 4.4.4.4/32 any log
parents: ip access-list test
before: no ip access-list test
replace: block
provider: "{{ cli }}"
- name: load configuration from file
eos_config:
src: eos.cfg
provider: "{{ cli }}"
"""
RETURN = """
updates:
description: The set of commands that will be pushed to the remote device
returned: Only when lines is specified.
type: list
sample: ['...', '...']
backup_path:
description: The full path to the backup file
returned: when backup is yes
type: path
sample: /playbooks/ansible/backup/eos_config.2016-07-16@22:28:34
"""
import time
from ansible.module_utils.netcfg import NetworkConfig, dumps
from ansible.module_utils.eos import NetworkModule, NetworkError
from ansible.module_utils.basic import get_exception
def check_args(module, warnings):
if module.params['force']:
warnings.append('The force argument is deprecated, please use '
'match=none instead. This argument will be '
'removed in the future')
if not module.connection.supports_sessions():
warnings.append('The current version of EOS on the remote device does '
'not support configuration sessions. The commit '
'argument will be ignored')
def get_candidate(module):
candidate = NetworkConfig(indent=3)
if module.params['src']:
candidate.load(module.params['src'])
elif module.params['lines']:
parents = module.params['parents'] or list()
candidate.add(module.params['lines'], parents=parents)
return candidate
def get_config(module, defaults=False):
contents = module.params['config']
if not contents:
defaults = module.params['defaults']
contents = module.config.get_config(include_defaults=defaults)
return NetworkConfig(indent=3, contents=contents)
def load_config(module, commands, result):
replace = module.params['replace'] == 'config'
commit = not module.check_mode
diff = module.config.load_config(commands, replace=replace, commit=commit)
if diff and module.connection.supports_sessions():
result['diff'] = dict(prepared=diff)
result['changed'] = True
elif diff:
result['changed'] = True
def run(module, result):
match = module.params['match']
replace = module.params['replace']
candidate = get_candidate(module)
if match != 'none' and replace != 'config':
config = get_config(module)
configobjs = candidate.difference(config, match=match, replace=replace)
else:
configobjs = candidate.items
if configobjs:
commands = dumps(configobjs, 'commands').split('\n')
if module.params['lines']:
if module.params['before']:
commands[:0] = module.params['before']
if module.params['after']:
commands.extend(module.params['after'])
result['updates'] = commands
module.log('commands: %s' % commands)
load_config(module, commands, result)
if module.params['save']:
if not module.check_mode:
module.config.save_config()
result['changed'] = True
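# Editorial recap of the flow above (not part of the module): get_candidate()
# builds a NetworkConfig from either src or lines; unless match=none or
# replace=config, it is diffed against the running config returned by
# get_config(), and only the differing command objects are serialised with
# dumps() and pushed through load_config().  With lines=['hostname foo'] and
# a running config that already contains that line, configobjs is empty and
# no commands are sent.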
def main():
""" main entry point for module execution
"""
argument_spec = dict(
src=dict(type='path'),
lines=dict(aliases=['commands'], type='list'),
parents=dict(type='list'),
before=dict(type='list'),
after=dict(type='list'),
match=dict(default='line', choices=['line', 'strict', 'exact', 'none']),
replace=dict(default='line', choices=['line', 'block', 'config']),
# this argument is deprecated in favor of setting match: none
# it will be removed in a future version
force=dict(default=False, type='bool'),
config=dict(),
defaults=dict(type='bool', default=False),
backup=dict(type='bool', default=False),
save=dict(default=False, type='bool'),
)
mutually_exclusive = [('lines', 'src')]
required_if = [('match', 'strict', ['lines']),
('match', 'exact', ['lines']),
('replace', 'block', ['lines']),
('replace', 'config', ['src'])]
module = NetworkModule(argument_spec=argument_spec,
mutually_exclusive=mutually_exclusive,
required_if=required_if,
supports_check_mode=True)
if module.params['force'] is True:
module.params['match'] = 'none'
warnings = list()
check_args(module, warnings)
result = dict(changed=False, warnings=warnings)
if module.params['backup']:
result['__backup__'] = module.config.get_config()
try:
run(module, result)
except NetworkError:
exc = get_exception()
module.fail_json(msg=str(exc), **exc.kwargs)
module.exit_json(**result)
if __name__ == '__main__':
main()
| gpl-3.0 |
0k/odoo | addons/web_tip/__openerp__.py | 31 | 1291 | ##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2013 OpenERP SA (<http://openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Tips',
'category': 'Usability',
'description': """
OpenERP Web tips.
========================
""",
'version': '0.1',
'author': 'OpenERP SA',
'depends': ['web'],
'data': [
'security/ir.model.access.csv',
'views/tip.xml',
'web_tip_view.xml'
],
'auto_install': True
}
| agpl-3.0 |
alex/warehouse | tests/unit/test_sessions.py | 1 | 21090 | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time
import msgpack
import redis
import pretend
import pytest
from pyramid import viewderivers
import warehouse.sessions
from warehouse.sessions import (
InvalidSession, Session, SessionFactory, includeme, session_view,
)
from warehouse.utils import crypto
class TestInvalidSession:
@pytest.mark.parametrize(
"method",
[
# IDict methods
"__contains__",
"__delitem__",
"__getitem__",
"__iter__",
"__len__",
"__setitem__",
"clear",
"copy",
"fromkeys",
"get",
"items",
"keys",
"pop",
"popitem",
"setdefault",
"update",
"values",
# ISession methods
"invalidate",
"flash",
"changed",
"get_csrf_token",
"peek_flash",
"new_csrf_token",
"pop_flash",
# Our custom methods.
"should_save",
],
)
def test_methods_raise(self, method):
session = InvalidSession()
with pytest.raises(RuntimeError):
getattr(session, method)()
@pytest.mark.parametrize("name", ["created", "new", "sid"])
    def test_property_raises(self, name):
session = InvalidSession()
with pytest.raises(RuntimeError):
getattr(session, name)
class TestSession:
@pytest.mark.parametrize(
("data", "expected"),
[
(None, {}),
({}, {}),
({"foo": "bar"}, {"foo": "bar"}),
]
)
def test_create_new(self, monkeypatch, data, expected):
monkeypatch.setattr(time, "time", lambda: 100)
monkeypatch.setattr(crypto, "random_token", lambda: "123456")
session = Session(data)
assert session == expected
assert session.sid == "123456"
assert session.new
assert session.created == 100
assert not session.invalidated
@pytest.mark.parametrize(
("data", "expected", "new"),
[
(None, {}, True),
({}, {}, True),
({"foo": "bar"}, {"foo": "bar"}, True),
(None, {}, False),
({}, {}, False),
({"foo": "bar"}, {"foo": "bar"}, False),
]
)
def test_create_with_session_id(self, monkeypatch, data, expected, new):
monkeypatch.setattr(time, "time", lambda: 100)
session = Session(data, "wat", new)
assert session == expected
assert session.sid == "wat"
assert session.new is new
assert session.created == 100
assert not session.invalidated
def test_changed_marks_as_changed(self):
session = Session()
assert not session._changed
session.changed()
assert session._changed
def test_invalidate(self, monkeypatch):
session_ids = iter(["123456", "7890"])
monkeypatch.setattr(crypto, "random_token", lambda: next(session_ids))
session = Session({"foo": "bar"}, "original id", False)
assert session == {"foo": "bar"}
assert session.sid == "original id"
assert not session.new
assert not session.invalidated
session.invalidate()
assert session == {}
assert session.sid == "123456"
assert session.new
assert session.invalidated == {"original id"}
session.invalidate()
assert session == {}
assert session.sid == "7890"
assert session.new
assert session.invalidated == {"original id", "123456"}
def test_invalidate_empty(self):
session = Session({"foo": "bar"})
session.invalidate()
assert session == {}
assert session.invalidated == set()
def test_should_save(self):
session = Session()
assert not session.should_save()
session.changed()
assert session.should_save()
@pytest.mark.parametrize(
("data", "method", "args"),
[
({"foo": "bar"}, "__delitem__", ["foo"]),
({}, "__setitem__", ["foo", "bar"]),
({}, "clear", []),
({"foo": "bar"}, "pop", ["foo"]),
({"foo": "bar"}, "popitem", []),
({}, "setdefault", ["foo", "bar"]),
({}, "update", [{"foo": "bar"}]),
],
)
def test_methods_call_changed(self, data, method, args):
session = Session(data)
session.changed = pretend.call_recorder(lambda: None)
getattr(session, method)(*args)
assert session.changed.calls == [pretend.call()]
@pytest.mark.parametrize(
("queue", "expected"),
[
(None, "_flash_messages"),
("foobar", "_flash_messages.foobar"),
],
)
def test_generate_flash_key(self, queue, expected):
session = Session()
assert session._get_flash_queue_key(queue) == expected
def test_flash_messages(self):
session = Session()
assert session.peek_flash() == []
assert session.peek_flash(queue="foo") == []
assert session.pop_flash() == []
assert session.pop_flash(queue="foo") == []
session.flash("A Flash Message")
assert session.peek_flash() == ["A Flash Message"]
assert session.peek_flash(queue="foo") == []
session.flash("Another Flash Message", queue="foo")
assert session.peek_flash() == ["A Flash Message"]
assert session.peek_flash(queue="foo") == ["Another Flash Message"]
session.flash("A Flash Message")
assert session.peek_flash() == ["A Flash Message", "A Flash Message"]
assert session.peek_flash(queue="foo") == ["Another Flash Message"]
session.flash("A Flash Message", allow_duplicate=True)
assert session.peek_flash() == [
"A Flash Message",
"A Flash Message",
"A Flash Message",
]
assert session.peek_flash(queue="foo") == ["Another Flash Message"]
session.flash("A Flash Message", allow_duplicate=False)
assert session.peek_flash() == [
"A Flash Message",
"A Flash Message",
"A Flash Message",
]
assert session.peek_flash(queue="foo") == ["Another Flash Message"]
assert session.pop_flash() == [
"A Flash Message",
"A Flash Message",
"A Flash Message",
]
assert session.pop_flash(queue="foo") == ["Another Flash Message"]
assert session.peek_flash() == []
assert session.peek_flash(queue="foo") == []
assert session.pop_flash() == []
assert session.pop_flash(queue="foo") == []
def test_csrf_token(self, monkeypatch):
tokens = iter(["123456", "7890"])
monkeypatch.setattr(crypto, "random_token", lambda: next(tokens))
session = Session()
assert session._csrf_token_key not in session
assert session.new_csrf_token() == "123456"
assert session._csrf_token_key in session
assert session.get_csrf_token() == "123456"
assert session.new_csrf_token() == "7890"
assert session._csrf_token_key in session
assert session.get_csrf_token() == "7890"
def test_get_csrf_token_empty(self):
session = Session()
session.new_csrf_token = pretend.call_recorder(lambda: "123456")
assert session.get_csrf_token() == "123456"
assert session.new_csrf_token.calls == [pretend.call()]
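# Orientation note (editorial, not part of the original tests): the factory
# exercised below signs the session id with crypto.TimestampSigner (salt
# "session"), keeps msgpack-serialised session data in Redis under keys of
# the form "warehouse/session/data/<sid>", and re-reads and validates both on
# every request -- which is what the stubbed signer and redis calls assert.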
class TestSessionFactory:
def test_initialize(self, monkeypatch):
timestamp_signer_obj = pretend.stub()
timestamp_signer_create = pretend.call_recorder(
lambda secret, salt: timestamp_signer_obj
)
monkeypatch.setattr(crypto, "TimestampSigner", timestamp_signer_create)
strict_redis_obj = pretend.stub()
strict_redis_cls = pretend.stub(
from_url=pretend.call_recorder(lambda url: strict_redis_obj),
)
monkeypatch.setattr(redis, "StrictRedis", strict_redis_cls)
session_factory = SessionFactory("mysecret", "my url")
assert session_factory.signer is timestamp_signer_obj
assert session_factory.redis is strict_redis_obj
assert timestamp_signer_create.calls == [
pretend.call("mysecret", salt="session"),
]
assert strict_redis_cls.from_url.calls == [pretend.call("my url")]
def test_redis_key(self):
session_factory = SessionFactory(
"mysecret", "redis://redis://localhost:6379/0",
)
assert session_factory._redis_key("my_session_id") == \
"warehouse/session/data/my_session_id"
def test_no_current_session(self, pyramid_request):
session_factory = SessionFactory(
"mysecret", "redis://redis://localhost:6379/0",
)
session_factory._process_response = pretend.stub()
session = session_factory(pyramid_request)
assert len(pyramid_request.response_callbacks) == 1
assert pyramid_request.response_callbacks[0] is \
session_factory._process_response
assert isinstance(session, Session)
assert session._sid is None
assert session.new
def test_invalid_session_id(self, pyramid_request):
pyramid_request.cookies["session_id"] = "invalid!"
session_factory = SessionFactory(
"mysecret", "redis://redis://localhost:6379/0",
)
session_factory._process_response = pretend.stub()
session = session_factory(pyramid_request)
assert len(pyramid_request.response_callbacks) == 1
assert pyramid_request.response_callbacks[0] is \
session_factory._process_response
assert isinstance(session, Session)
assert session._sid is None
assert session.new
def test_valid_session_id_no_data(self, pyramid_request):
pyramid_request.cookies["session_id"] = "123456"
session_factory = SessionFactory(
"mysecret", "redis://redis://localhost:6379/0",
)
session_factory.signer.unsign = pretend.call_recorder(
lambda session_id, max_age: b"123456"
)
session_factory.redis = pretend.stub(
get=pretend.call_recorder(lambda key: None),
)
session_factory._process_response = pretend.stub()
session = session_factory(pyramid_request)
assert len(pyramid_request.response_callbacks) == 1
assert pyramid_request.response_callbacks[0] is \
session_factory._process_response
assert session_factory.signer.unsign.calls == [
pretend.call("123456", max_age=12 * 60 * 60),
]
assert session_factory.redis.get.calls == [
pretend.call("warehouse/session/data/123456"),
]
assert isinstance(session, Session)
assert session._sid is None
assert session.new
def test_valid_session_id_invalid_data(self, pyramid_request):
pyramid_request.cookies["session_id"] = "123456"
session_factory = SessionFactory(
"mysecret", "redis://redis://localhost:6379/0",
)
session_factory.signer.unsign = pretend.call_recorder(
lambda session_id, max_age: b"123456"
)
session_factory.redis = pretend.stub(
get=pretend.call_recorder(lambda key: b"invalid data"),
)
session_factory._process_response = pretend.stub()
session = session_factory(pyramid_request)
assert len(pyramid_request.response_callbacks) == 1
assert pyramid_request.response_callbacks[0] is \
session_factory._process_response
assert session_factory.signer.unsign.calls == [
pretend.call("123456", max_age=12 * 60 * 60),
]
assert session_factory.redis.get.calls == [
pretend.call("warehouse/session/data/123456"),
]
assert isinstance(session, Session)
assert session._sid is None
assert session.new
def test_valid_session_id_valid_data(self, monkeypatch, pyramid_request):
msgpack_unpackb = pretend.call_recorder(
lambda bdata, encoding, use_list: {"foo": "bar"}
)
monkeypatch.setattr(msgpack, "unpackb", msgpack_unpackb)
pyramid_request.cookies["session_id"] = "123456"
session_factory = SessionFactory(
"mysecret", "redis://redis://localhost:6379/0",
)
session_factory.signer.unsign = pretend.call_recorder(
lambda session_id, max_age: b"123456"
)
session_factory.redis = pretend.stub(
get=pretend.call_recorder(lambda key: b"valid data"),
)
session_factory._process_response = pretend.stub()
session = session_factory(pyramid_request)
assert len(pyramid_request.response_callbacks) == 1
assert pyramid_request.response_callbacks[0] is \
session_factory._process_response
assert session_factory.signer.unsign.calls == [
pretend.call("123456", max_age=12 * 60 * 60),
]
assert session_factory.redis.get.calls == [
pretend.call("warehouse/session/data/123456"),
]
assert msgpack_unpackb.calls == [
pretend.call(b"valid data", encoding="utf8", use_list=True),
]
assert isinstance(session, Session)
assert session == {"foo": "bar"}
assert session.sid == "123456"
assert not session.new
def test_no_save_invalid_session(self, pyramid_request):
session_factory = SessionFactory(
"mysecret", "redis://redis://localhost:6379/0",
)
session_factory.redis = pretend.stub()
pyramid_request.session = InvalidSession()
response = pretend.stub()
session_factory._process_response(pyramid_request, response)
def test_noop_unused_session(self, pyramid_request):
session_factory = SessionFactory(
"mysecret", "redis://redis://localhost:6379/0",
)
session_factory.redis = pretend.stub()
pyramid_request.session.invalidated = set()
pyramid_request.session.should_save = pretend.call_recorder(
lambda: False
)
response = pretend.stub()
session_factory._process_response(pyramid_request, response)
assert pyramid_request.session.should_save.calls == [pretend.call()]
def test_invalidated_deletes_no_save(self, pyramid_request):
session_factory = SessionFactory(
"mysecret", "redis://redis://localhost:6379/0",
)
session_factory.redis = pretend.stub(
delete=pretend.call_recorder(lambda key: None)
)
pyramid_request.session.invalidated = ["1", "2"]
pyramid_request.session.should_save = pretend.call_recorder(
lambda: False
)
response = pretend.stub(
delete_cookie=pretend.call_recorder(lambda cookie: None),
)
session_factory._process_response(pyramid_request, response)
assert session_factory.redis.delete.calls == [
pretend.call("warehouse/session/data/1"),
pretend.call("warehouse/session/data/2"),
]
assert pyramid_request.session.should_save.calls == [
pretend.call(),
pretend.call(),
]
assert response.delete_cookie.calls == [pretend.call("session_id")]
def test_invalidated_deletes_save_non_secure(self, monkeypatch,
pyramid_request):
msgpack_packb = pretend.call_recorder(
lambda data, encoding, use_bin_type: b"msgpack data"
)
monkeypatch.setattr(msgpack, "packb", msgpack_packb)
session_factory = SessionFactory(
"mysecret", "redis://redis://localhost:6379/0",
)
session_factory.redis = pretend.stub(
delete=pretend.call_recorder(lambda key: None),
setex=pretend.call_recorder(lambda key, age, data: None),
)
session_factory.signer.sign = pretend.call_recorder(
lambda data: "cookie data"
)
pyramid_request.scheme = "http"
pyramid_request.session.sid = "123456"
pyramid_request.session.invalidated = ["1", "2"]
pyramid_request.session.should_save = pretend.call_recorder(
lambda: True
)
response = pretend.stub(
set_cookie=pretend.call_recorder(
lambda cookie, data, max_age, httponly, secure: None
)
)
session_factory._process_response(pyramid_request, response)
assert session_factory.redis.delete.calls == [
pretend.call("warehouse/session/data/1"),
pretend.call("warehouse/session/data/2"),
]
assert msgpack_packb.calls == [
pretend.call(
pyramid_request.session,
encoding="utf8",
use_bin_type=True,
),
]
assert session_factory.redis.setex.calls == [
pretend.call(
"warehouse/session/data/123456",
12 * 60 * 60,
b"msgpack data",
),
]
assert pyramid_request.session.should_save.calls == [
pretend.call(),
pretend.call(),
]
assert session_factory.signer.sign.calls == [pretend.call(b"123456")]
assert response.set_cookie.calls == [
pretend.call(
"session_id",
"cookie data",
max_age=12 * 60 * 60,
httponly=True,
secure=False,
),
]
class TestSessionView:
def test_has_options(self):
assert set(session_view.options) == {"uses_session"}
@pytest.mark.parametrize("uses_session", [False, None])
def test_invalid_session(self, uses_session):
context = pretend.stub()
request = pretend.stub(session=pretend.stub())
response = pretend.stub()
@pretend.call_recorder
def view(context, request):
assert isinstance(request.session, InvalidSession)
return response
info = pretend.stub(options={}, exception_only=False)
if uses_session is not None:
info.options["uses_session"] = uses_session
derived_view = session_view(view, info)
assert derived_view(context, request) is response
assert view.calls == [pretend.call(context, request)]
def test_valid_session(self, monkeypatch):
add_vary_cb = pretend.call_recorder(lambda fn: fn)
add_vary = pretend.call_recorder(lambda vary: add_vary_cb)
monkeypatch.setattr(warehouse.sessions, "add_vary", add_vary)
context = pretend.stub()
request = pretend.stub(session=Session())
response = pretend.stub()
@pretend.call_recorder
def view(context, request):
assert isinstance(request.session, Session)
return response
info = pretend.stub(options={"uses_session": True})
derived_view = session_view(view, info)
assert derived_view(context, request) is response
assert view.calls == [pretend.call(context, request)]
assert add_vary.calls == [pretend.call("Cookie")]
assert add_vary_cb.calls == [pretend.call(view)]
def test_includeme(monkeypatch):
session_factory_obj = pretend.stub()
session_factory_cls = pretend.call_recorder(
lambda secret, url: session_factory_obj
)
monkeypatch.setattr(
warehouse.sessions,
"SessionFactory",
session_factory_cls,
)
config = pretend.stub(
set_session_factory=pretend.call_recorder(lambda factory: None),
registry=pretend.stub(
settings={
"sessions.secret": "my secret",
"sessions.url": "my url",
},
),
add_view_deriver=pretend.call_recorder(lambda *a, **kw: None),
)
includeme(config)
assert config.set_session_factory.calls == [
pretend.call(session_factory_obj),
]
assert session_factory_cls.calls == [pretend.call("my secret", "my url")]
assert config.add_view_deriver.calls == [
pretend.call(
session_view,
over="csrf_view",
under=viewderivers.INGRESS,
),
]
| apache-2.0 |
tensorflow/tensorflow | tensorflow/python/ops/structured/structured_array_ops.py | 6 | 20742 | # Lint as python3
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""StructuredTensor array ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from typing import Sequence
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops.ragged import ragged_tensor
from tensorflow.python.ops.ragged.row_partition import RowPartition
from tensorflow.python.ops.structured.structured_tensor import StructuredTensor
from tensorflow.python.util import deprecation
from tensorflow.python.util import dispatch
@dispatch.dispatch_for_types(array_ops.expand_dims, StructuredTensor)
@deprecation.deprecated_args(None, 'Use the `axis` argument instead', 'dim')
def expand_dims(input, axis=None, name=None, dim=None): # pylint: disable=redefined-builtin
"""Creates a StructuredTensor with a length 1 axis inserted at index `axis`.
This is an implementation of tf.expand_dims for StructuredTensor. Note
that the `axis` must be less than or equal to rank.
>>> st = StructuredTensor.from_pyval([[{"x": 1}, {"x": 2}], [{"x": 3}]])
>>> tf.expand_dims(st, 0).to_pyval()
[[[{'x': 1}, {'x': 2}], [{'x': 3}]]]
>>> tf.expand_dims(st, 1).to_pyval()
[[[{'x': 1}, {'x': 2}]], [[{'x': 3}]]]
>>> tf.expand_dims(st, 2).to_pyval()
[[[{'x': 1}], [{'x': 2}]], [[{'x': 3}]]]
>>> tf.expand_dims(st, -1).to_pyval() # -1 is the same as 2
[[[{'x': 1}], [{'x': 2}]], [[{'x': 3}]]]
Args:
input: the original StructuredTensor.
axis: the axis to insert the dimension: `-(rank + 1) <= axis <= rank`
name: the name of the op.
dim: deprecated: use axis.
Returns:
a new structured tensor with larger rank.
Raises:
an error if `axis < -(rank + 1)` or `rank < axis`.
"""
axis = deprecation.deprecated_argument_lookup('axis', axis, 'dim', dim)
return _expand_dims_impl(input, axis, name=name)
@dispatch.dispatch_for_types(array_ops.expand_dims_v2, StructuredTensor)
def expand_dims_v2(input, axis, name=None): # pylint: disable=redefined-builtin
"""Creates a StructuredTensor with a length 1 axis inserted at index `axis`.
This is an implementation of tf.expand_dims for StructuredTensor. Note
that the `axis` must be less than or equal to rank.
>>> st = StructuredTensor.from_pyval([[{"x": 1}, {"x": 2}], [{"x": 3}]])
>>> tf.expand_dims(st, 0).to_pyval()
[[[{'x': 1}, {'x': 2}], [{'x': 3}]]]
>>> tf.expand_dims(st, 1).to_pyval()
[[[{'x': 1}, {'x': 2}]], [[{'x': 3}]]]
>>> tf.expand_dims(st, 2).to_pyval()
[[[{'x': 1}], [{'x': 2}]], [[{'x': 3}]]]
>>> tf.expand_dims(st, -1).to_pyval() # -1 is the same as 2
[[[{'x': 1}], [{'x': 2}]], [[{'x': 3}]]]
Args:
input: the original StructuredTensor.
axis: the axis to insert the dimension: `-(rank + 1) <= axis <= rank`
name: the name of the op.
Returns:
a new structured tensor with larger rank.
Raises:
an error if `axis < -(rank + 1)` or `rank < axis`.
"""
return _expand_dims_impl(input, axis, name=name)
@dispatch.dispatch_for_types(array_ops.gather, StructuredTensor)
def gather(params,
indices,
validate_indices=None,
name=None,
axis=None,
batch_dims=0):
"""tf.gather for structured tensors.
  Does not (yet) check for illegal axis values, et cetera.
Indices must be a ragged or dense tensor.
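  Example (a sketch; output assumes eager execution):

  >>> st = StructuredTensor.from_pyval([{"x": 1}, {"x": 2}, {"x": 3}])
  >>> tf.gather(st, [2, 0]).to_pyval()
  [{'x': 3}, {'x': 1}]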
Args:
params: a structured tensor to be gathered
indices: a ragged tensor or tensor to gather by.
validate_indices: whether to validate the indices
name: the name of the op(s).
axis: the axis in params to gather on.
batch_dims: the number of batch dimensions.
Returns:
the params reorganized according to indices.
"""
if name is None:
name = 'gather'
with ops.name_scope(name):
if axis is None:
axis = batch_dims
axis = array_ops.get_positive_axis(axis, params.shape.rank,
ndims_name='params.shape.rank')
indices = ragged_tensor.convert_to_tensor_or_ragged_tensor(
indices, name='indices')
def leaf_op(p):
return array_ops.gather(
p,
indices,
validate_indices=validate_indices,
axis=axis,
batch_dims=batch_dims,
name=None)
return _extend_op_single(params, leaf_op)
@dispatch.dispatch_for_types(array_ops.concat, StructuredTensor)
def concat(values, axis, name: str = 'concat'):
"""tf.concat for structured tensors.
  Does not (yet) check for illegal axis values, et cetera.
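  Example (a sketch; output assumes eager execution):

  >>> st1 = StructuredTensor.from_pyval([{"x": 1}, {"x": 2}])
  >>> st2 = StructuredTensor.from_pyval([{"x": 3}])
  >>> tf.concat([st1, st2], 0).to_pyval()
  [{'x': 1}, {'x': 2}, {'x': 3}]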
Args:
values: a sequence of StructuredTensors.
axis: an axis to concatenate upon.
name: the name of the op(s).
Returns:
    the structured tensors concatenated along the given axis.
"""
if name is None:
name = 'concat'
_assert_concat_compatible_structured_tensors(values)
def leaf_op(values):
return array_ops.concat(values, axis)
# TODO(martinz): handle axis when it is a tensor.
axis = array_ops.get_positive_axis(axis, values[0].rank)
with ops.name_scope(name, 'StructuredConcat', values):
return _extend_op(values, leaf_op)
@dispatch.dispatch_for_types(random_ops.random_shuffle, StructuredTensor)
def random_shuffle(value, seed=None, name=None):
"""Shuffle a structured tensor on the zeroth axis.
Args:
value: a structured tensor of rank at least one.
seed: the seed for shuffling.
name: the name for shuffle.
Returns:
The shuffled structured tensor.
"""
with ops.name_scope(name, 'shuffle', [value, seed]):
if value.rank == 0:
raise ValueError('Cannot shuffle a scalar StructuredTensor')
first_dimension = value.nrows()
index = random_ops.random_shuffle(math_ops.range(first_dimension),
seed=seed)
return gather(value, index, axis=0)
@dispatch.dispatch_for_types(array_ops.size_v2, StructuredTensor)
def size_v2(input, out_type=dtypes.int32, name=None):
# pylint: disable=redefined-builtin
"""Returns the size of a tensor."""
return size(input, name=name, out_type=out_type)
# pylint: disable=protected-access
@dispatch.dispatch_for_types(array_ops.size, StructuredTensor)
def size(input, name=None, out_type=dtypes.int32):
# pylint: disable=redefined-builtin
"""Returns the size of a tensor."""
with ops.name_scope(name, 'size', [input]) as name:
if not input._row_partitions:
if input._nrows is not None:
return math_ops.cast(input._nrows, out_type) # vector.
else:
return math_ops.cast(1, out_type) # scalar.
# 2D and up.
last_row_partition = input._row_partitions[-1]
return last_row_partition.nvals(out_type)
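# Example (a sketch, assuming eager execution): for
#   st = StructuredTensor.from_pyval([[{"x": 1}], [{"x": 2}, {"x": 3}]])
# tf.size(st) counts the innermost submessages and evaluates to 3.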
# pylint: disable=protected-access
@dispatch.dispatch_for_types(array_ops.zeros_like, StructuredTensor)
def zeros_like(tensor, dtype=None, name=None, optimize=True):
"""Implementation of zeros_like for StructuredTensor for TF v1."""
del optimize
return zeros_like_v2(tensor, dtype=dtype, name=name)
# pylint: disable=protected-access
@dispatch.dispatch_for_types(array_ops.zeros_like_v2, StructuredTensor)
def zeros_like_v2(input, dtype=None, name=None): # pylint: disable=redefined-builtin
"""Replace every object with a zero.
Example:
>>> st = StructuredTensor.from_pyval([{"x":[3]}, {"x":[4,5]}])
>>> tf.zeros_like(st)
  <tf.Tensor: shape=(2,), dtype=float32, numpy=array([0., 0.], dtype=float32)>
>>> st = StructuredTensor.from_pyval([[{"x":[3]}], [{"x":[4,5]}, {"x":[]}]])
>>> tf.zeros_like(st, dtype=tf.int32)
<tf.RaggedTensor [[0], [0, 0]]>
Args:
input: a structured tensor.
dtype: the dtype of the resulting zeros. (default is tf.float32)
name: a name for the op.
Returns:
a tensor of zeros of the same shape.
"""
if dtype is None:
dtype = dtypes.float32
with ops.name_scope(name, 'zeros_like', [input]) as name:
if not input._row_partitions:
if input._nrows is not None:
return array_ops.zeros([input._nrows], dtype) # vector.
else:
return array_ops.zeros([], dtype) # scalar.
# 2D and up.
last_row_partition = input._row_partitions[-1]
result = ragged_tensor.RaggedTensor._from_nested_row_partitions(
array_ops.zeros(last_row_partition.nvals(), dtype=dtype),
input._row_partitions)
return result
# pylint: disable=protected-access
@dispatch.dispatch_for_types(array_ops.ones_like, StructuredTensor)
def ones_like(tensor, dtype=None, name=None, optimize=True):
"""Implementation of zeros_like for StructuredTensor for TF v1."""
del optimize
return ones_like_v2(tensor, dtype=dtype, name=name)
# pylint: disable=protected-access
@dispatch.dispatch_for_types(array_ops.ones_like_v2, StructuredTensor)
def ones_like_v2(input, dtype=None, name=None): # pylint: disable=redefined-builtin
"""Replace every object with a zero.
Example:
>>> st = StructuredTensor.from_pyval([{"x":[3]}, {"x":[4,5]}])
>>> tf.ones_like(st)
  <tf.Tensor: shape=(2,), dtype=float32, numpy=array([1., 1.], dtype=float32)>
>>> st = StructuredTensor.from_pyval([[{"x":[3]}], [{"x":[4,5]}, {"x":[]}]])
>>> tf.ones_like(st, dtype=tf.int32)
<tf.RaggedTensor [[1], [1, 1]]>
Args:
input: a structured tensor.
    dtype: the dtype of the resulting ones. (default is tf.float32)
name: a name for the op.
Returns:
    a tensor of ones of the same shape.
"""
if dtype is None:
dtype = dtypes.float32
with ops.name_scope(name, 'ones_like', [input]) as name:
if not input._row_partitions:
if input._nrows is not None:
return array_ops.ones([input._nrows], dtype) # vector.
else:
return array_ops.ones([], dtype) # scalar.
# 2D and up.
last_row_partition = input._row_partitions[-1]
result = ragged_tensor.RaggedTensor._from_nested_row_partitions(
array_ops.ones(last_row_partition.nvals(), dtype=dtype),
input._row_partitions)
return result
@dispatch.dispatch_for_types(array_ops.rank, StructuredTensor)
def rank(input, name=None):
# pylint: disable=redefined-builtin
"""Returns the rank of a tensor."""
with ops.name_scope(name, 'rank', [input]) as name:
return constant_op.constant(input.rank, dtype=dtypes.int32)
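# Example (a sketch): tf.rank(StructuredTensor.from_pyval([{"x": 1}])) yields a
# constant tensor holding 1, since a StructuredTensor's rank is statically known.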
def _expand_dims_impl(st, axis, name=None): # pylint: disable=redefined-builtin
"""Creates a StructuredTensor with a length 1 axis inserted at index `axis`.
This is an implementation of tf.expand_dims for StructuredTensor. Note
that the `axis` must be less than or equal to rank.
>>> st = StructuredTensor.from_pyval([[{"x": 1}, {"x": 2}], [{"x": 3}]])
>>> tf.expand_dims(st, 0).to_pyval()
[[[{'x': 1}, {'x': 2}], [{'x': 3}]]]
>>> tf.expand_dims(st, 1).to_pyval()
[[[{'x': 1}, {'x': 2}]], [[{'x': 3}]]]
>>> tf.expand_dims(st, 2).to_pyval()
[[[{'x': 1}], [{'x': 2}]], [[{'x': 3}]]]
>>> tf.expand_dims(st, -1).to_pyval() # -1 is the same as 2
[[[{'x': 1}], [{'x': 2}]], [[{'x': 3}]]]
Args:
st: the original StructuredTensor.
axis: the axis to insert the dimension: `-(rank + 1) <= axis <= rank`
name: the name of the op.
Returns:
a new structured tensor with larger rank.
Raises:
an error if `axis < -(rank + 1)` or `rank < axis`.
"""
axis = array_ops.get_positive_axis(
axis, st.rank + 1, axis_name='axis', ndims_name='rank(st)')
with ops.name_scope(name, 'ExpandDims', [st, axis]):
new_fields = {
k: array_ops.expand_dims(v, axis) for (k, v) in st._fields.items()
}
new_shape = st.shape[:axis] + (1,) + st.shape[axis:]
new_row_partitions = _expand_st_row_partitions(st, axis)
new_nrows = st.nrows() if (axis > 0) else 1
return StructuredTensor.from_fields(
new_fields,
shape=new_shape,
row_partitions=new_row_partitions,
nrows=new_nrows)
def _expand_st_row_partitions(st, axis):
"""Create the row_partitions for expand_dims."""
if axis == 0:
if st.shape.rank == 0:
return ()
nvals = st.nrows()
new_partition = RowPartition.from_uniform_row_length(
nvals, nvals, nrows=1, validate=False)
return (new_partition,) + st.row_partitions
elif axis == st.rank:
nvals = (
st.row_partitions[axis - 2].nvals() if (axis - 2 >= 0) else st.nrows())
return st.row_partitions + (RowPartition.from_uniform_row_length(
1, nvals, nrows=nvals, validate=False),)
else:
nvals = (
st.row_partitions[axis - 1].nrows() if (axis - 1 >= 0) else st.nrows())
return st.row_partitions[:axis - 1] + (RowPartition.from_uniform_row_length(
1, nvals, nrows=nvals, validate=False),) + st.row_partitions[axis - 1:]
# TODO(martinz): consider allowing values to be nested.
def _extend_op(values, leaf_op, empty_st_op=None):
"""Extend an op from RaggedTensor and Tensor to StructuredTensor.
Visits all children of the structured tensor, and children of children,
applying leaf_op whenever it reaches a leaf, and empty_st_op whenever
it reaches an internal node without children.
Args:
values: a list of structured tensors, ragged tensors, or tensors. All must
have the same type. If they are structured tensors, they must have the
same paths.
leaf_op: an op for handling non-structured tensor.
empty_st_op: op to create a structured tensor without fields.
Returns:
the result of the extended op (a StructuredTensor, RaggedTensor, or Tensor)
Raises:
ValueError:
If values is not a Sequence or is empty.
"""
if not isinstance(values, Sequence):
raise ValueError('Expected a list')
if not values:
raise ValueError('List cannot be empty')
if empty_st_op is None:
empty_st_op = empty_st_op_like_zeros(leaf_op)
# Use the structure of the first StructuredTensor. They are all assumed to
# be the same.
value = values[0]
if isinstance(value, StructuredTensor):
# TODO(martinz): Calling empty_st_op may add unnecessary ops. Revisit later.
empty_result = empty_st_op(values)
if not value.field_names():
return empty_result
new_fields = {}
for k in value.field_names():
new_fields[k] = _extend_op([v.field_value(k) for v in values], leaf_op,
empty_st_op)
return StructuredTensor.from_fields(new_fields, shape=empty_result.shape)
else:
return leaf_op(values)
def _extend_op_single(value, leaf_op, empty_st_op=None):
"""Extend an op to a value instead of a list of values."""
def to_list_op(element_op):
if element_op is None:
return None
def list_op(values):
[value] = values
return element_op(value)
return list_op
return _extend_op([value], to_list_op(leaf_op), to_list_op(empty_st_op))
def empty_st_op_like_zeros(leaf_op):
def empty_st_op(values):
as_zeros = [
zeros_like_v2(value, dtype=dtypes.int32) for value in values
]
result = leaf_op(as_zeros)
return _structured_tensor_like(result)
return empty_st_op
def _structured_tensor_from_dense_tensor(t):
"""Create a structured tensor with the shape of a dense tensor."""
# Note: If a tensor will have rank 0,
# it either has a fully defined shape or has unknown rank.
if t.shape.is_fully_defined():
return StructuredTensor.from_fields({}, shape=t.shape)
elif t.shape.rank is None:
raise ValueError("Can't build StructuredTensor w/ unknown rank")
elif t.shape.rank == 1:
return StructuredTensor.from_fields({}, shape=t.shape,
nrows=array_ops.shape(t)[0])
else:
rt = ragged_tensor.RaggedTensor.from_tensor(t)
return _structured_tensor_from_row_partitions(t.shape,
rt._nested_row_partitions)
def _structured_tensor_from_row_partitions(shape, row_partitions):
return StructuredTensor.from_fields({},
shape=shape,
row_partitions=row_partitions)
# pylint: disable=protected-access
def _all_nested_row_partitions(rt):
"""Returns all nested row partitions in rt, including for dense dimensions."""
if isinstance(rt, ops.Tensor):
if rt.shape.rank <= 1:
return ()
else:
rt2 = ragged_tensor.RaggedTensor.from_tensor(rt)
return rt2._nested_row_partitions
else:
tail_partitions = _all_nested_row_partitions(rt.flat_values)
    head_partitions = rt._nested_row_partitions  # pylint: disable=protected-access
return head_partitions + tail_partitions
def _structured_tensor_like(t):
"""Create a StructuredTensor with the shape of a (composite) tensor."""
if isinstance(t, ops.Tensor):
return _structured_tensor_from_dense_tensor(t)
if ragged_tensor.is_ragged(t):
return StructuredTensor.from_fields(
{}, shape=t.get_shape(), row_partitions=_all_nested_row_partitions(t))
# here, it is a StructuredTensor
return StructuredTensor.from_fields({},
shape=t.shape,
row_partitions=t.row_partitions,
nrows=t.nrows())
def _get_all_paths(st):
"""Get all the paths from a StructuredTensor."""
fields = st.field_names()
all_paths = {()}
for k in fields:
v = st.field_value(k)
if isinstance(v, StructuredTensor):
all_paths = all_paths.union([(k,) + p for p in _get_all_paths(v)])
else:
all_paths.add((k,))
return all_paths
def _get_all_ranks(st):
"""Get ranks of all submessages of a StructuredTensor."""
fields = st.field_names()
all_ranks = {(): st.rank}
for k in fields:
v = st.field_value(k)
if isinstance(v, StructuredTensor):
for (k2, v2) in _get_all_ranks(v).items():
all_ranks[(k,) + k2] = v2
return all_ranks
def _assert_all_paths_match(values):
"""Raises an error if the paths are not identical."""
paths = [_get_all_paths(st) for st in values]
path_diff = set()
for other_paths in paths[1:]:
path_diff = path_diff.union(paths[0].symmetric_difference(other_paths))
if path_diff:
raise ValueError(
'Some paths are present in some, but not all, structured tensors: %r' %
(path_diff,))
def _assert_all_ranks_match(values):
"""Raises an error if the ranks of submessages are not identical."""
ranks = [_get_all_ranks(st) for st in values]
for other_ranks in ranks[1:]:
if other_ranks != ranks[0]:
# TODO(martinz): If this becomes common, we can provide more detail.
# e.g.: which path is inconsistent.
raise ValueError('Ranks of sub-message do not match')
def _assert_concat_compatible_structured_tensors(values):
"""Sometimes raises an error if concat doesn't make sense statically on values.
values must be a sequence, and each element in values must be a structured
tensor, and must have the same paths. Additionally, each path that is a
submessage must have the same rank.
These constraints are sufficient for concat on the fields to be the same
as concat on structured tensors. This is meant to capture scenarios like
  paths that are present in later structured tensors but missing from the
  first, which would otherwise be silently ignored by the recursive
  algorithm. If the rank of a submessage differs between two structured
  tensors, that is also a nonsensical merge.
Note that all of these checks are static, as paths and submessage ranks
are known.
Args:
values: a Sequence of StructuredTensors.
Raises:
ValueError: if there is any inconsistency as described above.
"""
if not isinstance(values, Sequence):
    raise ValueError('values must be a Sequence of StructuredTensors')
if not values:
raise ValueError('values must not be an empty list')
for st in values:
if not isinstance(st, StructuredTensor):
raise ValueError('values must be a list of StructuredTensors')
_assert_all_paths_match(values)
_assert_all_ranks_match(values)
| apache-2.0 |
theheros/kbengine | kbe/res/scripts/common/Lib/test/test_winreg.py | 3 | 18930 | # Test the windows specific win32reg module.
# Only win32reg functions not hit here: FlushKey, LoadKey and SaveKey
import os, sys
import unittest
from test import support
threading = support.import_module("threading")
from platform import machine
# Do this first so test will be skipped if module doesn't exist
support.import_module('winreg')
# Now import everything
from winreg import *
try:
REMOTE_NAME = sys.argv[sys.argv.index("--remote")+1]
except (IndexError, ValueError):
REMOTE_NAME = None
# tuple of (major, minor)
WIN_VER = sys.getwindowsversion()[:2]
# Some tests should only run on 64-bit architectures where WOW64 will be present.
WIN64_MACHINE = (machine() == "AMD64")
# Starting with Windows 7 and Windows Server 2008 R2, WOW64 no longer uses
# registry reflection and formerly reflected keys are shared instead.
# Windows 7 and Windows Server 2008 R2 are version 6.1. Due to this, some
# tests are only valid up until 6.1
HAS_REFLECTION = (WIN_VER < (6, 1))
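# Illustrative sketch (not executed by these tests): the WOW64 view can be
# selected explicitly when opening a key, e.g.
#   OpenKey(HKEY_LOCAL_MACHINE, "Software", 0, KEY_READ | KEY_WOW64_32KEY)
#   OpenKey(HKEY_LOCAL_MACHINE, "Software", 0, KEY_READ | KEY_WOW64_64KEY)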
test_key_name = "SOFTWARE\\Python Registry Test Key - Delete Me"
# On OSes that support reflection we should test with a reflected key
test_reflect_key_name = "SOFTWARE\\Classes\\Python Test Key - Delete Me"
test_data = [
("Int Value", 45, REG_DWORD),
("String Val", "A string value", REG_SZ),
("StringExpand", "The path is %path%", REG_EXPAND_SZ),
("Multi-string", ["Lots", "of", "string", "values"], REG_MULTI_SZ),
("Raw Data", b"binary\x00data", REG_BINARY),
("Big String", "x"*(2**14-1), REG_SZ),
("Big Binary", b"x"*(2**14), REG_BINARY),
    # Two and three kanji, meaning: "Japan" and "Japanese"
("Japanese 日本", "日本語", REG_SZ),
]
class BaseWinregTests(unittest.TestCase):
def setUp(self):
# Make sure that the test key is absent when the test
# starts.
self.delete_tree(HKEY_CURRENT_USER, test_key_name)
def delete_tree(self, root, subkey):
try:
hkey = OpenKey(root, subkey, KEY_ALL_ACCESS)
except WindowsError:
# subkey does not exist
return
while True:
try:
subsubkey = EnumKey(hkey, 0)
except WindowsError:
# no more subkeys
break
self.delete_tree(hkey, subsubkey)
CloseKey(hkey)
DeleteKey(root, subkey)
def _write_test_data(self, root_key, subkeystr="sub_key",
CreateKey=CreateKey):
# Set the default value for this key.
SetValue(root_key, test_key_name, REG_SZ, "Default value")
key = CreateKey(root_key, test_key_name)
self.assertTrue(key.handle != 0)
# Create a sub-key
sub_key = CreateKey(key, subkeystr)
# Give the sub-key some named values
for value_name, value_data, value_type in test_data:
SetValueEx(sub_key, value_name, 0, value_type, value_data)
# Check we wrote as many items as we thought.
nkeys, nvalues, since_mod = QueryInfoKey(key)
self.assertEqual(nkeys, 1, "Not the correct number of sub keys")
self.assertEqual(nvalues, 1, "Not the correct number of values")
nkeys, nvalues, since_mod = QueryInfoKey(sub_key)
self.assertEqual(nkeys, 0, "Not the correct number of sub keys")
self.assertEqual(nvalues, len(test_data),
"Not the correct number of values")
# Close this key this way...
# (but before we do, copy the key as an integer - this allows
# us to test that the key really gets closed).
int_sub_key = int(sub_key)
CloseKey(sub_key)
try:
QueryInfoKey(int_sub_key)
self.fail("It appears the CloseKey() function does "
"not close the actual key!")
except EnvironmentError:
pass
# ... and close that key that way :-)
int_key = int(key)
key.Close()
try:
QueryInfoKey(int_key)
self.fail("It appears the key.Close() function "
"does not close the actual key!")
except EnvironmentError:
pass
def _read_test_data(self, root_key, subkeystr="sub_key", OpenKey=OpenKey):
# Check we can get default value for this key.
val = QueryValue(root_key, test_key_name)
self.assertEqual(val, "Default value",
"Registry didn't give back the correct value")
key = OpenKey(root_key, test_key_name)
# Read the sub-keys
with OpenKey(key, subkeystr) as sub_key:
# Check I can enumerate over the values.
index = 0
while 1:
try:
data = EnumValue(sub_key, index)
except EnvironmentError:
break
self.assertEqual(data in test_data, True,
"Didn't read back the correct test data")
index = index + 1
self.assertEqual(index, len(test_data),
"Didn't read the correct number of items")
# Check I can directly access each item
for value_name, value_data, value_type in test_data:
read_val, read_typ = QueryValueEx(sub_key, value_name)
self.assertEqual(read_val, value_data,
"Could not directly read the value")
self.assertEqual(read_typ, value_type,
"Could not directly read the value")
sub_key.Close()
# Enumerate our main key.
read_val = EnumKey(key, 0)
self.assertEqual(read_val, subkeystr, "Read subkey value wrong")
try:
EnumKey(key, 1)
self.fail("Was able to get a second key when I only have one!")
except EnvironmentError:
pass
key.Close()
def _delete_test_data(self, root_key, subkeystr="sub_key"):
key = OpenKey(root_key, test_key_name, 0, KEY_ALL_ACCESS)
sub_key = OpenKey(key, subkeystr, 0, KEY_ALL_ACCESS)
# It is not necessary to delete the values before deleting
# the key (although subkeys must not exist). We delete them
# manually just to prove we can :-)
for value_name, value_data, value_type in test_data:
DeleteValue(sub_key, value_name)
nkeys, nvalues, since_mod = QueryInfoKey(sub_key)
self.assertEqual(nkeys, 0, "subkey not empty before delete")
self.assertEqual(nvalues, 0, "subkey not empty before delete")
sub_key.Close()
DeleteKey(key, subkeystr)
try:
            # Shouldn't be able to delete it twice!
DeleteKey(key, subkeystr)
self.fail("Deleting the key twice succeeded")
except EnvironmentError:
pass
key.Close()
DeleteKey(root_key, test_key_name)
# Opening should now fail!
try:
key = OpenKey(root_key, test_key_name)
self.fail("Could open the non-existent key")
except WindowsError: # Use this error name this time
pass
def _test_all(self, root_key, subkeystr="sub_key"):
self._write_test_data(root_key, subkeystr)
self._read_test_data(root_key, subkeystr)
self._delete_test_data(root_key, subkeystr)
def _test_named_args(self, key, sub_key):
with CreateKeyEx(key=key, sub_key=sub_key, reserved=0,
access=KEY_ALL_ACCESS) as ckey:
self.assertTrue(ckey.handle != 0)
with OpenKeyEx(key=key, sub_key=sub_key, reserved=0,
access=KEY_ALL_ACCESS) as okey:
self.assertTrue(okey.handle != 0)
class LocalWinregTests(BaseWinregTests):
def test_registry_works(self):
self._test_all(HKEY_CURRENT_USER)
self._test_all(HKEY_CURRENT_USER, "日本-subkey")
def test_registry_works_extended_functions(self):
# Substitute the regular CreateKey and OpenKey calls with their
# extended counterparts.
# Note: DeleteKeyEx is not used here because it is platform dependent
cke = lambda key, sub_key: CreateKeyEx(key, sub_key, 0, KEY_ALL_ACCESS)
self._write_test_data(HKEY_CURRENT_USER, CreateKey=cke)
oke = lambda key, sub_key: OpenKeyEx(key, sub_key, 0, KEY_READ)
self._read_test_data(HKEY_CURRENT_USER, OpenKey=oke)
self._delete_test_data(HKEY_CURRENT_USER)
def test_named_arguments(self):
self._test_named_args(HKEY_CURRENT_USER, test_key_name)
# Use the regular DeleteKey to clean up
# DeleteKeyEx takes named args and is tested separately
DeleteKey(HKEY_CURRENT_USER, test_key_name)
def test_connect_registry_to_local_machine_works(self):
# perform minimal ConnectRegistry test which just invokes it
h = ConnectRegistry(None, HKEY_LOCAL_MACHINE)
self.assertNotEqual(h.handle, 0)
h.Close()
self.assertEqual(h.handle, 0)
def test_inexistant_remote_registry(self):
connect = lambda: ConnectRegistry("abcdefghijkl", HKEY_CURRENT_USER)
self.assertRaises(WindowsError, connect)
def testExpandEnvironmentStrings(self):
r = ExpandEnvironmentStrings("%windir%\\test")
self.assertEqual(type(r), str)
self.assertEqual(r, os.environ["windir"] + "\\test")
def test_context_manager(self):
# ensure that the handle is closed if an exception occurs
try:
with ConnectRegistry(None, HKEY_LOCAL_MACHINE) as h:
self.assertNotEqual(h.handle, 0)
raise WindowsError
except WindowsError:
self.assertEqual(h.handle, 0)
def test_changing_value(self):
# Issue2810: A race condition in 2.6 and 3.1 may cause
# EnumValue or QueryValue to throw "WindowsError: More data is
# available"
done = False
class VeryActiveThread(threading.Thread):
def run(self):
with CreateKey(HKEY_CURRENT_USER, test_key_name) as key:
use_short = True
long_string = 'x'*2000
while not done:
s = 'x' if use_short else long_string
use_short = not use_short
SetValue(key, 'changing_value', REG_SZ, s)
thread = VeryActiveThread()
thread.start()
try:
with CreateKey(HKEY_CURRENT_USER,
test_key_name+'\\changing_value') as key:
for _ in range(1000):
num_subkeys, num_values, t = QueryInfoKey(key)
for i in range(num_values):
name = EnumValue(key, i)
QueryValue(key, name[0])
finally:
done = True
thread.join()
DeleteKey(HKEY_CURRENT_USER, test_key_name+'\\changing_value')
DeleteKey(HKEY_CURRENT_USER, test_key_name)
def test_long_key(self):
# Issue2810, in 2.6 and 3.1 when the key name was exactly 256
# characters, EnumKey threw "WindowsError: More data is
# available"
name = 'x'*256
try:
with CreateKey(HKEY_CURRENT_USER, test_key_name) as key:
SetValue(key, name, REG_SZ, 'x')
num_subkeys, num_values, t = QueryInfoKey(key)
EnumKey(key, 0)
finally:
DeleteKey(HKEY_CURRENT_USER, '\\'.join((test_key_name, name)))
DeleteKey(HKEY_CURRENT_USER, test_key_name)
def test_dynamic_key(self):
# Issue2810, when the value is dynamically generated, these
# throw "WindowsError: More data is available" in 2.6 and 3.1
EnumValue(HKEY_PERFORMANCE_DATA, 0)
QueryValueEx(HKEY_PERFORMANCE_DATA, "")
# Reflection requires XP x64/Vista at a minimum. XP doesn't have this stuff
# or DeleteKeyEx so make sure their use raises NotImplementedError
@unittest.skipUnless(WIN_VER < (5, 2), "Requires Windows XP")
def test_reflection_unsupported(self):
try:
with CreateKey(HKEY_CURRENT_USER, test_key_name) as ck:
self.assertNotEqual(ck.handle, 0)
key = OpenKey(HKEY_CURRENT_USER, test_key_name)
self.assertNotEqual(key.handle, 0)
with self.assertRaises(NotImplementedError):
DisableReflectionKey(key)
with self.assertRaises(NotImplementedError):
EnableReflectionKey(key)
with self.assertRaises(NotImplementedError):
QueryReflectionKey(key)
with self.assertRaises(NotImplementedError):
DeleteKeyEx(HKEY_CURRENT_USER, test_key_name)
finally:
DeleteKey(HKEY_CURRENT_USER, test_key_name)
@unittest.skipUnless(REMOTE_NAME, "Skipping remote registry tests")
class RemoteWinregTests(BaseWinregTests):
def test_remote_registry_works(self):
remote_key = ConnectRegistry(REMOTE_NAME, HKEY_CURRENT_USER)
self._test_all(remote_key)
@unittest.skipUnless(WIN64_MACHINE, "x64 specific registry tests")
class Win64WinregTests(BaseWinregTests):
def test_named_arguments(self):
self._test_named_args(HKEY_CURRENT_USER, test_key_name)
# Clean up and also exercise the named arguments
DeleteKeyEx(key=HKEY_CURRENT_USER, sub_key=test_key_name,
access=KEY_ALL_ACCESS, reserved=0)
def test_reflection_functions(self):
# Test that we can call the query, enable, and disable functions
# on a key which isn't on the reflection list with no consequences.
with OpenKey(HKEY_LOCAL_MACHINE, "Software") as key:
# HKLM\Software is redirected but not reflected in all OSes
self.assertTrue(QueryReflectionKey(key))
self.assertIsNone(EnableReflectionKey(key))
self.assertIsNone(DisableReflectionKey(key))
self.assertTrue(QueryReflectionKey(key))
@unittest.skipUnless(HAS_REFLECTION, "OS doesn't support reflection")
def test_reflection(self):
# Test that we can create, open, and delete keys in the 32-bit
# area. Because we are doing this in a key which gets reflected,
# test the differences of 32 and 64-bit keys before and after the
# reflection occurs (ie. when the created key is closed).
try:
with CreateKeyEx(HKEY_CURRENT_USER, test_reflect_key_name, 0,
KEY_ALL_ACCESS | KEY_WOW64_32KEY) as created_key:
self.assertNotEqual(created_key.handle, 0)
# The key should now be available in the 32-bit area
with OpenKey(HKEY_CURRENT_USER, test_reflect_key_name, 0,
KEY_ALL_ACCESS | KEY_WOW64_32KEY) as key:
self.assertNotEqual(key.handle, 0)
# Write a value to what currently is only in the 32-bit area
SetValueEx(created_key, "", 0, REG_SZ, "32KEY")
# The key is not reflected until created_key is closed.
# The 64-bit version of the key should not be available yet.
open_fail = lambda: OpenKey(HKEY_CURRENT_USER,
test_reflect_key_name, 0,
KEY_READ | KEY_WOW64_64KEY)
self.assertRaises(WindowsError, open_fail)
# Now explicitly open the 64-bit version of the key
with OpenKey(HKEY_CURRENT_USER, test_reflect_key_name, 0,
KEY_ALL_ACCESS | KEY_WOW64_64KEY) as key:
self.assertNotEqual(key.handle, 0)
# Make sure the original value we set is there
self.assertEqual("32KEY", QueryValue(key, ""))
# Set a new value, which will get reflected to 32-bit
SetValueEx(key, "", 0, REG_SZ, "64KEY")
            # Reflection uses a "last-writer wins" policy, so the value we set
# on the 64-bit key should be the same on 32-bit
with OpenKey(HKEY_CURRENT_USER, test_reflect_key_name, 0,
KEY_READ | KEY_WOW64_32KEY) as key:
self.assertEqual("64KEY", QueryValue(key, ""))
finally:
DeleteKeyEx(HKEY_CURRENT_USER, test_reflect_key_name,
KEY_WOW64_32KEY, 0)
@unittest.skipUnless(HAS_REFLECTION, "OS doesn't support reflection")
def test_disable_reflection(self):
# Make use of a key which gets redirected and reflected
try:
with CreateKeyEx(HKEY_CURRENT_USER, test_reflect_key_name, 0,
KEY_ALL_ACCESS | KEY_WOW64_32KEY) as created_key:
# QueryReflectionKey returns whether or not the key is disabled
disabled = QueryReflectionKey(created_key)
self.assertEqual(type(disabled), bool)
# HKCU\Software\Classes is reflected by default
self.assertFalse(disabled)
DisableReflectionKey(created_key)
self.assertTrue(QueryReflectionKey(created_key))
# The key is now closed and would normally be reflected to the
# 64-bit area, but let's make sure that didn't happen.
open_fail = lambda: OpenKeyEx(HKEY_CURRENT_USER,
test_reflect_key_name, 0,
KEY_READ | KEY_WOW64_64KEY)
self.assertRaises(WindowsError, open_fail)
# Make sure the 32-bit key is actually there
with OpenKeyEx(HKEY_CURRENT_USER, test_reflect_key_name, 0,
KEY_READ | KEY_WOW64_32KEY) as key:
self.assertNotEqual(key.handle, 0)
finally:
DeleteKeyEx(HKEY_CURRENT_USER, test_reflect_key_name,
KEY_WOW64_32KEY, 0)
def test_main():
support.run_unittest(LocalWinregTests, RemoteWinregTests,
Win64WinregTests)
if __name__ == "__main__":
if not REMOTE_NAME:
print("Remote registry calls can be tested using",
"'test_winreg.py --remote \\\\machine_name'")
test_main()
| lgpl-3.0 |
vhaupert/mitmproxy | mitmproxy/proxy/protocol/http2.py | 1 | 30035 | import threading
import time
import functools
from typing import Dict, Callable, Any, List, Optional # noqa
import h2.exceptions
from h2 import connection
from h2 import events
import queue
from mitmproxy import connections # noqa
from mitmproxy import exceptions
from mitmproxy import http
from mitmproxy.proxy.protocol import base
from mitmproxy.proxy.protocol import http as httpbase
import mitmproxy.net.http
from mitmproxy.net import tcp
from mitmproxy.coretypes import basethread
from mitmproxy.net.http import http2, headers, url
from mitmproxy.utils import human
class SafeH2Connection(connection.H2Connection):
def __init__(self, conn, *args, **kwargs):
super().__init__(*args, **kwargs)
self.conn = conn
self.lock = threading.RLock()
def safe_acknowledge_received_data(self, acknowledged_size: int, stream_id: int):
if acknowledged_size == 0:
return
with self.lock:
self.acknowledge_received_data(acknowledged_size, stream_id)
self.conn.send(self.data_to_send())
def safe_reset_stream(self, stream_id: int, error_code: int):
with self.lock:
try:
self.reset_stream(stream_id, error_code)
except h2.exceptions.StreamClosedError: # pragma: no cover
# stream is already closed - good
pass
self.conn.send(self.data_to_send())
def safe_update_settings(self, new_settings: Dict[int, Any]):
with self.lock:
self.update_settings(new_settings)
self.conn.send(self.data_to_send())
def safe_send_headers(self, raise_zombie: Callable, stream_id: int, headers: headers.Headers, **kwargs):
with self.lock:
raise_zombie()
self.send_headers(stream_id, headers.fields, **kwargs)
self.conn.send(self.data_to_send())
def safe_send_body(self, raise_zombie: Callable, stream_id: int, chunks: List[bytes], end_stream=True):
for chunk in chunks:
position = 0
while position < len(chunk):
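                # Send the chunk in frames no larger than the peer's maximum
                # frame size; if the stream's flow-control window is too small,
                # release the lock and retry after a short pause.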
self.lock.acquire()
raise_zombie(self.lock.release)
max_outbound_frame_size = self.max_outbound_frame_size
frame_chunk = chunk[position:position + max_outbound_frame_size]
if self.local_flow_control_window(stream_id) < len(frame_chunk): # pragma: no cover
self.lock.release()
time.sleep(0.1)
continue
self.send_data(stream_id, frame_chunk)
try:
self.conn.send(self.data_to_send())
except Exception as e: # pragma: no cover
raise e
finally:
self.lock.release()
position += max_outbound_frame_size
if end_stream:
with self.lock:
raise_zombie()
self.end_stream(stream_id)
self.conn.send(self.data_to_send())
class Http2Layer(base.Layer):
if False:
# mypy type hints
client_conn: connections.ClientConnection = None
class H2ConnLogger:
def __init__(self, name, log):
self.name = name
self.log = log
def debug(self, fmtstr, *args):
msg = "H2Conn {}: {}".format(self.name, fmtstr % args)
self.log(msg, "debug")
def trace(self, fmtstr, *args):
pass
def __init__(self, ctx, mode: str) -> None:
super().__init__(ctx)
self.mode = mode
self.streams: Dict[int, Http2SingleStreamLayer] = dict()
self.server_to_client_stream_ids: Dict[int, int] = dict([(0, 0)])
self.connections: Dict[object, SafeH2Connection] = {}
config = h2.config.H2Configuration(
client_side=False,
header_encoding=False,
validate_outbound_headers=False,
validate_inbound_headers=False,
logger=self.H2ConnLogger("client", self.log))
self.connections[self.client_conn] = SafeH2Connection(self.client_conn, config=config)
def _initiate_server_conn(self):
if self.server_conn.connected():
config = h2.config.H2Configuration(
client_side=True,
header_encoding=False,
validate_outbound_headers=False,
validate_inbound_headers=False,
logger=self.H2ConnLogger("server", self.log))
self.connections[self.server_conn] = SafeH2Connection(self.server_conn, config=config)
self.connections[self.server_conn].initiate_connection()
self.server_conn.send(self.connections[self.server_conn].data_to_send())
def _complete_handshake(self):
preamble = self.client_conn.rfile.read(24)
self.connections[self.client_conn].initiate_connection()
self.connections[self.client_conn].receive_data(preamble)
self.client_conn.send(self.connections[self.client_conn].data_to_send())
def next_layer(self): # pragma: no cover
# WebSocket over HTTP/2?
# CONNECT for proxying?
raise NotImplementedError()
def _handle_event(self, event, source_conn, other_conn, is_server):
self.log(
"HTTP2 Event from {}".format("server" if is_server else "client"),
"debug",
[repr(event)]
)
eid = None
if hasattr(event, 'stream_id'):
if is_server and event.stream_id % 2 == 1:
eid = self.server_to_client_stream_ids[event.stream_id]
else:
eid = event.stream_id
if isinstance(event, events.RequestReceived):
return self._handle_request_received(eid, event)
elif isinstance(event, events.ResponseReceived):
return self._handle_response_received(eid, event)
elif isinstance(event, events.DataReceived):
return self._handle_data_received(eid, event, source_conn)
elif isinstance(event, events.StreamEnded):
return self._handle_stream_ended(eid)
elif isinstance(event, events.StreamReset):
return self._handle_stream_reset(eid, event, is_server, other_conn)
elif isinstance(event, events.RemoteSettingsChanged):
return self._handle_remote_settings_changed(event, other_conn)
elif isinstance(event, events.ConnectionTerminated):
return self._handle_connection_terminated(event, is_server)
elif isinstance(event, events.PushedStreamReceived):
return self._handle_pushed_stream_received(event)
elif isinstance(event, events.PriorityUpdated):
return self._handle_priority_updated(eid, event)
elif isinstance(event, events.TrailersReceived):
return self._handle_trailers(eid, event, is_server, other_conn)
# fail-safe for unhandled events
return True
def _handle_request_received(self, eid, event):
headers = mitmproxy.net.http.Headers([[k, v] for k, v in event.headers])
self.streams[eid] = Http2SingleStreamLayer(self, self.connections[self.client_conn], eid, headers)
self.streams[eid].timestamp_start = time.time()
if event.priority_updated is not None:
self.streams[eid].priority_exclusive = event.priority_updated.exclusive
self.streams[eid].priority_depends_on = event.priority_updated.depends_on
self.streams[eid].priority_weight = event.priority_updated.weight
self.streams[eid].handled_priority_event = event.priority_updated
self.streams[eid].start()
self.streams[eid].request_message.arrived.set()
return True
def _handle_response_received(self, eid, event):
headers = mitmproxy.net.http.Headers([[k, v] for k, v in event.headers])
self.streams[eid].queued_data_length = 0
self.streams[eid].timestamp_start = time.time()
self.streams[eid].response_message.headers = headers
self.streams[eid].response_message.arrived.set()
return True
def _handle_data_received(self, eid, event, source_conn):
bsl = human.parse_size(self.config.options.body_size_limit)
if bsl and self.streams[eid].queued_data_length > bsl:
self.streams[eid].kill()
self.connections[source_conn].safe_reset_stream(
event.stream_id,
h2.errors.ErrorCodes.REFUSED_STREAM
)
self.log("HTTP body too large. Limit is {}.".format(bsl), "info")
else:
self.streams[eid].data_queue.put(event.data)
self.streams[eid].queued_data_length += len(event.data)
            # always acknowledge received data with a WINDOW_UPDATE frame
self.connections[source_conn].safe_acknowledge_received_data(
event.flow_controlled_length,
event.stream_id
)
return True
def _handle_stream_ended(self, eid):
self.streams[eid].timestamp_end = time.time()
self.streams[eid].stream_ended.set()
return True
def _handle_stream_reset(self, eid, event, is_server, other_conn):
if eid in self.streams:
self.streams[eid].kill()
if is_server:
other_stream_id = self.streams[eid].client_stream_id
else:
other_stream_id = self.streams[eid].server_stream_id
if other_stream_id is not None:
self.connections[other_conn].safe_reset_stream(other_stream_id, event.error_code)
return True
def _handle_trailers(self, eid, event, is_server, other_conn):
trailers = mitmproxy.net.http.Headers([[k, v] for k, v in event.headers])
self.streams[eid].trailers = trailers
return True
def _handle_remote_settings_changed(self, event, other_conn):
new_settings = dict([(key, cs.new_value) for (key, cs) in event.changed_settings.items()])
self.connections[other_conn].safe_update_settings(new_settings)
return True
def _handle_connection_terminated(self, event, is_server):
self.log("HTTP/2 connection terminated by {}: error code: {}, last stream id: {}, additional data: {}".format(
"server" if is_server else "client",
event.error_code,
event.last_stream_id,
event.additional_data), "info")
if event.error_code != h2.errors.ErrorCodes.NO_ERROR:
# Something terrible has happened - kill everything!
self.connections[self.client_conn].close_connection(
error_code=event.error_code,
last_stream_id=event.last_stream_id,
additional_data=event.additional_data
)
self.client_conn.send(self.connections[self.client_conn].data_to_send())
self._kill_all_streams()
        # Otherwise, do not immediately terminate the other connection;
        # some streams might still be sending data to the client.
return False
def _handle_pushed_stream_received(self, event):
# pushed stream ids should be unique and not dependent on race conditions
# only the parent stream id must be looked up first
parent_eid = self.server_to_client_stream_ids[event.parent_stream_id]
with self.connections[self.client_conn].lock:
self.connections[self.client_conn].push_stream(parent_eid, event.pushed_stream_id, event.headers)
self.client_conn.send(self.connections[self.client_conn].data_to_send())
headers = mitmproxy.net.http.Headers([[k, v] for k, v in event.headers])
layer = Http2SingleStreamLayer(self, self.connections[self.client_conn], event.pushed_stream_id, headers)
self.streams[event.pushed_stream_id] = layer
self.streams[event.pushed_stream_id].timestamp_start = time.time()
self.streams[event.pushed_stream_id].pushed = True
self.streams[event.pushed_stream_id].parent_stream_id = parent_eid
self.streams[event.pushed_stream_id].timestamp_end = time.time()
self.streams[event.pushed_stream_id].request_message.arrived.set()
self.streams[event.pushed_stream_id].request_message.stream_ended.set()
self.streams[event.pushed_stream_id].start()
return True
def _handle_priority_updated(self, eid, event):
if not self.config.options.http2_priority:
self.log("HTTP/2 PRIORITY frame suppressed. Use --http2-priority to enable forwarding.", "debug")
return True
if eid in self.streams and self.streams[eid].handled_priority_event is event:
# this event was already handled during stream creation
# HeadersFrame + Priority information as RequestReceived
return True
with self.connections[self.server_conn].lock:
mapped_stream_id = event.stream_id
if mapped_stream_id in self.streams and self.streams[mapped_stream_id].server_stream_id:
# if the stream is already up and running and was sent to the server,
# use the mapped server stream id to update priority information
mapped_stream_id = self.streams[mapped_stream_id].server_stream_id
if eid in self.streams:
self.streams[eid].priority_exclusive = event.exclusive
self.streams[eid].priority_depends_on = event.depends_on
self.streams[eid].priority_weight = event.weight
self.connections[self.server_conn].prioritize(
mapped_stream_id,
weight=event.weight,
depends_on=self._map_depends_on_stream_id(mapped_stream_id, event.depends_on),
exclusive=event.exclusive
)
self.server_conn.send(self.connections[self.server_conn].data_to_send())
return True
def _map_depends_on_stream_id(self, stream_id, depends_on):
mapped_depends_on = depends_on
if mapped_depends_on in self.streams and self.streams[mapped_depends_on].server_stream_id:
# if the depends-on-stream is already up and running and was sent to the server
# use the mapped server stream id to update priority information
mapped_depends_on = self.streams[mapped_depends_on].server_stream_id
if stream_id == mapped_depends_on:
# looks like one of the streams wasn't opened yet
# prevent self-dependent streams which result in ProtocolError
mapped_depends_on += 2
return mapped_depends_on
def _cleanup_streams(self):
death_time = time.time() - 10
zombie_streams = [(stream_id, stream) for stream_id, stream in list(self.streams.items()) if stream.zombie]
outdated_streams = [stream_id for stream_id, stream in zombie_streams if stream.zombie <= death_time]
for stream_id in outdated_streams: # pragma: no cover
self.streams.pop(stream_id, None)
def _kill_all_streams(self):
for stream in self.streams.values():
stream.kill()
def __call__(self):
self._initiate_server_conn()
self._complete_handshake()
conns = [c.connection for c in self.connections.keys()]
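        # Main proxy loop: wait for readable data on either connection, feed
        # raw frames into the corresponding H2 state machine, and dispatch the
        # resulting events until a connection terminates.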
try:
while True:
r = tcp.ssl_read_select(conns, 0.1)
for conn in r:
source_conn = self.client_conn if conn == self.client_conn.connection else self.server_conn
other_conn = self.server_conn if conn == self.client_conn.connection else self.client_conn
is_server = (source_conn == self.server_conn)
with self.connections[source_conn].lock:
try:
raw_frame = b''.join(http2.read_raw_frame(source_conn.rfile))
except:
# read frame failed: connection closed
self._kill_all_streams()
return
if self.connections[source_conn].state_machine.state == h2.connection.ConnectionState.CLOSED:
self.log("HTTP/2 connection entered closed state already", "debug")
return
incoming_events = self.connections[source_conn].receive_data(raw_frame)
source_conn.send(self.connections[source_conn].data_to_send())
for event in incoming_events:
if not self._handle_event(event, source_conn, other_conn, is_server):
# connection terminated: GoAway
self._kill_all_streams()
return
self._cleanup_streams()
except Exception as e: # pragma: no cover
self.log(repr(e), "info")
self._kill_all_streams()
def detect_zombie_stream(func): # pragma: no cover
@functools.wraps(func)
def wrapper(self, *args, **kwargs):
self.raise_zombie()
result = func(self, *args, **kwargs)
self.raise_zombie()
return result
return wrapper
class Http2SingleStreamLayer(httpbase._HttpTransmissionLayer, basethread.BaseThread):
class Message:
def __init__(self, headers=None):
self.headers: Optional[mitmproxy.net.http.Headers] = headers # headers are the first thing to be received on a new stream
self.data_queue: queue.Queue[bytes] = queue.Queue() # contains raw contents of DATA frames
self.queued_data_length = 0 # used to enforce mitmproxy's config.options.body_size_limit
self.trailers: Optional[mitmproxy.net.http.Headers] = None # trailers are received after stream_ended is set
            self.arrived = threading.Event()  # indicates the HEADERS+CONTINUATION frames have been received
            self.stream_ended = threading.Event()  # indicates that a frame with the END_STREAM flag has been received
def __init__(self, ctx, h2_connection, stream_id: int, request_headers: mitmproxy.net.http.Headers) -> None:
super().__init__(
ctx, name="Http2SingleStreamLayer-{}".format(stream_id)
)
self.h2_connection = h2_connection
self.zombie: Optional[float] = None
self.client_stream_id: int = stream_id
self.server_stream_id: Optional[int] = None
self.pushed = False
self.timestamp_start: Optional[float] = None
self.timestamp_end: Optional[float] = None
self.request_message = self.Message(request_headers)
self.response_message = self.Message()
self.priority_exclusive: bool
self.priority_depends_on: Optional[int] = None
self.priority_weight: Optional[int] = None
self.handled_priority_event: Any = None
def kill(self):
if not self.zombie:
self.zombie = time.time()
self.request_message.stream_ended.set()
self.request_message.arrived.set()
self.response_message.arrived.set()
self.response_message.stream_ended.set()
def connect(self): # pragma: no cover
raise exceptions.Http2ProtocolException("HTTP2 layer should already have a connection.")
def disconnect(self): # pragma: no cover
raise exceptions.Http2ProtocolException("Cannot dis- or reconnect in HTTP2 connections.")
def set_server(self, address): # pragma: no cover
raise exceptions.SetServerNotAllowedException(repr(address))
def check_close_connection(self, flow):
# This layer only handles a single stream.
# RFC 7540 8.1: An HTTP request/response exchange fully consumes a single stream.
return True
@property
def data_queue(self):
if self.response_message.arrived.is_set():
return self.response_message.data_queue
else:
return self.request_message.data_queue
@property
def queued_data_length(self):
if self.response_message.arrived.is_set():
return self.response_message.queued_data_length
else:
return self.request_message.queued_data_length
    @queued_data_length.setter
    def queued_data_length(self, v):
        # Mirror the getter (and the trailers setter below): write to the
        # message that is currently being received.
        if self.response_message.arrived.is_set():
            self.response_message.queued_data_length = v
        else:
            self.request_message.queued_data_length = v
@property
def stream_ended(self):
# This indicates that all message headers, the full message body, and all trailers have been received
# https://tools.ietf.org/html/rfc7540#section-8.1
if self.response_message.arrived.is_set():
return self.response_message.stream_ended
else:
return self.request_message.stream_ended
@property
def trailers(self):
if self.response_message.arrived.is_set():
return self.response_message.trailers
else:
return self.request_message.trailers
@trailers.setter
def trailers(self, v):
if self.response_message.arrived.is_set():
self.response_message.trailers = v
else:
self.request_message.trailers = v
def raise_zombie(self, pre_command=None): # pragma: no cover
connection_closed = self.h2_connection.state_machine.state == h2.connection.ConnectionState.CLOSED
if self.zombie is not None or connection_closed:
if pre_command is not None:
pre_command()
raise exceptions.Http2ZombieException("Connection or stream already dead: {}, {}".format(self.zombie, connection_closed))
@detect_zombie_stream
def read_request_headers(self, flow):
self.request_message.arrived.wait()
self.raise_zombie()
if self.pushed:
flow.metadata['h2-pushed-stream'] = True
        # pseudo headers must be present, see https://http2.github.io/http2-spec/#rfc.section.8.1.2.3
authority = self.request_message.headers.pop(':authority', "")
method = self.request_message.headers.pop(':method')
scheme = self.request_message.headers.pop(':scheme')
path = self.request_message.headers.pop(':path')
host, port = url.parse_authority(authority, check=True)
port = port or url.default_port(scheme) or 0
return http.HTTPRequest(
host,
port,
method.encode(),
scheme.encode(),
authority.encode(),
path.encode(),
b"HTTP/2.0",
self.request_message.headers,
None,
None,
self.timestamp_start,
self.timestamp_end,
)
@detect_zombie_stream
def read_request_body(self, request):
if not request.stream:
self.request_message.stream_ended.wait()
while True:
try:
yield self.request_message.data_queue.get(timeout=0.1)
except queue.Empty: # pragma: no cover
pass
if self.request_message.stream_ended.is_set():
self.raise_zombie()
while self.request_message.data_queue.qsize() > 0:
yield self.request_message.data_queue.get()
break
self.raise_zombie()
@detect_zombie_stream
def read_request_trailers(self, request):
return self.request_message.trailers
@detect_zombie_stream
def send_request_headers(self, request):
if self.pushed:
# nothing to do here
return
while True:
self.raise_zombie()
self.connections[self.server_conn].lock.acquire()
max_streams = self.connections[self.server_conn].remote_settings.max_concurrent_streams
if self.connections[self.server_conn].open_outbound_streams + 1 >= max_streams:
# wait until we get a free slot for a new outgoing stream
self.connections[self.server_conn].lock.release()
time.sleep(0.1)
continue
# keep the lock
break
# We must not assign a stream id if we are already a zombie.
self.raise_zombie()
self.server_stream_id = self.connections[self.server_conn].get_next_available_stream_id()
self.server_to_client_stream_ids[self.server_stream_id] = self.client_stream_id
headers = request.headers.copy()
if request.authority:
headers.insert(0, ":authority", request.authority)
headers.insert(0, ":path", request.path)
headers.insert(0, ":method", request.method)
headers.insert(0, ":scheme", request.scheme)
priority_exclusive = None
priority_depends_on = None
priority_weight = None
if self.handled_priority_event:
            # only send priority information if it actually came with the original HeadersFrame
            # and not if it got updated before/after by a PriorityFrame
if not self.config.options.http2_priority:
self.log("HTTP/2 PRIORITY information in HEADERS frame suppressed. Use --http2-priority to enable forwarding.", "debug")
else:
priority_exclusive = self.priority_exclusive
priority_depends_on = self._map_depends_on_stream_id(self.server_stream_id, self.priority_depends_on)
priority_weight = self.priority_weight
        try:
            self.connections[self.server_conn].safe_send_headers(
                self.raise_zombie,
                self.server_stream_id,
                headers,
                priority_exclusive=priority_exclusive,
                priority_depends_on=priority_depends_on,
                priority_weight=priority_weight,
            )
        finally:
            self.raise_zombie()
            self.connections[self.server_conn].lock.release()
@detect_zombie_stream
def send_request_body(self, request, chunks):
if self.pushed:
# nothing to do here
return
self.connections[self.server_conn].safe_send_body(
self.raise_zombie,
self.server_stream_id,
chunks,
end_stream=(request.trailers is None),
)
@detect_zombie_stream
def send_request_trailers(self, request):
self._send_trailers(self.server_conn, request.trailers)
@detect_zombie_stream
def send_request(self, request):
self.send_request_headers(request)
self.send_request_body(request, [request.content])
self.send_request_trailers(request)
@detect_zombie_stream
def read_response_headers(self):
self.response_message.arrived.wait()
self.raise_zombie()
status_code = int(self.response_message.headers.get(':status', 502))
headers = self.response_message.headers.copy()
headers.pop(":status", None)
return http.HTTPResponse(
http_version=b"HTTP/2.0",
status_code=status_code,
reason=b'',
headers=headers,
content=None,
trailers=None,
timestamp_start=self.timestamp_start,
timestamp_end=self.timestamp_end,
)
@detect_zombie_stream
def read_response_body(self, request, response):
while True:
try:
yield self.response_message.data_queue.get(timeout=0.1)
except queue.Empty: # pragma: no cover
pass
if self.response_message.stream_ended.is_set():
self.raise_zombie()
while self.response_message.data_queue.qsize() > 0:
yield self.response_message.data_queue.get()
break
self.raise_zombie()
@detect_zombie_stream
def read_response_trailers(self, request, response):
return self.response_message.trailers
@detect_zombie_stream
def send_response_headers(self, response):
headers = response.headers.copy()
headers.insert(0, ":status", str(response.status_code))
with self.connections[self.client_conn].lock:
self.connections[self.client_conn].safe_send_headers(
self.raise_zombie,
self.client_stream_id,
headers
)
@detect_zombie_stream
def send_response_body(self, response, chunks):
self.connections[self.client_conn].safe_send_body(
self.raise_zombie,
self.client_stream_id,
chunks,
end_stream=(response.trailers is None),
)
@detect_zombie_stream
def send_response_trailers(self, response):
self._send_trailers(self.client_conn, response.trailers)
    def _send_trailers(self, conn, trailers):
        if not trailers:
            return
        # Send trailers on the stream id that belongs to the target
        # connection: the client and server sides of a proxied exchange
        # use different stream ids.
        if conn is self.client_conn:
            stream_id = self.client_stream_id
        else:
            stream_id = self.server_stream_id
        with self.connections[conn].lock:
            self.connections[conn].safe_send_headers(
                self.raise_zombie,
                stream_id,
                trailers,
                end_stream=True
            )
def __call__(self): # pragma: no cover
raise EnvironmentError('Http2SingleStreamLayer must be run as thread')
def run(self):
layer = httpbase.HttpLayer(self, self.mode)
try:
layer()
except exceptions.Http2ZombieException: # pragma: no cover
# zombies can be safely terminated - no need to kill them twice
return
except exceptions.ProtocolException as e: # pragma: no cover
self.log(repr(e), "info")
except exceptions.SetServerNotAllowedException as e: # pragma: no cover
self.log("Changing the Host server for HTTP/2 connections not allowed: {}".format(e), "info")
except exceptions.Kill: # pragma: no cover
self.log("Connection killed", "info")
self.kill()
| mit |
creasyw/IMTAphy | documentation/doctools/tags/0.4.3/sphinx/linkcheck.py | 11 | 3581 | # -*- coding: utf-8 -*-
"""
sphinx.linkcheck
~~~~~~~~~~~~~~~~
The CheckExternalLinksBuilder class.
:copyright: 2008 by Georg Brandl, Thomas Lamb.
:license: BSD.
"""
import socket
from os import path
from urllib2 import build_opener, HTTPError
from docutils import nodes
from sphinx.builder import Builder
from sphinx.util.console import purple, red, darkgreen
# create an opener that will simulate a browser user-agent
opener = build_opener()
opener.addheaders = [('User-agent', 'Mozilla/5.0')]
class CheckExternalLinksBuilder(Builder):
"""
Checks for broken external links.
"""
name = 'linkcheck'
def init(self):
self.good = set()
self.broken = {}
self.redirected = {}
# set a timeout for non-responding servers
socket.setdefaulttimeout(5.0)
# create output file
open(path.join(self.outdir, 'output.txt'), 'w').close()
def get_target_uri(self, docname, typ=None):
return ''
def get_outdated_docs(self):
return self.env.found_docs
def prepare_writing(self, docnames):
return
def write_doc(self, docname, doctree):
self.info()
for node in doctree.traverse(nodes.reference):
try:
self.check(node, docname)
except KeyError:
continue
def check(self, node, docname):
uri = node['refuri']
if '#' in uri:
uri = uri.split('#')[0]
if uri in self.good:
return
lineno = None
while lineno is None and node:
node = node.parent
lineno = node.line
if uri[0:5] == 'http:' or uri[0:6] == 'https:':
self.info(uri, nonl=1)
if uri in self.broken:
(r, s) = self.broken[uri]
elif uri in self.redirected:
(r, s) = self.redirected[uri]
else:
(r, s) = self.resolve(uri)
if r == 0:
self.info(' - ' + darkgreen('working'))
self.good.add(uri)
elif r == 2:
self.info(' - ' + red('broken: ') + s)
self.broken[uri] = (r, s)
self.write_entry('broken', docname, lineno, uri + ': ' + s)
else:
self.info(' - ' + purple('redirected') + ' to ' + s)
self.redirected[uri] = (r, s)
self.write_entry('redirected', docname, lineno, uri + ' to ' + s)
elif len(uri) == 0 or uri[0:7] == 'mailto:' or uri[0:4] == 'ftp:':
return
else:
self.info(uri + ' - ' + red('malformed!'))
self.write_entry('malformed', docname, lineno, uri)
return
def write_entry(self, what, docname, line, uri):
output = open(path.join(self.outdir, 'output.txt'), 'a')
output.write("%s:%s: [%s] %s\n" % (self.env.doc2path(docname, None),
line, what, uri))
output.close()
    def resolve(self, uri):
        # Returns a (status, info) tuple: 0 = working (info unused),
        # 1 = redirected (info is the target URL), 2 = broken (info is
        # the error message).
try:
f = opener.open(uri)
f.close()
except HTTPError, err:
#if err.code == 403 and uri.startswith('http://en.wikipedia.org/'):
# # Wikipedia blocks requests from urllib User-Agent
# return (0, 0)
return (2, str(err))
except Exception, err:
return (2, str(err))
if f.url.rstrip('/') == uri.rstrip('/'):
return (0, 0)
else:
return (1, f.url)
def finish(self):
return
| gpl-2.0 |
ahmadiga/min_edx | common/test/acceptance/tests/studio/test_studio_asset.py | 37 | 1708 | """
Acceptance tests for Studio related to the asset index page.
"""
from ...pages.studio.asset_index import AssetIndexPage
from .base_studio_test import StudioCourseTest
from ...fixtures.base import StudioApiLoginError
class AssetIndexTest(StudioCourseTest):
"""
Tests for the Asset index page.
"""
def setUp(self, is_staff=False):
super(AssetIndexTest, self).setUp()
self.asset_page = AssetIndexPage(
self.browser,
self.course_info['org'],
self.course_info['number'],
self.course_info['run']
)
def populate_course_fixture(self, course_fixture):
"""
Populate the children of the test course fixture.
"""
self.course_fixture.add_asset(['image.jpg', 'textbook.pdf'])
def test_page_existence(self):
"""
Make sure that the page is accessible.
"""
self.asset_page.visit()
def test_type_filter_exists(self):
"""
Make sure type filter is on the page.
"""
self.asset_page.visit()
assert self.asset_page.type_filter_on_page() is True
def test_filter_results(self):
"""
Make sure type filter actually filters the results.
"""
self.asset_page.visit()
all_results = len(self.asset_page.return_results_set())
if self.asset_page.select_type_filter(1):
filtered_results = len(self.asset_page.return_results_set())
assert self.asset_page.type_filter_header_label_visible()
assert all_results > filtered_results
else:
msg = "Could not open select Type filter"
raise StudioApiLoginError(msg)
| agpl-3.0 |
renfredxh/pylletTown | pylletTown.py | 1 | 8123 | import pygame
import tmx
class Player(pygame.sprite.Sprite):
def __init__(self, location, orientation, *groups):
super(Player, self).__init__(*groups)
self.image = pygame.image.load('sprites/player.png')
self.imageDefault = self.image.copy()
self.rect = pygame.Rect(location, (64,64))
self.orient = orientation
self.holdTime = 0
self.walking = False
self.dx = 0
self.step = 'rightFoot'
# Set default orientation
self.setSprite()
def setSprite(self):
# Resets the player sprite sheet to its default position
# and scrolls it to the necessary position for the current orientation
self.image = self.imageDefault.copy()
if self.orient == 'up':
self.image.scroll(0, -64)
elif self.orient == 'down':
self.image.scroll(0, 0)
elif self.orient == 'left':
self.image.scroll(0, -128)
elif self.orient == 'right':
self.image.scroll(0, -192)
def update(self, dt, game):
key = pygame.key.get_pressed()
# Setting orientation and sprite based on key input:
if key[pygame.K_UP]:
if not self.walking:
if self.orient != 'up':
self.orient = 'up'
self.setSprite()
self.holdTime += dt
elif key[pygame.K_DOWN]:
if not self.walking:
if self.orient != 'down':
self.orient = 'down'
self.setSprite()
self.holdTime += dt
elif key[pygame.K_LEFT]:
if not self.walking:
if self.orient != 'left':
self.orient = 'left'
self.setSprite()
self.holdTime += dt
elif key[pygame.K_RIGHT]:
if not self.walking:
if self.orient != 'right':
self.orient = 'right'
self.setSprite()
self.holdTime += dt
else:
self.holdTime = 0
self.step = 'rightFoot'
# Walking mode enabled if a button is held for 0.1 seconds
if self.holdTime >= 100:
self.walking = True
lastRect = self.rect.copy()
# Walking at 8 pixels per frame in the direction the player is facing
if self.walking and self.dx < 64:
if self.orient == 'up':
self.rect.y -= 8
elif self.orient == 'down':
self.rect.y += 8
elif self.orient == 'left':
self.rect.x -= 8
elif self.orient == 'right':
self.rect.x += 8
self.dx += 8
# Collision detection:
# Reset to the previous rectangle if player collides
        # with any cell marked 'solid' in the triggers layer
if len(game.tilemap.layers['triggers'].collide(self.rect,
'solid')) > 0:
self.rect = lastRect
# Area entry detection:
elif len(game.tilemap.layers['triggers'].collide(self.rect,
'entry')) > 0:
entryCell = game.tilemap.layers['triggers'].find('entry')[0]
game.fadeOut()
game.initArea(entryCell['entry'])
return
# Switch to the walking sprite after 32 pixels
if self.dx == 32:
# Self.step keeps track of when to flip the sprite so that
# the character appears to be taking steps with different feet.
if (self.orient == 'up' or
self.orient == 'down') and self.step == 'leftFoot':
self.image = pygame.transform.flip(self.image, True, False)
self.step = 'rightFoot'
else:
self.image.scroll(-64, 0)
self.step = 'leftFoot'
# After traversing 64 pixels, the walking animation is done
if self.dx == 64:
self.walking = False
self.setSprite()
self.dx = 0
game.tilemap.set_focus(self.rect.x, self.rect.y)
class SpriteLoop(pygame.sprite.Sprite):
"""A simple looped animated sprite.
SpriteLoops require certain properties to be defined in the relevant
tmx tile:
src - the source of the image that contains the sprites
width, height - the width and height of each section of the sprite that
will be displayed on-screen during animation
mspf - milliseconds per frame, or how many milliseconds must pass to
advance onto the next frame in the sprite's animation
    frames - the number of individual frames that compose the animation
"""
def __init__(self, location, cell, *groups):
super(SpriteLoop, self).__init__(*groups)
self.image = pygame.image.load(cell['src'])
self.defaultImage = self.image.copy()
self.width = int(cell['width'])
self.height = int(cell['height'])
self.rect = pygame.Rect(location, (self.width,self.height))
self.frames = int(cell['frames'])
self.frameCount = 0
self.mspf = int(cell['mspf']) # milliseconds per frame
self.timeCount = 0
def update(self, dt, game):
self.timeCount += dt
if self.timeCount > self.mspf:
# Advance animation to the appropriate frame
self.image = self.defaultImage.copy()
self.image.scroll(-1*self.width*self.frameCount, 0)
self.timeCount = 0
self.frameCount += 1
if self.frameCount == self.frames:
self.frameCount = 0
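# A minimal sketch of the tmx tile properties SpriteLoop expects; the file
# name and numbers below are made-up placeholders, the real values come from
# the map's tmx file:
#
#   cell = {'src': 'sprites/water.png',    # sheet containing all frames
#           'width': '64', 'height': '64', # on-screen size of one frame
#           'frames': '4',                 # frames in the loop
#           'mspf': '150'}                 # advance a frame every 150 ms
#   water = SpriteLoop((128, 128), cell)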
class Game(object):
def __init__(self, screen):
self.screen = screen
def fadeOut(self):
"""Animate the screen fading to black for entering a new area"""
clock = pygame.time.Clock()
blackRect = pygame.Surface(self.screen.get_size())
blackRect.set_alpha(100)
blackRect.fill((0,0,0))
# Continuously draw a transparent black rectangle over the screen
# to create a fadeout effect
for i in range(0,5):
clock.tick(15)
self.screen.blit(blackRect, (0,0))
pygame.display.flip()
clock.tick(15)
        self.screen.fill((255, 255, 255, 50))
pygame.display.flip()
def initArea(self, mapFile):
"""Load maps and initialize sprite layers for each new area"""
        self.tilemap = tmx.load(mapFile, self.screen.get_size())
self.players = tmx.SpriteLayer()
self.objects = tmx.SpriteLayer()
# Initializing other animated sprites
try:
for cell in self.tilemap.layers['sprites'].find('src'):
SpriteLoop((cell.px,cell.py), cell, self.objects)
# In case there is no sprite layer for the current map
except KeyError:
pass
else:
self.tilemap.layers.append(self.objects)
# Initializing player sprite
startCell = self.tilemap.layers['triggers'].find('playerStart')[0]
self.player = Player((startCell.px, startCell.py),
startCell['playerStart'], self.players)
self.tilemap.layers.append(self.players)
self.tilemap.set_focus(self.player.rect.x, self.player.rect.y)
def main(self):
clock = pygame.time.Clock()
self.initArea('palletTown.tmx')
while 1:
dt = clock.tick(30)
for event in pygame.event.get():
if event.type == pygame.QUIT:
return
if event.type == pygame.KEYDOWN and event.key == pygame.K_ESCAPE:
return
self.tilemap.update(dt, self)
            self.screen.fill((0, 0, 0))
self.tilemap.draw(self.screen)
pygame.display.flip()
if __name__ == '__main__':
pygame.init()
screen = pygame.display.set_mode((640, 480))
pygame.display.set_caption("Pyllet Town")
Game(screen).main() | mit |
carlmw/oscar-wager | django/contrib/gis/gdal/field.py | 264 | 6059 | from ctypes import byref, c_int
from datetime import date, datetime, time
from django.contrib.gis.gdal.base import GDALBase
from django.contrib.gis.gdal.error import OGRException
from django.contrib.gis.gdal.prototypes import ds as capi
# For more information, see the OGR C API source code:
# http://www.gdal.org/ogr/ogr__api_8h.html
#
# The OGR_Fld_* routines are relevant here.
class Field(GDALBase):
"A class that wraps an OGR Field, needs to be instantiated from a Feature object."
#### Python 'magic' routines ####
def __init__(self, feat, index):
"""
Initializes on the feature pointer and the integer index of
the field within the feature.
"""
# Setting the feature pointer and index.
self._feat = feat
self._index = index
# Getting the pointer for this field.
fld_ptr = capi.get_feat_field_defn(feat, index)
if not fld_ptr:
raise OGRException('Cannot create OGR Field, invalid pointer given.')
self.ptr = fld_ptr
# Setting the class depending upon the OGR Field Type (OFT)
self.__class__ = OGRFieldTypes[self.type]
# OFTReal with no precision should be an OFTInteger.
if isinstance(self, OFTReal) and self.precision == 0:
self.__class__ = OFTInteger
def __str__(self):
"Returns the string representation of the Field."
return str(self.value).strip()
#### Field Methods ####
def as_double(self):
"Retrieves the Field's value as a double (float)."
return capi.get_field_as_double(self._feat, self._index)
def as_int(self):
"Retrieves the Field's value as an integer."
return capi.get_field_as_integer(self._feat, self._index)
def as_string(self):
"Retrieves the Field's value as a string."
return capi.get_field_as_string(self._feat, self._index)
def as_datetime(self):
"Retrieves the Field's value as a tuple of date & time components."
yy, mm, dd, hh, mn, ss, tz = [c_int() for i in range(7)]
status = capi.get_field_as_datetime(self._feat, self._index, byref(yy), byref(mm), byref(dd),
byref(hh), byref(mn), byref(ss), byref(tz))
if status:
return (yy, mm, dd, hh, mn, ss, tz)
else:
raise OGRException('Unable to retrieve date & time information from the field.')
#### Field Properties ####
@property
def name(self):
"Returns the name of this Field."
return capi.get_field_name(self.ptr)
@property
def precision(self):
"Returns the precision of this Field."
return capi.get_field_precision(self.ptr)
@property
def type(self):
"Returns the OGR type of this Field."
return capi.get_field_type(self.ptr)
@property
def type_name(self):
"Return the OGR field type name for this Field."
return capi.get_field_type_name(self.type)
@property
def value(self):
"Returns the value of this Field."
# Default is to get the field as a string.
return self.as_string()
@property
def width(self):
"Returns the width of this Field."
return capi.get_field_width(self.ptr)
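# A small usage sketch (assumes `feat` is a valid OGR feature pointer whose
# field 0 is a string field); Field.__init__ swaps in the matching OFT*
# subclass from the OGRFieldTypes mapping at the bottom of this module:
#
#   fld = Field(feat, 0)
#   fld.name        # e.g. 'NAME'
#   fld.type_name   # e.g. 'String'
#   fld.value       # the field's value, a Python string for OFTString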
### The Field sub-classes for each OGR Field type. ###
class OFTInteger(Field):
@property
def value(self):
"Returns an integer contained in this field."
return self.as_int()
@property
def type(self):
"""
GDAL uses OFTReals to represent OFTIntegers in created
shapefiles -- forcing the type here since the underlying field
type may actually be OFTReal.
"""
return 0
class OFTReal(Field):
@property
def value(self):
"Returns a float contained in this field."
return self.as_double()
# String & Binary fields, just subclasses
class OFTString(Field): pass
class OFTWideString(Field): pass
class OFTBinary(Field): pass
# OFTDate, OFTTime, OFTDateTime fields.
class OFTDate(Field):
@property
def value(self):
"Returns a Python `date` object for the OFTDate field."
try:
yy, mm, dd, hh, mn, ss, tz = self.as_datetime()
return date(yy.value, mm.value, dd.value)
except (ValueError, OGRException):
return None
class OFTDateTime(Field):
@property
def value(self):
"Returns a Python `datetime` object for this OFTDateTime field."
# TODO: Adapt timezone information.
# See http://lists.maptools.org/pipermail/gdal-dev/2006-February/007990.html
# The `tz` variable has values of: 0=unknown, 1=localtime (ambiguous),
# 100=GMT, 104=GMT+1, 80=GMT-5, etc.
try:
yy, mm, dd, hh, mn, ss, tz = self.as_datetime()
return datetime(yy.value, mm.value, dd.value, hh.value, mn.value, ss.value)
except (ValueError, OGRException):
return None
class OFTTime(Field):
@property
def value(self):
"Returns a Python `time` object for this OFTTime field."
try:
yy, mm, dd, hh, mn, ss, tz = self.as_datetime()
return time(hh.value, mn.value, ss.value)
except (ValueError, OGRException):
return None
# List fields are also just subclasses
class OFTIntegerList(Field): pass
class OFTRealList(Field): pass
class OFTStringList(Field): pass
class OFTWideStringList(Field): pass
# Class mapping dictionary for OFT Types and reverse mapping.
OGRFieldTypes = { 0 : OFTInteger,
1 : OFTIntegerList,
2 : OFTReal,
3 : OFTRealList,
4 : OFTString,
5 : OFTStringList,
6 : OFTWideString,
7 : OFTWideStringList,
8 : OFTBinary,
9 : OFTDate,
10 : OFTTime,
11 : OFTDateTime,
}
ROGRFieldTypes = dict([(cls, num) for num, cls in OGRFieldTypes.items()])
| bsd-3-clause |
DJMelonz/basic-blog | django/views/generic/simple.py | 245 | 2319 | from django.template import loader, RequestContext
from django.http import HttpResponse, HttpResponseRedirect, HttpResponsePermanentRedirect, HttpResponseGone
from django.utils.log import getLogger
import warnings
warnings.warn(
'Function-based generic views have been deprecated; use class-based views instead.',
PendingDeprecationWarning
)
logger = getLogger('django.request')
def direct_to_template(request, template, extra_context=None, mimetype=None, **kwargs):
"""
Render a given template with any extra URL parameters in the context as
``{{ params }}``.
"""
if extra_context is None: extra_context = {}
dictionary = {'params': kwargs}
for key, value in extra_context.items():
if callable(value):
dictionary[key] = value()
else:
dictionary[key] = value
c = RequestContext(request, dictionary)
t = loader.get_template(template)
return HttpResponse(t.render(c), mimetype=mimetype)
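# A minimal URLconf sketch for direct_to_template (the template name is a
# made-up example):
#
#   urlpatterns = patterns('',
#       (r'^about/$', 'django.views.generic.simple.direct_to_template',
#        {'template': 'about.html'}),
#   )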
def redirect_to(request, url, permanent=True, query_string=False, **kwargs):
"""
Redirect to a given URL.
The given url may contain dict-style string formatting, which will be
interpolated against the params in the URL. For example, to redirect from
``/foo/<id>/`` to ``/bar/<id>/``, you could use the following URLconf::
urlpatterns = patterns('',
('^foo/(?P<id>\d+)/$', 'django.views.generic.simple.redirect_to', {'url' : '/bar/%(id)s/'}),
)
If the given url is ``None``, a HttpResponseGone (410) will be issued.
If the ``permanent`` argument is False, then the response will have a 302
HTTP status code. Otherwise, the status code will be 301.
If the ``query_string`` argument is True, then the GET query string
from the request is appended to the URL.
"""
args = request.META["QUERY_STRING"]
if args and query_string and url is not None:
url = "%s?%s" % (url, args)
if url is not None:
klass = permanent and HttpResponsePermanentRedirect or HttpResponseRedirect
return klass(url % kwargs)
else:
logger.warning('Gone: %s' % request.path,
extra={
'status_code': 410,
'request': request
})
return HttpResponseGone()
| bsd-3-clause |
RockRaidersInc/ROS-Main | gps/src/FSM.py | 1 | 1234 | """Finite-state-machine helpers for handling UBX GPS messages."""
from enum import Enum
import UBX
def isObj(obj, cls):
"""Test if UBX message obj is of class cls."""
return obj._class == cls._class and obj._id == cls._id
def isACK(obj):
"""Test whether message obj is a ACK."""
return isObj(obj, UBX.ACK.ACK)
def isNAK(obj):
"""Test whether message obj is a NAK."""
return isObj(obj, UBX.ACK.NAK)
def FSM_Get(msgCls):
"""Decorator that makes a getter FSM for use in Manager."""
def decorator(FSMCls):
# 1. class STATE
class STATE(Enum):
START = 0
DONE = 1
setattr(FSMCls, "STATE", STATE)
# 2. function __init__
def __init__(self):
self.state = FSMCls.STATE.START
self.ver = None
setattr(FSMCls, "__init__", __init__)
# 3. function done
def done(self):
return self.state == FSMCls.STATE.DONE
setattr(FSMCls, "done", done)
# 4. function onUBX
def onUBX(self, obj, manager):
if obj._class == msgCls._class and obj._id == msgCls._id:
print(obj)
self.state = FSMCls.STATE.DONE
setattr(FSMCls, "onUBX", onUBX)
return FSMCls
return decorator
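# A minimal usage sketch (UBX.MON.VER is assumed here purely for
# illustration); the decorated class gains STATE, __init__, done() and
# onUBX(), so a manager can poll done() while feeding it parsed messages:
#
#   @FSM_Get(UBX.MON.VER)
#   class GetVersion:
#       pass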
| gpl-3.0 |
incaser/odoo-odoo | doc/_extensions/odoo/translator.py | 207 | 26718 | # -*- coding: utf-8 -*-
import os.path
import posixpath
import re
import urllib
from docutils import nodes
from sphinx import addnodes, util
from sphinx.locale import admonitionlabels
def _parents(node):
while node.parent:
node = node.parent
yield node
class BootstrapTranslator(nodes.NodeVisitor, object):
head_prefix = 'head_prefix'
head = 'head'
stylesheet = 'stylesheet'
body_prefix = 'body_prefix'
body_pre_docinfo = 'body_pre_docinfo'
docinfo = 'docinfo'
body_suffix = 'body_suffix'
subtitle = 'subtitle'
header = 'header'
footer = 'footer'
html_prolog = 'html_prolog'
html_head = 'html_head'
html_title = 'html_title'
html_subtitle = 'html_subtitle'
# <meta> tags
meta = [
'<meta http-equiv="X-UA-Compatible" content="IE=edge">',
'<meta name="viewport" content="width=device-width, initial-scale=1">'
]
def __init__(self, builder, document):
super(BootstrapTranslator, self).__init__(document)
self.builder = builder
self.body = []
self.fragment = self.body
self.html_body = self.body
# document title
self.title = []
self.start_document_title = 0
self.first_title = False
self.context = []
self.section_level = 0
self.highlightlang = self.highlightlang_base = self.builder.config.highlight_language
self.highlightopts = getattr(builder.config, 'highlight_options', {})
self.first_param = 1
self.optional_param_level = 0
self.required_params_left = 0
self.param_separator = ','
def encode(self, text):
return unicode(text).translate({
ord('&'): u'&',
ord('<'): u'<',
ord('"'): u'"',
ord('>'): u'>',
0xa0: u' '
})
def starttag(self, node, tagname, **attributes):
tagname = unicode(tagname).lower()
# extract generic attributes
attrs = {name.lower(): value for name, value in attributes.iteritems()}
attrs.update(
(name, value) for name, value in node.attributes.iteritems()
if name.startswith('data-')
)
prefix = []
postfix = []
# handle possibly multiple ids
assert 'id' not in attrs, "starttag can't be passed a single id attribute, use a list of ids"
ids = node.get('ids', []) + attrs.pop('ids', [])
if ids:
_ids = iter(ids)
attrs['id'] = next(_ids)
postfix.extend(u'<i id="{}"></i>'.format(_id) for _id in _ids)
# set CSS class
classes = set(node.get('classes', []) + attrs.pop('class', '').split())
if classes:
attrs['class'] = u' '.join(classes)
return u'{prefix}<{tag} {attrs}>{postfix}'.format(
prefix=u''.join(prefix),
tag=tagname,
attrs=u' '.join(u'{}="{}"'.format(name, self.attval(value))
for name, value in attrs.iteritems()),
postfix=u''.join(postfix),
)
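    # A small illustration of what starttag yields (an assumption based on
    # the code above; attribute and class order can vary because attrs is a
    # dict and classes are collected in a set):
    #
    #   node.attributes == {'ids': ['a', 'b'], 'classes': ['x']}
    #   self.starttag(node, 'div', CLASS='y')
    #   # -> u'<div id="a" class="x y"><i id="b"></i>'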
# only "space characters" SPACE, CHARACTER TABULATION, LINE FEED,
    # FORM FEED and CARRIAGE RETURN should be collapsed, not all White_Space
def attval(self, value, whitespace=re.compile(u'[ \t\n\f\r]')):
return self.encode(whitespace.sub(u' ', unicode(value)))
def astext(self):
return u''.join(self.body)
def unknown_visit(self, node):
print "unknown node", node.__class__.__name__
self.body.append(u'[UNKNOWN NODE {}]'.format(node.__class__.__name__))
raise nodes.SkipNode
def visit_highlightlang(self, node):
self.highlightlang = node['lang']
def depart_highlightlang(self, node):
pass
def visit_document(self, node):
self.first_title = True
def depart_document(self, node):
pass
def visit_section(self, node):
# close "parent" or preceding section, unless this is the opening of
# the first section
if self.section_level:
self.body.append(u'</section>')
self.section_level += 1
self.body.append(self.starttag(node, 'section'))
def depart_section(self, node):
self.section_level -= 1
# close last section of document
if not self.section_level:
self.body.append(u'</section>')
def is_compact_paragraph(self, node):
parent = node.parent
if isinstance(parent, (nodes.document, nodes.compound,
addnodes.desc_content,
addnodes.versionmodified)):
# Never compact paragraphs in document or compound.
return False
for key, value in node.attlist():
# we can ignore a few specific classes, all other non-default
# attributes require that a <p> node remains
if key != 'classes' or value not in ([], ['first'], ['last'], ['first', 'last']):
return False
first = isinstance(node.parent[0], nodes.label)
for child in parent.children[first:]:
# only first paragraph can be compact
if isinstance(child, nodes.Invisible):
continue
if child is node:
break
return False
parent_length = len([
1 for n in parent
if not isinstance(n, (nodes.Invisible, nodes.label))
])
return parent_length == 1
def visit_paragraph(self, node):
if self.is_compact_paragraph(node):
self.context.append(u'')
return
self.body.append(self.starttag(node, 'p'))
self.context.append(u'</p>')
def depart_paragraph(self, node):
self.body.append(self.context.pop())
def visit_compact_paragraph(self, node):
pass
def depart_compact_paragraph(self, node):
pass
def visit_literal_block(self, node):
if node.rawsource != node.astext():
# most probably a parsed-literal block -- don't highlight
self.body.append(self.starttag(node, 'pre'))
return
lang = self.highlightlang
highlight_args = node.get('highlight_args', {})
if 'language' in node:
# code-block directives
lang = node['language']
highlight_args['force'] = True
linenos = node.get('linenos', False)
if lang is self.highlightlang_base:
# only pass highlighter options for original language
opts = self.highlightopts
else:
opts = {}
def warner(msg):
self.builder.warn(msg, (self.builder.current_docname, node.line))
highlighted = self.builder.highlighter.highlight_block(
node.rawsource, lang, opts=opts, warn=warner, linenos=linenos,
**highlight_args)
self.body.append(self.starttag(node, 'div', CLASS='highlight-%s' % lang))
self.body.append(highlighted)
self.body.append(u'</div>\n')
raise nodes.SkipNode
def depart_literal_block(self, node):
self.body.append(u'</pre>')
def visit_bullet_list(self, node):
self.body.append(self.starttag(node, 'ul'))
def depart_bullet_list(self, node):
self.body.append(u'</ul>')
def visit_enumerated_list(self, node):
self.body.append(self.starttag(node, 'ol'))
def depart_enumerated_list(self, node):
self.body.append(u'</ol>')
def visit_list_item(self, node):
self.body.append(self.starttag(node, 'li'))
def depart_list_item(self, node):
self.body.append(u'</li>')
def visit_definition_list(self, node):
self.body.append(self.starttag(node, 'dl'))
def depart_definition_list(self, node):
self.body.append(u'</dl>')
def visit_definition_list_item(self, node):
pass
def depart_definition_list_item(self, node):
pass
def visit_term(self, node):
self.body.append(self.starttag(node, 'dt'))
def depart_term(self, node):
self.body.append(u'</dt>')
def visit_termsep(self, node):
self.body.append(self.starttag(node, 'br'))
raise nodes.SkipNode
def visit_definition(self, node):
self.body.append(self.starttag(node, 'dd'))
def depart_definition(self, node):
self.body.append(u'</dd>')
def visit_admonition(self, node, type=None):
clss = {
# ???: 'alert-success',
'note': 'alert-info',
'hint': 'alert-info',
'tip': 'alert-info',
'seealso': 'alert-info',
'warning': 'alert-warning',
'attention': 'alert-warning',
'caution': 'alert-warning',
'important': 'alert-warning',
'danger': 'alert-danger',
'error': 'alert-danger',
'exercise': 'alert-exercise',
}
self.body.append(self.starttag(node, 'div', role='alert', CLASS='alert {}'.format(
clss.get(type, '')
)))
if 'alert-dismissible' in node.get('classes', []):
self.body.append(
u'<button type="button" class="close" data-dismiss="alert" aria-label="Close">'
u'<span aria-hidden="true">×</span>'
u'</button>')
if type:
node.insert(0, nodes.title(type, admonitionlabels[type]))
def depart_admonition(self, node):
self.body.append(u'</div>')
visit_note = lambda self, node: self.visit_admonition(node, 'note')
visit_warning = lambda self, node: self.visit_admonition(node, 'warning')
visit_attention = lambda self, node: self.visit_admonition(node, 'attention')
visit_caution = lambda self, node: self.visit_admonition(node, 'caution')
visit_danger = lambda self, node: self.visit_admonition(node, 'danger')
visit_error = lambda self, node: self.visit_admonition(node, 'error')
visit_hint = lambda self, node: self.visit_admonition(node, 'hint')
visit_important = lambda self, node: self.visit_admonition(node, 'important')
visit_tip = lambda self, node: self.visit_admonition(node, 'tip')
visit_exercise = lambda self, node: self.visit_admonition(node, 'exercise')
visit_seealso = lambda self, node: self.visit_admonition(node, 'seealso')
depart_note = depart_admonition
depart_warning = depart_admonition
depart_attention = depart_admonition
depart_caution = depart_admonition
depart_danger = depart_admonition
depart_error = depart_admonition
depart_hint = depart_admonition
depart_important = depart_admonition
depart_tip = depart_admonition
depart_exercise = depart_admonition
depart_seealso = depart_admonition
def visit_versionmodified(self, node):
self.body.append(self.starttag(node, 'div', CLASS=node['type']))
def depart_versionmodified(self, node):
self.body.append(u'</div>')
def visit_title(self, node):
parent = node.parent
closing = u'</p>'
if isinstance(parent, nodes.Admonition):
self.body.append(self.starttag(node, 'p', CLASS='alert-title'))
elif isinstance(node.parent, nodes.document):
self.body.append(self.starttag(node, 'h1'))
closing = u'</h1>'
self.start_document_title = len(self.body)
else:
assert isinstance(parent, nodes.section), "expected a section node as parent to the title, found {}".format(parent)
if self.first_title:
self.first_title = False
raise nodes.SkipNode()
nodename = 'h{}'.format(self.section_level)
self.body.append(self.starttag(node, nodename))
closing = u'</{}>'.format(nodename)
self.context.append(closing)
def depart_title(self, node):
self.body.append(self.context.pop())
if self.start_document_title:
self.title = self.body[self.start_document_title:-1]
self.start_document_title = 0
del self.body[:]
# the rubric should be a smaller heading than the current section, up to
# h6... maybe "h7" should be a ``p`` instead?
def visit_rubric(self, node):
self.body.append(self.starttag(node, 'h{}'.format(min(self.section_level + 1, 6))))
def depart_rubric(self, node):
self.body.append(u'</h{}>'.format(min(self.section_level + 1, 6)))
def visit_block_quote(self, node):
self.body.append(self.starttag(node, 'blockquote'))
def depart_block_quote(self, node):
self.body.append(u'</blockquote>')
def visit_attribution(self, node):
self.body.append(self.starttag(node, 'footer'))
def depart_attribution(self, node):
self.body.append(u'</footer>')
def visit_container(self, node):
self.body.append(self.starttag(node, 'div'))
def depart_container(self, node):
self.body.append(u'</div>')
def visit_compound(self, node):
self.body.append(self.starttag(node, 'div'))
def depart_compound(self, node):
self.body.append(u'</div>')
def visit_image(self, node):
uri = node['uri']
if uri in self.builder.images:
uri = posixpath.join(self.builder.imgpath,
self.builder.images[uri])
attrs = {'src': uri, 'class': 'img-responsive'}
if 'alt' in node:
attrs['alt'] = node['alt']
if 'align' in node:
if node['align'] == 'center':
attrs['class'] += ' center-block'
else:
doc = None
if node.source:
doc = node.source
if node.line:
doc += ':%d' % node.line
self.builder.app.warn(
"Unsupported alignment value \"%s\"" % node['align'],
location=doc
)
# todo: explicit width/height/scale?
self.body.append(self.starttag(node, 'img', **attrs))
def depart_image(self, node): pass
def visit_figure(self, node):
self.body.append(self.starttag(node, 'div'))
def depart_figure(self, node):
self.body.append(u'</div>')
def visit_caption(self, node):
# first paragraph of figure content
self.body.append(self.starttag(node, 'h4'))
def depart_caption(self, node):
self.body.append(u'</h4>')
def visit_legend(self, node): pass
def depart_legend(self, node): pass
def visit_line(self, node):
self.body.append(self.starttag(node, 'div', CLASS='line'))
# ensure the line still takes the room it needs
if not len(node): self.body.append(u'<br />')
def depart_line(self, node):
self.body.append(u'</div>')
def visit_line_block(self, node):
self.body.append(self.starttag(node, 'div', CLASS='line-block'))
def depart_line_block(self, node):
self.body.append(u'</div>')
def visit_table(self, node):
self.body.append(self.starttag(node, 'table', CLASS='table'))
def depart_table(self, node):
self.body.append(u'</table>')
def visit_tgroup(self, node): pass
def depart_tgroup(self, node): pass
def visit_colspec(self, node): raise nodes.SkipNode
def visit_thead(self, node):
self.body.append(self.starttag(node, 'thead'))
def depart_thead(self, node):
self.body.append(u'</thead>')
def visit_tbody(self, node):
self.body.append(self.starttag(node, 'tbody'))
def depart_tbody(self, node):
self.body.append(u'</tbody>')
def visit_row(self, node):
self.body.append(self.starttag(node, 'tr'))
def depart_row(self, node):
self.body.append(u'</tr>')
def visit_entry(self, node):
if isinstance(node.parent.parent, nodes.thead):
tagname = 'th'
else:
tagname = 'td'
self.body.append(self.starttag(node, tagname))
self.context.append(tagname)
def depart_entry(self, node):
self.body.append(u'</{}>'.format(self.context.pop()))
def visit_Text(self, node):
self.body.append(self.encode(node.astext()))
def depart_Text(self, node):
pass
def visit_literal(self, node):
self.body.append(self.starttag(node, 'code'))
def depart_literal(self, node):
self.body.append(u'</code>')
visit_literal_emphasis = visit_literal
depart_literal_emphasis = depart_literal
def visit_emphasis(self, node):
self.body.append(self.starttag(node, 'em'))
def depart_emphasis(self, node):
self.body.append(u'</em>')
def visit_strong(self, node):
self.body.append(self.starttag(node, 'strong'))
def depart_strong(self, node):
self.body.append(u'</strong>')
visit_literal_strong = visit_strong
depart_literal_strong = depart_strong
def visit_inline(self, node):
self.body.append(self.starttag(node, 'span'))
def depart_inline(self, node):
self.body.append(u'</span>')
def visit_abbreviation(self, node):
attrs = {}
if 'explanation' in node:
attrs['title'] = node['explanation']
self.body.append(self.starttag(node, 'abbr', **attrs))
def depart_abbreviation(self, node):
self.body.append(u'</abbr>')
def visit_reference(self, node):
attrs = {
'class': 'reference',
'href': node['refuri'] if 'refuri' in node else '#' + node['refid']
}
attrs['class'] += ' internal' if (node.get('internal') or 'refuri' not in node) else ' external'
if any(isinstance(ancestor, nodes.Admonition) for ancestor in _parents(node)):
attrs['class'] += ' alert-link'
if 'reftitle' in node:
attrs['title'] = node['reftitle']
self.body.append(self.starttag(node, 'a', **attrs))
def depart_reference(self, node):
self.body.append(u'</a>')
def visit_target(self, node): pass
def depart_target(self, node): pass
def visit_footnote(self, node):
self.body.append(self.starttag(node, 'div', CLASS='footnote'))
self.footnote_backrefs(node)
def depart_footnote(self, node):
self.body.append(u'</div>')
def visit_footnote_reference(self, node):
self.body.append(self.starttag(
node, 'a', href='#' + node['refid'], CLASS="footnote-ref"))
def depart_footnote_reference(self, node):
self.body.append(u'</a>')
def visit_label(self, node):
self.body.append(self.starttag(node, 'span', CLASS='footnote-label'))
self.body.append(u'%s[' % self.context.pop())
def depart_label(self, node):
# Context added in footnote_backrefs.
self.body.append(u']%s</span> %s' % (self.context.pop(), self.context.pop()))
def footnote_backrefs(self, node):
# should store following data on context stack (in that order since
# they'll be popped so LIFO)
#
# * outside (after) label
# * after label text
# * before label text
backrefs = node['backrefs']
if not backrefs:
self.context.extend(['', '', ''])
elif len(backrefs) == 1:
self.context.extend([
'',
'</a>',
'<a class="footnote-backref" href="#%s">' % backrefs[0]
])
else:
backlinks = (
'<a class="footnote-backref" href="#%s">%s</a>' % (backref, i)
for i, backref in enumerate(backrefs, start=1)
)
self.context.extend([
'<em class="footnote-backrefs">(%s)</em> ' % ', '.join(backlinks),
'',
''
])
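        # With the stack above, visit_label/depart_label render roughly:
        #   <span class="footnote-label">[1]</span>
        #   <em class="footnote-backrefs">(<a ...>1</a>, <a ...>2</a>)</em>
        # (the <em> part only appears when there are multiple backrefs).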
def visit_desc(self, node):
self.body.append(self.starttag(node, 'section', CLASS='code-' + node['objtype']))
def depart_desc(self, node):
self.body.append(u'</section>')
def visit_desc_signature(self, node):
self.body.append(self.starttag(node, 'h6'))
self.body.append(u'<code>')
def depart_desc_signature(self, node):
self.body.append(u'</code>')
self.body.append(u'</h6>')
def visit_desc_addname(self, node): pass
def depart_desc_addname(self, node): pass
def visit_desc_type(self, node): pass
def depart_desc_type(self, node): pass
def visit_desc_returns(self, node):
self.body.append(u' → ')
def depart_desc_returns(self, node):
pass
def visit_desc_name(self, node): pass
def depart_desc_name(self, node): pass
def visit_desc_parameterlist(self, node):
self.body.append(u'(')
self.first_param = True
self.optional_param_level = 0
# How many required parameters are left.
self.required_params_left = sum(isinstance(c, addnodes.desc_parameter) for c in node.children)
self.param_separator = node.child_text_separator
def depart_desc_parameterlist(self, node):
self.body.append(u')')
# If required parameters are still to come, then put the comma after
# the parameter. Otherwise, put the comma before. This ensures that
# signatures like the following render correctly (see issue #1001):
#
# foo([a, ]b, c[, d])
#
def visit_desc_parameter(self, node):
if self.first_param:
self.first_param = 0
elif not self.required_params_left:
self.body.append(self.param_separator)
if self.optional_param_level == 0:
self.required_params_left -= 1
if 'noemph' not in node: self.body.append(u'<em>')
def depart_desc_parameter(self, node):
if 'noemph' not in node: self.body.append(u'</em>')
if self.required_params_left:
self.body.append(self.param_separator)
def visit_desc_optional(self, node):
self.optional_param_level += 1
self.body.append(u'[')
def depart_desc_optional(self, node):
self.optional_param_level -= 1
self.body.append(u']')
def visit_desc_annotation(self, node):
self.body.append(self.starttag(node, 'em'))
def depart_desc_annotation(self, node):
self.body.append(u'</em>')
def visit_desc_content(self, node): pass
def depart_desc_content(self, node): pass
def visit_field_list(self, node):
self.body.append(self.starttag(node, 'div', CLASS='code-fields'))
def depart_field_list(self, node):
self.body.append(u'</div>')
def visit_field(self, node):
self.body.append(self.starttag(node, 'div', CLASS='code-field'))
def depart_field(self, node):
self.body.append(u'</div>')
def visit_field_name(self, node):
self.body.append(self.starttag(node, 'div', CLASS='code-field-name'))
def depart_field_name(self, node):
self.body.append(u'</div>')
def visit_field_body(self, node):
self.body.append(self.starttag(node, 'div', CLASS='code-field-body'))
def depart_field_body(self, node):
self.body.append(u'</div>')
def visit_glossary(self, node): pass
def depart_glossary(self, node): pass
def visit_comment(self, node): raise nodes.SkipNode
def visit_toctree(self, node):
# div class=row {{ section_type }}
# h2 class=col-sm-12
# {{ section title }}
# div class=col-sm-6 col-md-3
# figure class=card
# a href=current_link style=background-image: document-image-attribute class=card-img
# figcaption
# {{ card title }}
env = self.builder.env
conf = self.builder.app.config
for title, ref in ((e[0], e[1]) for e in node['entries']):
# external URL, no toc, can't recurse into
if ref not in env.tocs:
continue
toc = env.tocs[ref].traverse(addnodes.toctree)
classes = env.metadata[ref].get('types', 'tutorials')
classes += ' toc-single-entry' if not toc else ' toc-section'
self.body.append(self.starttag(node, 'div', CLASS="row " + classes))
self.body.append(u'<h2 class="col-sm-12">')
self.body.append(title if title else util.nodes.clean_astext(env.titles[ref]))
self.body.append(u'</h2>')
entries = [(title, ref)] if not toc else ((e[0], e[1]) for e in toc[0]['entries'])
for subtitle, subref in entries:
baseuri = self.builder.get_target_uri(node['parent'])
if subref in env.metadata:
cover = env.metadata[subref].get('banner', conf.odoo_cover_default)
elif subref in conf.odoo_cover_external:
cover = conf.odoo_cover_external[subref]
else:
cover = conf.odoo_cover_default_external
if cover:
banner = '_static/' + cover
base, ext = os.path.splitext(banner)
small = "{}.small{}".format(base, ext)
if os.path.isfile(urllib.url2pathname(small)):
banner = small
style = u"background-image: url('{}')".format(
util.relative_uri(baseuri, banner) or '#')
else:
style = u''
self.body.append(u"""
<div class="col-sm-6 col-md-3">
<figure class="card">
<a href="{link}" class="card-img">
<span style="{style}"></span>
<figcaption>{title}</figcaption>
</a>
</figure>
</div>
""".format(
link=subref if util.url_re.match(subref) else util.relative_uri(
baseuri, self.builder.get_target_uri(subref)),
style=style,
title=subtitle if subtitle else util.nodes.clean_astext(env.titles[subref]),
))
self.body.append(u'</div>')
raise nodes.SkipNode
def visit_index(self, node): raise nodes.SkipNode
def visit_raw(self, node):
if 'html' in node.get('format', '').split():
t = 'span' if isinstance(node.parent, nodes.TextElement) else 'div'
if node['classes']:
self.body.append(self.starttag(node, t))
self.body.append(node.astext())
if node['classes']:
self.body.append('</%s>' % t)
# Keep non-HTML raw text out of output:
raise nodes.SkipNode
# internal node
def visit_substitution_definition(self, node): raise nodes.SkipNode
| agpl-3.0 |
brettatoms/cerberus | docs/conf.py | 1 | 8056 | # -*- coding: utf-8 -*-
#
# Cerberus documentation build configuration file, created by
# sphinx-quickstart on Thu Oct 11 15:52:25 2012.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
sys.path.append(os.path.abspath('..'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.doctest', 'sphinx.ext.intersphinx', 'sphinx.ext.todo']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Cerberus'
copyright = u'2012-2015, Nicola Iarocci'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The full version, including alpha/beta/rc tags.
release = __import__('cerberus').__version__
# The short X.Y version.
version = release.split('-dev')[0]
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'Cerberusdoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'Cerberus.tex', u'Cerberus Documentation',
u'Nicola Iarocci', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'cerberus', u'Cerberus Documentation',
[u'Nicola Iarocci'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'Cerberus', u'Cerberus Documentation',
u'Nicola Iarocci', 'Cerberus', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'http://docs.python.org/': None}
| isc |
aam-at/tensorflow | tensorflow/python/ops/ragged/ragged_tensor.py | 1 | 117296 | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Classes for storing ragged tensors and their values."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import operator
import numpy as np
from tensorflow.python import tf2
from tensorflow.python.client import session
from tensorflow.python.framework import composite_tensor
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_spec
from tensorflow.python.framework import tensor_util
from tensorflow.python.framework import type_spec
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import gen_ragged_conversion_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops.ragged import ragged_config
from tensorflow.python.ops.ragged import ragged_tensor_value
from tensorflow.python.ops.ragged import ragged_util
from tensorflow.python.ops.ragged.row_partition import RowPartition
from tensorflow.python.types import internal as internal_types
from tensorflow.python.util import dispatch
from tensorflow.python.util.tf_export import tf_export
from tensorflow.tools.docs import doc_controls
# pylint: disable=protected-access
_convert_row_partition = RowPartition._convert_row_partition
# pylint: enable=protected-access
#===============================================================================
# RaggedTensor
#===============================================================================
@tf_export("RaggedTensor")
class RaggedTensor(composite_tensor.CompositeTensor,
internal_types.NativeObject):
"""Represents a ragged tensor.
A `RaggedTensor` is a tensor with one or more *ragged dimensions*, which are
dimensions whose slices may have different lengths. For example, the inner
(column) dimension of `rt=[[3, 1, 4, 1], [], [5, 9, 2], [6], []]` is ragged,
since the column slices (`rt[0, :]`, ..., `rt[4, :]`) have different lengths.
Dimensions whose slices all have the same length are called *uniform
dimensions*. The outermost dimension of a `RaggedTensor` is always uniform,
since it consists of a single slice (and so there is no possibility for
differing slice lengths).
The total number of dimensions in a `RaggedTensor` is called its *rank*,
and the number of ragged dimensions in a `RaggedTensor` is called its
*ragged-rank*. A `RaggedTensor`'s ragged-rank is fixed at graph creation
time: it can't depend on the runtime values of `Tensor`s, and can't vary
dynamically for different session runs.
Note that the `__init__` constructor is private. Please use one of the
following methods to construct a `RaggedTensor`:
* `tf.RaggedTensor.from_row_lengths`
* `tf.RaggedTensor.from_value_rowids`
* `tf.RaggedTensor.from_row_splits`
* `tf.RaggedTensor.from_row_starts`
* `tf.RaggedTensor.from_row_limits`
* `tf.RaggedTensor.from_nested_row_splits`
* `tf.RaggedTensor.from_nested_row_lengths`
* `tf.RaggedTensor.from_nested_value_rowids`
### Potentially Ragged Tensors
Many ops support both `Tensor`s and `RaggedTensor`s. The term "potentially
ragged tensor" may be used to refer to a tensor that might be either a
`Tensor` or a `RaggedTensor`. The ragged-rank of a `Tensor` is zero.
### Documenting RaggedTensor Shapes
When documenting the shape of a RaggedTensor, ragged dimensions can be
indicated by enclosing them in parentheses. For example, the shape of
a 3-D `RaggedTensor` that stores the fixed-size word embedding for each
word in a sentence, for each sentence in a batch, could be written as
`[num_sentences, (num_words), embedding_size]`. The parentheses around
`(num_words)` indicate that dimension is ragged, and that the length
of each element list in that dimension may vary for each item.
### Component Tensors
Internally, a `RaggedTensor` consists of a concatenated list of values that
are partitioned into variable-length rows. In particular, each `RaggedTensor`
consists of:
* A `values` tensor, which concatenates the variable-length rows into a
flattened list. For example, the `values` tensor for
`[[3, 1, 4, 1], [], [5, 9, 2], [6], []]` is `[3, 1, 4, 1, 5, 9, 2, 6]`.
* A `row_splits` vector, which indicates how those flattened values are
divided into rows. In particular, the values for row `rt[i]` are stored
in the slice `rt.values[rt.row_splits[i]:rt.row_splits[i+1]]`.
Example:
>>> print(tf.RaggedTensor.from_row_splits(
... values=[3, 1, 4, 1, 5, 9, 2, 6],
... row_splits=[0, 4, 4, 7, 8, 8]))
<tf.RaggedTensor [[3, 1, 4, 1], [], [5, 9, 2], [6], []]>
### Alternative Row-Partitioning Schemes
In addition to `row_splits`, ragged tensors provide support for five other
row-partitioning schemes:
* `row_lengths`: a vector with shape `[nrows]`, which specifies the length
of each row.
* `value_rowids` and `nrows`: `value_rowids` is a vector with shape
`[nvals]`, corresponding one-to-one with `values`, which specifies
each value's row index. In particular, the row `rt[row]` consists of the
values `rt.values[j]` where `value_rowids[j]==row`. `nrows` is an
integer scalar that specifies the number of rows in the
`RaggedTensor`. (`nrows` is used to indicate trailing empty rows.)
* `row_starts`: a vector with shape `[nrows]`, which specifies the start
offset of each row. Equivalent to `row_splits[:-1]`.
* `row_limits`: a vector with shape `[nrows]`, which specifies the stop
offset of each row. Equivalent to `row_splits[1:]`.
* `uniform_row_length`: A scalar tensor, specifying the length of every
row. This row-partitioning scheme may only be used if all rows have
the same length.
Example: The following ragged tensors are equivalent, and all represent the
nested list `[[3, 1, 4, 1], [], [5, 9, 2], [6], []]`.
>>> values = [3, 1, 4, 1, 5, 9, 2, 6]
>>> rt1 = RaggedTensor.from_row_splits(values, row_splits=[0, 4, 4, 7, 8, 8])
>>> rt2 = RaggedTensor.from_row_lengths(values, row_lengths=[4, 0, 3, 1, 0])
>>> rt3 = RaggedTensor.from_value_rowids(
... values, value_rowids=[0, 0, 0, 0, 2, 2, 2, 3], nrows=5)
>>> rt4 = RaggedTensor.from_row_starts(values, row_starts=[0, 4, 4, 7, 8])
>>> rt5 = RaggedTensor.from_row_limits(values, row_limits=[4, 4, 7, 8, 8])
### Multiple Ragged Dimensions
`RaggedTensor`s with multiple ragged dimensions can be defined by using
a nested `RaggedTensor` for the `values` tensor. Each nested `RaggedTensor`
adds a single ragged dimension.
>>> inner_rt = RaggedTensor.from_row_splits( # =rt1 from above
... values=[3, 1, 4, 1, 5, 9, 2, 6], row_splits=[0, 4, 4, 7, 8, 8])
>>> outer_rt = RaggedTensor.from_row_splits(
... values=inner_rt, row_splits=[0, 3, 3, 5])
>>> print(outer_rt.to_list())
[[[3, 1, 4, 1], [], [5, 9, 2]], [], [[6], []]]
>>> print(outer_rt.ragged_rank)
2
The factory function `RaggedTensor.from_nested_row_splits` may be used to
construct a `RaggedTensor` with multiple ragged dimensions directly, by
providing a list of `row_splits` tensors:
>>> RaggedTensor.from_nested_row_splits(
... flat_values=[3, 1, 4, 1, 5, 9, 2, 6],
... nested_row_splits=([0, 3, 3, 5], [0, 4, 4, 7, 8, 8])).to_list()
[[[3, 1, 4, 1], [], [5, 9, 2]], [], [[6], []]]
### Uniform Inner Dimensions
`RaggedTensor`s with uniform inner dimensions can be defined
by using a multidimensional `Tensor` for `values`.
>>> rt = RaggedTensor.from_row_splits(values=tf.ones([5, 3], tf.int32),
... row_splits=[0, 2, 5])
>>> print(rt.to_list())
[[[1, 1, 1], [1, 1, 1]],
[[1, 1, 1], [1, 1, 1], [1, 1, 1]]]
>>> print(rt.shape)
(2, None, 3)
### Uniform Outer Dimensions
`RaggedTensor`s with uniform outer dimensions can be defined by using
one or more `RaggedTensor` with a `uniform_row_length` row-partitioning
tensor. For example, a `RaggedTensor` with shape `[2, 2, None]` can be
  constructed with this method from a `values` `RaggedTensor` with shape
`[4, None]`:
>>> values = tf.ragged.constant([[1, 2, 3], [4], [5, 6], [7, 8, 9, 10]])
>>> print(values.shape)
(4, None)
>>> rt6 = tf.RaggedTensor.from_uniform_row_length(values, 2)
>>> print(rt6)
<tf.RaggedTensor [[[1, 2, 3], [4]], [[5, 6], [7, 8, 9, 10]]]>
>>> print(rt6.shape)
(2, 2, None)
Note that `rt6` only contains one ragged dimension (the innermost
dimension). In contrast, if `from_row_splits` is used to construct a similar
`RaggedTensor`, then that `RaggedTensor` will have two ragged dimensions:
>>> rt7 = tf.RaggedTensor.from_row_splits(values, [0, 2, 4])
>>> print(rt7.shape)
(2, None, None)
Uniform and ragged outer dimensions may be interleaved, meaning that a
tensor with any combination of ragged and uniform dimensions may be created.
For example, a RaggedTensor `t4` with shape `[3, None, 4, 8, None, 2]` could
be constructed as follows:
```python
t0 = tf.zeros([1000, 2]) # Shape: [1000, 2]
t1 = RaggedTensor.from_row_lengths(t0, [...]) # [160, None, 2]
t2 = RaggedTensor.from_uniform_row_length(t1, 8) # [20, 8, None, 2]
t3 = RaggedTensor.from_uniform_row_length(t2, 4) # [5, 4, 8, None, 2]
t4 = RaggedTensor.from_row_lengths(t3, [...]) # [3, None, 4, 8, None, 2]
```
"""
#=============================================================================
# Constructor (private)
#=============================================================================
@doc_controls.do_not_generate_docs
def __init__(self, values, row_partition, internal=False):
"""Creates a `RaggedTensor` with a specified partitioning for `values`.
This constructor is private -- please use one of the following ops to
build `RaggedTensor`s:
* `tf.RaggedTensor.from_row_lengths`
* `tf.RaggedTensor.from_value_rowids`
* `tf.RaggedTensor.from_row_splits`
* `tf.RaggedTensor.from_row_starts`
* `tf.RaggedTensor.from_row_limits`
* `tf.RaggedTensor.from_nested_row_splits`
* `tf.RaggedTensor.from_nested_row_lengths`
* `tf.RaggedTensor.from_nested_value_rowids`
Args:
values: A potentially ragged tensor of any dtype and shape `[nvals, ...]`.
row_partition: A `RowPartition` object, representing the arrangement of
the lists at the top level.
internal: True if the constructor is being called by one of the factory
methods. If false, an exception will be raised.
Raises:
ValueError: If internal = False. Note that this method is intended only
for internal use.
TypeError: If values is not a `RaggedTensor` or `Tensor`, or
row_partition is not a `RowPartition`.
"""
if not internal:
raise ValueError("RaggedTensor constructor is private; please use one "
"of the factory methods instead (e.g., "
"RaggedTensor.from_row_lengths())")
_assert_is_supported_ragged_values_type(values)
if not isinstance(row_partition, RowPartition):
raise TypeError("row_partition must be a RowPartition, got %r" %
row_partition)
# Validate shapes.
values.shape.with_rank_at_least(1)
if isinstance(values, RaggedTensor):
# pylint: disable=protected-access
assert row_partition.dtype == values._row_partition.dtype
self._values = values
self._row_partition = row_partition
#=============================================================================
# Factory Methods
#=============================================================================
@classmethod
def _from_row_partition(cls, values, row_partition, validate=True):
"""Creates a `RaggedTensor` with a row partition.
This is used as a way for RaggedTensors to share row partitions.
The outer dimension of values must be equal to `partition.nvals()`.
Args:
values: A potentially ragged tensor.
row_partition: a `RowPartition`: can be shared between tensors.
validate: If true, then use assertions to check that the arguments form a
valid `RaggedTensor`.
Returns:
A `RaggedTensor`. `result.rank = values.rank + 1`.
`result.ragged_rank = values.ragged_rank + 1`.
Raises:
ValueError: If partition.nvals() != _nrows(values)
"""
if not isinstance(row_partition, RowPartition):
raise TypeError("row_partition must be a RowPartition")
if not isinstance(validate, bool):
raise TypeError("validate must have type bool")
values, row_partition = cls._convert_values_and_partition(
values, row_partition, "partition")
if row_partition.has_precomputed_value_rowids():
value_rowids_shape = row_partition.value_rowids().shape
values.shape[:1].assert_is_compatible_with(value_rowids_shape)
if validate:
msg = "Arguments to _from_row_partition do not form a valid RaggedTensor"
nvals = _nrows(values, row_partition.dtype)
checks = [
check_ops.assert_equal(
row_partition.nvals(out_type=row_partition.dtype),
nvals,
message=msg),
]
if not isinstance(values, RaggedTensor):
checks.append(check_ops.assert_rank_at_least(values, 1))
row_partition = row_partition.with_dependencies(checks)
return cls(
values=values,
internal=True,
row_partition=row_partition)
@classmethod
@dispatch.add_dispatch_support
def from_value_rowids(cls,
values,
value_rowids,
nrows=None,
name=None,
validate=True):
"""Creates a `RaggedTensor` with rows partitioned by `value_rowids`.
The returned `RaggedTensor` corresponds with the python list defined by:
```python
result = [[values[i] for i in range(len(values)) if value_rowids[i] == row]
for row in range(nrows)]
```
Args:
values: A potentially ragged tensor with shape `[nvals, ...]`.
value_rowids: A 1-D integer tensor with shape `[nvals]`, which corresponds
one-to-one with `values`, and specifies each value's row index. Must be
nonnegative, and must be sorted in ascending order.
nrows: An integer scalar specifying the number of rows. This should be
        specified if the `RaggedTensor` may contain empty trailing rows. Must
be greater than `value_rowids[-1]` (or zero if `value_rowids` is empty).
Defaults to `value_rowids[-1]` (or zero if `value_rowids` is empty).
name: A name prefix for the RaggedTensor (optional).
validate: If true, then use assertions to check that the arguments form
a valid `RaggedTensor`. Note: these assertions incur a runtime cost,
since they must be checked for each tensor value.
Returns:
A `RaggedTensor`. `result.rank = values.rank + 1`.
`result.ragged_rank = values.ragged_rank + 1`.
Raises:
ValueError: If `nrows` is incompatible with `value_rowids`.
#### Example:
>>> print(tf.RaggedTensor.from_value_rowids(
... values=[3, 1, 4, 1, 5, 9, 2, 6],
... value_rowids=[0, 0, 0, 0, 2, 2, 2, 3],
... nrows=5))
<tf.RaggedTensor [[3, 1, 4, 1], [], [5, 9, 2], [6], []]>
"""
if not isinstance(validate, bool):
raise TypeError("validate must have type bool")
with ops.name_scope(name, "RaggedFromValueRowIds",
[values, value_rowids, nrows]):
row_partition = RowPartition.from_value_rowids(
value_rowids=value_rowids,
nrows=nrows,
validate=validate,
preferred_dtype=_get_optional_partition_dtype(values))
return cls._from_row_partition(values, row_partition, validate=validate)
@classmethod
@dispatch.add_dispatch_support
def from_row_splits(cls, values, row_splits, name=None, validate=True):
"""Creates a `RaggedTensor` with rows partitioned by `row_splits`.
The returned `RaggedTensor` corresponds with the python list defined by:
```python
result = [values[row_splits[i]:row_splits[i + 1]]
for i in range(len(row_splits) - 1)]
```
Args:
values: A potentially ragged tensor with shape `[nvals, ...]`.
row_splits: A 1-D integer tensor with shape `[nrows+1]`. Must not be
empty, and must be sorted in ascending order. `row_splits[0]` must be
zero and `row_splits[-1]` must be `nvals`.
name: A name prefix for the RaggedTensor (optional).
validate: If true, then use assertions to check that the arguments form
a valid `RaggedTensor`. Note: these assertions incur a runtime cost,
since they must be checked for each tensor value.
Returns:
A `RaggedTensor`. `result.rank = values.rank + 1`.
`result.ragged_rank = values.ragged_rank + 1`.
Raises:
ValueError: If `row_splits` is an empty list.
#### Example:
>>> print(tf.RaggedTensor.from_row_splits(
... values=[3, 1, 4, 1, 5, 9, 2, 6],
... row_splits=[0, 4, 4, 7, 8, 8]))
<tf.RaggedTensor [[3, 1, 4, 1], [], [5, 9, 2], [6], []]>
"""
if not isinstance(validate, bool):
raise TypeError("validate must have type bool")
with ops.name_scope(name, "RaggedFromRowSplits", [values, row_splits]):
row_partition = RowPartition.from_row_splits(
row_splits=row_splits,
validate=validate,
preferred_dtype=_get_optional_partition_dtype(values))
return cls._from_row_partition(values, row_partition, validate=validate)
@classmethod
@dispatch.add_dispatch_support
def from_row_lengths(cls, values, row_lengths, name=None, validate=True):
"""Creates a `RaggedTensor` with rows partitioned by `row_lengths`.
The returned `RaggedTensor` corresponds with the python list defined by:
```python
result = [[values.pop(0) for i in range(length)]
for length in row_lengths]
```
Args:
values: A potentially ragged tensor with shape `[nvals, ...]`.
row_lengths: A 1-D integer tensor with shape `[nrows]`. Must be
nonnegative. `sum(row_lengths)` must be `nvals`.
name: A name prefix for the RaggedTensor (optional).
validate: If true, then use assertions to check that the arguments form
a valid `RaggedTensor`. Note: these assertions incur a runtime cost,
since they must be checked for each tensor value.
Returns:
A `RaggedTensor`. `result.rank = values.rank + 1`.
`result.ragged_rank = values.ragged_rank + 1`.
#### Example:
>>> print(tf.RaggedTensor.from_row_lengths(
... values=[3, 1, 4, 1, 5, 9, 2, 6],
... row_lengths=[4, 0, 3, 1, 0]))
<tf.RaggedTensor [[3, 1, 4, 1], [], [5, 9, 2], [6], []]>
"""
if not isinstance(validate, bool):
raise TypeError("validate must have type bool")
with ops.name_scope(name, "RaggedFromRowLengths", [values, row_lengths]):
row_partition = RowPartition.from_row_lengths(
row_lengths=row_lengths,
validate=validate,
preferred_dtype=_get_optional_partition_dtype(values))
return cls._from_row_partition(values, row_partition, validate=validate)
@classmethod
@dispatch.add_dispatch_support
def from_row_starts(cls, values, row_starts, name=None, validate=True):
"""Creates a `RaggedTensor` with rows partitioned by `row_starts`.
Equivalent to: `from_row_splits(values, concat([row_starts, nvals]))`.
Args:
values: A potentially ragged tensor with shape `[nvals, ...]`.
row_starts: A 1-D integer tensor with shape `[nrows]`. Must be
nonnegative and sorted in ascending order. If `nrows>0`, then
`row_starts[0]` must be zero.
name: A name prefix for the RaggedTensor (optional).
validate: If true, then use assertions to check that the arguments form
a valid `RaggedTensor`. Note: these assertions incur a runtime cost,
since they must be checked for each tensor value.
Returns:
A `RaggedTensor`. `result.rank = values.rank + 1`.
`result.ragged_rank = values.ragged_rank + 1`.
#### Example:
>>> print(tf.RaggedTensor.from_row_starts(
... values=[3, 1, 4, 1, 5, 9, 2, 6],
... row_starts=[0, 4, 4, 7, 8]))
<tf.RaggedTensor [[3, 1, 4, 1], [], [5, 9, 2], [6], []]>
"""
if not isinstance(validate, bool):
raise TypeError("validate must have type bool")
with ops.name_scope(name, "RaggedFromRowStarts", [values, row_starts]):
values = _convert_to_ragged_tensor_values(values)
row_partition = RowPartition.from_row_starts(
row_starts=row_starts,
nvals=_nrows(values),
validate=validate,
preferred_dtype=_get_optional_partition_dtype(values))
return cls._from_row_partition(values, row_partition, validate=validate)
@classmethod
@dispatch.add_dispatch_support
def from_row_limits(cls, values, row_limits, name=None, validate=True):
"""Creates a `RaggedTensor` with rows partitioned by `row_limits`.
Equivalent to: `from_row_splits(values, concat([0, row_limits]))`.
Args:
values: A potentially ragged tensor with shape `[nvals, ...]`.
row_limits: A 1-D integer tensor with shape `[nrows]`. Must be sorted in
ascending order. If `nrows>0`, then `row_limits[-1]` must be `nvals`.
name: A name prefix for the RaggedTensor (optional).
validate: If true, then use assertions to check that the arguments form
a valid `RaggedTensor`. Note: these assertions incur a runtime cost,
since they must be checked for each tensor value.
Returns:
A `RaggedTensor`. `result.rank = values.rank + 1`.
`result.ragged_rank = values.ragged_rank + 1`.
#### Example:
>>> print(tf.RaggedTensor.from_row_limits(
... values=[3, 1, 4, 1, 5, 9, 2, 6],
... row_limits=[4, 4, 7, 8, 8]))
<tf.RaggedTensor [[3, 1, 4, 1], [], [5, 9, 2], [6], []]>
"""
if not isinstance(validate, bool):
raise TypeError("validate must have type bool")
with ops.name_scope(name, "RaggedFromRowLimits", [values, row_limits]):
row_partition = RowPartition.from_row_limits(
row_limits=row_limits,
validate=validate,
preferred_dtype=_get_optional_partition_dtype(values))
return cls._from_row_partition(values, row_partition, validate=validate)
@classmethod
@dispatch.add_dispatch_support
def from_uniform_row_length(cls,
values,
uniform_row_length,
nrows=None,
validate=True,
name=None):
"""Creates a `RaggedTensor` with rows partitioned by `uniform_row_length`.
This method can be used to create `RaggedTensor`s with multiple uniform
outer dimensions. For example, a `RaggedTensor` with shape `[2, 2, None]`
    can be constructed with this method from a `values` `RaggedTensor` with shape
`[4, None]`:
>>> values = tf.ragged.constant([[1, 2, 3], [4], [5, 6], [7, 8, 9, 10]])
>>> print(values.shape)
(4, None)
>>> rt1 = tf.RaggedTensor.from_uniform_row_length(values, 2)
>>> print(rt1)
<tf.RaggedTensor [[[1, 2, 3], [4]], [[5, 6], [7, 8, 9, 10]]]>
>>> print(rt1.shape)
(2, 2, None)
Note that `rt1` only contains one ragged dimension (the innermost
dimension). In contrast, if `from_row_splits` is used to construct a similar
`RaggedTensor`, then that `RaggedTensor` will have two ragged dimensions:
>>> rt2 = tf.RaggedTensor.from_row_splits(values, [0, 2, 4])
>>> print(rt2.shape)
(2, None, None)
Args:
values: A potentially ragged tensor with shape `[nvals, ...]`.
uniform_row_length: A scalar integer tensor. Must be nonnegative. The
size of the outer axis of `values` must be evenly divisible by
`uniform_row_length`.
nrows: The number of rows in the constructed RaggedTensor. If not
specified, then it defaults to `nvals/uniform_row_length` (or `0` if
`uniform_row_length==0`). `nrows` only needs to be specified if
`uniform_row_length` might be zero. `uniform_row_length*nrows` must
be `nvals`.
validate: If true, then use assertions to check that the arguments form
a valid `RaggedTensor`. Note: these assertions incur a runtime cost,
since they must be checked for each tensor value.
name: A name prefix for the RaggedTensor (optional).
Returns:
A `RaggedTensor` that corresponds with the python list defined by:
```python
result = [[values.pop(0) for i in range(uniform_row_length)]
for _ in range(nrows)]
```
`result.rank = values.rank + 1`.
`result.ragged_rank = values.ragged_rank + 1`.
"""
if not isinstance(validate, bool):
raise TypeError("validate must have type bool")
with ops.name_scope(name, "RaggedFromUniformRowLength",
[values, uniform_row_length, nrows]):
values = _convert_to_ragged_tensor_values(values)
uniform_row_length = _convert_row_partition(
uniform_row_length, "UniformRowLength",
_get_optional_partition_dtype(values))
nvals = _nvals_uniform_row_length(values, uniform_row_length)
row_partition = RowPartition.from_uniform_row_length(
uniform_row_length=uniform_row_length,
nvals=nvals,
nrows=nrows,
validate=validate,
preferred_dtype=_get_optional_partition_dtype(values))
return cls._from_row_partition(values, row_partition, validate=validate)
@classmethod
@dispatch.add_dispatch_support
def from_nested_value_rowids(cls,
flat_values,
nested_value_rowids,
nested_nrows=None,
name=None,
validate=True):
"""Creates a `RaggedTensor` from a nested list of `value_rowids` tensors.
Equivalent to:
```python
result = flat_values
for (rowids, nrows) in reversed(zip(nested_value_rowids, nested_nrows)):
result = from_value_rowids(result, rowids, nrows)
```
Args:
flat_values: A potentially ragged tensor.
nested_value_rowids: A list of 1-D integer tensors. The `i`th tensor is
used as the `value_rowids` for the `i`th ragged dimension.
nested_nrows: A list of integer scalars. The `i`th scalar is used as the
`nrows` for the `i`th ragged dimension.
name: A name prefix for the RaggedTensor (optional).
validate: If true, then use assertions to check that the arguments form
a valid `RaggedTensor`. Note: these assertions incur a runtime cost,
since they must be checked for each tensor value.
Returns:
A `RaggedTensor` (or `flat_values` if `nested_value_rowids` is empty).
Raises:
      ValueError: If `len(nested_value_rowids) != len(nested_nrows)`.
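    #### Example:
    This mirrors the nested `row_splits` example from the class docstring,
    expressed with `value_rowids` instead:
    >>> tf.RaggedTensor.from_nested_value_rowids(
    ...     flat_values=[3, 1, 4, 1, 5, 9, 2, 6],
    ...     nested_value_rowids=([0, 0, 0, 2, 2], [0, 0, 0, 0, 2, 2, 2, 3]),
    ...     nested_nrows=[3, 5]).to_list()
    [[[3, 1, 4, 1], [], [5, 9, 2]], [], [[6], []]]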
"""
if not isinstance(validate, bool):
raise TypeError("validate must have type bool")
if isinstance(nested_value_rowids, ops.Tensor):
raise TypeError("nested_value_rowids must be a list of Tensors")
if nested_nrows is None:
nested_nrows = [None] * len(nested_value_rowids)
else:
if isinstance(nested_nrows, ops.Tensor):
raise TypeError("nested_nrows must be a list of Tensors")
if len(nested_nrows) != len(nested_value_rowids):
raise ValueError("nested_nrows must have the same length as "
"nested_value_rowids")
with ops.name_scope(name, "RaggedFromNestedValueRowIds", [flat_values] +
list(nested_value_rowids) + list(nested_nrows)):
result = flat_values
for value_rowids, nrows in reversed(
list(zip(nested_value_rowids, nested_nrows))):
result = cls.from_value_rowids(
result, value_rowids, nrows, validate=validate)
return result
@classmethod
@dispatch.add_dispatch_support
def from_nested_row_splits(cls,
flat_values,
nested_row_splits,
name=None,
validate=True):
"""Creates a `RaggedTensor` from a nested list of `row_splits` tensors.
Equivalent to:
```python
result = flat_values
for row_splits in reversed(nested_row_splits):
result = from_row_splits(result, row_splits)
```
Args:
flat_values: A potentially ragged tensor.
nested_row_splits: A list of 1-D integer tensors. The `i`th tensor is
used as the `row_splits` for the `i`th ragged dimension.
name: A name prefix for the RaggedTensor (optional).
validate: If true, then use assertions to check that the arguments form
a valid `RaggedTensor`. Note: these assertions incur a runtime cost,
since they must be checked for each tensor value.
Returns:
A `RaggedTensor` (or `flat_values` if `nested_row_splits` is empty).
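    #### Example:
    >>> tf.RaggedTensor.from_nested_row_splits(
    ...     flat_values=[3, 1, 4, 1, 5, 9, 2, 6],
    ...     nested_row_splits=([0, 3, 3, 5], [0, 4, 4, 7, 8, 8])).to_list()
    [[[3, 1, 4, 1], [], [5, 9, 2]], [], [[6], []]]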
"""
if not isinstance(validate, bool):
raise TypeError("validate must have type bool")
if isinstance(nested_row_splits, ops.Tensor):
raise TypeError("nested_row_splits must be a list of Tensors")
with ops.name_scope(name, "RaggedFromNestedRowSplits",
[flat_values] + list(nested_row_splits)):
result = flat_values
for splits in reversed(nested_row_splits):
result = cls.from_row_splits(result, splits, validate=validate)
return result
@classmethod
@dispatch.add_dispatch_support
def from_nested_row_lengths(cls,
flat_values,
nested_row_lengths,
name=None,
validate=True):
"""Creates a `RaggedTensor` from a nested list of `row_lengths` tensors.
Equivalent to:
```python
result = flat_values
for row_lengths in reversed(nested_row_lengths):
result = from_row_lengths(result, row_lengths)
```
Args:
flat_values: A potentially ragged tensor.
nested_row_lengths: A list of 1-D integer tensors. The `i`th tensor is
used as the `row_lengths` for the `i`th ragged dimension.
name: A name prefix for the RaggedTensor (optional).
validate: If true, then use assertions to check that the arguments form
a valid `RaggedTensor`. Note: these assertions incur a runtime cost,
since they must be checked for each tensor value.
Returns:
A `RaggedTensor` (or `flat_values` if `nested_row_lengths` is empty).
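    #### Example:
    The same nested list as the `from_nested_row_splits` example, built from
    per-dimension row lengths instead of splits:
    >>> tf.RaggedTensor.from_nested_row_lengths(
    ...     flat_values=[3, 1, 4, 1, 5, 9, 2, 6],
    ...     nested_row_lengths=([3, 0, 2], [4, 0, 3, 1, 0])).to_list()
    [[[3, 1, 4, 1], [], [5, 9, 2]], [], [[6], []]]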
"""
if not isinstance(validate, bool):
raise TypeError("validate must have type bool")
if isinstance(nested_row_lengths, ops.Tensor):
raise TypeError("nested_row_lengths must be a list of Tensors")
with ops.name_scope(name, "RaggedFromNestedRowlengths",
[flat_values] + list(nested_row_lengths)):
result = flat_values
for lengths in reversed(nested_row_lengths):
result = cls.from_row_lengths(result, lengths, validate=validate)
return result
@classmethod
def _convert_values_and_partition(cls, values, row_partition, name):
"""Converts `values` and `partition` to Tensors.
If `values` is a `RaggedTensor`, then converts `values` and `partition`
to have compatible row-partitioning dtypes. In particular, if any of the
row partitioning tensors are `int64`, then all of the other row
    partitioning tensors will be cast to `int64` (if auto_cast_partition_dtype()
is true) or an error will be raised (if auto_cast_partition_dtype() is
false).
Args:
values: The `values` for the `RaggedTensor` being constructed.
row_partition: A RowPartition object for the `RaggedTensor` being
constructed.
name: The name of the RowPartition object.
Returns:
A tuple (values, partition).
"""
if not isinstance(row_partition, RowPartition):
raise ValueError("partition must be a RowPartition")
if isinstance(values, RaggedTensor):
# pylint: disable=protected-access
if values._row_partition.dtype != row_partition.dtype:
if not ragged_config.auto_cast_partition_dtype():
# pylint: disable=protected-access
raise ValueError(
"dtype mismatch: %s (%s) vs values.partition (%s)" %
(name, row_partition.dtype, values._row_partition.dtype))
values = values.with_row_splits_dtype(row_partition.dtype)
else:
values = _convert_to_ragged_tensor_values(values)
return (values, row_partition)
#=============================================================================
# Accessors
#=============================================================================
@property
def dtype(self):
"""The `DType` of values in this tensor."""
return self._values.dtype
@property
def shape(self):
"""The statically known shape of this ragged tensor.
Returns:
A `TensorShape` containing the statically known shape of this ragged
tensor. Ragged dimensions have a size of `None`.
Examples:
>>> tf.ragged.constant([[0], [1, 2]]).shape
TensorShape([2, None])
>>> tf.ragged.constant([[[0, 1]], [[1, 2], [3, 4]]], ragged_rank=1).shape
TensorShape([2, None, 2])
"""
nrows = self._row_partition.static_nrows
ncols = self._row_partition.static_uniform_row_length
value_shape = self._values.shape[1:]
return tensor_shape.TensorShape([nrows, ncols]).concatenate(value_shape)
def get_shape(self):
"""The statically known shape of this ragged tensor.
Returns:
A `TensorShape` containing the statically known shape of this ragged
tensor. Ragged dimensions have a size of `None`.
Alias for `shape` property.
Examples:
>>> tf.ragged.constant([[0], [1, 2]]).get_shape()
TensorShape([2, None])
>>> tf.ragged.constant(
... [[[0, 1]], [[1, 2], [3, 4]]], ragged_rank=1).get_shape()
TensorShape([2, None, 2])
"""
return self.shape
@property
def ragged_rank(self):
"""The number of times the RaggedTensor's flat_values is partitioned.
Examples:
>>> values = tf.ragged.constant([[1, 2, 3], [4], [5, 6], [7, 8, 9, 10]])
>>> values.ragged_rank
1
>>> rt = tf.RaggedTensor.from_uniform_row_length(values, 2)
>>> rt.ragged_rank
2
Returns:
A Python `int` indicating the number of times the underlying `flat_values`
Tensor has been partitioned to add a new dimension.
I.e., `tf.rank(rt) = tf.rank(rt.flat_values) + rt.ragged_rank`.
"""
values_is_ragged = isinstance(self._values, RaggedTensor)
return self._values.ragged_rank + 1 if values_is_ragged else 1
@property
def values(self):
"""The concatenated rows for this ragged tensor.
`rt.values` is a potentially ragged tensor formed by flattening the two
outermost dimensions of `rt` into a single dimension.
`rt.values.shape = [nvals] + rt.shape[2:]` (where `nvals` is the
number of items in the outer two dimensions of `rt`).
    `rt.values.ragged_rank = rt.ragged_rank - 1`
Returns:
A potentially ragged tensor.
#### Example:
>>> rt = tf.ragged.constant([[3, 1, 4, 1], [], [5, 9, 2], [6], []])
>>> print(rt.values)
tf.Tensor([3 1 4 1 5 9 2 6], shape=(8,), dtype=int32)
"""
return self._values
@property
def _nested_row_partitions(self):
"""Returns the row partitions for this `RaggedTensor`."""
partitions = [self._row_partition]
rt_values = self.values
while isinstance(rt_values, RaggedTensor):
# pylint: disable=protected-access
partitions.append(rt_values._row_partition)
rt_values = rt_values.values
return tuple(partitions)
@property
def row_splits(self):
"""The row-split indices for this ragged tensor's `values`.
`rt.row_splits` specifies where the values for each row begin and end in
`rt.values`. In particular, the values for row `rt[i]` are stored in
the slice `rt.values[rt.row_splits[i]:rt.row_splits[i+1]]`.
Returns:
A 1-D integer `Tensor` with shape `[self.nrows+1]`.
The returned tensor is non-empty, and is sorted in ascending order.
`self.row_splits[0]` is zero, and `self.row_splits[-1]` is equal to
`self.values.shape[0]`.
#### Example:
>>> rt = tf.ragged.constant([[3, 1, 4, 1], [], [5, 9, 2], [6], []])
>>> print(rt.row_splits) # indices of row splits in rt.values
tf.Tensor([0 4 4 7 8 8], shape=(6,), dtype=int64)
"""
return self._row_partition.row_splits()
@property
def uniform_row_length(self):
"""The length of each row in this ragged tensor, or None if rows are ragged.
>>> rt1 = tf.ragged.constant([[1, 2, 3], [4], [5, 6], [7, 8, 9, 10]])
>>> print(rt1.uniform_row_length) # rows are ragged.
None
>>> rt2 = tf.RaggedTensor.from_uniform_row_length(
... values=rt1, uniform_row_length=2)
>>> print(rt2)
<tf.RaggedTensor [[[1, 2, 3], [4]], [[5, 6], [7, 8, 9, 10]]]>
>>> print(rt2.uniform_row_length) # rows are not ragged (all have size 2).
tf.Tensor(2, shape=(), dtype=int64)
A RaggedTensor's rows are only considered to be uniform (i.e. non-ragged)
if it can be determined statically (at graph construction time) that the
rows all have the same length.
Returns:
A scalar integer `Tensor`, specifying the length of every row in this
ragged tensor (for ragged tensors whose rows are uniform); or `None`
(for ragged tensors whose rows are ragged).
"""
return self._row_partition.uniform_row_length()
@property
def flat_values(self):
"""The innermost `values` tensor for this ragged tensor.
Concretely, if `rt.values` is a `Tensor`, then `rt.flat_values` is
`rt.values`; otherwise, `rt.flat_values` is `rt.values.flat_values`.
Conceptually, `flat_values` is the tensor formed by flattening the
outermost dimension and all of the ragged dimensions into a single
dimension.
`rt.flat_values.shape = [nvals] + rt.shape[rt.ragged_rank + 1:]`
(where `nvals` is the number of items in the flattened dimensions).
Returns:
A `Tensor`.
#### Example:
>>> rt = tf.ragged.constant([[[3, 1, 4, 1], [], [5, 9, 2]], [], [[6], []]])
>>> print(rt.flat_values)
tf.Tensor([3 1 4 1 5 9 2 6], shape=(8,), dtype=int32)
"""
rt_values = self.values
while isinstance(rt_values, RaggedTensor):
rt_values = rt_values.values
return rt_values
@property
def nested_row_splits(self):
"""A tuple containing the row_splits for all ragged dimensions.
`rt.nested_row_splits` is a tuple containing the `row_splits` tensors for
all ragged dimensions in `rt`, ordered from outermost to innermost. In
particular, `rt.nested_row_splits = (rt.row_splits,) + value_splits` where:
* `value_splits = ()` if `rt.values` is a `Tensor`.
* `value_splits = rt.values.nested_row_splits` otherwise.
Returns:
A `tuple` of 1-D integer `Tensor`s.
#### Example:
>>> rt = tf.ragged.constant(
... [[[[3, 1, 4, 1], [], [5, 9, 2]], [], [[6], []]]])
>>> for i, splits in enumerate(rt.nested_row_splits):
... print('Splits for dimension %d: %s' % (i+1, splits.numpy()))
Splits for dimension 1: [0 3]
Splits for dimension 2: [0 3 3 5]
Splits for dimension 3: [0 4 4 7 8 8]
"""
rt_nested_splits = [self.row_splits]
rt_values = self.values
while isinstance(rt_values, RaggedTensor):
rt_nested_splits.append(rt_values.row_splits)
rt_values = rt_values.values
return tuple(rt_nested_splits)
def value_rowids(self, name=None):
"""Returns the row indices for the `values` in this ragged tensor.
`rt.value_rowids()` corresponds one-to-one with the outermost dimension of
`rt.values`, and specifies the row containing each value. In particular,
the row `rt[row]` consists of the values `rt.values[j]` where
`rt.value_rowids()[j] == row`.
Args:
name: A name prefix for the returned tensor (optional).
Returns:
A 1-D integer `Tensor` with shape `self.values.shape[:1]`.
The returned tensor is nonnegative, and is sorted in ascending order.
#### Example:
>>> rt = tf.ragged.constant([[3, 1, 4, 1], [], [5, 9, 2], [6], []])
>>> print(rt.values)
tf.Tensor([3 1 4 1 5 9 2 6], shape=(8,), dtype=int32)
>>> print(rt.value_rowids()) # corresponds 1:1 with rt.values
tf.Tensor([0 0 0 0 2 2 2 3], shape=(8,), dtype=int64)
"""
with ops.name_scope(name, "RaggedValueRowIds", [self]):
return self._row_partition.value_rowids()
def nested_value_rowids(self, name=None):
"""Returns a tuple containing the value_rowids for all ragged dimensions.
    `rt.nested_value_rowids` is a tuple containing the `value_rowids` tensors
    for all ragged dimensions in `rt`, ordered from outermost to innermost. In
particular, `rt.nested_value_rowids = (rt.value_rowids(),) + value_ids`
where:
* `value_ids = ()` if `rt.values` is a `Tensor`.
* `value_ids = rt.values.nested_value_rowids` otherwise.
Args:
name: A name prefix for the returned tensors (optional).
Returns:
A `tuple` of 1-D integer `Tensor`s.
#### Example:
>>> rt = tf.ragged.constant(
... [[[[3, 1, 4, 1], [], [5, 9, 2]], [], [[6], []]]])
>>> for i, ids in enumerate(rt.nested_value_rowids()):
... print('row ids for dimension %d: %s' % (i+1, ids.numpy()))
row ids for dimension 1: [0 0 0]
row ids for dimension 2: [0 0 0 2 2]
row ids for dimension 3: [0 0 0 0 2 2 2 3]
"""
with ops.name_scope(name, "RaggedNestedValueRowIds", [self]):
rt_nested_ids = [self.value_rowids()]
rt_values = self.values
while isinstance(rt_values, RaggedTensor):
rt_nested_ids.append(rt_values.value_rowids())
rt_values = rt_values.values
return tuple(rt_nested_ids)
def nrows(self, out_type=None, name=None):
"""Returns the number of rows in this ragged tensor.
I.e., the size of the outermost dimension of the tensor.
Args:
out_type: `dtype` for the returned tensor. Defaults to
`self.row_splits.dtype`.
name: A name prefix for the returned tensor (optional).
Returns:
A scalar `Tensor` with dtype `out_type`.
#### Example:
>>> rt = tf.ragged.constant([[3, 1, 4, 1], [], [5, 9, 2], [6], []])
>>> print(rt.nrows()) # rt has 5 rows.
tf.Tensor(5, shape=(), dtype=int64)
"""
with ops.name_scope(name, "RaggedNRows", [self]):
return self._row_partition.nrows(out_type=out_type)
def row_starts(self, name=None):
"""Returns the start indices for rows in this ragged tensor.
These indices specify where the values for each row begin in
`self.values`. `rt.row_starts()` is equal to `rt.row_splits[:-1]`.
Args:
name: A name prefix for the returned tensor (optional).
Returns:
A 1-D integer Tensor with shape `[nrows]`.
The returned tensor is nonnegative, and is sorted in ascending order.
#### Example:
>>> rt = tf.ragged.constant([[3, 1, 4, 1], [], [5, 9, 2], [6], []])
>>> print(rt.values)
tf.Tensor([3 1 4 1 5 9 2 6], shape=(8,), dtype=int32)
>>> print(rt.row_starts()) # indices of row starts in rt.values
tf.Tensor([0 4 4 7 8], shape=(5,), dtype=int64)
"""
with ops.name_scope(name, "RaggedRowStarts", [self]):
return self._row_partition.row_starts()
def row_limits(self, name=None):
"""Returns the limit indices for rows in this ragged tensor.
These indices specify where the values for each row end in
    `self.values`. `rt.row_limits()` is equal to `rt.row_splits[1:]`.
Args:
name: A name prefix for the returned tensor (optional).
Returns:
A 1-D integer Tensor with shape `[nrows]`.
The returned tensor is nonnegative, and is sorted in ascending order.
#### Example:
>>> rt = tf.ragged.constant([[3, 1, 4, 1], [], [5, 9, 2], [6], []])
>>> print(rt.values)
tf.Tensor([3 1 4 1 5 9 2 6], shape=(8,), dtype=int32)
>>> print(rt.row_limits()) # indices of row limits in rt.values
tf.Tensor([4 4 7 8 8], shape=(5,), dtype=int64)
"""
with ops.name_scope(name, "RaggedRowLimits", [self]):
return self._row_partition.row_limits()
def row_lengths(self, axis=1, name=None):
"""Returns the lengths of the rows in this ragged tensor.
`rt.row_lengths()[i]` indicates the number of values in the
`i`th row of `rt`.
Args:
axis: An integer constant indicating the axis whose row lengths should be
returned.
name: A name prefix for the returned tensor (optional).
Returns:
A potentially ragged integer Tensor with shape `self.shape[:axis]`.
Raises:
ValueError: If `axis` is out of bounds.
#### Example:
>>> rt = tf.ragged.constant(
... [[[3, 1, 4], [1]], [], [[5, 9], [2]], [[6]], []])
>>> print(rt.row_lengths()) # lengths of rows in rt
tf.Tensor([2 0 2 1 0], shape=(5,), dtype=int64)
>>> print(rt.row_lengths(axis=2)) # lengths of axis=2 rows.
<tf.RaggedTensor [[3, 1], [], [2, 1], [1], []]>
"""
if axis == 0:
return self._row_partition.nrows()
if axis == 1:
return self._row_partition.row_lengths()
with ops.name_scope(name, "RaggedRowLengths", [self]):
axis = array_ops.get_positive_axis(
axis, self.shape.rank, ndims_name="rank(self)")
if axis == 0:
return self.nrows()
elif axis == 1:
splits = self.row_splits
return splits[1:] - splits[:-1]
elif isinstance(self.values, RaggedTensor):
return self.with_values(self.values.row_lengths(axis - 1))
else:
shape = array_ops.shape(self.values, out_type=self._row_partition.dtype)
return self.with_values(
array_ops.ones(shape[:axis - 1], self._row_partition.dtype) *
shape[axis - 1])
def nested_row_lengths(self, name=None):
"""Returns a tuple containing the row_lengths for all ragged dimensions.
`rt.nested_row_lengths()` is a tuple containing the `row_lengths` tensors
for all ragged dimensions in `rt`, ordered from outermost to innermost.
Args:
name: A name prefix for the returned tensors (optional).
Returns:
A `tuple` of 1-D integer `Tensors`. The length of the tuple is equal to
`self.ragged_rank`.
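    #### Example:
    >>> rt = tf.ragged.constant(
    ...     [[[3, 1, 4], [1]], [], [[5, 9], [2]], [[6]], []])
    >>> for lengths in rt.nested_row_lengths():
    ...   print(lengths.numpy())
    [2 0 2 1 0]
    [3 1 2 1 1]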
"""
with ops.name_scope(name, "RaggedNestedRowLengths", [self]):
rt_nested_row_lengths = []
rt = self
while isinstance(rt, RaggedTensor):
rt_nested_row_lengths.append(rt.row_lengths())
rt = rt.values
return tuple(rt_nested_row_lengths)
def bounding_shape(self, axis=None, name=None, out_type=None):
"""Returns the tight bounding box shape for this `RaggedTensor`.
Args:
axis: An integer scalar or vector indicating which axes to return the
bounding box for. If not specified, then the full bounding box is
returned.
name: A name prefix for the returned tensor (optional).
out_type: `dtype` for the returned tensor. Defaults to
`self.row_splits.dtype`.
Returns:
An integer `Tensor` (`dtype=self.row_splits.dtype`). If `axis` is not
specified, then `output` is a vector with
`output.shape=[self.shape.ndims]`. If `axis` is a scalar, then the
`output` is a scalar. If `axis` is a vector, then `output` is a vector,
where `output[i]` is the bounding size for dimension `axis[i]`.
#### Example:
>>> rt = tf.ragged.constant([[1, 2, 3, 4], [5], [], [6, 7, 8, 9], [10]])
>>> rt.bounding_shape().numpy()
array([5, 4])
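    >>> print(rt.bounding_shape(axis=1).numpy())  # max row length
    4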
"""
if out_type is None:
out_type = self._row_partition.dtype
else:
out_type = dtypes.as_dtype(out_type)
with ops.name_scope(name, "RaggedBoundingBox", [self, axis]):
nested_splits = self.nested_row_splits
rt_flat_values = self.flat_values
# Optimized special cases for when axis=0 or axis=1:
if isinstance(axis, int):
if axis == 0:
return array_ops.shape(nested_splits[0], out_type=out_type)[0] - 1
elif axis == 1:
return math_ops.maximum(math_ops.reduce_max(self.row_lengths()), 0)
splits_shape = array_ops.shape(self.row_splits, out_type=out_type)
flat_values_shape = array_ops.shape(rt_flat_values, out_type=out_type)
ragged_dimensions = [splits_shape[0] - 1] + [
math_ops.maximum(math_ops.reduce_max(splits[1:] - splits[:-1]), 0)
for splits in nested_splits
]
inner_dimensions = flat_values_shape[1:]
if out_type != self._row_partition.dtype:
ragged_dimensions = [
math_ops.cast(d, out_type) for d in ragged_dimensions
]
bbox = array_ops.concat(
[array_ops.stack(ragged_dimensions), inner_dimensions], axis=0)
return bbox if axis is None else array_ops.gather(bbox, axis)
#=============================================================================
# Transformation
#=============================================================================
def with_values(self, new_values):
"""Returns a copy of `self` with `values` replaced by `new_value`.
Preserves cached row-partitioning tensors such as `self.cached_nrows` and
`self.cached_value_rowids` if they have values.
Args:
new_values: Potentially ragged tensor to use as the `values` for the
returned `RaggedTensor`. Must have `rank > 0`, and must have the same
number of rows as `self.values`.
Returns:
A `RaggedTensor`. `result.rank = 1 + new_values.rank`.
`result.ragged_rank = 1 + new_values.ragged_rank`
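    #### Example:
    >>> rt = tf.ragged.constant([[1, 2], [3]])
    >>> print(rt.with_values([10, 20, 30]))  # same row partition, new values
    <tf.RaggedTensor [[10, 20], [30]]>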
"""
new_values = _convert_to_ragged_tensor_values(new_values)
new_values.shape.with_rank_at_least(1)
self.values.shape[:1].assert_is_compatible_with(new_values.shape[:1])
if (isinstance(new_values, RaggedTensor) and
self._row_partition.dtype != new_values.row_splits.dtype):
if not ragged_config.auto_cast_partition_dtype():
raise ValueError("self and new_values have mismatched row_splits "
"dtypes; use RaggedTensor.with_row_splits_dtype() to "
"convert them to compatible dtypes.")
new_values = new_values.with_row_splits_dtype(dtypes.int64)
return self.with_row_splits_dtype(dtypes.int64).with_values(new_values)
return RaggedTensor(
values=new_values, row_partition=self._row_partition, internal=True)
def with_flat_values(self, new_values):
"""Returns a copy of `self` with `flat_values` replaced by `new_value`.
Preserves cached row-partitioning tensors such as `self.cached_nrows` and
`self.cached_value_rowids` if they have values.
Args:
new_values: Potentially ragged tensor that should replace
`self.flat_values`. Must have `rank > 0`, and must have the same number
of rows as `self.flat_values`.
Returns:
A `RaggedTensor`.
`result.rank = self.ragged_rank + new_values.rank`.
`result.ragged_rank = self.ragged_rank + new_values.ragged_rank`.
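    #### Example:
    >>> rt = tf.ragged.constant([[[1, 2], [3]], [[4, 5]]])
    >>> print(rt.with_flat_values([10, 20, 30, 40, 50]))
    <tf.RaggedTensor [[[10, 20], [30]], [[40, 50]]]>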
"""
if isinstance(self._values, RaggedTensor):
return self.with_values(self.values.with_flat_values(new_values))
else:
new_values = _convert_to_ragged_tensor_values(new_values)
return self.with_values(new_values)
def with_row_splits_dtype(self, dtype):
"""Returns a copy of this RaggedTensor with the given `row_splits` dtype.
For RaggedTensors with multiple ragged dimensions, the `row_splits` for all
nested `RaggedTensor` objects are cast to the given dtype.
Args:
dtype: The dtype for `row_splits`. One of `tf.int32` or `tf.int64`.
Returns:
A copy of this RaggedTensor, with the `row_splits` cast to the given
type.
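    #### Example:
    >>> rt = tf.ragged.constant([[1, 2], [3]])
    >>> rt.row_splits.dtype
    tf.int64
    >>> rt.with_row_splits_dtype(tf.int32).row_splits.dtype
    tf.int32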
"""
dtype = dtypes.as_dtype(dtype)
if dtype not in (dtypes.int32, dtypes.int64):
raise ValueError("dtype must be int32 or int64")
if self._row_partition.dtype == dtype:
return self
current_values = self._values
if isinstance(current_values, RaggedTensor):
return RaggedTensor(
values=current_values.with_row_splits_dtype(dtype),
row_partition=self._row_partition.with_row_splits_dtype(dtype),
internal=True)
else:
return RaggedTensor(
values=current_values,
row_partition=self._row_partition.with_row_splits_dtype(dtype),
internal=True)
def merge_dims(self, outer_axis, inner_axis):
"""Merges outer_axis...inner_axis into a single dimension.
Returns a copy of this RaggedTensor with the specified range of dimensions
flattened into a single dimension, with elements in row-major order.
#### Examples:
>>> rt = tf.ragged.constant([[[1, 2], [3]], [[4, 5, 6]]])
>>> print(rt.merge_dims(0, 1))
<tf.RaggedTensor [[1, 2], [3], [4, 5, 6]]>
>>> print(rt.merge_dims(1, 2))
<tf.RaggedTensor [[1, 2, 3], [4, 5, 6]]>
>>> print(rt.merge_dims(0, 2))
tf.Tensor([1 2 3 4 5 6], shape=(6,), dtype=int32)
    To mimic the behavior of `np.ndarray.flatten` (which flattens all
    dimensions), use `rt.merge_dims(0, -1)`. To mimic the behavior of
    `tf.layers.Flatten` (which flattens all dimensions except the outermost
    batch dimension), use `rt.merge_dims(1, -1)`.
Args:
outer_axis: `int`: The first dimension in the range of dimensions to
merge. May be negative if `self.shape.rank` is statically known.
inner_axis: `int`: The last dimension in the range of dimensions to merge.
May be negative if `self.shape.rank` is statically known.
Returns:
A copy of this tensor, with the specified dimensions merged into a
single dimension. The shape of the returned tensor will be
`self.shape[:outer_axis] + [N] + self.shape[inner_axis + 1:]`, where `N`
is the total number of slices in the merged dimensions.
"""
outer_axis = array_ops.get_positive_axis(
outer_axis,
self.shape.rank,
axis_name="outer_axis",
ndims_name="rank(self)")
inner_axis = array_ops.get_positive_axis(
inner_axis,
self.shape.rank,
axis_name="inner_axis",
ndims_name="rank(self)")
if not outer_axis < inner_axis:
raise ValueError("Expected outer_axis (%d) to be less than "
"inner_axis (%d)" % (outer_axis, inner_axis))
return merge_dims(self, outer_axis, inner_axis)
def _set_shape(self, shape):
"""Updates the static shape of `self` to be `shape`.
* If a dimension of `shape` has known rank, and is encoded via
partitioning, then this will update the corresponding partition to
define `_uniform_row_length` and `nrows`.
* If a dimension of `shape` has a known rank, and is encoded as one
of the `flat_values` dimensions, then `flat_values.set_shape()` will
be used to update its shape.
Warning: Using this method to assert an incorrect shape for a RaggedTensor
(i.e., one that's not consistent with its actual shape) can cause
segmentation faults and very difficult-to-diagnose behavior. Only use this
method if you are certain that the shape is correct.
Args:
shape: `tf.TensorShape` specifying the shape for this `RaggedTensor`.
"""
# TODO(edloper): Refactor this to not directly access private members
# of RowPartition.
# pylint: disable=protected-access
shape = tensor_shape.as_shape(shape)
if shape.rank is None:
return # Nothing to do.
shape = shape.as_list()
# Outermost dimension
if shape[0] is not None:
self._row_partition._row_splits.set_shape(shape[0] + 1)
# Partitioned dimensions
dtype = self._row_partition.dtype
for i, partition in enumerate(self._nested_row_partitions):
size = shape[i + 1]
if size is not None:
if partition._uniform_row_length is not None:
old_row_length = tensor_util.constant_value(
partition._uniform_row_length)
if old_row_length is not None:
if size == old_row_length:
continue # already have shape info for this axis.
else:
raise ValueError("Inconsistent size for axis %s: %s vs %s" %
((i + 1), old_row_length, size))
partition._uniform_row_length = ops.convert_to_tensor(size, dtype)
if partition._nrows is None:
partition._nrows = array_ops.size(partition._row_splits) - 1
# Inner dimensions
flat_shape = tensor_shape.as_shape([None] + shape[self.ragged_rank + 1:])
self.flat_values.set_shape(flat_shape)
#=============================================================================
# Tensor Type Conversions
#=============================================================================
@classmethod
@dispatch.add_dispatch_support
def from_tensor(cls,
tensor,
lengths=None,
padding=None,
ragged_rank=1,
name=None,
row_splits_dtype=dtypes.int64):
"""Converts a `tf.Tensor` into a `RaggedTensor`.
The set of absent/default values may be specified using a vector of lengths
or a padding value (but not both). If `lengths` is specified, then the
output tensor will satisfy `output[row] = tensor[row][:lengths[row]]`. If
'lengths' is a list of lists or tuple of lists, those lists will be used
as nested row lengths. If `padding` is specified, then any row *suffix*
consisting entirely of `padding` will be excluded from the returned
`RaggedTensor`. If neither `lengths` nor `padding` is specified, then the
returned `RaggedTensor` will have no absent/default values.
Examples:
>>> dt = tf.constant([[5, 7, 0], [0, 3, 0], [6, 0, 0]])
>>> tf.RaggedTensor.from_tensor(dt)
<tf.RaggedTensor [[5, 7, 0], [0, 3, 0], [6, 0, 0]]>
>>> tf.RaggedTensor.from_tensor(dt, lengths=[1, 0, 3])
<tf.RaggedTensor [[5], [], [6, 0, 0]]>
>>> tf.RaggedTensor.from_tensor(dt, padding=0)
<tf.RaggedTensor [[5, 7], [0, 3], [6]]>
>>> dt = tf.constant([[[5, 0], [7, 0], [0, 0]],
... [[0, 0], [3, 0], [0, 0]],
... [[6, 0], [0, 0], [0, 0]]])
>>> tf.RaggedTensor.from_tensor(dt, lengths=([2, 0, 3], [1, 1, 2, 0, 1]))
<tf.RaggedTensor [[[5], [7]], [], [[6, 0], [], [0]]]>
Args:
tensor: The `Tensor` to convert. Must have rank `ragged_rank + 1` or
higher.
lengths: An optional set of row lengths, specified using a 1-D integer
`Tensor` whose length is equal to `tensor.shape[0]` (the number of rows
in `tensor`). If specified, then `output[row]` will contain
`tensor[row][:lengths[row]]`. Negative lengths are treated as zero. You
may optionally pass a list or tuple of lengths to this argument, which
will be used as nested row lengths to construct a ragged tensor with
multiple ragged dimensions.
padding: An optional padding value. If specified, then any row suffix
consisting entirely of `padding` will be excluded from the returned
RaggedTensor. `padding` is a `Tensor` with the same dtype as `tensor`
and with `shape=tensor.shape[ragged_rank + 1:]`.
ragged_rank: Integer specifying the ragged rank for the returned
`RaggedTensor`. Must be greater than zero.
name: A name prefix for the returned tensors (optional).
row_splits_dtype: `dtype` for the returned `RaggedTensor`'s `row_splits`
tensor. One of `tf.int32` or `tf.int64`.
Returns:
A `RaggedTensor` with the specified `ragged_rank`. The shape of the
returned ragged tensor is compatible with the shape of `tensor`.
Raises:
ValueError: If both `lengths` and `padding` are specified.
"""
row_splits_dtype = dtypes.as_dtype(row_splits_dtype)
if lengths is not None and padding is not None:
raise ValueError("Specify lengths or padding, but not both")
if not isinstance(ragged_rank, int):
raise TypeError("ragged_rank expected int, got %r" % ragged_rank)
if ragged_rank <= 0:
raise ValueError("ragged_rank must be greater than 0; got %s" %
ragged_rank)
with ops.name_scope(name, "RaggedFromTensor", [tensor, lengths, padding]):
tensor = ops.convert_to_tensor(tensor, name="tensor")
tensor.shape.with_rank_at_least(ragged_rank + 1)
input_shape = array_ops.shape(tensor, out_type=row_splits_dtype)
ncols = input_shape[1]
# Handle nested row lengths.
if (lengths is not None and isinstance(lengths, (list, tuple)) and
len(lengths) and not isinstance(lengths[0], (int, float))):
if ragged_rank not in (1, len(lengths)):
# Note: we accept `ragged_rank=1` here because it's the default value;
# i.e., if the user passes in a tuple of lengths, but doesn't specify
# ragged_rank, then we should use that tuple to determine ragged_rank.
# We only want to complain if they pass in an explicit ragged_rank
# that doesn't match len(lengths).
raise ValueError("If lengths is a tuple of row_lengths, then "
"ragged_rank must be len(lengths).")
# Rather than reconstructing the tensor mask directly, we can
# recreate it as a boolean RaggedTensor, then densify that and use
# that as the mask to clear out the unused data in the passed tensor.
tensor.shape.with_rank_at_least(len(lengths) + 1)
num_tokens = math_ops.reduce_sum(lengths[-1])
ones_mask = array_ops.ones([num_tokens], dtype=dtypes.bool)
ragged_mask = cls.from_nested_row_lengths(
ones_mask, lengths, validate=False)
dense_ragged_mask = ragged_mask.to_tensor(default_value=False)
masked_data = array_ops.boolean_mask(tensor, dense_ragged_mask)
return cls.from_nested_row_lengths(masked_data, lengths, validate=False)
# Handle ragged_rank>1 via recursion:
# If the output should have multiple ragged dimensions, then first
# flatten the tensor to eliminate all but the last ragged dimension,
# and recursively convert that flattened tensor. Then add on the splits
# for the dimensions that we flattened out.
if ragged_rank > 1:
if tensor.shape.is_fully_defined():
input_shape = tensor.shape.as_list()
# The total number of elements in each dimension. E.g., if
# input_shape=[3, 4, 5, 6], then dim[2] has 3*4*5 elements in total.
dim_size = np.cumprod(input_shape)
new_shape = [dim_size[ragged_rank - 1]] + input_shape[ragged_rank:]
else:
dim_size = math_ops.cumprod(input_shape)
new_shape = array_ops.concat([[dim_size[ragged_rank - 1]],
input_shape[ragged_rank:]],
axis=0)
flattened = array_ops.reshape(tensor, new_shape)
result = cls.from_tensor(
flattened, lengths, padding, row_splits_dtype=row_splits_dtype)
for axis in range(ragged_rank - 1, 0, -1):
dim_len = tensor_shape.dimension_at_index(tensor.shape, axis).value
if dim_len is None:
dim_len = input_shape[axis]
else:
dim_len = constant_op.constant(dim_len, row_splits_dtype)
result = RaggedTensor.from_uniform_row_length(
values=result,
uniform_row_length=dim_len,
nrows=dim_size[axis - 1],
validate=False)
return result
# If padding was specified, then use it to find row lengths.
if padding is not None:
padding = ops.convert_to_tensor(
padding, name="padding", dtype=tensor.dtype)
padding.shape.assert_is_compatible_with(tensor.shape[2:])
# Find places where the padding is equal to the tensor. (This will
# broadcast `padding` across the outermost 2 dimensions of `tensor`,
# so `has_default_value.shape = tensor.shape`.)
has_default_value = math_ops.equal(padding, tensor)
# If the padding isn't a scalar, then require that all values in the
# padding match each item in the tensor. After this block of code,
# `has_default.shape = tensor.shape[:2]`. (Unfortunately, we can't just
        # use reduce_all for both cases, because when you pass an empty `axis`
# list to reduce_all, it reduces all axes; but we want it to reduce no
# axes -- i.e., to be a no-op.)
tensor_rank = array_ops.rank(tensor)
reduce_axis = math_ops.range(2, tensor_rank)
has_default = control_flow_ops.cond(
tensor_rank > 2,
lambda: math_ops.reduce_all(has_default_value, axis=reduce_axis),
lambda: has_default_value)
has_default.set_shape(tensor_shape.TensorShape([None, None]))
has_default.set_shape(tensor.shape[:2])
# Use has_default to find the length of each row: for each
# non-default item in a row, calculate the length that the row needs to
# have to include that item; and then take the max of those values
# (across each row).
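      # For example (illustrative sketch): with scalar padding=0 and
      # tensor=[[1, 0, 2], [0, 0, 0]], has_default is
      # [[False, True, False], [True, True, True]], so the lengths computed
      # below are [3, 0].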
has_nondefault = math_ops.logical_not(has_default)
has_nondefault = math_ops.cast(has_nondefault, row_splits_dtype)
length_for_nondefault_value = (
has_nondefault *
array_ops.expand_dims(math_ops.range(1, ncols + 1), 0))
lengths = math_ops.reduce_max(length_for_nondefault_value, axis=1)
if lengths is not None:
# If we have lengths (either directly supplied, or computed from
# paddings), then use those to construct splits; and then use masking
# to get the corresponding values.
lengths = ragged_util.convert_to_int_tensor(lengths, "lengths",
row_splits_dtype)
lengths.shape.assert_has_rank(1)
lengths = math_ops.minimum(lengths, ncols)
lengths = math_ops.maximum(lengths, 0)
limits = math_ops.cumsum(lengths)
splits = array_ops.concat(
[array_ops.zeros([1], row_splits_dtype), limits], axis=0)
mask = array_ops.sequence_mask(lengths, maxlen=ncols)
values = array_ops.boolean_mask(tensor, mask)
return cls.from_row_splits(values, splits, validate=False)
# If neither padding nor lengths were specified, then create a splits
# vector that contains no default values, and reshape the input tensor
# to form the values for the RaggedTensor.
values_shape = array_ops.concat([[input_shape[0] * input_shape[1]],
input_shape[2:]], axis=0)
values = array_ops.reshape(tensor, values_shape)
const_nrows = tensor_shape.dimension_at_index(tensor.shape, 0).value
const_ncols = tensor_shape.dimension_at_index(tensor.shape, 1).value
if const_nrows is not None:
nrows = constant_op.constant(const_nrows, row_splits_dtype)
else:
nrows = input_shape[0]
if const_ncols is not None:
ncols = constant_op.constant(const_ncols, row_splits_dtype)
else:
ncols = input_shape[1]
return RaggedTensor.from_uniform_row_length(
values=values, uniform_row_length=ncols, nrows=nrows, validate=False)
def to_tensor(self, default_value=None, name=None, shape=None):
"""Converts this `RaggedTensor` into a `tf.Tensor`.
If `shape` is specified, then the result is padded and/or truncated to
the specified shape.
Examples:
>>> rt = tf.ragged.constant([[9, 8, 7], [], [6, 5], [4]])
>>> print(rt.to_tensor())
    tf.Tensor(
        [[9 8 7]
         [0 0 0]
         [6 5 0]
         [4 0 0]], shape=(4, 3), dtype=int32)
>>> print(rt.to_tensor(shape=[5, 2]))
    tf.Tensor(
        [[9 8]
         [0 0]
         [6 5]
         [4 0]
         [0 0]], shape=(5, 2), dtype=int32)
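    >>> # Illustrative sketch (not from the original docstring): a non-zero
    >>> # default_value fills the missing cells instead of zeros.
    >>> print(rt.to_tensor(default_value=-1))
    tf.Tensor(
        [[ 9  8  7]
         [-1 -1 -1]
         [ 6  5 -1]
         [ 4 -1 -1]], shape=(4, 3), dtype=int32)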
Args:
default_value: Value to set for indices not specified in `self`. Defaults
to zero. `default_value` must be broadcastable to
`self.shape[self.ragged_rank + 1:]`.
name: A name prefix for the returned tensors (optional).
shape: The shape of the resulting dense tensor. In particular,
`result.shape[i]` is `shape[i]` (if `shape[i]` is not None), or
        `self.bounding_shape(i)` (otherwise). `shape.rank` must be `None` or
equal to `self.rank`.
Returns:
A `Tensor` with shape `ragged.bounding_shape(self)` and the
values specified by the non-empty values in `self`. Empty values are
assigned `default_value`.
"""
with ops.name_scope(name, "RaggedToTensor", [self, default_value, shape]):
if default_value is not None:
default_value = ops.convert_to_tensor(
default_value, name="default_value", dtype=self.dtype)
type_tensor_pairs = _get_row_partition_type_tensor_pairs(self)
row_partition_types = [x[0] for x in type_tensor_pairs]
row_partition_tensors = [x[1] for x in type_tensor_pairs]
if default_value is None:
default_value = array_ops.zeros((), self.dtype)
if (isinstance(shape, (list, tuple)) and
any(isinstance(v, ops.Tensor) for v in shape) and
all(isinstance(v, (int, ops.Tensor)) for v in shape)):
shape = array_ops.stack(shape)
shape_tensor = _shape_as_tensor(shape, row_partition_tensors[0].dtype)
tensor = gen_ragged_conversion_ops.ragged_tensor_to_tensor(
shape=shape_tensor,
values=self.flat_values,
default_value=default_value,
row_partition_types=row_partition_types,
row_partition_tensors=row_partition_tensors)
ragged_shape = self.shape
if ragged_shape.rank is not None and not isinstance(shape, ops.Tensor):
# Merged self.shape and shape, favoring the second one as it takes
# into account potential padding added to the output.
shape = tensor_shape.as_shape(shape)
if shape.rank is None:
output_shape = ragged_shape
else:
        # At this point we can assume that shape.rank == ragged_shape.rank
# because otherwise it would have failed earlier.
output_shape = [s1 if s1 is not None else s2 for (s1, s2)
in zip(shape.as_list(), ragged_shape.as_list())]
tensor.set_shape(output_shape)
return tensor
@classmethod
@dispatch.add_dispatch_support
def from_sparse(cls, st_input, name=None, row_splits_dtype=dtypes.int64):
"""Converts a 2D `tf.sparse.SparseTensor` to a `RaggedTensor`.
Each row of the `output` `RaggedTensor` will contain the explicit values
    from the same row in `st_input`. `st_input` must be ragged-right; if it
    is not ragged-right, then an error will be generated.
Example:
>>> indices = [[0, 0], [0, 1], [0, 2], [1, 0], [3, 0]]
>>> st = tf.sparse.SparseTensor(indices=indices,
... values=[1, 2, 3, 4, 5],
... dense_shape=[4, 3])
>>> tf.RaggedTensor.from_sparse(st).to_list()
[[1, 2, 3], [4], [], [5]]
Currently, only two-dimensional `SparseTensors` are supported.
Args:
st_input: The sparse tensor to convert. Must have rank 2.
name: A name prefix for the returned tensors (optional).
row_splits_dtype: `dtype` for the returned `RaggedTensor`'s `row_splits`
tensor. One of `tf.int32` or `tf.int64`.
Returns:
A `RaggedTensor` with the same values as `st_input`.
`output.ragged_rank = rank(st_input) - 1`.
`output.shape = [st_input.dense_shape[0], None]`.
Raises:
ValueError: If the number of dimensions in `st_input` is not known
statically, or is not two.
"""
row_splits_dtype = dtypes.as_dtype(row_splits_dtype)
if not sparse_tensor.is_sparse(st_input):
raise TypeError("Expected SparseTensor, got %s" % type(st_input).__name__)
with ops.name_scope(name, "RaggedFromSparse", [st_input]):
st_input = sparse_tensor.convert_to_tensor_or_sparse_tensor(
st_input, name="st_input")
if st_input.dense_shape.shape.ndims is None:
static_rank_from_dense_shape = None
else:
static_rank_from_dense_shape = st_input.dense_shape.shape.dims[0].value
if st_input.indices.shape.ndims is None:
static_rank_from_indices = None
else:
static_rank_from_indices = st_input.indices.shape.dims[1].value
if static_rank_from_dense_shape != 2 and static_rank_from_indices != 2:
raise ValueError("rank(st_input) must be 2")
with ops.control_dependencies(
_assert_sparse_indices_are_ragged_right(st_input.indices)):
# Treat sparse row indices as segment ids to generate a splits tensor
      # that we can pair with the sparse tensor values. (Ignore sparse column
# indices.)
segment_ids = math_ops.cast(st_input.indices[:, 0], row_splits_dtype)
num_segments = math_ops.cast(st_input.dense_shape[0], row_splits_dtype)
return cls.from_value_rowids(
st_input.values, segment_ids, num_segments, validate=False)
def to_sparse(self, name=None):
"""Converts this `RaggedTensor` into a `tf.sparse.SparseTensor`.
Example:
>>> rt = tf.ragged.constant([[1, 2, 3], [4], [], [5, 6]])
>>> print(rt.to_sparse())
    SparseTensor(indices=tf.Tensor(
        [[0 0]
         [0 1]
         [0 2]
         [1 0]
         [3 0]
         [3 1]], shape=(6, 2), dtype=int64),
        values=tf.Tensor([1 2 3 4 5 6], shape=(6,), dtype=int32),
        dense_shape=tf.Tensor([4 3], shape=(2,), dtype=int64))
Args:
name: A name prefix for the returned tensors (optional).
Returns:
A SparseTensor with the same values as `self`.
"""
with ops.name_scope(name, "RaggedToSparse", [self]):
result = gen_ragged_conversion_ops.ragged_tensor_to_sparse(
self.nested_row_splits, self.flat_values, name=name)
return sparse_tensor.SparseTensor(result.sparse_indices,
result.sparse_values,
result.sparse_dense_shape)
@classmethod
def _from_variant(cls,
variant,
dtype,
output_ragged_rank,
input_ragged_rank=None,
row_splits_dtype=dtypes.int64,
name=None):
"""Converts a `variant` Tensor into a `RaggedTensor`.
The input `variant` could be a scalar, meaning it encodes a single
`RaggedTensor` with ragged_rank `output_ragged_rank`. Alternatively it could
have an arbitrary rank, in which case each element is decoded into a
`RaggedTensor` with ragged_rank `input_ragged_rank` and these are then
stacked according to the input shape to output a single `RaggedTensor`
with ragged_rank `output_ragged_rank`. If `input_ragged_rank` is not
provided, it is inferred dynamically as `output_ragged_rank` -
`rank(variant)`. If `input_ragged_rank` is provided, the following must be
true: `output_ragged_rank` = `input_ragged_rank` + `rank(variant)`.
Example:
>>> rt = tf.ragged.constant([[0], [1, 2]])
>>> et = rt._to_variant()
>>> stacked_et = tf.stack([et, et])
>>> tf.RaggedTensor._from_variant( # scalar input.
... et, dtype=tf.int32, output_ragged_rank=1).to_list()
[[0], [1, 2]]
>>> tf.RaggedTensor._from_variant( # batched input.
... stacked_et, dtype=tf.int32, output_ragged_rank=2).to_list()
[[[0], [1, 2]], [[0], [1, 2]]]
Args:
variant: A `variant` Tensor representing an encoded (possibly
nested-batched) `RaggedTensor`.
dtype: The dtype of the encoded `RaggedTensor`.
output_ragged_rank: The expected ragged rank of the output `RaggedTensor`.
input_ragged_rank: The ragged rank of each encoded `RaggedTensor`. This is
optional and inferred dynamically if not provided.
row_splits_dtype: `dtype` for the RaggedTensor's `row_splits` tensor. One
of `tf.int32` or `tf.int64`.
name: A name prefix for the returned tensors (optional).
Returns:
A `RaggedTensor` of dtype `dtype` and ragged rank `output_ragged_rank`.
Raises:
ValueError: If the input rank is known, `input_ragged_rank` is provided
and `output_ragged_rank` = `input_ragged_rank` + `rank(variant)` does
not hold.
"""
variant = ops.convert_to_tensor(
variant, name="variant", dtype=dtypes.variant)
if (variant.shape.ndims is not None and input_ragged_rank is not None and
output_ragged_rank != input_ragged_rank + variant.shape.ndims):
raise ValueError(
"output_ragged_rank must be equal to input_ragged_rank +"
"variant.shape.ndims, found variant.shape.ndims: %d, "
"input_ragged_rank: %d, output_ragged_rank: %d" %
(variant.shape.ndims, input_ragged_rank, output_ragged_rank))
input_ragged_rank = -1 if input_ragged_rank is None else input_ragged_rank
with ops.name_scope(
name, "RaggedFromVariant",
[variant, dtype, input_ragged_rank, output_ragged_rank]):
result = gen_ragged_conversion_ops.ragged_tensor_from_variant(
variant, input_ragged_rank, output_ragged_rank, dtype,
row_splits_dtype, name)
return cls.from_nested_row_splits(
result.output_dense_values,
result.output_nested_splits,
validate=False)
def _to_variant(self, batched_input=False, name=None):
"""Converts this `RaggedTensor` into a `variant` Tensor.
If `batched_input` is `True`, then the `RaggedTensor` is unbatched along the
zero-th dimension, each component `RaggedTensor` is encoded into a scalar
`variant` Tensor, and these are stacked to return a 1-D `variant` Tensor.
If `batched_input` is `False`, then the `RaggedTensor` is encoded as is and
a scalar `variant` Tensor is returned.
Example:
>>> rt = tf.ragged.constant([[[0]], [[1]], [[2]]])
>>> rt._to_variant().shape.as_list()
[]
>>> rt._to_variant(batched_input=True).shape.as_list()
[3]
Args:
batched_input: If `True`, the `RaggedTensor` is unbatched and converted to
a `variant` vector. Set to `False` by default.
name: A name prefix for the returned tensors (optional).
Returns:
A `variant` Tensor that encodes this `RaggedTensor`.
"""
with ops.name_scope(name, "RaggedToVariant", [self, batched_input]):
return gen_ragged_conversion_ops.ragged_tensor_to_variant(
self.nested_row_splits, self.flat_values, batched_input, name)
#=============================================================================
# String Encoding
#=============================================================================
def __repr__(self):
if self._is_eager():
return "<tf.RaggedTensor %s>" % self.to_list()
else:
return "tf.RaggedTensor(values=%s, row_splits=%s)" % (
self.values, self.row_splits)
#=============================================================================
# Eager Execution Mode
#=============================================================================
def numpy(self):
"""Returns a numpy `array` with the values for this `RaggedTensor`.
Requires that this `RaggedTensor` was constructed in eager execution mode.
Ragged dimensions are encoded using numpy `arrays` with `dtype=object` and
`rank=1`, where each element is a single row.
#### Examples
In the following example, the value returned by `RaggedTensor.numpy()`
contains three numpy `array` objects: one for each row (with `rank=1` and
`dtype=int64`), and one to combine them (with `rank=1` and `dtype=object`):
>>> tf.ragged.constant([[1, 2, 3], [4, 5]], dtype=tf.int64).numpy()
array([array([1, 2, 3]), array([4, 5])], dtype=object)
Uniform dimensions are encoded using multidimensional numpy `array`s. In
the following example, the value returned by `RaggedTensor.numpy()` contains
a single numpy `array` object, with `rank=2` and `dtype=int64`:
>>> tf.ragged.constant([[1, 2, 3], [4, 5, 6]], dtype=tf.int64).numpy()
array([[1, 2, 3], [4, 5, 6]])
Returns:
A numpy `array`.
"""
if not self._is_eager():
raise ValueError("RaggedTensor.numpy() is only supported in eager mode.")
values = self.values.numpy()
splits = self.row_splits.numpy()
rows = [values[splits[i]:splits[i + 1]] for i in range(len(splits) - 1)]
if not rows:
return np.zeros((0, 0) + values.shape[1:], dtype=values.dtype)
# Note: if `rows` have ragged lengths, then they will be stored in a
# np.ndarray with dtype=object and rank=1. If they have uniform lengths,
# they will be combined into a single np.ndarray with dtype=row.dtype and
# rank=row.rank+1.
return np.array(rows)
def to_list(self):
"""Returns a nested Python `list` with the values for this `RaggedTensor`.
    Requires that this `RaggedTensor` was constructed in eager execution mode.
Returns:
A nested Python `list`.
"""
if self._is_eager():
return self._eager_value().to_list()
else:
raise ValueError("RaggedTensor.to_list() is only supported in eager "
"mode; in graph mode, evaluate the RaggedTensor first "
"and then use RaggedTensorValue.to_list().")
def _eager_value(self):
"""Returns a RaggedTensorValue for self. Requires self._is_eager()=true."""
value = self.flat_values.numpy()
for row_splits in reversed(self.nested_row_splits):
value = ragged_tensor_value.RaggedTensorValue(value, row_splits.numpy())
return value
def _is_eager(self):
"""Returns True if values & row_splits Tensors are all `EagerTensor`s."""
rt = self
while isinstance(rt, RaggedTensor):
if not isinstance(rt.row_splits, ops.EagerTensor):
return False
rt = rt.values
return isinstance(rt, ops.EagerTensor)
#=============================================================================
# Operators
#=============================================================================
# To avoid circular dependencies, we define stub methods for operators here,
# and then override them when the ragged_operators module is imported.
def _overloaded_operator(name): # pylint: disable=no-self-argument
def stub(*args, **kwargs):
del args, kwargs
raise ValueError(
"You must import 'tensorflow.python.ops.ragged.ragged_ops' "
"before using RaggedTensor.%s" % name)
return stub
__getitem__ = _overloaded_operator("__getitem__")
__ge__ = _overloaded_operator("__ge__")
__gt__ = _overloaded_operator("__gt__")
__le__ = _overloaded_operator("__le__")
__lt__ = _overloaded_operator("__lt__")
__and__ = _overloaded_operator("__and__")
__rand__ = _overloaded_operator("__rand__")
__invert__ = _overloaded_operator("__invert__")
__ror__ = _overloaded_operator("__ror__")
__or__ = _overloaded_operator("__or__")
__xor__ = _overloaded_operator("__xor__")
__rxor__ = _overloaded_operator("__rxor__")
__abs__ = _overloaded_operator("__abs__")
__add__ = _overloaded_operator("__add__")
__radd__ = _overloaded_operator("__radd__")
__div__ = _overloaded_operator("__div__")
__rdiv__ = _overloaded_operator("__rdiv__")
__floordiv__ = _overloaded_operator("__floordiv__")
__rfloordiv__ = _overloaded_operator("__rfloordiv__")
__mod__ = _overloaded_operator("__mod__")
__rmod__ = _overloaded_operator("__rmod__")
__mul__ = _overloaded_operator("__mul__")
__rmul__ = _overloaded_operator("__rmul__")
__neg__ = _overloaded_operator("__neg__")
__pow__ = _overloaded_operator("__pow__")
__rpow__ = _overloaded_operator("__rpow__")
__sub__ = _overloaded_operator("__sub__")
__rsub__ = _overloaded_operator("__rsub__")
__truediv__ = _overloaded_operator("__truediv__")
__rtruediv__ = _overloaded_operator("__rtruediv__")
del _overloaded_operator
#=============================================================================
# Name Scope
#=============================================================================
# This private function is used by ops.name_scope to ensure that all of the
# input tensors for the scope belong to the same graph. Defining this means
# that you may include `RaggedTensor` objects in the name_scope `values`
# list.
def _as_graph_element(self):
"""Convert `self` to a graph element."""
values = self.values
while isinstance(values, RaggedTensor):
values = values.values
return values
#=============================================================================
# Composite Tensor
#=============================================================================
@property
def _type_spec(self):
return RaggedTensorSpec.from_value(self)
def _shape_invariant_to_type_spec(self, shape):
return RaggedTensorSpec(shape, self.dtype, self.ragged_rank,
self.row_splits.dtype)
def consumers(self):
return self._consumers()
def is_ragged(value):
"""Returns true if `value` is a ragged tensor or ragged tensor value."""
return isinstance(value,
(RaggedTensor, ragged_tensor_value.RaggedTensorValue))
def match_row_splits_dtypes(*tensors, **kwargs):
"""Return a copy of `tensors` with row_splits all having the same dtype.
Args:
*tensors: A list of Tensors or RaggedTensors.
**kwargs: If 'return_dtype=True', then return a tuple (dtype, tensors),
where `dtype` is the data type used by row-splits, and `tensors` is the
converted list of `Tensors` and `RaggedTensors`.
Returns:
The converted list of `Tensors` and `RaggedTensors`.
"""
return_dtype = kwargs.pop("return_dtype", False)
if kwargs:
raise ValueError("Unexpected keyword args %r" % kwargs)
has_int32 = False
has_int64 = False
for tensor in tensors:
if isinstance(tensor, RaggedTensor):
if tensor.row_splits.dtype == dtypes.int32:
has_int32 = True
else:
has_int64 = True
if has_int32 and has_int64:
if not ragged_config.auto_cast_partition_dtype():
raise ValueError("Input RaggedTensors have mismatched row_splits dtypes; "
"use RaggedTensor.with_row_splits_dtype() to convert "
"them to compatible dtypes.")
dtype = dtypes.int64
tensors = tuple(
t.with_row_splits_dtype(dtypes.int64) if isinstance(t, RaggedTensor
) else t
for t in tensors)
elif has_int32:
dtype = dtypes.int32
else:
dtype = dtypes.int64
if return_dtype:
return (dtype, tensors)
else:
return tensors
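# Illustrative usage sketch for `match_row_splits_dtypes` (assumes eager mode
# and that `ragged_config.auto_cast_partition_dtype()` returns True; otherwise
# mixing row_splits dtypes raises ValueError):
#   a = RaggedTensor.from_row_splits([1, 2, 3], [0, 2, 3])  # int64 splits
#   b = a.with_row_splits_dtype(dtypes.int32)               # int32 splits
#   dtype, (a2, b2) = match_row_splits_dtypes(a, b, return_dtype=True)
#   # dtype == dtypes.int64, and both a2 and b2 now use int64 row_splits.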
#===============================================================================
# RaggedTensorSpec
#===============================================================================
@tf_export("RaggedTensorSpec")
class RaggedTensorSpec(type_spec.BatchableTypeSpec):
"""Type specification for a `tf.RaggedTensor`."""
__slots__ = [
"_shape", "_dtype", "_ragged_rank", "_row_splits_dtype",
"_flat_values_spec"
]
@property
def dtype(self):
"""The `tf.dtypes.DType` specified by this type for the RaggedTensor.
Examples:
>>> rt = tf.ragged.constant([["a"], ["b", "c"]], dtype=tf.string)
>>> tf.type_spec_from_value(rt).dtype
tf.string
Returns:
A `tf.dtypes.DType` of the values in the RaggedTensor.
"""
return self._dtype
@property
def shape(self):
"""The statically known shape of the RaggedTensor.
Examples:
>>> rt = tf.ragged.constant([[0], [1, 2]])
>>> tf.type_spec_from_value(rt).shape
TensorShape([2, None])
>>> rt = tf.ragged.constant([[[0, 1]], [[1, 2], [3, 4]]], ragged_rank=1)
>>> tf.type_spec_from_value(rt).shape
TensorShape([2, None, 2])
Returns:
A `tf.TensorShape` containing the statically known shape of the
RaggedTensor. Ragged dimensions have a size of `None`.
"""
return self._shape
@property
def ragged_rank(self):
"""The number of times the RaggedTensor's flat_values is partitioned.
Defaults to `shape.ndims - 1`.
Examples:
>>> values = tf.ragged.constant([[1, 2, 3], [4], [5, 6], [7, 8, 9, 10]])
>>> tf.type_spec_from_value(values).ragged_rank
1
>>> rt1 = tf.RaggedTensor.from_uniform_row_length(values, 2)
>>> tf.type_spec_from_value(rt1).ragged_rank
2
Returns:
A Python `int` indicating the number of times the underlying `flat_values`
Tensor has been partitioned to add a new dimension.
I.e., `tf.rank(rt) = tf.rank(rt.flat_values) + rt.ragged_rank`.
"""
return self._ragged_rank
@property
def row_splits_dtype(self):
"""The `tf.dtypes.DType` of the RaggedTensor's `row_splits`.
Examples:
>>> rt = tf.ragged.constant([[1, 2, 3], [4]], row_splits_dtype=tf.int64)
>>> tf.type_spec_from_value(rt).row_splits_dtype
tf.int64
Returns:
A `tf.dtypes.DType` for the RaggedTensor's `row_splits` tensor. One
of `tf.int32` or `tf.int64`.
"""
return self._row_splits_dtype
@property
def flat_values_spec(self):
"""The `TypeSpec` of the flat_values of RaggedTensor.
Returns:
- The TypeSpec of flat_values.
- None when the flat_values is a Tensor.
"""
return self._flat_values_spec
@property
def value_type(self):
return RaggedTensor if self._ragged_rank > 0 else ops.Tensor
def __init__(self,
shape=None,
dtype=dtypes.float32,
ragged_rank=None,
row_splits_dtype=dtypes.int64,
flat_values_spec=None):
"""Constructs a type specification for a `tf.RaggedTensor`.
Args:
shape: The shape of the RaggedTensor, or `None` to allow any shape. If a
shape is specified, then all ragged dimensions must have size `None`.
dtype: `tf.DType` of values in the RaggedTensor.
ragged_rank: Python integer, the number of times the RaggedTensor's
flat_values is partitioned. Defaults to `shape.ndims - 1`.
row_splits_dtype: `dtype` for the RaggedTensor's `row_splits` tensor. One
of `tf.int32` or `tf.int64`.
flat_values_spec: TypeSpec for flat_value of the RaggedTensor. It shall be
        provided when the flat_values is a CompositeTensor rather than Tensor.
        If both `dtype` and `flat_values_spec` are provided, `dtype` must
be the same as `flat_values_spec.dtype`. (experimental)
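    Example (illustrative sketch; the repr lists the serialized fields):
    >>> tf.RaggedTensorSpec(shape=[None, None], dtype=tf.int32)
    RaggedTensorSpec(TensorShape([None, None]), tf.int32, 1, tf.int64)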
"""
self._shape = tensor_shape.as_shape(shape)
self._row_splits_dtype = dtypes.as_dtype(row_splits_dtype)
if flat_values_spec is not None:
if dtype is None:
dtype = flat_values_spec.dtype
elif dtype != flat_values_spec.dtype:
raise ValueError("dtype must be the same as flat_values_spec.dtype")
elif dtype is None:
raise ValueError(
"At least one of dtype or flat_values_spec must be provided")
self._dtype = dtypes.as_dtype(dtype)
self._flat_values_spec = flat_values_spec
rank = self._shape.ndims
if ragged_rank is None:
if rank is None:
raise ValueError("Must specify ragged_rank or "
"a shape with a known rank.")
ragged_rank = rank - 1
self._ragged_rank = ragged_rank
if not isinstance(self._ragged_rank, int):
raise TypeError("ragged_rank must be an int")
if rank is not None:
if ragged_rank >= rank:
raise ValueError("ragged_rank must be less than rank.")
def is_compatible_with(self, spec_or_value):
# RaggedTensor with ragged_rank 0 can be compatible with raw flat_values.
if self._ragged_rank == 0:
if self._flat_values_spec is None:
if isinstance(spec_or_value, (ops.Tensor, tensor_spec.TensorSpec)):
return tensor_spec.TensorSpec(
self._shape, self._dtype).is_compatible_with(spec_or_value)
elif not isinstance(spec_or_value, (RaggedTensor, RaggedTensorSpec)):
return self._flat_values_spec.is_compatible_with(spec_or_value)
return super(RaggedTensorSpec, self).is_compatible_with(spec_or_value)
def _serialize(self):
if self._flat_values_spec is None:
return (self._shape, self._dtype, self._ragged_rank,
self._row_splits_dtype)
else:
return (self._shape, self._dtype, self._ragged_rank,
self._row_splits_dtype, self._flat_values_spec)
@property
def _component_specs(self):
if self._ragged_rank == 0:
if self._flat_values_spec is not None:
return [self._flat_values_spec]
else:
return [tensor_spec.TensorSpec(self._shape, self._dtype)]
flat_values_spec = self._flat_values_spec
if flat_values_spec is None:
flat_values_shape = tensor_shape.TensorShape([None]).concatenate(
self._shape[self._ragged_rank + 1:])
flat_values_spec = tensor_spec.TensorSpec(flat_values_shape, self._dtype)
outer_dim = tensor_shape.dimension_at_index(self._shape, 0)
outer_splits_shape = [None if outer_dim is None else outer_dim + 1]
inner_splits_spec = tensor_spec.TensorSpec([None], self._row_splits_dtype)
specs = ([
flat_values_spec,
tensor_spec.TensorSpec(outer_splits_shape, self._row_splits_dtype)
] + [inner_splits_spec for _ in range(self._ragged_rank - 1)])
return specs
def _to_components(self, value):
if is_ragged(value):
return [value.flat_values] + list(value.nested_row_splits)
else:
return [value]
def _from_components(self, tensor_list):
result = tensor_list[0]
if (all(isinstance(t, np.ndarray) for t in tensor_list) and
not tf2.enabled()):
for row_splits in reversed(tensor_list[1:]):
result = ragged_tensor_value.RaggedTensorValue(result, row_splits)
else:
if isinstance(tensor_list[0], np.ndarray):
tensor_list = [ops.convert_to_tensor(t) for t in tensor_list]
result = tensor_list[0]
for row_splits in reversed(tensor_list[1:]):
result = RaggedTensor(
result,
RowPartition.from_row_splits(row_splits, validate=False),
internal=True)
return result
# The RaggedTensorSpec tensor_list encoding uses to/from_variant ops
# to (un)box the component tensors in a way that allows for batching &
# unbatching.
@property
def _flat_tensor_specs(self):
# NOTE(mishragaurav): The default flat shape of a boxed `RaggedTensor` is
# `[]` (scalar), but a `RaggedTensorSpec` can also represent a batch of
# boxed `RaggedTensor` objects with shape `(...)` (and batches of batches,
# etc.), so the flat shape must be unknown.
return [tensor_spec.TensorSpec(None, dtypes.variant)]
def _to_tensor_list(self, value):
# TODO(edloper): Update gen_ragged_conversion_ops that convert to and
# from variant to include all of the row-partitioning tensors.
if self._flat_values_spec is not None:
raise ValueError("Customized value_type is not supported")
ragged_rank = value.ragged_rank if isinstance(value, RaggedTensor) else 0
if ragged_rank != self._ragged_rank:
raise ValueError("Ragged rank of value (%d) does not match ragged "
"rank of type (%d)" % (ragged_rank, self._ragged_rank))
if ragged_rank == 0:
return [
gen_ragged_conversion_ops.ragged_tensor_to_variant(
(), value, batched_input=False)
]
# pylint: disable=protected-access
return [value._to_variant(batched_input=False)]
def _to_batched_tensor_list(self, value):
if self._flat_values_spec is not None:
raise ValueError("Customized value_type is not supported")
ragged_rank = value.ragged_rank if isinstance(value, RaggedTensor) else 0
if ragged_rank != self._ragged_rank:
raise ValueError("Ragged rank of value (%d) does not match ragged "
"rank of type (%d)" % (ragged_rank, self._ragged_rank))
if ragged_rank == 0:
# TODO(b/141789000) Update this to handle ragged_rank=0.
raise ValueError(
"_to_batched_tensor_list doesn't support ragged_rank=0 yet")
# pylint: disable=protected-access
return [value._to_variant(batched_input=True)]
def _from_compatible_tensor_list(self, tensor_list):
if self._flat_values_spec is not None:
raise ValueError("Customized value_type is not supported")
if self._ragged_rank < 0:
raise ValueError("ragged_rank must be non-negative; got %s." %
self._ragged_rank)
result = RaggedTensor._from_variant( # pylint: disable=protected-access
tensor_list[0],
dtype=self._dtype,
row_splits_dtype=self._row_splits_dtype,
output_ragged_rank=self._ragged_rank)
if self._shape.ndims is not None:
if isinstance(result, RaggedTensor):
outer_dim = tensor_shape.dimension_value(self._shape[0])
if outer_dim is not None:
result.row_splits.set_shape([outer_dim + 1])
result._set_shape(self._shape) # pylint: disable=protected-access
else:
result.set_shape(self._shape)
return result
def _batch(self, batch_size):
if self._flat_values_spec is not None:
raise ValueError("Customized value_type is not supported")
return RaggedTensorSpec(
tensor_shape.TensorShape([batch_size]).concatenate(self._shape),
self._dtype, self._ragged_rank + 1, self._row_splits_dtype)
def _unbatch(self):
if self._flat_values_spec is not None:
raise ValueError("Customized value_type is not supported")
# Note: Negative ragged_rank is allowed here because the dataset could be
# subsequently batched again. If ragged_rank > 1, assume row_splits_dtype is
# consistent. Errors are handled in
# RaggedTensorSpec._from_compatible_tensor_list()
return RaggedTensorSpec(self._shape[1:], self._dtype, self._ragged_rank - 1,
self._row_splits_dtype)
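  # Illustrative sketch of spec batching (assumption: driven by tf.data-style
  # batching/unbatching):
  #   RaggedTensorSpec([None], tf.int32, ragged_rank=0)._batch(8)
  #     -> RaggedTensorSpec([8, None], tf.int32, ragged_rank=1)
  #   and chaining `._batch(8)._unbatch()` restores the original spec.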
def _to_legacy_output_types(self):
return self._dtype
def _to_legacy_output_shapes(self):
return self._shape
def _to_legacy_output_classes(self):
return self
@classmethod
def from_value(cls, value):
if (isinstance(value, ragged_tensor_value.RaggedTensorValue) or
isinstance(value.flat_values, ops.Tensor)):
return cls(
shape=value.shape,
dtype=value.values.dtype,
ragged_rank=value.ragged_rank,
row_splits_dtype=value.row_splits.dtype)
else:
return cls(
shape=value.shape,
dtype=value.values.dtype,
ragged_rank=value.ragged_rank,
row_splits_dtype=value.row_splits.dtype,
flat_values_spec=type_spec.type_spec_from_value(value.flat_values))
type_spec.register_type_spec_from_value_converter(
ragged_tensor_value.RaggedTensorValue, RaggedTensorSpec.from_value)
#===============================================================================
# Convert value -> tensor
#===============================================================================
def convert_to_tensor_or_ragged_tensor(value,
dtype=None,
preferred_dtype=None,
name=None):
"""Converts value to a `RaggedTensor` or `Tensor`.
* If `value` is a `RaggedTensor`, then return it as-is.
* If `value` is a `RaggedTensorValue`, return a corresponding constant
`RaggedTensor`.
* Otherwise, use `convert_to_tensor` to convert `value` to a `Tensor`.
Args:
value: A `RaggedTensor`, a `RaggedTensorValue`, or an object whose type has
a registered `Tensor` conversion function.
dtype: Optional element type for the returned tensor. If missing the type
is inferred from the type of `value`.
preferred_dtype: Optional element type for the returned tensor, used when
dtype is None. This argument has no effect if `value` is already a
tensor, or when conversion is not possible.
name: Optional name to use if a new `Tensor` is created.
Returns:
A `Tensor` or `RaggedTensor`.
"""
if isinstance(value, RaggedTensor):
if dtype and not dtype.is_compatible_with(value.dtype):
raise ValueError("Tensor conversion requested dtype %s for "
"RaggedTensor with dtype %s: %r" %
(dtype.name, value.dtype.name, value))
return value
elif isinstance(value, ragged_tensor_value.RaggedTensorValue):
with ops.name_scope(name, "ConvertToTensorOrRaggedTensor", []):
flat_values = ops.convert_to_tensor(
value=value.flat_values,
dtype=dtype,
preferred_dtype=preferred_dtype,
name="flat_values")
return RaggedTensor.from_nested_row_splits(
flat_values, value.nested_row_splits, validate=False)
else:
return ops.convert_to_tensor_v2_with_dispatch(
value=value, dtype=dtype, dtype_hint=preferred_dtype, name=name)
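# Illustrative sketch (assumes eager execution):
#   convert_to_tensor_or_ragged_tensor(tf.ragged.constant([[1], [2, 3]]))
#     -> returned unchanged, since it is already a RaggedTensor.
#   convert_to_tensor_or_ragged_tensor([1, 2, 3])
#     -> tf.Tensor([1 2 3], shape=(3,), dtype=int32)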
def _convert_to_ragged_tensor_values(value):
"""Converts value to supported RaggedTensor value.
* If `value` is an object of supported value type, then return it as-is.
* Otherwise convert it to Tensor or RaggedTensor.
Args:
    value: An object of `Tensor`, `RaggedTensor` or registered RaggedTensor
value types, or an object whose type has a registered `Tensor`
conversion function.
Returns:
    An object of `Tensor`, `RaggedTensor` or registered RaggedTensor
    value types.
"""
if _is_supported_ragged_values_type(value):
return value
else:
return convert_to_tensor_or_ragged_tensor(value, name="values")
#===============================================================================
# Register RaggedTensor for use with session.run.
#===============================================================================
def _ragged_tensor_value_from_components(components):
components = list(components)
value = components.pop()
while components:
value = ragged_tensor_value.RaggedTensorValue(value, components.pop())
return value
def _ragged_tensor_session_fetch(rt):
components = rt.nested_row_splits + (rt.flat_values,)
return (components, _ragged_tensor_value_from_components)
def _ragged_tensor_session_feed(feed_key, feed_val):
key_components = feed_key.nested_row_splits + (feed_key.flat_values,)
val_components = feed_val.nested_row_splits + (feed_val.flat_values,)
return zip(key_components, val_components)
def _ragged_tensor_session_feed_for_partial_run(feed_key):
return feed_key.nested_row_splits + (feed_key.flat_values,)
session.register_session_run_conversion_functions(
RaggedTensor, _ragged_tensor_session_fetch, _ragged_tensor_session_feed,
_ragged_tensor_session_feed_for_partial_run)
#===============================================================================
# RaggedTensorType
#===============================================================================
class RaggedTensorType(object):
"""Encoding of a static type for a `RaggedTensor`.
Use this type to express/declare that an output must have the type of
`RaggedTensor`.
"""
def __init__(self, dtype, ragged_rank, row_splits_dtype=dtypes.int64):
"""Initializes a RaggedTensorType object.
Args:
dtype: data type of the `RaggedTensor`'s inner values.
ragged_rank: ragged_rank of the declared `RaggedTensor`.
row_splits_dtype: data type for the `RaggedTensor`'s row splits.
One of: `tf.int32` or `tf.int64`.
"""
row_splits_dtype = dtypes.as_dtype(row_splits_dtype)
self._dtype = dtype
self._ragged_rank = ragged_rank
self._row_splits_dtype = row_splits_dtype
dtype = property(lambda self: self._dtype)
ragged_rank = property(lambda self: self._ragged_rank)
row_splits_dtype = property(lambda self: self._row_splits_dtype)
def __repr__(self):
return "RaggedTensorType(%r, %r, %r)" % (
self.dtype, self.ragged_rank, self.row_splits_dtype)
#===============================================================================
# Helper Functions
#===============================================================================
def _assert_sparse_indices_are_ragged_right(indices):
"""Checks that the given SparseTensor.indices tensor is ragged-right.
Example: `indices = [[0, 0], [0, 1], [2, 0], [3, 1]]` is not ragged right
because the entry `[3, 1]` skips a cell.
Args:
indices: The SparseTensor indices to check.
Returns:
A list of control dependency op tensors.
"""
index_prefix = indices[:, :-1]
index_suffix = indices[:, -1]
# Check whether each index is starting a new row in the innermost dimension
# (prefix[i] != prefix[i-1]) or continuing a row (prefix[i] == prefix[i-1]).
# (Note: this skips the first index; we will check that separately below.)
index_prefix_changed = math_ops.reduce_any(
math_ops.not_equal(index_prefix[1:], index_prefix[:-1]), axis=1)
# Check two cases:
# * For indices that start a new row: index_suffix[i] must be zero.
# * For indices that continue a row: index_suffix[i] must be equal to
# index_suffix[i-1]+1.
index_ok = array_ops.where(
index_prefix_changed, math_ops.equal(index_suffix[1:], 0),
math_ops.equal(index_suffix[1:], index_suffix[:-1] + 1))
# Also check that the very first index didn't skip any cells. The first
# index starts a new row (by definition), so its suffix should be zero.
sparse_indices_are_ragged_right = math_ops.logical_and(
math_ops.reduce_all(math_ops.equal(index_suffix[:1], 0)),
math_ops.reduce_all(index_ok))
message = [
"SparseTensor is not right-ragged", "SparseTensor.indices =", indices
]
return [control_flow_ops.Assert(sparse_indices_are_ragged_right, message)]
@ops.RegisterGradient("RaggedTensorToSparse")
def _ragged_tensor_to_sparse_gradient(op, unused_sparse_indices_grad,
sparse_values_grad,
unused_sparse_shape_grad):
"""Gradient for RaggedTensorToSparse."""
op_inputs_nested_row_splits = op.inputs[:-1]
op_inputs_flat_values = op.inputs[-1]
# No gradient for the RaggedTensor's nested_row_splits.
nested_row_splits_gradient = [None] * len(op_inputs_nested_row_splits)
# Gradient for the RaggedTensor's flat_values is formed by reshaping
# the gradient for the SparseTensor's values.
flat_values_shape = array_ops.shape(op_inputs_flat_values)
flat_values_gradient = array_ops.reshape(sparse_values_grad,
flat_values_shape)
return nested_row_splits_gradient + [flat_values_gradient]
def _assert_monotonic_increasing(tensor, message=None):
return check_ops.assert_non_negative(
tensor[1:] - tensor[:-1], message=message)
def _assert_zero(tensor, message=None):
return check_ops.assert_equal(
tensor, constant_op.constant(0, dtype=tensor.dtype), message=message)
def _nrows(tensor, out_type=dtypes.int32):
if isinstance(tensor, RaggedTensor):
return tensor.nrows(out_type=out_type)
else:
return array_ops.shape(tensor, out_type=out_type)[0]
def merge_dims(value, outer_axis, inner_axis):
"""Merges value[outer_axis...inner_axis] into a single dimension.
See `RaggedTensor.merge_dims()` for more details. This helper differs from
`RaggedTensor.merge_dims()` in that `value` may be a dense or ragged tensor.
Args:
value: A `RaggedTensor` or `Tensor`
outer_axis: `int`
inner_axis: `int`
Returns:
A flattened `RaggedTensor` or `Tensor`.
"""
if outer_axis == inner_axis:
return value
# Flatten outer dimensions of a RaggedTensor by just taking its values.
while outer_axis == 0 and isinstance(value, RaggedTensor):
value = value.values
inner_axis -= 1
if inner_axis == 0:
return value
# Flatten non-Ragged tensors using tf.reshape().
if not isinstance(value, RaggedTensor):
if value.shape.is_fully_defined():
old_shape = value.shape.as_list()
new_shape = old_shape[:outer_axis] + [-1] + old_shape[inner_axis + 1:]
else:
old_shape = array_ops.shape(value)
new_shape = array_ops.concat(
[old_shape[:outer_axis], [-1], old_shape[inner_axis + 1:]], axis=0)
return array_ops.reshape(value, new_shape)
# Handle outer_axis>1 via recursion.
if outer_axis > 1:
return value.with_values(
merge_dims(value.values, outer_axis - 1, inner_axis - 1))
# At this point, we know outer_axis == 1, and value is a RaggedTensor.
# So we need to flatten the values and build a corresponding splits tensor.
new_values = value.values
new_splits = value.row_splits
for axis in range(outer_axis, inner_axis):
if isinstance(new_values, RaggedTensor):
# Flatten a single ragged dimension.
new_splits = array_ops.gather(new_values.row_splits, new_splits)
new_values = new_values.values
else:
# Flatten all remaining dense dimensions.
shape_split = inner_axis - axis + 1
if new_values.shape.is_fully_defined():
old_shape = new_values.shape.as_list()
new_shape = [-1] + old_shape[shape_split:]
flat_size = _prod(old_shape[1:shape_split])
else:
old_shape = array_ops.shape(new_values)
new_shape = array_ops.concat([[-1], old_shape[shape_split:]], axis=0)
flat_size = math_ops.cast(
math_ops.reduce_prod(old_shape[1:shape_split]), new_splits.dtype)
new_values = array_ops.reshape(new_values, new_shape)
new_splits = new_splits * flat_size
break
return RaggedTensor.from_row_splits(new_values, new_splits)
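# Illustrative sketch of `merge_dims` (assumes eager execution):
#   rt = RaggedTensor.from_row_splits([[1, 2], [3, 4], [5, 6]], [0, 2, 3])
#   merge_dims(rt, 0, 1)  # -> the dense [3, 2] values tensor, since the
#                         #    outer ragged dimension is flattened away.
#   For a dense tensor `t` of shape [2, 3, 4], merge_dims(t, 1, 2) simply
#   reshapes it to shape [2, 12].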
def _prod(lst):
"""Returns the product of the numbers in a list."""
return functools.reduce(operator.mul, lst, 1)
def _get_row_partition_type_tensor_pairs_tail(partition):
"""Gets a row partition type tensor pair for the tail.
If value_rowid is defined, then it is used. Otherwise, row_splits
are used.
Args:
partition: a RowPartition.
Returns:
A list of (row_partition_type, row_partition_tensor) pairs.
"""
if partition.has_precomputed_value_rowids():
return ("VALUE_ROWIDS", partition.value_rowids())
else:
return ("ROW_SPLITS", partition.row_splits())
def _get_row_partition_type_tensor_pairs(rt_input):
"""Gets a list of the row partitions for rt_input.
If value_rowids are defined, then they are used. Otherwise, row_splits
  are used. If the outermost level has value_rowids defined, then nrows is
also added.
Args:
rt_input: a ragged tensor.
Returns:
A list of (row_partition_type, row_partition_tensor) pairs.
"""
partitions = rt_input._nested_row_partitions # pylint: disable=protected-access
tail = [_get_row_partition_type_tensor_pairs_tail(x) for x in partitions[1:]]
if partitions[0]._value_rowids is not None: # pylint: disable=protected-access
return [("FIRST_DIM_SIZE", partitions[0].nrows()),
("VALUE_ROWIDS", partitions[0].value_rowids())] + tail
else:
return [("ROW_SPLITS", partitions[0].row_splits())] + tail
def _shape_as_tensor(shape, dtype):
"""Takes shape and coerces it to a shape as a tensor.
If the object is already a tensor, simply passes it on (result is guaranteed
to be int64 or int32, but not necessarily dtype).
If not, creates a tensor of type dtype.
  The result is a scalar equal to -1 if the shape has unknown rank;
  otherwise, it is a vector where unknown dimensions are represented with a
  value of -1.
In C++, see TensorShapeFromTensor for parsing shapes in kernels, and
InferenceContext::MakeShapeFromShapeTensorTreatScalarAsUnknownShape, for
use in the shape inference function.
Args:
shape: input to coerce from TensorShape, Tensor, None, List[Optional[Int]],
Tuple[Optional[Int]].
dtype: tf.int64 or tf.int32
Returns:
a scalar or vector tensor of dtype tf.int32 or tf.int64.
"""
if dtype != dtypes.int64 and dtype != dtypes.int32:
raise ValueError("Expected int64 or int32 for dtype: got {}".format(dtype))
if isinstance(shape, ops.Tensor):
if shape.dtype != dtypes.int64 and shape.dtype != dtypes.int32:
return math_ops.cast(shape, dtype)
return shape
shape = tensor_shape.as_shape(shape)
if not shape:
# Imply rank is unknown using a -1 scalar.
return constant_op.constant(-1, dtype=dtype)
shape = [(-1 if x is None else x) for x in shape.as_list()]
# At this point, shape is List[Int].
return constant_op.constant(shape, dtype=dtype)
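# Illustrative sketch of `_shape_as_tensor` (assumes eager execution):
#   _shape_as_tensor(None, dtypes.int64)       -> scalar -1 (unknown rank)
#   _shape_as_tensor([4, None], dtypes.int64)  -> vector [4, -1]
#   An int32/int64 `Tensor` input is passed through without casting.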
def _nvals_uniform_row_length(values, uniform_row_length):
"""Get the number of values for uniform row length constructor."""
const_nvals = tensor_shape.dimension_at_index(values.shape, 0).value
if const_nvals is not None:
nvals = constant_op.constant(const_nvals, uniform_row_length.dtype)
elif isinstance(values, RaggedTensor):
nvals = values.nrows(out_type=uniform_row_length.dtype)
else:
nvals = array_ops.shape(values, out_type=uniform_row_length.dtype)[0]
return nvals
def _get_optional_partition_dtype(values):
"""Returns the partition dtype, or None if None exists."""
if isinstance(values, RaggedTensor):
# pylint: disable=protected-access
return values._row_partition.dtype
return None
_SUPPORTED_RAGGED_VALUE_TYPES = (ops.Tensor, RaggedTensor)
# TODO(edloper): Consider whether we should change the registry to be on
# TypeSpecs rather than ValueTypes.
def _add_supported_value_type(cls):
"""Register the `cls` as supported value type of RaggedTenosr.
The cls must be a subclass of CompositeTensor, and must support:
- Properties:
- x.shape
- x.dtype
- Methods:
- x.__getitem__(idx) (method: returns a supported value type)
- Ops:
- tf.shape(x) -- tf.shape(x)[0] must be a tf.Tensor.
- tf.tile(x)
- assert_rank_at_least(x)
- tf.ones_like(x)
- tf.gather(params=x, indices=Tensor)
- tf.add(x, y)
- tf.boolean_mask(x, ...)
- @TODO(edloper): Complete this list
Note: the following RaggedTensor, RaggedTensorSpec methods & ops are not
currently supported unless `rt.values` is a RaggedTensor or a tf.Tensor:
- rt.to_tensor()
    - rt.to_sparse()
- rt._to_variant()
- rt._from_variant()
- tf.ragged.cross([rt])
- tf.gather(params=x, indices=rt) # rt used for indices
- RaggedTensorSpec methods:
- _batch
- _unbatch
- _to_tensor_list
- _to_batched_tensor_list
- _from_compatible_tensor_list
Args:
cls: The type to be added to supported value types.
"""
if not issubclass(cls, composite_tensor.CompositeTensor):
raise ValueError("cls(%s) must be a subclass of CompositeTensor" % cls)
if not hasattr(cls, "shape"):
raise ValueError("cls must support the `shape` property")
if not hasattr(cls, "dtype"):
raise ValueError("cls must support the `dtype` property")
global _SUPPORTED_RAGGED_VALUE_TYPES
_SUPPORTED_RAGGED_VALUE_TYPES += (cls,)
def _is_supported_ragged_values_type(value):
return isinstance(value, _SUPPORTED_RAGGED_VALUE_TYPES)
def _assert_is_supported_ragged_values_type(value):
if not _is_supported_ragged_values_type(value):
ok_types = ", ".join(cls.__name__ for cls in
_SUPPORTED_RAGGED_VALUE_TYPES)
raise TypeError("type(values) must be one of: %r, got %r" %
(ok_types, value))
| apache-2.0 |
niphlod/pydal | pydal/connection.py | 1 | 5577 | # -*- coding: utf-8 -*-
import os
from ._compat import itervalues
from ._globals import GLOBAL_LOCKER, THREAD_LOCAL
from ._load import OrderedDict
from .helpers._internals import Cursor
class ConnectionPool(object):
POOLS = {}
check_active_connection = True
def __init__(self):
_iid_ = str(id(self))
self._connection_thname_ = '_pydal_connection_' + _iid_ + '_'
self._cursors_thname_ = '_pydal_cursors_' + _iid_ + '_'
@property
def _pid_(self):
return str(os.getpid())
@property
def _connection_uname_(self):
return self._connection_thname_ + self._pid_
@property
def _cursors_uname_(self):
return self._cursors_thname_ + self._pid_
@staticmethod
def set_folder(folder):
THREAD_LOCAL._pydal_folder_ = folder
@property
def connection(self):
return getattr(THREAD_LOCAL, self._connection_uname_)
@connection.setter
def connection(self, val):
setattr(THREAD_LOCAL, self._connection_uname_, val)
self._clean_cursors()
if val is not None:
self._build_cursor()
def _clean_cursors(self):
setattr(THREAD_LOCAL, self._cursors_uname_, OrderedDict())
@property
def cursors(self):
return getattr(THREAD_LOCAL, self._cursors_uname_)
def _build_cursor(self):
rv = Cursor(self.connection)
self.cursors[id(rv.cursor)] = rv
return rv
def _get_or_build_free_cursor(self):
for handler in itervalues(self.cursors):
if handler.available:
return handler
return self._build_cursor()
@property
def cursor(self):
return self._get_or_build_free_cursor().cursor
def lock_cursor(self, cursor):
self.cursors[id(cursor)].lock()
def release_cursor(self, cursor):
self.cursors[id(cursor)].release()
def close_cursor(self, cursor):
cursor.close()
del self.cursors[id(cursor)]
def close(self, action='commit', really=True):
#: if we have an action (commit, rollback), try to execute it
succeeded = True
if action:
try:
if callable(action):
action(self)
else:
getattr(self, action)()
except:
#: connection had some problems, we want to drop it
succeeded = False
        #: if we have pools, we should recycle the connection (but only when
        #  `action`, if any, succeeded and the pool is not already full)
if self.pool_size and succeeded:
GLOBAL_LOCKER.acquire()
pool = ConnectionPool.POOLS[self.uri]
if len(pool) < self.pool_size:
pool.append(self.connection)
really = False
GLOBAL_LOCKER.release()
#: closing the connection when we `really` want to, in particular:
# - when we had an exception running `action`
# - when we don't have pools
# - when we have pools but they're full
if really:
try:
self.close_connection()
except:
pass
#: always unset `connection` attribute
self.connection = None
@staticmethod
def close_all_instances(action):
""" to close cleanly databases in a multithreaded environment """
dbs = getattr(THREAD_LOCAL, '_pydal_db_instances_', {}).items()
for db_uid, db_group in dbs:
for db in db_group:
if hasattr(db, '_adapter'):
db._adapter.close(action)
getattr(THREAD_LOCAL, '_pydal_db_instances_', {}).clear()
getattr(THREAD_LOCAL, '_pydal_db_instances_zombie_', {}).clear()
if callable(action):
action(None)
return
def _find_work_folder(self):
self.folder = getattr(THREAD_LOCAL, '_pydal_folder_', '')
def after_connection_hook(self):
"""Hook for the after_connection parameter"""
if callable(self._after_connection):
self._after_connection(self)
self.after_connection()
def after_connection(self):
        # this is meant to be overridden by adapters
pass
def reconnect(self):
"""
        Defines `self.connection` and `self.cursor`.
        If `self.pool_size > 0` it will try to pull the connection from the
        pool; if the connection is not active (closed by the db server) it
        will keep looping. If `self.pool_size` is unset or no active
        connection is available in the pool, it makes a new one.
"""
if getattr(THREAD_LOCAL, self._connection_uname_, None) is not None:
return
if not self.pool_size:
self.connection = self.connector()
self.after_connection_hook()
else:
uri = self.uri
POOLS = ConnectionPool.POOLS
while True:
GLOBAL_LOCKER.acquire()
if uri not in POOLS:
POOLS[uri] = []
if POOLS[uri]:
self.connection = POOLS[uri].pop()
GLOBAL_LOCKER.release()
try:
if self.check_active_connection:
self.test_connection()
break
except:
pass
else:
GLOBAL_LOCKER.release()
self.connection = self.connector()
self.after_connection_hook()
break
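    # Illustrative usage sketch (assumes an adapter subclass that provides
    # `connector`, `uri`, `pool_size` and `test_connection`):
    #   adapter.reconnect()       # pull from the pool or open a connection
    #   cursor = adapter.cursor   # grab a free (or newly built) cursor
    #   adapter.close('commit')   # commit, then recycle into the pool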
| bsd-3-clause |
pmav99/praktoras | checks.d/windows_service.py | 9 | 3049 | """ Collect status information for Windows services
"""
# project
from checks import AgentCheck
from checks.wmi_check import WinWMICheck
from utils.containers import hash_mutable
from utils.timeout import TimeoutException
class WindowsService(WinWMICheck):
STATE_TO_VALUE = {
'Stopped': AgentCheck.CRITICAL,
'Start Pending': AgentCheck.WARNING,
'Stop Pending': AgentCheck.WARNING,
'Running': AgentCheck.OK,
'Continue Pending': AgentCheck.WARNING,
'Pause Pending': AgentCheck.WARNING,
'Paused': AgentCheck.WARNING,
'Unknown': AgentCheck.UNKNOWN
}
NAMESPACE = "root\\CIMV2"
CLASS = "Win32_Service"
def __init__(self, name, init_config, agentConfig, instances):
WinWMICheck.__init__(self, name, init_config, agentConfig, instances)
def check(self, instance):
# Connect to the WMI provider
host = instance.get('host', "localhost")
user = instance.get('username', "")
password = instance.get('password', "")
services = instance.get('services', [])
instance_hash = hash_mutable(instance)
instance_key = self._get_instance_key(host, self.NAMESPACE, self.CLASS, instance_hash)
tags = [] if (host == "localhost" or host == ".") else [u'host:{0}'.format(host)]
if len(services) == 0:
raise Exception('No services defined in windows_service.yaml')
properties = ["Name", "State"]
filters = map(lambda x: {"Name": tuple(('=', x))}, services)
wmi_sampler = self._get_wmi_sampler(
instance_key,
self.CLASS, properties,
filters=filters,
host=host, namespace=self.NAMESPACE,
username=user, password=password
)
try:
# Sample, extract & submit metrics
wmi_sampler.sample()
except TimeoutException:
self.log.warning(
u"[WinService] WMI query timed out."
u" class={wmi_class} - properties={wmi_properties} -"
u" filters={filters} - tags={tags}".format(
wmi_class=self.CLASS, wmi_properties=properties,
filters=filters, tags=tags
)
)
else:
self._process_services(wmi_sampler, services, tags)
def _process_services(self, wmi_sampler, services, tags):
expected_services = set(services)
for wmi_obj in wmi_sampler:
service = wmi_obj['Name']
if service not in services:
continue
status = self.STATE_TO_VALUE.get(wmi_obj["state"], AgentCheck.UNKNOWN)
self.service_check("windows_service.state", status,
tags=tags + ['service:{0}'.format(service)])
expected_services.remove(service)
for service in expected_services:
self.service_check("windows_service.state", AgentCheck.CRITICAL,
tags=tags + ['service:{0}'.format(service)])
| bsd-3-clause |
av8ramit/tensorflow | tensorflow/contrib/copy_graph/python/util/copy_test.py | 112 | 3739 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for contrib.copy_graph.python.util.copy."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.copy_graph.python.util import copy_elements
from tensorflow.contrib.framework.python.framework import tensor_util
from tensorflow.python.client import session as session_lib
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
graph1 = ops.Graph()
graph2 = ops.Graph()
class CopyVariablesTest(test.TestCase):
def testVariableCopy(self):
with graph1.as_default():
#Define a Variable in graph1
some_var = variables.Variable(2)
#Initialize session
sess1 = session_lib.Session()
#Initialize the Variable
variables.global_variables_initializer().run(session=sess1)
    #Make a copy of some_var in the default scope in graph2
copy1 = copy_elements.copy_variable_to_graph(some_var, graph2)
#Make another copy with different scope
copy2 = copy_elements.copy_variable_to_graph(some_var, graph2, "test_scope")
#Initialize both the copies
with graph2.as_default():
#Initialize Session
sess2 = session_lib.Session()
#Initialize the Variables
variables.global_variables_initializer().run(session=sess2)
#Ensure values in all three variables are the same
v1 = some_var.eval(session=sess1)
v2 = copy1.eval(session=sess2)
v3 = copy2.eval(session=sess2)
assert isinstance(copy1, variables.Variable)
assert isinstance(copy2, variables.Variable)
assert v1 == v2 == v3 == 2
class CopyOpsTest(test.TestCase):
def testOpsCopy(self):
with graph1.as_default():
#Initialize a basic expression y = ax + b
x = array_ops.placeholder("float")
a = variables.Variable(3.0)
b = constant_op.constant(4.0)
ax = math_ops.multiply(x, a)
y = math_ops.add(ax, b)
#Initialize session
sess1 = session_lib.Session()
#Initialize the Variable
variables.global_variables_initializer().run(session=sess1)
#First, initialize a as a Variable in graph2
a1 = copy_elements.copy_variable_to_graph(a, graph2)
#Initialize a1 in graph2
with graph2.as_default():
#Initialize session
sess2 = session_lib.Session()
#Initialize the Variable
variables.global_variables_initializer().run(session=sess2)
#Initialize a copy of y in graph2
y1 = copy_elements.copy_op_to_graph(y, graph2, [a1])
#Now that y has been copied, x must be copied too.
#Get that instance
x1 = copy_elements.get_copied_op(x, graph2)
#Compare values of y & y1 for a sample input
#and check if they match
v1 = y.eval({x: 5}, session=sess1)
v2 = y1.eval({x1: 5}, session=sess2)
assert v1 == v2
if __name__ == "__main__":
test.main()
| apache-2.0 |
jparyani/capnproto | doc/_plugins/capnp_lexer.py | 35 | 2103 | #! /usr/bin/env python
from pygments.lexer import RegexLexer
from pygments.token import *
class CapnpLexer(RegexLexer):
name = "Cap'n Proto lexer"
aliases = ['capnp']
filenames = ['*.capnp']
tokens = {
'root': [
(r'#.*?$', Comment.Single),
(r'@[0-9a-zA-Z]*', Name.Decorator),
(r'=', Literal, 'expression'),
(r':', Name.Class, 'type'),
(r'\$', Name.Attribute, 'annotation'),
(r'(struct|enum|interface|union|import|using|const|annotation|extends|in|of|on|as|with|from|fixed)\b',
Token.Keyword),
(r'[a-zA-Z0-9_.]+', Token.Name),
(r'[^#@=:$a-zA-Z0-9_]+', Text),
],
'type': [
(r'[^][=;,(){}$]+', Name.Class),
(r'[[(]', Name.Class, 'parentype'),
(r'', Name.Class, '#pop')
],
'parentype': [
(r'[^][;()]+', Name.Class),
(r'[[(]', Name.Class, '#push'),
(r'[])]', Name.Class, '#pop'),
(r'', Name.Class, '#pop')
],
'expression': [
(r'[^][;,(){}$]+', Literal),
(r'[[(]', Literal, 'parenexp'),
(r'', Literal, '#pop')
],
'parenexp': [
(r'[^][;()]+', Literal),
(r'[[(]', Literal, '#push'),
(r'[])]', Literal, '#pop'),
(r'', Literal, '#pop')
],
'annotation': [
(r'[^][;,(){}=:]+', Name.Attribute),
(r'[[(]', Name.Attribute, 'annexp'),
(r'', Name.Attribute, '#pop')
],
'annexp': [
(r'[^][;()]+', Name.Attribute),
(r'[[(]', Name.Attribute, '#push'),
(r'[])]', Name.Attribute, '#pop'),
(r'', Name.Attribute, '#pop')
],
}
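# Illustrative usage sketch (assumes Pygments is installed and the entry point
# below has been registered, e.g. via `python capnp_lexer.py develop`):
#   pygmentize -l capnp addressbook.capnp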
if __name__ == "__main__":
from setuptools import setup, find_packages
setup(name = "CapnpPygmentsLexer",
version = "0.1",
packages = find_packages(),
py_modules = [ 'capnp_lexer' ],
entry_points = {'pygments.lexers': 'capnp = capnp_lexer:CapnpLexer'})
| mit |
inveniosoftware/iugw2017 | 3-datamodels/custom-data-module/custom_data_module/providers.py | 1 | 1974 | # -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2017 CERN.
#
# Invenio is free software; you can redistribute it
# and/or modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston,
# MA 02111-1307, USA.
#
# In applying this license, CERN does not
# waive the privileges and immunities granted to it by virtue of its status
# as an Intergovernmental Organization or submit itself to any jurisdiction.
"""PID providers."""
from __future__ import absolute_import, print_function
from invenio_pidstore.providers.base import BaseProvider
from invenio_pidstore.models import PIDStatus
class CustomRecordProvider(BaseProvider):
"""Record identifier provider."""
pid_type = 'custid'
"""Type of persistent identifier."""
pid_provider = None
"""Provider name.
The provider name is not recorded in the PID since the provider does not
provide any additional features besides creation of record ids.
"""
default_status = PIDStatus.REGISTERED
"""Record UUIDs are registered immediately."""
@classmethod
def create(cls, object_type=None, object_uuid=None, **kwargs):
"""Create a new record identifier from the depoist PID value."""
assert 'pid_value' in kwargs
kwargs.setdefault('status', cls.default_status)
return super(CustomRecordProvider, cls).create(
object_type=object_type, object_uuid=object_uuid, **kwargs)
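# Hedged usage sketch (assumes a running Invenio application context; the
# UUID and PID value below are illustrative):
#
#     provider = CustomRecordProvider.create(
#         object_type='rec', object_uuid=record_uuid, pid_value='custom-1')
#     provider.pid.pid_type   # 'custid'
#     provider.pid.status     # PIDStatus.REGISTERED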
| gpl-3.0 |
PowerDNS/exabgp | dev/unittest2/connection.py | 6 | 1401 | #!/usr/bin/env python
# encoding: utf-8
"""
connection.py
Created by Thomas Mangin on 2013-07-13.
Copyright (c) 2009-2013 Exa Networks. All rights reserved.
"""
import os
import sys
import unittest
from exabgp.util.od import od
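# Reading note: the hex strings below are raw BGP wire messages -- a 16-byte
# all-ones marker, a two-byte length and a one-byte type (1 = OPEN,
# 4 = KEEPALIVE) followed by the body, per RFC 4271.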
def test ():
OPEN = ''.join([chr(int(_,16)) for _ in "FF FF FF FF FF FF FF FF FF FF FF FF FF FF FF FF 00 1D 01 04 78 14 00 5A 52 DB 00 45 00".split()])
KEEP = ''.join([chr(int(_,16)) for _ in "FF FF FF FF FF FF FF FF FF FF FF FF FF FF FF FF 00 00 04".split()])
from exabgp.reactor.network.outgoing import Outgoing
connection = Outgoing(1,'82.219.0.5','82.219.212.34')
writer=connection._writer(OPEN)
while writer() == False:
pass
writer=connection._writer(KEEP)
while writer() == False:
pass
reader=connection.reader()
for size,kind,header,body in reader:
if size: print od(header+body)
else: sys.stdout.write('-')
reader=connection.reader()
for size,kind,header,body in reader:
if size: print od(header+body)
else: sys.stdout.write('+')
connection.close()
class TestData (unittest.TestCase):
def test_1 (self):
if not os.environ.get('profile',False):
result = test()
if result: self.fail(result)
	def test_2 (self):
		if not not os.environ.get('profile',False):
			import cProfile  # stdlib profiler; imported lazily so non-profiling runs skip it
			cProfile.run('test()')
if __name__ == '__main__':
unittest.main()
# import cProfile
# print 'profiling'
# cProfile.run('unittest.main()','profile.info')
| bsd-3-clause |
infoxchange/django-localflavor | localflavor/nz/forms.py | 4 | 4267 | # -*- coding: utf-8 -*-
"""
New Zealand specific form helpers
"""
from __future__ import unicode_literals
import re
from django.core.validators import EMPTY_VALUES
from django.forms import ValidationError
from django.forms.fields import Field, RegexField, Select
from django.utils.encoding import smart_str
from django.utils.translation import ugettext_lazy as _
from .nz_councils import NORTH_ISLAND_COUNCIL_CHOICES, SOUTH_ISLAND_COUNCIL_CHOICES
from .nz_provinces import PROVINCE_CHOICES
from .nz_regions import REGION_CHOICES
PHONE_08_RE = re.compile(r'^((0800\d{6})|(0800\w{6,10}))$')
PHONE_IN_RE = re.compile(r'^((0064|064|\+64|\+\+64)((\d{8})|(2\d{7,9})))$')
PHONE_NZ_RE = re.compile(r'^((0\d{8})|(02\d{7,9}))$')
BANK_ACCOUNT_NUMBER_RE = re.compile(r'^(\d{2})(\d{4})(\d{7})(\d{2,3})$')
class NZRegionSelect(Select):
"""
A select widget with list of New Zealand regions as its choices.
"""
def __init__(self, attrs=None):
super(NZRegionSelect, self).__init__(attrs, choices=REGION_CHOICES)
class NZProvinceSelect(Select):
"""
A select widget with list of New Zealand provinces as its choices.
"""
def __init__(self, attrs=None):
super(NZProvinceSelect, self).__init__(attrs, choices=PROVINCE_CHOICES)
class NZNorthIslandCouncilSelect(Select):
"""
A select widget with list of New Zealand North Island city and district councils as its choices.
"""
def __init__(self, attrs=None):
super(NZNorthIslandCouncilSelect, self).__init__(attrs, choices=NORTH_ISLAND_COUNCIL_CHOICES)
class NZSouthIslandCouncilSelect(Select):
"""
A select widget with list of New Zealand South Island city and district councils as its choices.
"""
def __init__(self, attrs=None):
super(NZSouthIslandCouncilSelect, self).__init__(attrs, choices=SOUTH_ISLAND_COUNCIL_CHOICES)
class NZPostCodeField(RegexField):
"""
A form field that validates its input as New Zealand postal code.
"""
default_error_messages = {
'invalid': _('Invalid post code.'),
}
def __init__(self, *args, **kwargs):
super(NZPostCodeField, self).__init__(r'^\d{4}$',
*args, **kwargs)
class NZPhoneNumberField(Field):
"""
A form field that validates its input as New Zealand phone number.
"""
default_error_messages = {'invalid': _('Invalid phone number.')}
def clean(self, value):
super(NZPhoneNumberField, self).clean(value)
if value in EMPTY_VALUES:
return ''
value = re.sub('(\(|\)|\s+|_|-)', '', smart_str(value))
value = re.sub('^(\+\+)', '00', smart_str(value))
value = re.sub('^(\+)', '00', smart_str(value))
phone_08_match = PHONE_08_RE.search(value)
if phone_08_match:
return '%s' % phone_08_match.group(0)
phone_nz_match = PHONE_NZ_RE.search(value)
if phone_nz_match:
return '%s' % phone_nz_match.group(0)
phone_in_match = PHONE_IN_RE.search(value)
if phone_in_match:
return '%s' % phone_in_match.group(0)
raise ValidationError(self.error_messages['invalid'])
class NZBankAccountNumberField(Field):
"""
A form field that validates its input as New Zealand bank account number.
Formats:
XX-XXXX-XXXXXXX-XX
XX-XXXX-XXXXXXX-XXX
Where:
* the first two digits is the bank ID
* the next four digits are the branch number where the account was opened
* the next 7 digits are the account numbers
* the last two or three digits define type of the account.
"""
default_error_messages = {
'invalid': _('Invalid bank account number.'),
}
def clean(self, value):
super(NZBankAccountNumberField, self).clean(value)
if value in EMPTY_VALUES:
return ''
value = re.sub('(\s+|-)', '', smart_str(value))
match = BANK_ACCOUNT_NUMBER_RE.search(value)
if match:
# normalize the last part
last = '0%s' % match.group(4) if len(match.group(4)) == 2 else match.group(4)
return '%s-%s-%s-%s' % (match.group(1),
match.group(2), match.group(3), last)
raise ValidationError(self.error_messages['invalid'])
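# Illustrative behaviour (running this needs configured Django settings; the
# account number is made up):
#
#     NZBankAccountNumberField().clean('01 0123 0123456 12')
#     # -> '01-0123-0123456-012' (a two-digit suffix is zero-padded to three)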
| bsd-3-clause |
huanpc/IoT-1 | gui/controller/.venv/lib/python3.5/site-packages/rest_framework/test.py | 7 | 8635 | # -- coding: utf-8 --
# Note that we import as `DjangoRequestFactory` and `DjangoClient` in order
# to make it harder for the user to import the wrong thing without realizing.
from __future__ import unicode_literals
from django.conf import settings
from django.test import testcases
from django.test.client import Client as DjangoClient
from django.test.client import RequestFactory as DjangoRequestFactory
from django.test.client import ClientHandler
from django.utils import six
from django.utils.encoding import force_bytes
from django.utils.http import urlencode
from rest_framework.settings import api_settings
def force_authenticate(request, user=None, token=None):
request._force_auth_user = user
request._force_auth_token = token
class APIRequestFactory(DjangoRequestFactory):
renderer_classes_list = api_settings.TEST_REQUEST_RENDERER_CLASSES
default_format = api_settings.TEST_REQUEST_DEFAULT_FORMAT
def __init__(self, enforce_csrf_checks=False, **defaults):
self.enforce_csrf_checks = enforce_csrf_checks
self.renderer_classes = {}
for cls in self.renderer_classes_list:
self.renderer_classes[cls.format] = cls
super(APIRequestFactory, self).__init__(**defaults)
def _encode_data(self, data, format=None, content_type=None):
"""
Encode the data returning a two tuple of (bytes, content_type)
"""
if data is None:
return ('', content_type)
assert format is None or content_type is None, (
'You may not set both `format` and `content_type`.'
)
if content_type:
# Content type specified explicitly, treat data as a raw bytestring
ret = force_bytes(data, settings.DEFAULT_CHARSET)
else:
format = format or self.default_format
assert format in self.renderer_classes, (
"Invalid format '{0}'. Available formats are {1}. "
"Set TEST_REQUEST_RENDERER_CLASSES to enable "
"extra request formats.".format(
format,
', '.join(["'" + fmt + "'" for fmt in self.renderer_classes.keys()])
)
)
# Use format and render the data into a bytestring
renderer = self.renderer_classes[format]()
ret = renderer.render(data)
# Determine the content-type header from the renderer
content_type = "{0}; charset={1}".format(
renderer.media_type, renderer.charset
)
# Coerce text to bytes if required.
if isinstance(ret, six.text_type):
ret = bytes(ret.encode(renderer.charset))
return ret, content_type
def get(self, path, data=None, **extra):
r = {
'QUERY_STRING': urlencode(data or {}, doseq=True),
}
# Fix to support old behavior where you have the arguments in the url
# See #1461
if not data and '?' in path:
r['QUERY_STRING'] = path.split('?')[1]
r.update(extra)
return self.generic('GET', path, **r)
def post(self, path, data=None, format=None, content_type=None, **extra):
data, content_type = self._encode_data(data, format, content_type)
return self.generic('POST', path, data, content_type, **extra)
def put(self, path, data=None, format=None, content_type=None, **extra):
data, content_type = self._encode_data(data, format, content_type)
return self.generic('PUT', path, data, content_type, **extra)
def patch(self, path, data=None, format=None, content_type=None, **extra):
data, content_type = self._encode_data(data, format, content_type)
return self.generic('PATCH', path, data, content_type, **extra)
def delete(self, path, data=None, format=None, content_type=None, **extra):
data, content_type = self._encode_data(data, format, content_type)
return self.generic('DELETE', path, data, content_type, **extra)
def options(self, path, data=None, format=None, content_type=None, **extra):
data, content_type = self._encode_data(data, format, content_type)
return self.generic('OPTIONS', path, data, content_type, **extra)
def request(self, **kwargs):
request = super(APIRequestFactory, self).request(**kwargs)
request._dont_enforce_csrf_checks = not self.enforce_csrf_checks
return request
class ForceAuthClientHandler(ClientHandler):
"""
A patched version of ClientHandler that can enforce authentication
on the outgoing requests.
"""
def __init__(self, *args, **kwargs):
self._force_user = None
self._force_token = None
super(ForceAuthClientHandler, self).__init__(*args, **kwargs)
def get_response(self, request):
# This is the simplest place we can hook into to patch the
# request object.
force_authenticate(request, self._force_user, self._force_token)
return super(ForceAuthClientHandler, self).get_response(request)
class APIClient(APIRequestFactory, DjangoClient):
def __init__(self, enforce_csrf_checks=False, **defaults):
super(APIClient, self).__init__(**defaults)
self.handler = ForceAuthClientHandler(enforce_csrf_checks)
self._credentials = {}
def credentials(self, **kwargs):
"""
Sets headers that will be used on every outgoing request.
"""
self._credentials = kwargs
def force_authenticate(self, user=None, token=None):
"""
Forcibly authenticates outgoing requests with the given
user and/or token.
"""
self.handler._force_user = user
self.handler._force_token = token
if user is None:
self.logout() # Also clear any possible session info if required
def request(self, **kwargs):
# Ensure that any credentials set get added to every request.
kwargs.update(self._credentials)
return super(APIClient, self).request(**kwargs)
def get(self, path, data=None, follow=False, **extra):
response = super(APIClient, self).get(path, data=data, **extra)
if follow:
response = self._handle_redirects(response, **extra)
return response
def post(self, path, data=None, format=None, content_type=None,
follow=False, **extra):
response = super(APIClient, self).post(
path, data=data, format=format, content_type=content_type, **extra)
if follow:
response = self._handle_redirects(response, **extra)
return response
def put(self, path, data=None, format=None, content_type=None,
follow=False, **extra):
response = super(APIClient, self).put(
path, data=data, format=format, content_type=content_type, **extra)
if follow:
response = self._handle_redirects(response, **extra)
return response
def patch(self, path, data=None, format=None, content_type=None,
follow=False, **extra):
response = super(APIClient, self).patch(
path, data=data, format=format, content_type=content_type, **extra)
if follow:
response = self._handle_redirects(response, **extra)
return response
def delete(self, path, data=None, format=None, content_type=None,
follow=False, **extra):
response = super(APIClient, self).delete(
path, data=data, format=format, content_type=content_type, **extra)
if follow:
response = self._handle_redirects(response, **extra)
return response
def options(self, path, data=None, format=None, content_type=None,
follow=False, **extra):
response = super(APIClient, self).options(
path, data=data, format=format, content_type=content_type, **extra)
if follow:
response = self._handle_redirects(response, **extra)
return response
def logout(self):
self._credentials = {}
# Also clear any `force_authenticate`
self.handler._force_user = None
self.handler._force_token = None
if self.session:
super(APIClient, self).logout()
class APITransactionTestCase(testcases.TransactionTestCase):
client_class = APIClient
class APITestCase(testcases.TestCase):
client_class = APIClient
class APISimpleTestCase(testcases.SimpleTestCase):
client_class = APIClient
class APILiveServerTestCase(testcases.LiveServerTestCase):
client_class = APIClient
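# Hedged usage sketch (assumes a configured Django project with REST
# framework installed; '/notes/' and `some_user` are illustrative):
#
#     client = APIClient()
#     client.force_authenticate(user=some_user)
#     response = client.post('/notes/', {'text': 'hi'}, format='json')
#     client.force_authenticate(user=None)  # clear forced authentication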
| mit |
shanot/imp | modules/multifit/test/test_connected_components.py | 2 | 1704 | import sys
import os
import IMP
import IMP.em
import IMP.test
import IMP.core
import IMP.atom
import IMP.multifit
class Tests(IMP.test.TestCase):
"""Test connected components """
def setUp(self):
"""Build test model and optimizer"""
IMP.test.TestCase.setUp(self)
IMP.set_log_level(IMP.VERBOSE) # SILENT)
def test_connected_components(self):
"""test connected components"""
for i in range(5):
# sample i populations
mdl = IMP.Model()
ps = []
# create a map of i components
for j in range(i + 1):
bb = IMP.algebra.BoundingBox3D(
IMP.algebra.Vector3D(
-1 * (j + 1),
-1 * (j + 1),
-1 * (j + 1)),
IMP.algebra.Vector3D(1 * (j + 1), 1 * (j + 1), 1 * (j + 1)))
for k in range(10):
p = IMP.Particle(mdl)
center = IMP.algebra.get_random_vector_in(bb) \
+ IMP.algebra.Vector3D(j * 20, j * 20, j * 20)
IMP.core.XYZR.setup_particle(p,
IMP.algebra.Sphere3D(center, 2))
IMP.atom.Mass.setup_particle(p, 1)
ps.append(p)
dmap = IMP.em.particles2density(ps, 10, 1)
con_comp = IMP.multifit.get_connected_components(dmap, 0.001, 0.5)
for c in con_comp:
for ind in c:
self.assertLess(ind, dmap.get_number_of_voxels())
self.assertEqual(len(con_comp), i + 1)
if __name__ == '__main__':
IMP.test.main()
| gpl-3.0 |
yunlongliukm/chm1_scripts | AlignmentReaders.py | 2 | 1464 | #!/usr/bin/env python
class Alignment:
def __init__(self):
self.qname = ""
self.tname = ""
        self.qstart = 0
self.qend = 0
self.qstrand = 0
self.qlen = 0
self.tstart = 0
self.tend = 0
self.tstrand = 0
self.tlen = 0
self.score = 0
self.number = 0
self.identity = 0
def ToString(self):
members = ["qname", "tname", "qstart", "qend", "qstrand", "qlen", "tstart", "tend", "tstrand", "tlen", "score", "number", "identity"]
#return str(self.__dict__.values())
return " ".join([str(getattr(self,members[i])) for i in range(len(members))])
class M4Reader:
def __init__(self, filename):
self.fh = open(filename)
self.prev = None
def GetNext(self):
line = self.fh.readline()
if (line == ""):
return None
vals = line.split()
a = Alignment()
a.qname = vals[0]
a.tname = vals[1]
a.tstrand = int(vals[2])
a.qstrand = int(vals[3])
a.score = int(vals[4])
a.identity = float(vals[5])
a.tstart = int(vals[6])
a.tend = int(vals[7])
a.tlen = int(vals[8])
a.qstart = int(vals[9])
a.qend = int(vals[10])
a.qlen = int(vals[11])
if (self.prev is not None and self.prev.qname == a.qname):
a.number = self.prev.number + 1
self.prev = a
return a
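# Minimal usage sketch ('alignments.m4' is an illustrative path; the reader
# expects whitespace-separated M4 alignment rows):
#
#     reader = M4Reader('alignments.m4')
#     a = reader.GetNext()
#     while a is not None:
#         print a.ToString()
#         a = reader.GetNext()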
| mit |
jriehl/numba | numba/roc/hsadrv/devices.py | 2 | 3256 | """
Expose each GPU device directly
"""
from __future__ import print_function, absolute_import, division
import functools
from numba import servicelib
from .driver import hsa as driver, Context as _Context
class _culist(object):
"""A thread local list of GPU instances
"""
def __init__(self):
self._lst = None
@property
def _gpus(self):
if not self._lst:
self._lst = self._init_gpus()
return self._lst
def _init_gpus(self):
gpus = []
for com in driver.components:
gpus.append(CU(com))
return gpus
def __getitem__(self, item):
return self._gpus[item]
def append(self, item):
return self._gpus.append(item)
def __len__(self):
return len(self._gpus)
def __nonzero__(self):
return bool(self._gpus)
def __iter__(self):
return iter(self._gpus)
__bool__ = __nonzero__
def reset(self):
for gpu in self:
gpu.reset()
@property
def current(self):
"""Get the current GPU object associated with the thread
"""
return _custack.top
cus = _culist()
del _culist
class CU(object):
def __init__(self, cu):
self._cu = cu
self._context = None
def __getattr__(self, key):
"""Redirect to self._gpu
"""
if key.startswith('_'):
raise AttributeError(key)
return getattr(self._cu, key)
def __repr__(self):
return repr(self._cu)
def associate_context(self):
"""Associate the context of this GPU to the running thread
"""
# No context was created for this GPU
if self._context is None:
self._context = self._cu.create_context()
return self._context
def __enter__(self):
self.associate_context()
_custack.push(self)
def __exit__(self, exc_type, exc_val, exc_tb):
assert _get_device() is self
self._context.pop()
_custack.pop()
def reset(self):
if self._context:
self._context.reset()
self._context = None
_cpu_context = None
def get_cpu_context():
global _cpu_context
if _cpu_context is None:
cpu_agent = [a for a in driver.agents if not a.is_component][0]
_cpu_context = _Context(cpu_agent)
return _cpu_context
def get_gpu(i):
return cus[i]
def get_num_gpus():
return len(cus)
_custack = servicelib.TLStack()
def _get_device(devnum=0):
"""Get the current device or use a device by device number.
"""
if not _custack:
_custack.push(get_gpu(devnum))
return _custack.top
def get_context(devnum=0):
"""Get the current device or use a device by device number, and
return the HSA context.
"""
return _get_device(devnum=devnum).associate_context()
def get_all_contexts():
return [get_context(i) for i in range(get_num_gpus())]
def require_context(fn):
"""
A decorator to ensure a context for the HSA subsystem
"""
@functools.wraps(fn)
def _require_cu_context(*args, **kws):
get_context()
return fn(*args, **kws)
return _require_cu_context
def reset():
cus.reset()
_custack.clear()
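# Hedged usage sketch (requires a working HSA/ROC runtime with at least one
# GPU agent; the index is illustrative):
#
#     with get_gpu(0):          # push device 0's context for this thread
#         ctx = get_context(0)  # the HSA context now associated
#     reset()                   # tear down contexts and clear the stack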
| bsd-2-clause |
prestoncarman/vxquery | vxquery-server/src/main/resources/scripts/cluster_cli.py | 11 | 3506 | #!/usr/bin/env python
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys, getopt, os
# Custom modules.
from cluster_actions import *
def main(argv):
action = ""
cluster_file_name = ""
deploy_path = ""
try:
        opts, args = getopt.getopt(argv, "a:c:d:h", ["action=", "cluster=", "deploy_folder="])
except getopt.GetoptError:
print 'The file options for cluster_cli.py were not correctly specified.'
print 'To see a full list of options try:'
print ' $ python cluster_cli.py -h'
sys.exit(2)
for opt, arg in opts:
if opt == '-h':
            print 'Options for cluster_cli.py:'
            print ' -a (str) The action to perform (deploy, start, stop, or kill).'
            print ' -c The XML cluster configuration file.'
            print ' -d The folder to deploy to the cluster (used with the deploy action).'
sys.exit()
elif opt in ('-a', "--action"):
# check if file exists.
if arg in ('deploy', 'start', 'stop', 'kill'):
action = arg
else:
print 'Error: Argument must be a string ("deploy", "start", "stop", or "kill") for --action (-a).'
sys.exit()
elif opt in ('-c', "--cluster"):
# check if file exists.
if os.path.exists(arg):
cluster_file_name = arg
else:
print 'Error: Argument must be a file name for --cluster (-c).'
sys.exit()
elif opt in ('-d', "--deploy_folder"):
# check if file exists.
if os.path.exists(arg):
if os.path.basename(arg) == "":
deploy_path = os.path.dirname(arg)
else:
deploy_path = arg
else:
print 'Error: Argument must be a file name for --deploy_folder (-d).'
sys.exit()
# Required fields to run the script.
if cluster_file_name == "" or not os.path.exists(cluster_file_name):
print 'Error: The cluster XML file option must be supplied: --cluster (-c).'
sys.exit()
# The action to take on the cluster.
cluster = ClusterActions(cluster_file_name)
if action == 'start':
cluster.start()
elif action == 'stop':
cluster.stop_cluster()
elif action == 'kill':
cluster.stop()
elif action == 'deploy':
if deploy_path != "":
cluster.deploy(deploy_path)
else:
print 'Error: The cluster cli must have a deploy_folder option when doing the deploy action: --deploy_folder (-d).'
sys.exit()
else:
print 'Error: The cluster cli must have an action option must be supplied: --action (-a).'
sys.exit()
if __name__ == "__main__":
main(sys.argv[1:])
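# Example invocations (paths are illustrative):
#   $ python cluster_cli.py -c conf/cluster.xml -a deploy -d /opt/vxquery
#   $ python cluster_cli.py -c conf/cluster.xml -a start
#   $ python cluster_cli.py -c conf/cluster.xml -a stop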
| apache-2.0 |
dyyi/moneybook | venv/Lib/site-packages/pip/_vendor/requests/packages/urllib3/fields.py | 200 | 5872 | from __future__ import absolute_import
import email.utils
import mimetypes
from .packages import six
def guess_content_type(filename, default='application/octet-stream'):
"""
Guess the "Content-Type" of a file.
:param filename:
The filename to guess the "Content-Type" of using :mod:`mimetypes`.
:param default:
If no "Content-Type" can be guessed, default to `default`.
"""
if filename:
return mimetypes.guess_type(filename)[0] or default
return default
def format_header_param(name, value):
"""
Helper function to format and quote a single header parameter.
Particularly useful for header parameters which might contain
non-ASCII values, like file names. This follows RFC 2231, as
suggested by RFC 2388 Section 4.4.
:param name:
The name of the parameter, a string expected to be ASCII only.
:param value:
The value of the parameter, provided as a unicode string.
"""
if not any(ch in value for ch in '"\\\r\n'):
result = '%s="%s"' % (name, value)
try:
result.encode('ascii')
except UnicodeEncodeError:
pass
else:
return result
if not six.PY3: # Python 2:
value = value.encode('utf-8')
value = email.utils.encode_rfc2231(value, 'utf-8')
value = '%s*=%s' % (name, value)
return value
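# Behaviour sketch (values are illustrative): an ASCII-safe value stays
# quoted, while a non-ASCII value falls back to RFC 2231 encoding:
#
#     format_header_param('filename', u'r.txt')     # 'filename="r.txt"'
#     format_header_param('filename', u'r\xe9.txt') # "filename*=utf-8''r%C3%A9.txt"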
class RequestField(object):
"""
A data container for request body parameters.
:param name:
The name of this request field.
:param data:
The data/value body.
:param filename:
An optional filename of the request field.
:param headers:
An optional dict-like object of headers to initially use for the field.
"""
def __init__(self, name, data, filename=None, headers=None):
self._name = name
self._filename = filename
self.data = data
self.headers = {}
if headers:
self.headers = dict(headers)
@classmethod
def from_tuples(cls, fieldname, value):
"""
A :class:`~urllib3.fields.RequestField` factory from old-style tuple parameters.
Supports constructing :class:`~urllib3.fields.RequestField` from
parameter of key/value strings AND key/filetuple. A filetuple is a
(filename, data, MIME type) tuple where the MIME type is optional.
For example::
'foo': 'bar',
'fakefile': ('foofile.txt', 'contents of foofile'),
'realfile': ('barfile.txt', open('realfile').read()),
'typedfile': ('bazfile.bin', open('bazfile').read(), 'image/jpeg'),
'nonamefile': 'contents of nonamefile field',
Field names and filenames must be unicode.
"""
if isinstance(value, tuple):
if len(value) == 3:
filename, data, content_type = value
else:
filename, data = value
content_type = guess_content_type(filename)
else:
filename = None
content_type = None
data = value
request_param = cls(fieldname, data, filename=filename)
request_param.make_multipart(content_type=content_type)
return request_param
def _render_part(self, name, value):
"""
Overridable helper function to format a single header parameter.
:param name:
The name of the parameter, a string expected to be ASCII only.
:param value:
The value of the parameter, provided as a unicode string.
"""
return format_header_param(name, value)
def _render_parts(self, header_parts):
"""
Helper function to format and quote a single header.
Useful for single headers that are composed of multiple items. E.g.,
'Content-Disposition' fields.
:param header_parts:
            A sequence of (k, v) tuples or a :class:`dict` of (k, v) to format
as `k1="v1"; k2="v2"; ...`.
"""
parts = []
iterable = header_parts
if isinstance(header_parts, dict):
iterable = header_parts.items()
for name, value in iterable:
if value:
parts.append(self._render_part(name, value))
return '; '.join(parts)
def render_headers(self):
"""
Renders the headers for this request field.
"""
lines = []
sort_keys = ['Content-Disposition', 'Content-Type', 'Content-Location']
for sort_key in sort_keys:
if self.headers.get(sort_key, False):
lines.append('%s: %s' % (sort_key, self.headers[sort_key]))
for header_name, header_value in self.headers.items():
if header_name not in sort_keys:
if header_value:
lines.append('%s: %s' % (header_name, header_value))
lines.append('\r\n')
return '\r\n'.join(lines)
def make_multipart(self, content_disposition=None, content_type=None,
content_location=None):
"""
Makes this request field into a multipart request field.
This method overrides "Content-Disposition", "Content-Type" and
"Content-Location" headers to the request parameter.
:param content_type:
The 'Content-Type' of the request body.
:param content_location:
The 'Content-Location' of the request body.
"""
self.headers['Content-Disposition'] = content_disposition or 'form-data'
self.headers['Content-Disposition'] += '; '.join([
'', self._render_parts(
(('name', self._name), ('filename', self._filename))
)
])
self.headers['Content-Type'] = content_type
self.headers['Content-Location'] = content_location
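# Hedged end-to-end sketch (field name, filename and payload are made up):
#
#     rf = RequestField.from_tuples('upload', ('report.txt', 'hello', 'text/plain'))
#     rf.render_headers()
#     # -> 'Content-Disposition: form-data; name="upload"; filename="report.txt"\r\n'
#     #    'Content-Type: text/plain\r\n\r\n'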
| apache-2.0 |
Endika/OpenUpgrade | addons/account/wizard/account_report_general_journal.py | 378 | 1697 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
class account_general_journal(osv.osv_memory):
_inherit = "account.common.journal.report"
_name = 'account.general.journal'
_description = 'Account General Journal'
_columns = {
'journal_ids': fields.many2many('account.journal', 'account_general_journal_journal_rel', 'account_id', 'journal_id', 'Journals', required=True),
}
def _print_report(self, cr, uid, ids, data, context=None):
data = self.pre_print_report(cr, uid, ids, data, context=context)
return self.pool['report'].get_action(cr, uid, [], 'account.report_generaljournal', data=data, context=context)
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
thaumos/ansible | lib/ansible/modules/network/fortios/fortios_webfilter_content.py | 24 | 10670 | #!/usr/bin/python
from __future__ import (absolute_import, division, print_function)
# Copyright 2018 Fortinet, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
#
# the lib use python logging can get it if the following is set in your
# Ansible config.
__metaclass__ = type
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'metadata_version': '1.1'}
DOCUMENTATION = '''
---
module: fortios_webfilter_content
short_description: Configure Web filter banned word table in Fortinet's FortiOS and FortiGate.
description:
    - This module is able to configure a FortiGate or FortiOS device by
      allowing the user to set and modify the webfilter feature and content
      category. Examples include all options and need to be adjusted to
      datasources before usage.
Tested with FOS v6.0.2
version_added: "2.8"
author:
- Miguel Angel Munoz (@mamunozgonzalez)
- Nicolas Thomas (@thomnico)
notes:
- Requires fortiosapi library developed by Fortinet
- Run as a local_action in your playbook
requirements:
- fortiosapi>=0.9.8
options:
host:
description:
- FortiOS or FortiGate ip address.
required: true
username:
description:
- FortiOS or FortiGate username.
required: true
password:
description:
- FortiOS or FortiGate password.
default: ""
vdom:
description:
- Virtual domain, among those defined previously. A vdom is a
virtual instance of the FortiGate that can be configured and
used as a different unit.
default: root
https:
description:
- Indicates if the requests towards FortiGate must use HTTPS
protocol
type: bool
default: false
webfilter_content:
description:
- Configure Web filter banned word table.
default: null
suboptions:
state:
description:
- Indicates whether to create or remove the object
choices:
- present
- absent
comment:
description:
- Optional comments.
entries:
description:
- Configure banned word entries.
suboptions:
action:
description:
- Block or exempt word when a match is found.
choices:
- block
- exempt
lang:
description:
- Language of banned word.
choices:
- western
- simch
- trach
- japanese
- korean
- french
- thai
- spanish
- cyrillic
name:
description:
- Banned word.
required: true
pattern-type:
description:
- "Banned word pattern type: wildcard pattern or Perl regular expression."
choices:
- wildcard
- regexp
score:
description:
- Score, to be applied every time the word appears on a web page (0 - 4294967295, default = 10).
status:
description:
- Enable/disable banned word.
choices:
- enable
- disable
id:
description:
- ID.
required: true
name:
description:
- Name of table.
'''
EXAMPLES = '''
- hosts: localhost
vars:
host: "192.168.122.40"
username: "admin"
password: ""
vdom: "root"
tasks:
- name: Configure Web filter banned word table.
fortios_webfilter_content:
host: "{{ host }}"
username: "{{ username }}"
password: "{{ password }}"
vdom: "{{ vdom }}"
webfilter_content:
state: "present"
comment: "Optional comments."
entries:
-
action: "block"
lang: "western"
name: "default_name_7"
pattern-type: "wildcard"
score: "9"
status: "enable"
id: "11"
name: "default_name_12"
'''
RETURN = '''
build:
description: Build number of the fortigate image
returned: always
type: str
sample: '1547'
http_method:
description: Last method used to provision the content into FortiGate
returned: always
type: str
sample: 'PUT'
http_status:
description: Last result given by FortiGate on last operation applied
returned: always
type: str
sample: "200"
mkey:
description: Master key (id) used in the last call to FortiGate
returned: success
type: str
sample: "id"
name:
description: Name of the table used to fulfill the request
returned: always
type: str
sample: "urlfilter"
path:
description: Path of the table used to fulfill the request
returned: always
type: str
sample: "webfilter"
revision:
description: Internal revision number
returned: always
type: str
sample: "17.0.2.10658"
serial:
description: Serial number of the unit
returned: always
type: str
sample: "FGVMEVYYQT3AB5352"
status:
description: Indication of the operation's result
returned: always
type: str
sample: "success"
vdom:
description: Virtual domain used
returned: always
type: str
sample: "root"
version:
description: Version of the FortiGate
returned: always
type: str
sample: "v5.6.3"
'''
from ansible.module_utils.basic import AnsibleModule
fos = None
def login(data):
host = data['host']
username = data['username']
password = data['password']
fos.debug('on')
if 'https' in data and not data['https']:
fos.https('off')
else:
fos.https('on')
fos.login(host, username, password)
def filter_webfilter_content_data(json):
option_list = ['comment', 'entries', 'id',
'name']
dictionary = {}
for attribute in option_list:
if attribute in json and json[attribute] is not None:
dictionary[attribute] = json[attribute]
return dictionary
def webfilter_content(data, fos):
vdom = data['vdom']
webfilter_content_data = data['webfilter_content']
filtered_data = filter_webfilter_content_data(webfilter_content_data)
if webfilter_content_data['state'] == "present":
return fos.set('webfilter',
'content',
data=filtered_data,
vdom=vdom)
elif webfilter_content_data['state'] == "absent":
return fos.delete('webfilter',
'content',
mkey=filtered_data['id'],
vdom=vdom)
def fortios_webfilter(data, fos):
login(data)
methodlist = ['webfilter_content']
for method in methodlist:
if data[method]:
resp = eval(method)(data, fos)
break
fos.logout()
return not resp['status'] == "success", resp['status'] == "success", resp
def main():
fields = {
"host": {"required": True, "type": "str"},
"username": {"required": True, "type": "str"},
"password": {"required": False, "type": "str", "no_log": True},
"vdom": {"required": False, "type": "str", "default": "root"},
"https": {"required": False, "type": "bool", "default": "False"},
"webfilter_content": {
"required": False, "type": "dict",
"options": {
"state": {"required": True, "type": "str",
"choices": ["present", "absent"]},
"comment": {"required": False, "type": "str"},
"entries": {"required": False, "type": "list",
"options": {
"action": {"required": False, "type": "str",
"choices": ["block", "exempt"]},
"lang": {"required": False, "type": "str",
"choices": ["western", "simch", "trach",
"japanese", "korean", "french",
"thai", "spanish", "cyrillic"]},
"name": {"required": True, "type": "str"},
"pattern-type": {"required": False, "type": "str",
"choices": ["wildcard", "regexp"]},
"score": {"required": False, "type": "int"},
"status": {"required": False, "type": "str",
"choices": ["enable", "disable"]}
}},
"id": {"required": True, "type": "int"},
"name": {"required": False, "type": "str"}
}
}
}
module = AnsibleModule(argument_spec=fields,
supports_check_mode=False)
try:
from fortiosapi import FortiOSAPI
except ImportError:
module.fail_json(msg="fortiosapi module is required")
global fos
fos = FortiOSAPI()
is_error, has_changed, result = fortios_webfilter(module.params, fos)
if not is_error:
module.exit_json(changed=has_changed, meta=result)
else:
module.fail_json(msg="Error in repo", meta=result)
if __name__ == '__main__':
main()
| gpl-3.0 |
kemalakyol48/python-for-android | python-modules/twisted/twisted/names/test/test_names.py | 49 | 31329 | # -*- test-case-name: twisted.names.test.test_names -*-
# Copyright (c) 2001-2010 Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Test cases for twisted.names.
"""
import socket, operator, copy
from twisted.trial import unittest
from twisted.internet import reactor, defer, error
from twisted.internet.defer import succeed
from twisted.names import client, server, common, authority, hosts, dns
from twisted.python import failure
from twisted.names.error import DNSFormatError, DNSServerError, DNSNameError
from twisted.names.error import DNSNotImplementedError, DNSQueryRefusedError
from twisted.names.error import DNSUnknownError
from twisted.names.dns import EFORMAT, ESERVER, ENAME, ENOTIMP, EREFUSED
from twisted.names.dns import Message
from twisted.names.client import Resolver
from twisted.names.test.test_client import StubPort
from twisted.python.compat import reduce
def justPayload(results):
return [r.payload for r in results[0]]
class NoFileAuthority(authority.FileAuthority):
def __init__(self, soa, records):
# Yes, skip FileAuthority
common.ResolverBase.__init__(self)
self.soa, self.records = soa, records
soa_record = dns.Record_SOA(
mname = 'test-domain.com',
rname = 'root.test-domain.com',
serial = 100,
refresh = 1234,
minimum = 7654,
expire = 19283784,
retry = 15,
ttl=1
)
reverse_soa = dns.Record_SOA(
mname = '93.84.28.in-addr.arpa',
rname = '93.84.28.in-addr.arpa',
serial = 120,
refresh = 54321,
minimum = 382,
expire = 11193983,
retry = 30,
ttl=3
)
my_soa = dns.Record_SOA(
mname = 'my-domain.com',
rname = 'postmaster.test-domain.com',
serial = 130,
refresh = 12345,
minimum = 1,
expire = 999999,
retry = 100,
)
test_domain_com = NoFileAuthority(
soa = ('test-domain.com', soa_record),
records = {
'test-domain.com': [
soa_record,
dns.Record_A('127.0.0.1'),
dns.Record_NS('39.28.189.39'),
dns.Record_SPF('v=spf1 mx/30 mx:example.org/30 -all'),
dns.Record_SPF('v=spf1 +mx a:\0colo', '.example.com/28 -all not valid'),
dns.Record_MX(10, 'host.test-domain.com'),
dns.Record_HINFO(os='Linux', cpu='A Fast One, Dontcha know'),
dns.Record_CNAME('canonical.name.com'),
dns.Record_MB('mailbox.test-domain.com'),
dns.Record_MG('mail.group.someplace'),
dns.Record_TXT('A First piece of Text', 'a SecoNd piece'),
dns.Record_A6(0, 'ABCD::4321', ''),
dns.Record_A6(12, '0:0069::0', 'some.network.tld'),
dns.Record_A6(8, '0:5634:1294:AFCB:56AC:48EF:34C3:01FF', 'tra.la.la.net'),
dns.Record_TXT('Some more text, haha! Yes. \0 Still here?'),
dns.Record_MR('mail.redirect.or.whatever'),
dns.Record_MINFO(rmailbx='r mail box', emailbx='e mail box'),
dns.Record_AFSDB(subtype=1, hostname='afsdb.test-domain.com'),
dns.Record_RP(mbox='whatever.i.dunno', txt='some.more.text'),
dns.Record_WKS('12.54.78.12', socket.IPPROTO_TCP,
'\x12\x01\x16\xfe\xc1\x00\x01'),
dns.Record_NAPTR(100, 10, "u", "sip+E2U",
"!^.*$!sip:[email protected]!"),
dns.Record_AAAA('AF43:5634:1294:AFCB:56AC:48EF:34C3:01FF')],
'http.tcp.test-domain.com': [
dns.Record_SRV(257, 16383, 43690, 'some.other.place.fool')
],
'host.test-domain.com': [
dns.Record_A('123.242.1.5'),
dns.Record_A('0.255.0.255'),
],
'host-two.test-domain.com': [
#
# Python bug
# dns.Record_A('255.255.255.255'),
#
dns.Record_A('255.255.255.254'),
dns.Record_A('0.0.0.0')
],
'cname.test-domain.com': [
dns.Record_CNAME('test-domain.com')
],
'anothertest-domain.com': [
dns.Record_A('1.2.3.4')],
}
)
reverse_domain = NoFileAuthority(
soa = ('93.84.28.in-addr.arpa', reverse_soa),
records = {
'123.93.84.28.in-addr.arpa': [
dns.Record_PTR('test.host-reverse.lookup.com'),
reverse_soa
]
}
)
my_domain_com = NoFileAuthority(
soa = ('my-domain.com', my_soa),
records = {
'my-domain.com': [
my_soa,
dns.Record_A('1.2.3.4', ttl='1S'),
dns.Record_NS('ns1.domain', ttl='2M'),
dns.Record_NS('ns2.domain', ttl='3H'),
dns.Record_SRV(257, 16383, 43690, 'some.other.place.fool', ttl='4D')
]
}
)
class ServerDNSTestCase(unittest.TestCase):
"""
Test cases for DNS server and client.
"""
def setUp(self):
self.factory = server.DNSServerFactory([
test_domain_com, reverse_domain, my_domain_com
], verbose=2)
p = dns.DNSDatagramProtocol(self.factory)
while 1:
listenerTCP = reactor.listenTCP(0, self.factory, interface="127.0.0.1")
# It's simpler to do the stop listening with addCleanup,
# even though we might not end up using this TCP port in
# the test (if the listenUDP below fails). Cleaning up
# this TCP port sooner than "cleanup time" would mean
# adding more code to keep track of the Deferred returned
# by stopListening.
self.addCleanup(listenerTCP.stopListening)
port = listenerTCP.getHost().port
try:
listenerUDP = reactor.listenUDP(port, p, interface="127.0.0.1")
except error.CannotListenError:
pass
else:
self.addCleanup(listenerUDP.stopListening)
break
self.listenerTCP = listenerTCP
self.listenerUDP = listenerUDP
self.resolver = client.Resolver(servers=[('127.0.0.1', port)])
def tearDown(self):
"""
Clean up any server connections associated with the
L{DNSServerFactory} created in L{setUp}
"""
# It'd be great if DNSServerFactory had a method that
# encapsulated this task. At least the necessary data is
# available, though.
for conn in self.factory.connections[:]:
conn.transport.loseConnection()
def namesTest(self, d, r):
self.response = None
def setDone(response):
self.response = response
def checkResults(ignored):
if isinstance(self.response, failure.Failure):
raise self.response
results = justPayload(self.response)
assert len(results) == len(r), "%s != %s" % (map(str, results), map(str, r))
for rec in results:
assert rec in r, "%s not in %s" % (rec, map(str, r))
d.addBoth(setDone)
d.addCallback(checkResults)
return d
def testAddressRecord1(self):
"""Test simple DNS 'A' record queries"""
return self.namesTest(
self.resolver.lookupAddress('test-domain.com'),
[dns.Record_A('127.0.0.1', ttl=19283784)]
)
def testAddressRecord2(self):
"""Test DNS 'A' record queries with multiple answers"""
return self.namesTest(
self.resolver.lookupAddress('host.test-domain.com'),
[dns.Record_A('123.242.1.5', ttl=19283784), dns.Record_A('0.255.0.255', ttl=19283784)]
)
def testAddressRecord3(self):
"""Test DNS 'A' record queries with edge cases"""
return self.namesTest(
self.resolver.lookupAddress('host-two.test-domain.com'),
[dns.Record_A('255.255.255.254', ttl=19283784), dns.Record_A('0.0.0.0', ttl=19283784)]
)
def testAuthority(self):
"""Test DNS 'SOA' record queries"""
return self.namesTest(
self.resolver.lookupAuthority('test-domain.com'),
[soa_record]
)
def testMailExchangeRecord(self):
"""Test DNS 'MX' record queries"""
return self.namesTest(
self.resolver.lookupMailExchange('test-domain.com'),
[dns.Record_MX(10, 'host.test-domain.com', ttl=19283784)]
)
def testNameserver(self):
"""Test DNS 'NS' record queries"""
return self.namesTest(
self.resolver.lookupNameservers('test-domain.com'),
[dns.Record_NS('39.28.189.39', ttl=19283784)]
)
def testHINFO(self):
"""Test DNS 'HINFO' record queries"""
return self.namesTest(
self.resolver.lookupHostInfo('test-domain.com'),
[dns.Record_HINFO(os='Linux', cpu='A Fast One, Dontcha know', ttl=19283784)]
)
def testPTR(self):
"""Test DNS 'PTR' record queries"""
return self.namesTest(
self.resolver.lookupPointer('123.93.84.28.in-addr.arpa'),
[dns.Record_PTR('test.host-reverse.lookup.com', ttl=11193983)]
)
def testCNAME(self):
"""Test DNS 'CNAME' record queries"""
return self.namesTest(
self.resolver.lookupCanonicalName('test-domain.com'),
[dns.Record_CNAME('canonical.name.com', ttl=19283784)]
)
def testCNAMEAdditional(self):
"""Test additional processing for CNAME records"""
return self.namesTest(
self.resolver.lookupAddress('cname.test-domain.com'),
[dns.Record_CNAME('test-domain.com', ttl=19283784), dns.Record_A('127.0.0.1', ttl=19283784)]
)
def testMB(self):
"""Test DNS 'MB' record queries"""
return self.namesTest(
self.resolver.lookupMailBox('test-domain.com'),
[dns.Record_MB('mailbox.test-domain.com', ttl=19283784)]
)
def testMG(self):
"""Test DNS 'MG' record queries"""
return self.namesTest(
self.resolver.lookupMailGroup('test-domain.com'),
[dns.Record_MG('mail.group.someplace', ttl=19283784)]
)
def testMR(self):
"""Test DNS 'MR' record queries"""
return self.namesTest(
self.resolver.lookupMailRename('test-domain.com'),
[dns.Record_MR('mail.redirect.or.whatever', ttl=19283784)]
)
def testMINFO(self):
"""Test DNS 'MINFO' record queries"""
return self.namesTest(
self.resolver.lookupMailboxInfo('test-domain.com'),
[dns.Record_MINFO(rmailbx='r mail box', emailbx='e mail box', ttl=19283784)]
)
def testSRV(self):
"""Test DNS 'SRV' record queries"""
return self.namesTest(
self.resolver.lookupService('http.tcp.test-domain.com'),
[dns.Record_SRV(257, 16383, 43690, 'some.other.place.fool', ttl=19283784)]
)
def testAFSDB(self):
"""Test DNS 'AFSDB' record queries"""
return self.namesTest(
self.resolver.lookupAFSDatabase('test-domain.com'),
[dns.Record_AFSDB(subtype=1, hostname='afsdb.test-domain.com', ttl=19283784)]
)
def testRP(self):
"""Test DNS 'RP' record queries"""
return self.namesTest(
self.resolver.lookupResponsibility('test-domain.com'),
[dns.Record_RP(mbox='whatever.i.dunno', txt='some.more.text', ttl=19283784)]
)
def testTXT(self):
"""Test DNS 'TXT' record queries"""
return self.namesTest(
self.resolver.lookupText('test-domain.com'),
[dns.Record_TXT('A First piece of Text', 'a SecoNd piece', ttl=19283784),
dns.Record_TXT('Some more text, haha! Yes. \0 Still here?', ttl=19283784)]
)
def test_spf(self):
"""
L{DNSServerFactory} can serve I{SPF} resource records.
"""
return self.namesTest(
self.resolver.lookupSenderPolicy('test-domain.com'),
[dns.Record_SPF('v=spf1 mx/30 mx:example.org/30 -all', ttl=19283784),
dns.Record_SPF('v=spf1 +mx a:\0colo', '.example.com/28 -all not valid', ttl=19283784)]
)
def testWKS(self):
"""Test DNS 'WKS' record queries"""
return self.namesTest(
self.resolver.lookupWellKnownServices('test-domain.com'),
[dns.Record_WKS('12.54.78.12', socket.IPPROTO_TCP, '\x12\x01\x16\xfe\xc1\x00\x01', ttl=19283784)]
)
def testSomeRecordsWithTTLs(self):
result_soa = copy.copy(my_soa)
result_soa.ttl = my_soa.expire
return self.namesTest(
self.resolver.lookupAllRecords('my-domain.com'),
[result_soa,
dns.Record_A('1.2.3.4', ttl='1S'),
dns.Record_NS('ns1.domain', ttl='2M'),
dns.Record_NS('ns2.domain', ttl='3H'),
dns.Record_SRV(257, 16383, 43690, 'some.other.place.fool', ttl='4D')]
)
def testAAAA(self):
"""Test DNS 'AAAA' record queries (IPv6)"""
return self.namesTest(
self.resolver.lookupIPV6Address('test-domain.com'),
[dns.Record_AAAA('AF43:5634:1294:AFCB:56AC:48EF:34C3:01FF', ttl=19283784)]
)
def testA6(self):
"""Test DNS 'A6' record queries (IPv6)"""
return self.namesTest(
self.resolver.lookupAddress6('test-domain.com'),
[dns.Record_A6(0, 'ABCD::4321', '', ttl=19283784),
dns.Record_A6(12, '0:0069::0', 'some.network.tld', ttl=19283784),
dns.Record_A6(8, '0:5634:1294:AFCB:56AC:48EF:34C3:01FF', 'tra.la.la.net', ttl=19283784)]
)
def test_zoneTransfer(self):
"""
Test DNS 'AXFR' queries (Zone transfer)
"""
default_ttl = soa_record.expire
results = [copy.copy(r) for r in reduce(operator.add, test_domain_com.records.values())]
for r in results:
if r.ttl is None:
r.ttl = default_ttl
return self.namesTest(
self.resolver.lookupZone('test-domain.com').addCallback(lambda r: (r[0][:-1],)),
results
)
def testSimilarZonesDontInterfere(self):
"""Tests that unrelated zones don't mess with each other."""
return self.namesTest(
self.resolver.lookupAddress("anothertest-domain.com"),
[dns.Record_A('1.2.3.4', ttl=19283784)]
)
def test_NAPTR(self):
"""
Test DNS 'NAPTR' record queries.
"""
return self.namesTest(
self.resolver.lookupNamingAuthorityPointer('test-domain.com'),
[dns.Record_NAPTR(100, 10, "u", "sip+E2U",
"!^.*$!sip:[email protected]!",
ttl=19283784)])
class DNSServerFactoryTests(unittest.TestCase):
"""
Tests for L{server.DNSServerFactory}.
"""
def _messageReceivedTest(self, methodName, message):
"""
Assert that the named method is called with the given message when
it is passed to L{DNSServerFactory.messageReceived}.
"""
# Make it appear to have some queries so that
# DNSServerFactory.allowQuery allows it.
message.queries = [None]
receivedMessages = []
def fakeHandler(message, protocol, address):
receivedMessages.append((message, protocol, address))
class FakeProtocol(object):
def writeMessage(self, message):
pass
protocol = FakeProtocol()
factory = server.DNSServerFactory(None)
setattr(factory, methodName, fakeHandler)
factory.messageReceived(message, protocol)
self.assertEqual(receivedMessages, [(message, protocol, None)])
def test_notifyMessageReceived(self):
"""
L{DNSServerFactory.messageReceived} passes messages with an opcode
of C{OP_NOTIFY} on to L{DNSServerFactory.handleNotify}.
"""
# RFC 1996, section 4.5
opCode = 4
self._messageReceivedTest('handleNotify', Message(opCode=opCode))
def test_updateMessageReceived(self):
"""
L{DNSServerFactory.messageReceived} passes messages with an opcode
of C{OP_UPDATE} on to L{DNSServerFactory.handleOther}.
This may change if the implementation ever covers update messages.
"""
# RFC 2136, section 1.3
opCode = 5
self._messageReceivedTest('handleOther', Message(opCode=opCode))
def test_connectionTracking(self):
"""
The C{connectionMade} and C{connectionLost} methods of
L{DNSServerFactory} cooperate to keep track of all
L{DNSProtocol} objects created by a factory which are
connected.
"""
protoA, protoB = object(), object()
factory = server.DNSServerFactory()
factory.connectionMade(protoA)
self.assertEqual(factory.connections, [protoA])
factory.connectionMade(protoB)
self.assertEqual(factory.connections, [protoA, protoB])
factory.connectionLost(protoA)
self.assertEqual(factory.connections, [protoB])
factory.connectionLost(protoB)
self.assertEqual(factory.connections, [])
class HelperTestCase(unittest.TestCase):
def testSerialGenerator(self):
f = self.mktemp()
a = authority.getSerial(f)
for i in range(20):
b = authority.getSerial(f)
self.failUnless(a < b)
a = b
class AXFRTest(unittest.TestCase):
def setUp(self):
self.results = None
self.d = defer.Deferred()
self.d.addCallback(self._gotResults)
self.controller = client.AXFRController('fooby.com', self.d)
self.soa = dns.RRHeader(name='fooby.com', type=dns.SOA, cls=dns.IN, ttl=86400, auth=False,
payload=dns.Record_SOA(mname='fooby.com',
rname='hooj.fooby.com',
serial=100,
refresh=200,
retry=300,
expire=400,
minimum=500,
ttl=600))
self.records = [
self.soa,
dns.RRHeader(name='fooby.com', type=dns.NS, cls=dns.IN, ttl=700, auth=False,
payload=dns.Record_NS(name='ns.twistedmatrix.com', ttl=700)),
dns.RRHeader(name='fooby.com', type=dns.MX, cls=dns.IN, ttl=700, auth=False,
payload=dns.Record_MX(preference=10, exchange='mail.mv3d.com', ttl=700)),
dns.RRHeader(name='fooby.com', type=dns.A, cls=dns.IN, ttl=700, auth=False,
payload=dns.Record_A(address='64.123.27.105', ttl=700)),
self.soa
]
def _makeMessage(self):
# hooray they all have the same message format
return dns.Message(id=999, answer=1, opCode=0, recDes=0, recAv=1, auth=1, rCode=0, trunc=0, maxSize=0)
def testBindAndTNamesStyle(self):
# Bind style = One big single message
m = self._makeMessage()
m.queries = [dns.Query('fooby.com', dns.AXFR, dns.IN)]
m.answers = self.records
self.controller.messageReceived(m, None)
self.assertEquals(self.results, self.records)
def _gotResults(self, result):
self.results = result
def testDJBStyle(self):
# DJB style = message per record
records = self.records[:]
while records:
m = self._makeMessage()
m.queries = [] # DJB *doesn't* specify any queries.. hmm..
m.answers = [records.pop(0)]
self.controller.messageReceived(m, None)
self.assertEquals(self.results, self.records)
class HostsTestCase(unittest.TestCase):
def setUp(self):
f = open('EtcHosts', 'w')
f.write('''
1.1.1.1 EXAMPLE EXAMPLE.EXAMPLETHING
1.1.1.2 HOOJY
::1 ip6thingy
''')
f.close()
self.resolver = hosts.Resolver('EtcHosts')
def testGetHostByName(self):
data = [('EXAMPLE', '1.1.1.1'),
('EXAMPLE.EXAMPLETHING', '1.1.1.1'),
('HOOJY', '1.1.1.2'),
]
ds = [self.resolver.getHostByName(n).addCallback(self.assertEqual, ip)
for n, ip in data]
return defer.gatherResults(ds)
def testLookupAddress(self):
d = self.resolver.lookupAddress('HOOJY')
d.addCallback(lambda x: self.assertEqual(x[0][0].payload.dottedQuad(),
'1.1.1.2'))
return d
def testIPv6(self):
d = self.resolver.lookupIPV6Address('ip6thingy')
d.addCallback(self.assertEqual, '::1')
return d
testIPv6.skip = 'IPv6 support is not in our hosts resolver yet'
def testNotImplemented(self):
return self.assertFailure(self.resolver.lookupMailExchange('EXAMPLE'),
NotImplementedError)
def testQuery(self):
d = self.resolver.query(dns.Query('EXAMPLE'))
d.addCallback(lambda x: self.assertEqual(x[0][0].payload.dottedQuad(),
'1.1.1.1'))
return d
def testNotFound(self):
return self.assertFailure(self.resolver.lookupAddress('foueoa'),
dns.DomainError)
def test_searchFileFor(self):
"""
L{searchFileFor} parses hosts(5) files and returns the address for
the given name, or C{None} if the name is not found.
"""
tmp = self.mktemp()
f = open(tmp, 'w')
f.write('127.0.1.1 helmut.example.org helmut\n')
f.write('# a comment\n')
f.write('::1 localhost ip6-localhost ip6-loopback\n')
f.close()
self.assertEquals(hosts.searchFileFor(tmp, 'helmut'), '127.0.1.1')
self.assertEquals(hosts.searchFileFor(tmp, 'ip6-localhost'), '::1')
self.assertIdentical(hosts.searchFileFor(tmp, 'blah'), None)
class FakeDNSDatagramProtocol(object):
def __init__(self):
self.queries = []
self.transport = StubPort()
def query(self, address, queries, timeout=10, id=None):
self.queries.append((address, queries, timeout, id))
return defer.fail(dns.DNSQueryTimeoutError(queries))
def removeResend(self, id):
# Ignore this for the time being.
pass
class RetryLogic(unittest.TestCase):
testServers = [
'1.2.3.4',
'4.3.2.1',
'a.b.c.d',
'z.y.x.w']
def testRoundRobinBackoff(self):
addrs = [(x, 53) for x in self.testServers]
r = client.Resolver(resolv=None, servers=addrs)
r.protocol = proto = FakeDNSDatagramProtocol()
return r.lookupAddress("foo.example.com"
).addCallback(self._cbRoundRobinBackoff
).addErrback(self._ebRoundRobinBackoff, proto
)
def _cbRoundRobinBackoff(self, result):
raise unittest.FailTest("Lookup address succeeded, should have timed out")
def _ebRoundRobinBackoff(self, failure, fakeProto):
failure.trap(defer.TimeoutError)
# Assert that each server is tried with a particular timeout
# before the timeout is increased and the attempts are repeated.
for t in (1, 3, 11, 45):
tries = fakeProto.queries[:len(self.testServers)]
del fakeProto.queries[:len(self.testServers)]
tries.sort()
expected = list(self.testServers)
expected.sort()
for ((addr, query, timeout, id), expectedAddr) in zip(tries, expected):
self.assertEquals(addr, (expectedAddr, 53))
self.assertEquals(timeout, t)
self.failIf(fakeProto.queries)
class ResolvConfHandling(unittest.TestCase):
def testMissing(self):
resolvConf = self.mktemp()
r = client.Resolver(resolv=resolvConf)
self.assertEquals(r.dynServers, [('127.0.0.1', 53)])
r._parseCall.cancel()
def testEmpty(self):
resolvConf = self.mktemp()
fObj = file(resolvConf, 'w')
fObj.close()
r = client.Resolver(resolv=resolvConf)
self.assertEquals(r.dynServers, [('127.0.0.1', 53)])
r._parseCall.cancel()
class FilterAnswersTests(unittest.TestCase):
"""
Test L{twisted.names.client.Resolver.filterAnswers}'s handling of various
error conditions it might encounter.
"""
def setUp(self):
# Create a resolver pointed at an invalid server - we won't be hitting
# the network in any of these tests.
self.resolver = Resolver(servers=[('0.0.0.0', 0)])
def test_truncatedMessage(self):
"""
Test that a truncated message results in an equivalent request made via
TCP.
"""
m = Message(trunc=True)
m.addQuery('example.com')
def queryTCP(queries):
self.assertEqual(queries, m.queries)
response = Message()
response.answers = ['answer']
response.authority = ['authority']
response.additional = ['additional']
return succeed(response)
self.resolver.queryTCP = queryTCP
d = self.resolver.filterAnswers(m)
d.addCallback(
self.assertEqual, (['answer'], ['authority'], ['additional']))
return d
def _rcodeTest(self, rcode, exc):
m = Message(rCode=rcode)
err = self.resolver.filterAnswers(m)
err.trap(exc)
def test_formatError(self):
"""
Test that a message with a result code of C{EFORMAT} results in a
failure wrapped around L{DNSFormatError}.
"""
return self._rcodeTest(EFORMAT, DNSFormatError)
def test_serverError(self):
"""
Like L{test_formatError} but for C{ESERVER}/L{DNSServerError}.
"""
return self._rcodeTest(ESERVER, DNSServerError)
def test_nameError(self):
"""
Like L{test_formatError} but for C{ENAME}/L{DNSNameError}.
"""
return self._rcodeTest(ENAME, DNSNameError)
def test_notImplementedError(self):
"""
Like L{test_formatError} but for C{ENOTIMP}/L{DNSNotImplementedError}.
"""
return self._rcodeTest(ENOTIMP, DNSNotImplementedError)
def test_refusedError(self):
"""
Like L{test_formatError} but for C{EREFUSED}/L{DNSQueryRefusedError}.
"""
return self._rcodeTest(EREFUSED, DNSQueryRefusedError)
def test_refusedErrorUnknown(self):
"""
Like L{test_formatError} but for an unrecognized error code and
L{DNSUnknownError}.
"""
return self._rcodeTest(EREFUSED + 1, DNSUnknownError)
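# Summary sketch (illustrative, not Twisted source): the rcode-to-exception
# mapping exercised by the _rcodeTest cases above; any unrecognized code is
# expected to surface as DNSUnknownError.
EXPECTED_RCODE_ERRORS = {
    EFORMAT: DNSFormatError,
    ESERVER: DNSServerError,
    ENAME: DNSNameError,
    ENOTIMP: DNSNotImplementedError,
    EREFUSED: DNSQueryRefusedError,
}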
class AuthorityTests(unittest.TestCase):
"""
Tests for the basic response record selection code in L{FileAuthority}
(independent of its fileness).
"""
def test_recordMissing(self):
"""
If a L{FileAuthority} has a zone which includes an I{NS} record for a
particular name and that authority is asked for another record for the
same name which does not exist, the I{NS} record is not included in the
authority section of the response.
"""
authority = NoFileAuthority(
soa=(str(soa_record.mname), soa_record),
records={
str(soa_record.mname): [
soa_record,
dns.Record_NS('1.2.3.4'),
]})
d = authority.lookupAddress(str(soa_record.mname))
result = []
d.addCallback(result.append)
answer, authority, additional = result[0]
self.assertEquals(answer, [])
self.assertEquals(
authority, [
dns.RRHeader(
str(soa_record.mname), soa_record.TYPE,
ttl=soa_record.expire, payload=soa_record,
auth=True)])
self.assertEquals(additional, [])
def _referralTest(self, method):
"""
Create an authority and make a request against it. Then verify that the
result is a referral, including no records in the answers or additional
sections, but with an I{NS} record in the authority section.
"""
subdomain = 'example.' + str(soa_record.mname)
nameserver = dns.Record_NS('1.2.3.4')
authority = NoFileAuthority(
soa=(str(soa_record.mname), soa_record),
records={
subdomain: [
nameserver,
]})
d = getattr(authority, method)(subdomain)
result = []
d.addCallback(result.append)
answer, authority, additional = result[0]
self.assertEquals(answer, [])
self.assertEquals(
authority, [dns.RRHeader(
subdomain, dns.NS, ttl=soa_record.expire,
payload=nameserver, auth=False)])
self.assertEquals(additional, [])
def test_referral(self):
"""
When an I{NS} record is found for a child zone, it is included in the
authority section of the response. It is marked as non-authoritative if
the authority is not also authoritative for the child zone (RFC 2181,
section 6.1).
"""
self._referralTest('lookupAddress')
def test_allRecordsReferral(self):
"""
A referral is also generated for a request of type C{ALL_RECORDS}.
"""
self._referralTest('lookupAllRecords')
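# Illustrative helper (not Twisted source): a referral carries the child
# zone's NS record in the authority section, marked auth=False because this
# server is not authoritative for the child zone (RFC 2181, section 6.1).
def _exampleReferralHeader(subdomain, nameserver, ttl):
    return dns.RRHeader(subdomain, dns.NS, ttl=ttl, payload=nameserver,
                        auth=False)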
class NoInitialResponseTestCase(unittest.TestCase):
def test_no_answer(self):
"""
If a request returns a L{dns.NS} response, but we can't connect to the
given server, the request fails with the error returned at connection.
"""
def query(self, *args):
# Pop from the message list, so that it blows up if more queries
# are run than expected.
return succeed(messages.pop(0))
def queryProtocol(self, *args, **kwargs):
return defer.fail(socket.gaierror("Couldn't connect"))
resolver = Resolver(servers=[('0.0.0.0', 0)])
resolver._query = query
messages = []
# Let's patch dns.DNSDatagramProtocol.query, as there is no easy way to
# customize it.
self.patch(dns.DNSDatagramProtocol, "query", queryProtocol)
records = [
dns.RRHeader(name='fooba.com', type=dns.NS, cls=dns.IN, ttl=700,
auth=False,
payload=dns.Record_NS(name='ns.twistedmatrix.com',
ttl=700))]
m = dns.Message(id=999, answer=1, opCode=0, recDes=0, recAv=1, auth=1,
rCode=0, trunc=0, maxSize=0)
m.answers = records
messages.append(m)
return self.assertFailure(
resolver.getHostByName("fooby.com"), socket.gaierror)
| apache-2.0 |
openconnectome/m2g | MR-OCP/MROCPdjango/computation/plotting/HBMPlot.py | 2 | 14895 | #!/usr/bin/env python
# Copyright 2014 Open Connectome Project (http://openconnecto.me)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Author: Disa Mhembere, Johns Hopkins University
# Separated: 10/2/2012
# Plot all .np arrays in a common dir on the same axis & save
# 1 indexed
import matplotlib
matplotlib.use("Agg")
import matplotlib.pyplot as plt
import pylab as pl
import numpy as np
import os
import sys
from glob import glob
import argparse
import scipy
from scipy import interpolate
import inspect
import csv
# Known issue: the MAD invariant is not handled/plotted yet
def lineno():
'''
Get current line number
'''
return str(inspect.getframeinfo(inspect.currentframe())[1])
def csvtodict(fn ='/home/disa/code/mrn_covariates_n120-v4.csv', char = 'class'):
if char == 'class':
col = 4
elif char == 'gender':
col = 2
reader = csv.reader(open(fn, 'rb'))
outdict = dict()
for row in reader:
outdict[row[0].strip()] = row[col].strip()
#print row[0] ,'TYPE' ,outdict[row[0]]
#import pdb; pdb.set_trace()
return outdict
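# Illustrative usage (hypothetical path and ids, not part of the pipeline):
# for a covariates CSV whose first column is a subject id and whose fifth
# column (index 4) holds the class label, csvtodict maps id -> label.
def _example_csvtodict():
  return csvtodict(fn='covariates.csv', char='class')  # e.g. {'subj001': '0'}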
def pickprintcolor(charDict, arrfn):
'''
  charDict: dict mapping subject id -> class label ('0', '1' or '2')
  arrfn: invariant file path whose basename starts with the subject id
'''
if (charDict[(arrfn.split('/')[-1]).split('_')[0]] == '0'):
plot_color = 'grey'
elif (charDict[(arrfn.split('/')[-1]).split('_')[0]] == '1'):
plot_color = 'blue'
elif (charDict[(arrfn.split('/')[-1]).split('_')[0]] == '2'):
plot_color = 'green'
else:
print "[ERROR]: %s, no match on subject type" % lineno()
return plot_color
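# Illustrative usage (hypothetical subject id and path): class '0' maps to
# grey, '1' to blue and '2' to green, keyed on the basename prefix before
# the first underscore.
def _example_pickprintcolor():
  subj_types = {'subj001': '1'}
  return pickprintcolor(subj_types, '/tmp/Degree/subj001_degree.npy')  # 'blue'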
def plotInvDist(invDir, pngName, numBins =100):
subj_types = csvtodict(char = 'class') # load up subject types
# ClustCoeff Degree Eigen MAD numEdges.npy ScanStat Triangle
MADdir = "MAD"
ccDir = "ClustCoeff"
DegDir = "Degree"
EigDir = "Eigen/values"
SS1dir = "ScanStat1"
triDir = "Triangle"
invDirs = [triDir, ccDir, SS1dir, DegDir ]
if not os.path.exists(invDir):
print "%s does not exist" % invDir
sys.exit(1)
pl.figure(2)
fig_gl, axes = pl.subplots(nrows=3, ncols=2)
for idx, drcty in enumerate (invDirs):
for arrfn in glob(os.path.join(invDir, drcty,'*.npy')):
try:
arr = np.load(arrfn)
arr = np.log(arr[arr.nonzero()])
print "Processing %s..." % arrfn
except:
print "[ERROR]: Line %s: Invariant file not found %s" % (lineno(),arrfn)
pl.figure(1)
n, bins, patches = pl.hist(arr, bins=numBins , range=None, normed=False, weights=None, cumulative=False, \
bottom=None, histtype='stepfilled', align='mid', orientation='vertical', \
rwidth=None, log=False, color=None, label=None, hold=None)
n = np.append(n,0)
n = n/float(sum(n))
fig = pl.figure(2)
fig.subplots_adjust(hspace=.5)
ax = pl.subplot(3,2,idx+1)
#if idx == 0:
# plt.axis([0, 35, 0, 0.04])
# ax.set_yticks(scipy.arange(0,0.04,0.01))
#if idx == 1 or idx == 2:
# ax.set_yticks(scipy.arange(0,0.03,0.01))
#if idx == 3:
# ax.set_yticks(scipy.arange(0,0.04,0.01))
# Interpolation
f = interpolate.interp1d(bins, n, kind='cubic')
      x = np.arange(bins[0],bins[-1],0.03) # vary linspace step
interp = f(x)
ltz = interp < 0
interp[ltz] = 0
plot_color = pickprintcolor(subj_types, arrfn)
#pl.plot(x, interp, color = plot_color, linewidth=1)
pl.plot(interp, color = plot_color, linewidth=1)
if idx == 0:
pl.ylabel('Probability')
pl.xlabel('Log Number of Local Triangles')
if idx == 1:
#pl.ylabel('Probability') #**
pl.xlabel('Log Local Clustering Coefficient')
if idx == 2:
pl.ylabel('Probability')
pl.xlabel('Log Scan Statistic 1')
if idx == 3:
#pl.ylabel('Probability') #**
pl.xlabel('Log Degree')
''' Eigenvalues '''
ax = pl.subplot(3,2,5)
ax.set_yticks(scipy.arange(0,16,4))
for eigValInstance in glob(os.path.join(invDir, EigDir,"*.npy")):
try:
eigv = np.load(eigValInstance)
except:
print "Eigenvalue array"
n = len(eigv)
sa = (np.sort(eigv)[::-1])
plot_color = pickprintcolor(subj_types, eigValInstance)
pl.plot(range(1,n+1), sa/10000, color=plot_color)
pl.ylabel('Magnitude ($X 10^4$) ')
pl.xlabel('Eigenvalue Rank')
''' Edges '''
arrfn = os.path.join(invDir, 'Globals/numEdges.npy')
try:
arr = np.load(arrfn)
arr = np.log(arr[arr.nonzero()])
print "Processing %s..." % arrfn
except:
print "[ERROR]: Line %s: Invariant file not found %s" % (lineno(),arrfn)
pl.figure(1)
n, bins, patches = pl.hist(arr, bins=10 , range=None, normed=False, weights=None, cumulative=False, \
bottom=None, histtype='stepfilled', align='mid', orientation='vertical', \
rwidth=None, log=False, color=None, label=None, hold=None)
n = np.append(n,0)
fig = pl.figure(2)
ax = pl.subplot(3,2,6)
ax.set_xticks(scipy.arange(17.2,18.1,0.2))
f = interpolate.interp1d(bins, n, kind='cubic')
  x = np.arange(bins[0],bins[-1],0.01) # vary linspace step
interp = f(x)
ltz = interp < 0
interp[ltz] = 0
pl.plot(x, interp,color ='grey' ,linewidth=1)
pl.ylabel('Frequency')
pl.xlabel('Log Global Edge Number')
pl.savefig(pngName+'.pdf')
#################################################
##################################################
##################################################
def plotstdmean(invDir, pngName, numBins =100):
subj_types = csvtodict() # load up subject types
# ClustCoeff Degree Eigen MAD numEdges.npy ScanStat Triangle
MADdir = "MAD"
ccDir = "ClustCoeff"
DegDir = "Degree"
EigDir = "Eigen"
SS1dir = "ScanStat1"
triDir = "Triangle"
invDirs = [triDir, ccDir, SS1dir, DegDir ]
if not os.path.exists(invDir):
print "%s does not exist" % invDir
sys.exit(1)
pl.figure(2)
fig_gl, axes = pl.subplots(nrows=3, ncols=2)
fig_gl.tight_layout()
for idx, drcty in enumerate (invDirs):
mean_arr = []
stddev_arr = []
ones_mean = []
twos_mean = []
zeros_mean = []
ones_std = []
twos_std = []
zeros_std = []
for arrfn in glob(os.path.join(invDir, drcty,'*.npy')):
try:
arr = np.load(arrfn)
arr = np.log(arr[arr.nonzero()])
print "Processing %s..." % arrfn
except:
print "[ERROR]: Line %s: Invariant file not found %s" % (lineno(),arrfn)
pl.figure(1)
n, bins, patches = pl.hist(arr, bins=numBins , range=None, normed=False, weights=None, cumulative=False, \
bottom=None, histtype='stepfilled', align='mid', orientation='vertical', \
rwidth=None, log=False, color=None, label=None, hold=None)
n = np.append(n,0)
n = n/float(sum(n))
fig = pl.figure(2)
fig.subplots_adjust(hspace=.5)
nrows=5
ncols=4
ax = pl.subplot(nrows,ncols,idx+1)
if idx == 0:
plt.axis([0, 35, 0, 0.04])
ax.set_yticks(scipy.arange(0,0.04,0.01))
if idx == 1 or idx == 2:
ax.set_yticks(scipy.arange(0,0.03,0.01))
if idx == 3:
ax.set_yticks(scipy.arange(0,0.04,0.01))
# Interpolation
f = interpolate.interp1d(bins, n, kind='cubic')
      x = np.arange(bins[0],bins[-1],0.03) # vary linspace step
interp = f(x)
ltz = interp < 0
interp[ltz] = 0
      # import pdb; pdb.set_trace()  # leftover debugging breakpoint, disabled
'''
pl.plot(x, interp, color = plot_color, linewidth=1)
if ( subj_types[arrfn.split('/')[-1].split('_')[0]] == '0'):
zeros_mean.append(arr.mean())
zeros_std.append(arr.std())
if ( subj_types[arrfn.split('/')[-1].split('_')[0]] == '1'):
ones_mean.append(arr.mean())
ones_std.append(arr.std())
if ( subj_types[arrfn.split('/')[-1].split('_')[0]] == '2'):
twos_mean.append(arr.mean())
twos_std.append(arr.std())
'''
plot_color = pickprintcolor(subj_types, arrfn)
if idx == 0:
pl.ylabel('Probability')
pl.xlabel('Log Number of Local Triangles')
if idx == 1:
#pl.ylabel('Probability') #**
pl.xlabel('Log Local Clustering Coefficient')
if idx == 2:
pl.ylabel('Probability')
pl.xlabel('Log Scan Statistic 1')
if idx == 3:
#pl.ylabel('Probability') #**
pl.xlabel('Log Degree')
''' Eigenvalues '''
ax = pl.subplot(3,2,5)
ax.set_yticks(scipy.arange(0,16,4))
for eigValInstance in glob(os.path.join(invDir, EigDir,"*.npy")):
try:
eigv = np.load(eigValInstance)
except:
print "Eigenvalue array"
n = len(eigv)
sa = (np.sort(eigv)[::-1])
plot_color = pickprintcolor(subj_types, eigValInstance)
pl.plot(range(1,n+1), sa/10000, color=plot_color)
pl.ylabel('Magnitude ($X 10^4$) ')
pl.xlabel('eigenvalue rank')
''' Edges '''
arrfn = os.path.join(invDir, 'Globals/numEdges.npy')
try:
arr = np.load(arrfn)
arr = np.log(arr[arr.nonzero()])
print "Processing %s..." % arrfn
except:
print "[ERROR]: Line %s: Invariant file not found %s" % (lineno(),arrfn)
pl.figure(1)
n, bins, patches = pl.hist(arr, bins=10 , range=None, normed=False, weights=None, cumulative=False, \
bottom=None, histtype='stepfilled', align='mid', orientation='vertical', \
rwidth=None, log=False, color=None, label=None, hold=None)
n = np.append(n,0)
fig = pl.figure(2)
ax = pl.subplot(3,2,6)
ax.set_xticks(scipy.arange(17.2,18.1,0.2))
f = interpolate.interp1d(bins, n, kind='cubic')
  x = np.arange(bins[0],bins[-1],0.01) # vary linspace step
interp = f(x)
ltz = interp < 0
interp[ltz] = 0
pl.plot(x, interp,color ='grey' ,linewidth=1)
pl.ylabel('Frequency')
pl.xlabel('log global edge number')
pl.savefig(pngName+'.png')
##################################################
##################################################
##################################################
def OLDplotstdmean(invDir, pngName):
subj_types = csvtodict() # load up subject types
# ClustCoeff Degree Eigen MAD numEdges.npy ScanStat Triangle
ccDir = "ClustCoeff"
DegDir = "Degree"
EigDir = "Eigen"
SS1dir = "ScanStat1"
triDir = "Triangle"
invDirs = [triDir, ccDir, SS1dir, DegDir ]
#invDirs = []
if not os.path.exists(invDir):
print "%s does not exist" % invDir
sys.exit(1)
pl.figure(1)
nrows=4
ncols=2
fig_gl, axes = pl.subplots(nrows=nrows, ncols=ncols)
fig_gl.tight_layout()
for idx, drcty in enumerate (invDirs):
mean_arr = []
stddev_arr = []
ones_mean = []
twos_mean = []
zeros_mean = []
ones_std = []
twos_std = []
zeros_std = []
for arrfn in glob(os.path.join(invDir, drcty,'*.npy')):
try:
arr = np.load(arrfn)
mean_arr.append(arr.mean())
stddev_arr.append(arr.std())
if ( subj_types[arrfn.split('/')[-1].split('_')[0]] == '0'):
zeros_mean.append(arr.mean())
zeros_std.append(arr.std())
if ( subj_types[arrfn.split('/')[-1].split('_')[0]] == '1'):
ones_mean.append(arr.mean())
ones_std.append(arr.std())
if ( subj_types[arrfn.split('/')[-1].split('_')[0]] == '2'):
twos_mean.append(arr.mean())
twos_std.append(arr.std())
#mean_arr.append(np.log(arr.mean()))
#stddev_arr.append(np.log(arr.std()))
#arr = np.log(arr[arr.nonzero()])
print "Processing %s..." % arrfn
except:
print "[ERROR]: Line %s: Invariant file not found %s" % (lineno(),arrfn)
mean_arr = np.array(mean_arr)
stddev_arr = np.array(stddev_arr)
ax = pl.subplot(nrows,ncols,(idx*ncols)+1)
ax.set_yticks(scipy.arange(0,1,.25))
pl.gcf().subplots_adjust(bottom=0.07)
'''
if idx == 0:
plt.axis([0, 35, 0, 0.04])
ax.set_yticks(scipy.arange(0,0.04,0.01))
if idx == 1 or idx == 2:
ax.set_yticks(scipy.arange(0,0.03,0.01))
if idx == 3:
ax.set_yticks(scipy.arange(0,0.04,0.01))
'''
# Interpolation
#f = interpolate.interp1d(bins, n, kind='cubic')
    #x = np.arange(bins[0],bins[-1],0.03) # vary linspace step
#interp = f(x)
#ltz = interp < 0
#interp[ltz] = 0
#plot_color = pickprintcolor(subj_types, arrfn)
#pl.plot(x, interp, color = plot_color, linewidth=1)
#pl.plot(mean_arr/float(mean_arr.max()), color = "black", linewidth=1)
if (idx*ncols)+1 == 1:
pl.ylabel('')
pl.xlabel('Norm. Local Triangle Count Mean')
if (idx*ncols)+1 == 3:
#pl.ylabel('Probability') #**
pl.xlabel('Norm. Local Clustering Coefficient Mean')
if (idx*ncols)+1 == 5:
pl.ylabel('Normalized Magnitude Scale')
pl.xlabel('Norm. Scan Statistic 1 Mean')
if (idx*ncols)+1 == 7:
#pl.ylabel('Probability') #**
pl.xlabel('Norm. Local Degree Mean')
pl.plot(zeros_mean, color = 'grey' , linewidth=1)
pl.plot(ones_mean, color = 'blue', linewidth=1)
pl.plot(twos_mean, color = 'green', linewidth=1)
ax = pl.subplot(nrows,ncols,(idx*ncols)+2)
ax.set_yticks(scipy.arange(0,1,.25))
pl.gcf().subplots_adjust(bottom=0.07)
stddev_arr = np.array(stddev_arr)
#pl.plot(stddev_arr/float(stddev_arr.max()), color = "black", linewidth=1)
if (idx*ncols)+2 == 2:
pl.ylabel('')
pl.xlabel('Norm. Local Triangle Count Std Dev')
if (idx*ncols)+2 == 4:
#pl.ylabel('Probability') #**
pl.xlabel('Norm. Local Clustering Coefficient Std Dev')
if (idx*ncols)+2 == 6:
#pl.ylabel('Probability')
pl.xlabel('Norm. Scan Statistic 1 Std Dev')
if (idx*ncols)+2 == 8:
#pl.ylabel('Probability') #**
pl.xlabel('Norm. Local Degree Std Dev')
pl.plot(zeros_std, color = 'grey' , linewidth=1)
pl.plot(ones_std, color = 'blue', linewidth=1)
pl.plot(twos_std, color = 'green', linewidth=1)
pl.savefig(pngName+'.png')
def main():
parser = argparse.ArgumentParser(description='Plot distribution of invariant arrays of several graphs')
parser.add_argument('invDir', action='store',help='The full path of directory containing .npy invariant arrays')
parser.add_argument('pngName', action='store', help='Full path of directory of resulting png file')
parser.add_argument('numBins', type = int, action='store', help='Number of bins')
result = parser.parse_args()
plotInvDist(result.invDir, result.pngName, result.numBins)
#plotstdmean(result.invDir, result.pngName)
if __name__ == '__main__':
main()
#csvtodict(sys.argv[1]) | apache-2.0 |
arush0311/scrapy | scrapy/mail.py | 12 | 4745 | """
Mail sending helpers
See documentation in docs/topics/email.rst
"""
import logging
from six.moves import cStringIO as StringIO
import six
from email.utils import COMMASPACE, formatdate
from six.moves.email_mime_multipart import MIMEMultipart
from six.moves.email_mime_text import MIMEText
from six.moves.email_mime_base import MIMEBase
if six.PY2:
from email.MIMENonMultipart import MIMENonMultipart
from email import Encoders
else:
from email.mime.nonmultipart import MIMENonMultipart
from email import encoders as Encoders
from twisted.internet import defer, reactor, ssl
from .utils.misc import arg_to_iter
logger = logging.getLogger(__name__)
class MailSender(object):
def __init__(self, smtphost='localhost', mailfrom='scrapy@localhost',
smtpuser=None, smtppass=None, smtpport=25, smtptls=False, smtpssl=False, debug=False):
self.smtphost = smtphost
self.smtpport = smtpport
self.smtpuser = smtpuser
self.smtppass = smtppass
self.smtptls = smtptls
self.smtpssl = smtpssl
self.mailfrom = mailfrom
self.debug = debug
@classmethod
def from_settings(cls, settings):
return cls(settings['MAIL_HOST'], settings['MAIL_FROM'], settings['MAIL_USER'],
settings['MAIL_PASS'], settings.getint('MAIL_PORT'),
settings.getbool('MAIL_TLS'), settings.getbool('MAIL_SSL'))
def send(self, to, subject, body, cc=None, attachs=(), mimetype='text/plain', charset=None, _callback=None):
if attachs:
msg = MIMEMultipart()
else:
msg = MIMENonMultipart(*mimetype.split('/', 1))
to = list(arg_to_iter(to))
cc = list(arg_to_iter(cc))
msg['From'] = self.mailfrom
msg['To'] = COMMASPACE.join(to)
msg['Date'] = formatdate(localtime=True)
msg['Subject'] = subject
rcpts = to[:]
if cc:
rcpts.extend(cc)
msg['Cc'] = COMMASPACE.join(cc)
if charset:
msg.set_charset(charset)
if attachs:
msg.attach(MIMEText(body, 'plain', charset or 'us-ascii'))
for attach_name, mimetype, f in attachs:
part = MIMEBase(*mimetype.split('/'))
part.set_payload(f.read())
Encoders.encode_base64(part)
part.add_header('Content-Disposition', 'attachment; filename="%s"' \
% attach_name)
msg.attach(part)
else:
msg.set_payload(body)
if _callback:
_callback(to=to, subject=subject, body=body, cc=cc, attach=attachs, msg=msg)
if self.debug:
logger.debug('Debug mail sent OK: To=%(mailto)s Cc=%(mailcc)s '
'Subject="%(mailsubject)s" Attachs=%(mailattachs)d',
{'mailto': to, 'mailcc': cc, 'mailsubject': subject,
'mailattachs': len(attachs)})
return
dfd = self._sendmail(rcpts, msg.as_string())
dfd.addCallbacks(self._sent_ok, self._sent_failed,
callbackArgs=[to, cc, subject, len(attachs)],
errbackArgs=[to, cc, subject, len(attachs)])
reactor.addSystemEventTrigger('before', 'shutdown', lambda: dfd)
return dfd
def _sent_ok(self, result, to, cc, subject, nattachs):
logger.info('Mail sent OK: To=%(mailto)s Cc=%(mailcc)s '
'Subject="%(mailsubject)s" Attachs=%(mailattachs)d',
{'mailto': to, 'mailcc': cc, 'mailsubject': subject,
'mailattachs': nattachs})
def _sent_failed(self, failure, to, cc, subject, nattachs):
errstr = str(failure.value)
logger.error('Unable to send mail: To=%(mailto)s Cc=%(mailcc)s '
'Subject="%(mailsubject)s" Attachs=%(mailattachs)d'
'- %(mailerr)s',
{'mailto': to, 'mailcc': cc, 'mailsubject': subject,
'mailattachs': nattachs, 'mailerr': errstr})
def _sendmail(self, to_addrs, msg):
# Import twisted.mail here because it is not available in python3
from twisted.mail.smtp import ESMTPSenderFactory
msg = StringIO(msg)
d = defer.Deferred()
factory = ESMTPSenderFactory(self.smtpuser, self.smtppass, self.mailfrom, \
to_addrs, msg, d, heloFallback=True, requireAuthentication=False, \
requireTransportSecurity=self.smtptls)
factory.noisy = False
if self.smtpssl:
reactor.connectSSL(self.smtphost, self.smtpport, factory, ssl.ClientContextFactory())
else:
reactor.connectTCP(self.smtphost, self.smtpport, factory)
return d
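# Illustrative usage sketch (not part of Scrapy); all hosts, addresses and
# file names below are placeholders.
def _example_send_mail(settings):
    """Build a sender from crawler settings and send a plain-text mail with
    a single CSV attachment; 'report.csv' is a hypothetical local file."""
    mailer = MailSender.from_settings(settings)
    attachment = ('report.csv', 'text/csv', open('report.csv', 'rb'))
    return mailer.send(
        to=['admin@example.com'], subject='Crawl finished', body='All done.',
        cc=['ops@example.com'], attachs=[attachment])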
| bsd-3-clause |
kaplun/inspire-next | inspirehep/modules/literaturesuggest/__init__.py | 1 | 1056 | # -*- coding: utf-8 -*-
#
# This file is part of INSPIRE.
# Copyright (C) 2014-2017 CERN.
#
# INSPIRE is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# INSPIRE is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with INSPIRE. If not, see <http://www.gnu.org/licenses/>.
#
# In applying this license, CERN does not waive the privileges and immunities
# granted to it by virtue of its status as an Intergovernmental Organization
# or submit itself to any jurisdiction.
"""INSPIRE authors."""
from __future__ import absolute_import, division, print_function
from .ext import INSPIRELiteratureSuggestion # noqa: F401
| gpl-3.0 |
stinos/micropython | tests/extmod/uasyncio_lock_cancel.py | 16 | 1373 | # Test that locks work when cancelling multiple waiters on the lock
try:
import uasyncio as asyncio
except ImportError:
try:
import asyncio
except ImportError:
print("SKIP")
raise SystemExit
async def task(i, lock, lock_flag):
print("task", i, "start")
try:
await lock.acquire()
except asyncio.CancelledError:
print("task", i, "cancel")
return
print("task", i, "lock_flag", lock_flag[0])
lock_flag[0] = True
await asyncio.sleep(0)
lock.release()
lock_flag[0] = False
print("task", i, "done")
async def main():
# Create a lock and acquire it so the tasks below must wait
lock = asyncio.Lock()
await lock.acquire()
lock_flag = [True]
# Create 4 tasks and let them all run
t0 = asyncio.create_task(task(0, lock, lock_flag))
t1 = asyncio.create_task(task(1, lock, lock_flag))
t2 = asyncio.create_task(task(2, lock, lock_flag))
t3 = asyncio.create_task(task(3, lock, lock_flag))
await asyncio.sleep(0)
# Cancel 2 of the tasks (which are waiting on the lock) and release the lock
t1.cancel()
t2.cancel()
lock.release()
lock_flag[0] = False
# Let the tasks run to completion
for _ in range(4):
await asyncio.sleep(0)
    # The lock should now be unlocked, so this prints False
print(lock.locked())
asyncio.run(main())
| mit |
travisjwarren/train_track | train_track/tests/apps/event/test_event_delete_attendee_views.py | 1 | 2680 | __author__ = 'traviswarren'
from django.test import TestCase
from django.core.urlresolvers import reverse
from django.contrib.auth.models import User
from train_track.apps.profile.models import UserProfileEvent
from train_track.tests.model_factory import UserProfileEventFactory
class EventGetDeleteViewTestCases(TestCase):
def setUp(self):
self.user = User.objects.create_user(
username='user',
email='[email protected]',
password='password')
self.staff = User.objects.create_superuser(
username='staff',
email='[email protected]',
password='password')
def test_get_profile_user_event_delete_is_not_public(self):
user_profile_event = UserProfileEventFactory()
response = self.client.get(reverse('event:event-attendee-delete', kwargs={'pk': user_profile_event.id}))
self.assertContains(response, 'Access Denied', status_code=403)
def test_get_profile_user_event_delete_is_not_user_accessible(self):
user_profile_event = UserProfileEventFactory()
self.assertTrue(self.client.login(username=self.user.username, password='password'))
response = self.client.get(reverse('event:event-attendee-delete', kwargs={'pk': user_profile_event.id}))
self.assertContains(response, 'Access Denied', status_code=403)
def test_get_profile_user_delete_event_attendee_is_staff_only(self):
user_profile_event = UserProfileEventFactory()
self.assertTrue(self.client.login(username=self.staff.username, password='password'))
response = self.client.get(reverse('event:event-attendee-delete', kwargs={'pk': user_profile_event.id}))
self.assertTemplateUsed(response, 'profile/userprofileevent_confirm_delete.html')
class EventPostDeleteViewTestCases(TestCase):
def setUp(self):
self.user = User.objects.create_user(
username='user',
email='[email protected]',
password='password')
self.staff = User.objects.create_superuser(
username='staff',
email='[email protected]',
password='password')
def test_delete_event_attendee_is_staff_only(self):
user_profile_event = UserProfileEventFactory()
self.assertTrue(self.client.login(username=self.staff.username, password='password'))
response = self.client.delete(reverse('event:event-attendee-delete', kwargs={'pk': user_profile_event.id}))
self.failIf(UserProfileEvent.objects.filter(id=user_profile_event.id).exists())
self.assertRedirects(response, 'http://testserver/events/{id}/'.format(id=user_profile_event.event.id)) | gpl-3.0 |
reinout/django | tests/migrations/test_loader.py | 26 | 19137 | from django.db import connection, connections
from django.db.migrations.exceptions import (
AmbiguityError, InconsistentMigrationHistory, NodeNotFoundError,
)
from django.db.migrations.loader import MigrationLoader
from django.db.migrations.recorder import MigrationRecorder
from django.test import TestCase, modify_settings, override_settings
class RecorderTests(TestCase):
"""
Tests recording migrations as applied or not.
"""
multi_db = True
def test_apply(self):
"""
Tests marking migrations as applied/unapplied.
"""
recorder = MigrationRecorder(connection)
self.assertEqual(
{(x, y) for (x, y) in recorder.applied_migrations() if x == "myapp"},
set(),
)
recorder.record_applied("myapp", "0432_ponies")
self.assertEqual(
{(x, y) for (x, y) in recorder.applied_migrations() if x == "myapp"},
{("myapp", "0432_ponies")},
)
# That should not affect records of another database
recorder_other = MigrationRecorder(connections['other'])
self.assertEqual(
{(x, y) for (x, y) in recorder_other.applied_migrations() if x == "myapp"},
set(),
)
recorder.record_unapplied("myapp", "0432_ponies")
self.assertEqual(
{(x, y) for (x, y) in recorder.applied_migrations() if x == "myapp"},
set(),
)
class LoaderTests(TestCase):
"""
Tests the disk and database loader, and running through migrations
in memory.
"""
@override_settings(MIGRATION_MODULES={"migrations": "migrations.test_migrations"})
@modify_settings(INSTALLED_APPS={'append': 'basic'})
def test_load(self):
"""
Makes sure the loader can load the migrations for the test apps,
and then render them out to a new Apps.
"""
# Load and test the plan
migration_loader = MigrationLoader(connection)
self.assertEqual(
migration_loader.graph.forwards_plan(("migrations", "0002_second")),
[
("migrations", "0001_initial"),
("migrations", "0002_second"),
],
)
# Now render it out!
project_state = migration_loader.project_state(("migrations", "0002_second"))
self.assertEqual(len(project_state.models), 2)
author_state = project_state.models["migrations", "author"]
self.assertEqual(
[x for x, y in author_state.fields],
["id", "name", "slug", "age", "rating"]
)
book_state = project_state.models["migrations", "book"]
self.assertEqual(
[x for x, y in book_state.fields],
["id", "author"]
)
# Ensure we've included unmigrated apps in there too
self.assertIn("basic", project_state.real_apps)
@override_settings(MIGRATION_MODULES={"migrations": "migrations.test_migrations_unmigdep"})
def test_load_unmigrated_dependency(self):
"""
Makes sure the loader can load migrations with a dependency on an unmigrated app.
"""
# Load and test the plan
migration_loader = MigrationLoader(connection)
self.assertEqual(
migration_loader.graph.forwards_plan(("migrations", "0001_initial")),
[
('contenttypes', '0001_initial'),
('auth', '0001_initial'),
("migrations", "0001_initial"),
],
)
# Now render it out!
project_state = migration_loader.project_state(("migrations", "0001_initial"))
self.assertEqual(len([m for a, m in project_state.models if a == "migrations"]), 1)
book_state = project_state.models["migrations", "book"]
self.assertEqual(
[x for x, y in book_state.fields],
["id", "user"]
)
@override_settings(MIGRATION_MODULES={"migrations": "migrations.test_migrations_run_before"})
def test_run_before(self):
"""
Makes sure the loader uses Migration.run_before.
"""
# Load and test the plan
migration_loader = MigrationLoader(connection)
self.assertEqual(
migration_loader.graph.forwards_plan(("migrations", "0002_second")),
[
("migrations", "0001_initial"),
("migrations", "0003_third"),
("migrations", "0002_second"),
],
)
@override_settings(MIGRATION_MODULES={
"migrations": "migrations.test_migrations_first",
"migrations2": "migrations2.test_migrations_2_first",
})
@modify_settings(INSTALLED_APPS={'append': 'migrations2'})
def test_first(self):
"""
Makes sure the '__first__' migrations build correctly.
"""
migration_loader = MigrationLoader(connection)
self.assertEqual(
migration_loader.graph.forwards_plan(("migrations", "second")),
[
("migrations", "thefirst"),
("migrations2", "0001_initial"),
("migrations2", "0002_second"),
("migrations", "second"),
],
)
@override_settings(MIGRATION_MODULES={"migrations": "migrations.test_migrations"})
def test_name_match(self):
"Tests prefix name matching"
migration_loader = MigrationLoader(connection)
self.assertEqual(
migration_loader.get_migration_by_prefix("migrations", "0001").name,
"0001_initial",
)
with self.assertRaises(AmbiguityError):
migration_loader.get_migration_by_prefix("migrations", "0")
with self.assertRaises(KeyError):
migration_loader.get_migration_by_prefix("migrations", "blarg")
def test_load_import_error(self):
with override_settings(MIGRATION_MODULES={"migrations": "import_error_package"}):
with self.assertRaises(ImportError):
MigrationLoader(connection)
def test_load_module_file(self):
with override_settings(MIGRATION_MODULES={"migrations": "migrations.faulty_migrations.file"}):
loader = MigrationLoader(connection)
self.assertIn(
"migrations", loader.unmigrated_apps,
"App with migrations module file not in unmigrated apps."
)
def test_load_empty_dir(self):
with override_settings(MIGRATION_MODULES={"migrations": "migrations.faulty_migrations.namespace"}):
loader = MigrationLoader(connection)
self.assertIn(
"migrations", loader.unmigrated_apps,
"App missing __init__.py in migrations module not in unmigrated apps."
)
@override_settings(
INSTALLED_APPS=['migrations.migrations_test_apps.migrated_app'],
)
def test_marked_as_migrated(self):
"""
Undefined MIGRATION_MODULES implies default migration module.
"""
migration_loader = MigrationLoader(connection)
self.assertEqual(migration_loader.migrated_apps, {'migrated_app'})
self.assertEqual(migration_loader.unmigrated_apps, set())
@override_settings(
INSTALLED_APPS=['migrations.migrations_test_apps.migrated_app'],
MIGRATION_MODULES={"migrated_app": None},
)
def test_marked_as_unmigrated(self):
"""
MIGRATION_MODULES allows disabling of migrations for a particular app.
"""
migration_loader = MigrationLoader(connection)
self.assertEqual(migration_loader.migrated_apps, set())
self.assertEqual(migration_loader.unmigrated_apps, {'migrated_app'})
@override_settings(
INSTALLED_APPS=['migrations.migrations_test_apps.migrated_app'],
MIGRATION_MODULES={'migrated_app': 'missing-module'},
)
def test_explicit_missing_module(self):
"""
If a MIGRATION_MODULES override points to a missing module, the error
raised during the importation attempt should be propagated unless
`ignore_no_migrations=True`.
"""
with self.assertRaisesMessage(ImportError, 'missing-module'):
migration_loader = MigrationLoader(connection)
migration_loader = MigrationLoader(connection, ignore_no_migrations=True)
self.assertEqual(migration_loader.migrated_apps, set())
self.assertEqual(migration_loader.unmigrated_apps, {'migrated_app'})
@override_settings(MIGRATION_MODULES={"migrations": "migrations.test_migrations_squashed"})
def test_loading_squashed(self):
"Tests loading a squashed migration"
migration_loader = MigrationLoader(connection)
recorder = MigrationRecorder(connection)
self.addCleanup(recorder.flush)
# Loading with nothing applied should just give us the one node
self.assertEqual(
len([x for x in migration_loader.graph.nodes if x[0] == "migrations"]),
1,
)
# However, fake-apply one migration and it should now use the old two
recorder.record_applied("migrations", "0001_initial")
migration_loader.build_graph()
self.assertEqual(
len([x for x in migration_loader.graph.nodes if x[0] == "migrations"]),
2,
)
@override_settings(MIGRATION_MODULES={"migrations": "migrations.test_migrations_squashed_complex"})
def test_loading_squashed_complex(self):
"Tests loading a complex set of squashed migrations"
loader = MigrationLoader(connection)
recorder = MigrationRecorder(connection)
self.addCleanup(recorder.flush)
def num_nodes():
plan = set(loader.graph.forwards_plan(('migrations', '7_auto')))
return len(plan - loader.applied_migrations)
# Empty database: use squashed migration
loader.build_graph()
self.assertEqual(num_nodes(), 5)
# Starting at 1 or 2 should use the squashed migration too
recorder.record_applied("migrations", "1_auto")
loader.build_graph()
self.assertEqual(num_nodes(), 4)
recorder.record_applied("migrations", "2_auto")
loader.build_graph()
self.assertEqual(num_nodes(), 3)
# However, starting at 3 to 5 cannot use the squashed migration
recorder.record_applied("migrations", "3_auto")
loader.build_graph()
self.assertEqual(num_nodes(), 4)
recorder.record_applied("migrations", "4_auto")
loader.build_graph()
self.assertEqual(num_nodes(), 3)
        # Starting at 5 through 7 we are past the squashed migrations
recorder.record_applied("migrations", "5_auto")
loader.build_graph()
self.assertEqual(num_nodes(), 2)
recorder.record_applied("migrations", "6_auto")
loader.build_graph()
self.assertEqual(num_nodes(), 1)
recorder.record_applied("migrations", "7_auto")
loader.build_graph()
self.assertEqual(num_nodes(), 0)
@override_settings(MIGRATION_MODULES={
"app1": "migrations.test_migrations_squashed_complex_multi_apps.app1",
"app2": "migrations.test_migrations_squashed_complex_multi_apps.app2",
})
@modify_settings(INSTALLED_APPS={'append': [
"migrations.test_migrations_squashed_complex_multi_apps.app1",
"migrations.test_migrations_squashed_complex_multi_apps.app2",
]})
def test_loading_squashed_complex_multi_apps(self):
loader = MigrationLoader(connection)
loader.build_graph()
plan = set(loader.graph.forwards_plan(('app1', '4_auto')))
expected_plan = {
('app1', '1_auto'),
('app2', '1_squashed_2'),
('app1', '2_squashed_3'),
('app1', '4_auto'),
}
self.assertEqual(plan, expected_plan)
@override_settings(MIGRATION_MODULES={
"app1": "migrations.test_migrations_squashed_complex_multi_apps.app1",
"app2": "migrations.test_migrations_squashed_complex_multi_apps.app2",
})
@modify_settings(INSTALLED_APPS={'append': [
"migrations.test_migrations_squashed_complex_multi_apps.app1",
"migrations.test_migrations_squashed_complex_multi_apps.app2",
]})
def test_loading_squashed_complex_multi_apps_partially_applied(self):
loader = MigrationLoader(connection)
recorder = MigrationRecorder(connection)
recorder.record_applied('app1', '1_auto')
recorder.record_applied('app1', '2_auto')
loader.build_graph()
plan = set(loader.graph.forwards_plan(('app1', '4_auto')))
plan = plan - loader.applied_migrations
expected_plan = {
('app2', '1_squashed_2'),
('app1', '3_auto'),
('app1', '4_auto'),
}
self.assertEqual(plan, expected_plan)
@override_settings(MIGRATION_MODULES={"migrations": "migrations.test_migrations_squashed_erroneous"})
def test_loading_squashed_erroneous(self):
"Tests loading a complex but erroneous set of squashed migrations"
loader = MigrationLoader(connection)
recorder = MigrationRecorder(connection)
self.addCleanup(recorder.flush)
def num_nodes():
plan = set(loader.graph.forwards_plan(('migrations', '7_auto')))
return len(plan - loader.applied_migrations)
# Empty database: use squashed migration
loader.build_graph()
self.assertEqual(num_nodes(), 5)
# Starting at 1 or 2 should use the squashed migration too
recorder.record_applied("migrations", "1_auto")
loader.build_graph()
self.assertEqual(num_nodes(), 4)
recorder.record_applied("migrations", "2_auto")
loader.build_graph()
self.assertEqual(num_nodes(), 3)
# However, starting at 3 or 4, nonexistent migrations would be needed.
msg = ("Migration migrations.6_auto depends on nonexistent node ('migrations', '5_auto'). "
"Django tried to replace migration migrations.5_auto with any of "
"[migrations.3_squashed_5] but wasn't able to because some of the replaced "
"migrations are already applied.")
recorder.record_applied("migrations", "3_auto")
with self.assertRaisesMessage(NodeNotFoundError, msg):
loader.build_graph()
recorder.record_applied("migrations", "4_auto")
with self.assertRaisesMessage(NodeNotFoundError, msg):
loader.build_graph()
        # Starting at 5 through 7 we are past the squashed migrations
recorder.record_applied("migrations", "5_auto")
loader.build_graph()
self.assertEqual(num_nodes(), 2)
recorder.record_applied("migrations", "6_auto")
loader.build_graph()
self.assertEqual(num_nodes(), 1)
recorder.record_applied("migrations", "7_auto")
loader.build_graph()
self.assertEqual(num_nodes(), 0)
@override_settings(
MIGRATION_MODULES={'migrations': 'migrations.test_migrations'},
INSTALLED_APPS=['migrations'],
)
def test_check_consistent_history(self):
loader = MigrationLoader(connection=None)
loader.check_consistent_history(connection)
recorder = MigrationRecorder(connection)
recorder.record_applied('migrations', '0002_second')
msg = (
"Migration migrations.0002_second is applied before its dependency "
"migrations.0001_initial on database 'default'."
)
with self.assertRaisesMessage(InconsistentMigrationHistory, msg):
loader.check_consistent_history(connection)
@override_settings(
MIGRATION_MODULES={'migrations': 'migrations.test_migrations_squashed_extra'},
INSTALLED_APPS=['migrations'],
)
def test_check_consistent_history_squashed(self):
"""
MigrationLoader.check_consistent_history() should ignore unapplied
squashed migrations that have all of their `replaces` applied.
"""
loader = MigrationLoader(connection=None)
recorder = MigrationRecorder(connection)
recorder.record_applied('migrations', '0001_initial')
recorder.record_applied('migrations', '0002_second')
loader.check_consistent_history(connection)
recorder.record_applied('migrations', '0003_third')
loader.check_consistent_history(connection)
@override_settings(MIGRATION_MODULES={
"app1": "migrations.test_migrations_squashed_ref_squashed.app1",
"app2": "migrations.test_migrations_squashed_ref_squashed.app2",
})
@modify_settings(INSTALLED_APPS={'append': [
"migrations.test_migrations_squashed_ref_squashed.app1",
"migrations.test_migrations_squashed_ref_squashed.app2",
]})
def test_loading_squashed_ref_squashed(self):
"Tests loading a squashed migration with a new migration referencing it"
r"""
The sample migrations are structured like this:
app_1 1 --> 2 ---------------------*--> 3 *--> 4
\ / /
*-------------------*----/--> 2_sq_3 --*
\ / /
=============== \ ============= / == / ======================
app_2 *--> 1_sq_2 --* /
\ /
*--> 1 --> 2 --*
Where 2_sq_3 is a replacing migration for 2 and 3 in app_1,
as 1_sq_2 is a replacing migration for 1 and 2 in app_2.
"""
loader = MigrationLoader(connection)
recorder = MigrationRecorder(connection)
self.addCleanup(recorder.flush)
# Load with nothing applied: both migrations squashed.
loader.build_graph()
plan = set(loader.graph.forwards_plan(('app1', '4_auto')))
plan = plan - loader.applied_migrations
expected_plan = {
('app1', '1_auto'),
('app2', '1_squashed_2'),
('app1', '2_squashed_3'),
('app1', '4_auto'),
}
self.assertEqual(plan, expected_plan)
# Fake-apply a few from app1: unsquashes migration in app1.
recorder.record_applied('app1', '1_auto')
recorder.record_applied('app1', '2_auto')
loader.build_graph()
plan = set(loader.graph.forwards_plan(('app1', '4_auto')))
plan = plan - loader.applied_migrations
expected_plan = {
('app2', '1_squashed_2'),
('app1', '3_auto'),
('app1', '4_auto'),
}
self.assertEqual(plan, expected_plan)
# Fake-apply one from app2: unsquashes migration in app2 too.
recorder.record_applied('app2', '1_auto')
loader.build_graph()
plan = set(loader.graph.forwards_plan(('app1', '4_auto')))
plan = plan - loader.applied_migrations
expected_plan = {
('app2', '2_auto'),
('app1', '3_auto'),
('app1', '4_auto'),
}
self.assertEqual(plan, expected_plan)
| bsd-3-clause |
kmoocdev/edx-platform | docs/shared/conf.py | 158 | 10580 | # -*- coding: utf-8 -*-
#
# getting_started documentation build configuration file, created by
# sphinx-quickstart on Tue Apr 16 11:19:12 2013.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# -----------------------------------------------------------------------------
# Common config
#
# This file is imported by the different project conf.py files (in
# course_authors/, data/, and developers/). It includes configuration options
# common to all three.
#
# -----------------------------------------------------------------------------
import os
BASEDIR = os.path.dirname(os.path.abspath(__file__))
def add_base(paths):
"""
Returns a list of paths relative to BASEDIR.
paths: a list of paths
"""
return [os.path.join(BASEDIR, x) for x in paths]
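# Worked example (hypothetical location): if this file lives in
# /repo/docs/shared, then add_base(['_templates', '_static']) returns
# ['/repo/docs/shared/_templates', '/repo/docs/shared/_static'].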
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.doctest', 'sphinx.ext.intersphinx', 'sphinx.ext.coverage', 'sphinx.ext.pngmath', 'sphinx.ext.mathjax', 'sphinx.ext.ifconfig']
# Add any paths that contain templates here, relative to this directory.
templates_path = add_base(['_templates'])
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'edX'
copyright = u'2013, EdX Doc Team'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.1'
# The full version, including alpha/beta/rc tags.
release = '0.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<Studio> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
#html_static_path = add_base(['_static'])
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'edxdoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
(
'index',
'getting_started.tex',
u'edX Studio Documentation',
u'EdX Doc Team',
'manual',
),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'getting_started', u'getting_started Documentation',
[u'EdX Doc Team'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(
'index',
'getting_started',
u'getting_started Documentation',
u'EdX Doc Team',
'getting_started',
'One line description of project.',
'Miscellaneous',
),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
# -- Options for Epub output ---------------------------------------------------
# Bibliographic Dublin Core info.
epub_title = u'getting_started'
epub_author = u'EdX Doc Team'
epub_publisher = u'EdX Doc Team'
epub_copyright = u'2013, EdX Doc Team'
# The language of the text. It defaults to the language option
# or en if the language is not set.
#epub_language = ''
# The scheme of the identifier. Typical schemes are ISBN or URL.
#epub_scheme = ''
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#epub_identifier = ''
# A unique identification for the text.
#epub_uid = ''
# A tuple containing the cover image and cover page html template filenames.
#epub_cover = ()
# A sequence of (type, uri, title) tuples for the guide element of content.opf.
#epub_guide = ()
# HTML files that should be inserted before the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_pre_files = []
# HTML files shat should be inserted after the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_post_files = []
# A list of files that should not be packed into the epub file.
#epub_exclude_files = []
# The depth of the table of contents in toc.ncx.
#epub_tocdepth = 3
# Allow duplicate toc entries.
#epub_tocdup = True
# Fix unsupported image types using the PIL.
#epub_fix_images = False
# Scale large images.
#epub_max_image_width = 0
# If 'no', URL addresses will not be shown.
#epub_show_urls = 'inline'
# If false, no index is generated.
#epub_use_index = True
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'http://docs.python.org/': None}
| agpl-3.0 |
longman694/youtube-dl | youtube_dl/extractor/vube.py | 64 | 6933 | from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..compat import (
compat_str,
)
from ..utils import (
int_or_none,
ExtractorError,
)
class VubeIE(InfoExtractor):
IE_NAME = 'vube'
IE_DESC = 'Vube.com'
_VALID_URL = r'https?://vube\.com/(?:[^/]+/)+(?P<id>[\da-zA-Z]{10})\b'
_TESTS = [
{
'url': 'http://vube.com/trending/William+Wei/Y8NUZ69Tf7?t=s',
'md5': 'e7aabe1f8f1aa826b9e4735e1f9cee42',
'info_dict': {
'id': 'Y8NUZ69Tf7',
'ext': 'mp4',
'title': 'Best Drummer Ever [HD]',
'description': 'md5:2d63c4b277b85c2277761c2cf7337d71',
'thumbnail': r're:^https?://.*\.jpg',
'uploader': 'William',
'timestamp': 1406876915,
'upload_date': '20140801',
'duration': 258.051,
'like_count': int,
'dislike_count': int,
'comment_count': int,
'categories': ['amazing', 'hd', 'best drummer ever', 'william wei', 'bucket drumming', 'street drummer', 'epic street drumming'],
},
'skip': 'Not accessible from Travis CI server',
}, {
'url': 'http://vube.com/Chiara+Grispo+Video+Channel/YL2qNPkqon',
'md5': 'db7aba89d4603dadd627e9d1973946fe',
'info_dict': {
'id': 'YL2qNPkqon',
'ext': 'mp4',
'title': 'Chiara Grispo - Price Tag by Jessie J',
'description': 'md5:8ea652a1f36818352428cb5134933313',
'thumbnail': r're:^http://frame\.thestaticvube\.com/snap/[0-9x]+/102e7e63057-5ebc-4f5c-4065-6ce4ebde131f\.jpg$',
'uploader': 'Chiara.Grispo',
'timestamp': 1388743358,
'upload_date': '20140103',
'duration': 170.56,
'like_count': int,
'dislike_count': int,
'comment_count': int,
'categories': ['pop', 'music', 'cover', 'singing', 'jessie j', 'price tag', 'chiara grispo'],
},
'skip': 'Removed due to DMCA',
},
{
'url': 'http://vube.com/SerainaMusic/my-7-year-old-sister-and-i-singing-alive-by-krewella/UeBhTudbfS?t=s&n=1',
'md5': '5d4a52492d76f72712117ce6b0d98d08',
'info_dict': {
'id': 'UeBhTudbfS',
'ext': 'mp4',
'title': 'My 7 year old Sister and I singing "Alive" by Krewella',
'description': 'md5:40bcacb97796339f1690642c21d56f4a',
'thumbnail': r're:^http://frame\.thestaticvube\.com/snap/[0-9x]+/102265d5a9f-0f17-4f6b-5753-adf08484ee1e\.jpg$',
'uploader': 'Seraina',
'timestamp': 1396492438,
'upload_date': '20140403',
'duration': 240.107,
'like_count': int,
'dislike_count': int,
'comment_count': int,
'categories': ['seraina', 'jessica', 'krewella', 'alive'],
},
'skip': 'Removed due to DMCA',
}, {
'url': 'http://vube.com/vote/Siren+Gene/0nmsMY5vEq?n=2&t=s',
'md5': '0584fc13b50f887127d9d1007589d27f',
'info_dict': {
'id': '0nmsMY5vEq',
'ext': 'mp4',
'title': 'Frozen - Let It Go Cover by Siren Gene',
'description': 'My rendition of "Let It Go" originally sung by Idina Menzel.',
'thumbnail': r're:^http://frame\.thestaticvube\.com/snap/[0-9x]+/10283ab622a-86c9-4681-51f2-30d1f65774af\.jpg$',
'uploader': 'Siren',
'timestamp': 1395448018,
'upload_date': '20140322',
'duration': 221.788,
'like_count': int,
'dislike_count': int,
'comment_count': int,
'categories': ['let it go', 'cover', 'idina menzel', 'frozen', 'singing', 'disney', 'siren gene'],
},
'skip': 'Removed due to DMCA',
}
]
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
video_id = mobj.group('id')
video = self._download_json(
'http://vube.com/t-api/v1/video/%s' % video_id, video_id, 'Downloading video JSON')
public_id = video['public_id']
formats = []
for media in video['media'].get('video', []) + video['media'].get('audio', []):
if media['transcoding_status'] != 'processed':
continue
fmt = {
'url': 'http://video.thestaticvube.com/video/%s/%s.mp4' % (media['media_resolution_id'], public_id),
'abr': int(media['audio_bitrate']),
'format_id': compat_str(media['media_resolution_id']),
}
vbr = int(media['video_bitrate'])
if vbr:
fmt.update({
'vbr': vbr,
'height': int(media['height']),
})
formats.append(fmt)
self._sort_formats(formats)
if not formats and video.get('vst') == 'dmca':
raise ExtractorError(
'This video has been removed in response to a complaint received under the US Digital Millennium Copyright Act.',
expected=True)
title = video['title']
description = video.get('description')
thumbnail = self._proto_relative_url(video.get('thumbnail_src'), scheme='http:')
uploader = video.get('user_alias') or video.get('channel')
timestamp = int_or_none(video.get('upload_time'))
duration = video['duration']
view_count = video.get('raw_view_count')
like_count = video.get('total_likes')
dislike_count = video.get('total_hates')
comments = video.get('comments')
comment_count = None
if comments is None:
comment_data = self._download_json(
'http://vube.com/api/video/%s/comment' % video_id,
video_id, 'Downloading video comment JSON', fatal=False)
if comment_data is not None:
comment_count = int_or_none(comment_data.get('total'))
else:
comment_count = len(comments)
categories = [tag['text'] for tag in video['tags']]
return {
'id': video_id,
'formats': formats,
'title': title,
'description': description,
'thumbnail': thumbnail,
'uploader': uploader,
'timestamp': timestamp,
'duration': duration,
'view_count': view_count,
'like_count': like_count,
'dislike_count': dislike_count,
'comment_count': comment_count,
'categories': categories,
}
| unlicense |
kleins11/intdatasci-byte2 | jmankoff-mobile/lib/werkzeug/__init__.py | 55 | 6917 | # -*- coding: utf-8 -*-
"""
werkzeug
~~~~~~~~
Werkzeug is the Swiss Army knife of Python web development.
It provides useful classes and functions for any WSGI application to make
the life of a python web developer much easier. All of the provided
classes are independent from each other so you can mix it with any other
library.
:copyright: (c) 2014 by the Werkzeug Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from types import ModuleType
import sys
from werkzeug._compat import iteritems
# the version. Usually set automatically by a script.
__version__ = '0.11'
# This import magic raises concerns quite often which is why the implementation
# and motivation is explained here in detail now.
#
# The majority of the functions and classes provided by Werkzeug work on the
# HTTP and WSGI layer. There is no useful grouping for those which is why
# they are all importable from "werkzeug" instead of the modules where they are
# implemented. The downside of that is, that now everything would be loaded at
# once, even if unused.
#
# The implementation of a lazy-loading module in this file replaces the
# werkzeug package when imported from within. Attribute access to the werkzeug
# module will then lazily import from the modules that implement the objects.
# import mapping to objects in other modules
all_by_module = {
'werkzeug.debug': ['DebuggedApplication'],
'werkzeug.local': ['Local', 'LocalManager', 'LocalProxy', 'LocalStack',
'release_local'],
'werkzeug.serving': ['run_simple'],
'werkzeug.test': ['Client', 'EnvironBuilder', 'create_environ',
'run_wsgi_app'],
'werkzeug.testapp': ['test_app'],
'werkzeug.exceptions': ['abort', 'Aborter'],
'werkzeug.urls': ['url_decode', 'url_encode', 'url_quote',
'url_quote_plus', 'url_unquote', 'url_unquote_plus',
'url_fix', 'Href', 'iri_to_uri', 'uri_to_iri'],
'werkzeug.formparser': ['parse_form_data'],
'werkzeug.utils': ['escape', 'environ_property', 'append_slash_redirect',
'redirect', 'cached_property', 'import_string',
'dump_cookie', 'parse_cookie', 'unescape',
'format_string', 'find_modules', 'header_property',
'html', 'xhtml', 'HTMLBuilder', 'validate_arguments',
'ArgumentValidationError', 'bind_arguments',
'secure_filename'],
'werkzeug.wsgi': ['get_current_url', 'get_host', 'pop_path_info',
'peek_path_info', 'SharedDataMiddleware',
'DispatcherMiddleware', 'ClosingIterator', 'FileWrapper',
'make_line_iter', 'LimitedStream', 'responder',
'wrap_file', 'extract_path_info'],
'werkzeug.datastructures': ['MultiDict', 'CombinedMultiDict', 'Headers',
'EnvironHeaders', 'ImmutableList',
'ImmutableDict', 'ImmutableMultiDict',
'TypeConversionDict',
'ImmutableTypeConversionDict', 'Accept',
'MIMEAccept', 'CharsetAccept',
'LanguageAccept', 'RequestCacheControl',
'ResponseCacheControl', 'ETags', 'HeaderSet',
'WWWAuthenticate', 'Authorization',
'FileMultiDict', 'CallbackDict', 'FileStorage',
'OrderedMultiDict', 'ImmutableOrderedMultiDict'
],
'werkzeug.useragents': ['UserAgent'],
'werkzeug.http': ['parse_etags', 'parse_date', 'http_date', 'cookie_date',
'parse_cache_control_header', 'is_resource_modified',
'parse_accept_header', 'parse_set_header', 'quote_etag',
'unquote_etag', 'generate_etag', 'dump_header',
'parse_list_header', 'parse_dict_header',
'parse_authorization_header',
'parse_www_authenticate_header', 'remove_entity_headers',
'is_entity_header', 'remove_hop_by_hop_headers',
'parse_options_header', 'dump_options_header',
'is_hop_by_hop_header', 'unquote_header_value',
'quote_header_value', 'HTTP_STATUS_CODES'],
'werkzeug.wrappers': ['BaseResponse', 'BaseRequest', 'Request', 'Response',
'AcceptMixin', 'ETagRequestMixin',
'ETagResponseMixin', 'ResponseStreamMixin',
'CommonResponseDescriptorsMixin', 'UserAgentMixin',
'AuthorizationMixin', 'WWWAuthenticateMixin',
'CommonRequestDescriptorsMixin'],
'werkzeug.security': ['generate_password_hash', 'check_password_hash'],
# the undocumented easteregg ;-)
'werkzeug._internal': ['_easteregg']
}
# modules that should be imported when accessed as attributes of werkzeug
attribute_modules = frozenset(['exceptions', 'routing', 'script'])
object_origins = {}
for module, items in iteritems(all_by_module):
for item in items:
object_origins[item] = module
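# object_origins now maps each public name back to its home module, e.g.
# 'run_simple' -> 'werkzeug.serving', ready for the lazy lookup below.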
class module(ModuleType):
"""Automatically import objects from the modules."""
def __getattr__(self, name):
if name in object_origins:
module = __import__(object_origins[name], None, None, [name])
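            # publish every sibling name from that module in one go, so later
            # lookups become plain attribute hits that never reach __getattr__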
for extra_name in all_by_module[module.__name__]:
setattr(self, extra_name, getattr(module, extra_name))
return getattr(module, name)
elif name in attribute_modules:
__import__('werkzeug.' + name)
return ModuleType.__getattribute__(self, name)
def __dir__(self):
"""Just show what we want to show."""
result = list(new_module.__all__)
        result.extend(('__file__', '__path__', '__doc__', '__all__',
                       '__docformat__', '__name__', '__package__',
                       '__version__'))
return result
# keep a reference to this module so that it's not garbage collected
old_module = sys.modules['werkzeug']
# setup the new module and patch it into the dict of loaded modules
new_module = sys.modules['werkzeug'] = module('werkzeug')
new_module.__dict__.update({
'__file__': __file__,
'__package__': 'werkzeug',
'__path__': __path__,
'__doc__': __doc__,
'__version__': __version__,
'__all__': tuple(object_origins) + tuple(attribute_modules),
'__docformat__': 'restructuredtext en'
})
# Due to bootstrapping issues we need to import exceptions here.
# Don't ask :-(
__import__('werkzeug.exceptions')
| apache-2.0 |
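The comment block in the file above explains werkzeug's lazy-loading trick in prose. The following standalone miniature shows the same sys.modules swap with standard-library modules as stand-ins; the _origins mapping and _LazyModule name are illustrative inventions, not werkzeug API.

import sys
from types import ModuleType

# name -> module that really implements it (stdlib picks, illustration only)
_origins = {'sqrt': 'math', 'dumps': 'json'}

class _LazyModule(ModuleType):
    """Import the implementing module only when one of its names is touched."""
    def __getattr__(self, name):
        if name in _origins:
            real = __import__(_origins[name], None, None, [name])
            value = getattr(real, name)
            # cache just the requested name (werkzeug publishes every sibling
            # name at once); later lookups bypass __getattr__ entirely
            setattr(self, name, value)
            return value
        raise AttributeError(name)

# swap ourselves out in sys.modules, the same trick werkzeug plays on itself
_lazy = _LazyModule(__name__)
_lazy.__dict__.update(sys.modules[__name__].__dict__)
sys.modules[__name__] = _lazy

if __name__ == '__main__':
    import __main__
    print(__main__.sqrt(9))          # math is imported only at this moment
    print(__main__.dumps({'a': 1}))  # and json only here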
goliate/sarakha63-persomov | couchpotato/core/media/_base/providers/nzb/binnewz/nzbdownloader.py | 7 | 2796 | import urllib2
from StringIO import StringIO
import gzip
import cookielib
import time
class NZBDownloader(object):
def __init__( self ):
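        # share one cookie jar across every request so session cookies persist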
self.cj = cookielib.CookieJar()
self.opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(self.cj))
self.lastRequestTime = None
    def waitBeforeNextRequest(self):
        # throttle to at most one request every 10 seconds; store the last
        # request time as epoch seconds so the float comparison is meaningful
        if self.lastRequestTime and self.lastRequestTime > time.time() - 10:
            time.sleep(10)
        self.lastRequestTime = time.time()
def open(self, request):
self.waitBeforeNextRequest()
return self.opener.open(request)
class NZBSearchResult(object):
def __init__(self, downloader, sizeInMegs, refererURL, age, nzbid):
self.downloader = downloader
self.refererURL = refererURL
self.sizeInMegs = sizeInMegs
self.age = age
self.nzbid = nzbid
def readRequest(self, request):
        request.add_header('Accept-Encoding', 'gzip')
        request.add_header('Referer', self.refererURL)
request.add_header('User-Agent', 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.17 (KHTML, like Gecko) Chrome/24.0.1312.57 Safari/537.17')
response = self.downloader.open(request)
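        # transparently inflate gzip payloads; the Accept-Encoding header set
        # above invites the server to compress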
if response.info().get('Content-Encoding') == 'gzip':
buf = StringIO( response.read())
f = gzip.GzipFile(fileobj=buf)
return f.read()
else:
return response.read()
def getNZB(self):
pass
class NZBGetURLSearchResult( NZBSearchResult ):
def __init__(self, downloader, nzburl, sizeInMegs, refererURL, age, nzbid):
NZBSearchResult.__init__(self, downloader, sizeInMegs, refererURL, age, nzbid)
self.nzburl = nzburl
def getNZB(self):
request = urllib2.Request( self.nzburl )
self.nzbdata = NZBSearchResult.readRequest( self, request )
return self.nzbdata
class NZBPostURLSearchResult( NZBSearchResult ):
def __init__(self, downloader, nzburl, postData, sizeInMegs, refererURL, age, nzbid):
NZBSearchResult.__init__(self, downloader, sizeInMegs, refererURL, age, nzbid)
self.nzburl = nzburl
self.postData = postData
def getNZB(self):
request = urllib2.Request( self.nzburl, self.postData )
self.nzbdata = NZBSearchResult.readRequest( self, request )
return self.nzbdata
class NZBDataSearchResult( NZBSearchResult ):
def __init__(self, nzbdata, sizeInMegs, refererURL, age, nzbid):
        # the payload is already in memory, so no downloader is required;
        # still forward sizeInMegs so the base initializer gets every argument
        NZBSearchResult.__init__(self, None, sizeInMegs, refererURL, age, nzbid)
self.nzbdata = nzbdata
def getNZB(self):
return self.nzbdata
| gpl-3.0 |
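For context, a hedged sketch of how a caller might drive the classes above: build one throttled NZBDownloader, wrap a result URL in an NZBGetURLSearchResult, and let getNZB perform the cookie-aware, gzip-transparent fetch. Every URL, size, and id value below is a placeholder.

downloader = NZBDownloader()
result = NZBGetURLSearchResult(
    downloader,
    nzburl='http://example.com/get/12345.nzb',      # placeholder
    sizeInMegs=700,                                 # placeholder
    refererURL='http://example.com/details/12345',  # placeholder
    age=3,
    nzbid='12345')
nzb_xml = result.getNZB()  # throttled, then gunzipped if the server compressed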