Dataset schema (per record, one field per line group): repo_name, ref, path, copies, content.
ModelDriven/SIMF
refs/heads/master
Scripts/MOFize.py
1
#
# Python script (Macro) to be run inside of MagicDraw to MOFize a conceptual model.
# - camelCase all names
# - remove un-named associations (SIMF Restructons)
# - add nested package URIs
# - Replace term "Conceptual" with "MOF"
#
# Changes are logged as it modifies the model in place.
#
# Put in a MagicDraw python macro.
#
from java.io import File
from java.lang import Integer
from java.lang import System
from com.nomagic.magicdraw.core import Application
from com.nomagic.magicdraw.core import Project
from com.nomagic.magicdraw.core.project import ProjectsManager
from com.nomagic.magicdraw.core.project import ProjectDescriptorsFactory
from com.nomagic.magicdraw.uml import BaseElement
from com.nomagic.uml2.ext.jmi.helpers import StereotypesHelper
import sys
import string


def log(something):
    Application.getInstance().getGUILog().log(str(something))

#import com.nomagic.uml2
import com.nomagic.magicdraw.openapi.uml

# global array of elements to delete after the traversal
deleteme = []


def doUML(uml, level):
    name = None
    fixedname = ""
    spacer = " "
    legal = string.letters + string.digits
    log("In doUML level " + str(level))
    try:
        name = uml.getName()
        for i in range(level):
            spacer = spacer + ".. "
        if (name and name > ""):
            log(spacer + name)
    except:
        # End of thing with name
        return 0  # don't do anything for unnamed element

    log(uml)
    # Remove non MOF element - unnamed association
    # if (isinstance(uml, com.nomagic.uml2.ext.magicdraw.classes.mdkernel.impl.AssociationImpl)): <<doesn't work <<
    if (not(name) or name == ""):
        try:
            # Association without name
            dontcare = uml.isDerived()  # Odd way to check if association - only remove unnamed associations
            log(spacer + uml.getOwningPackage().getName() + " --------remove un-named association-----------")
            deleteme.append(uml)
            return 2
        except:
            dontcare = True
        try:
            # Property without name - only needed for association ends
            aname = ""
            try:
                aname = uml.eContainer().getName()
            except:
                aname = "null"  # Don't know how this would happen
            aname = "_unnamed_" + aname
            log(aname + "-----------------Add property name------------- ")
            uml.setName(aname)
        except:
            return 1

    # check for instance specification
    try:
        dontcare = uml.hasSlot()  # check if instance spec
        log(spacer + uml.getOwningPackage().getName() + " --------remove instance specification-----------")
        deleteme.append(uml)
    except:
        dontcare = True

    log("Make MOF Name " + str(level))
    # Make MOF name
    if (name and name > ""):
        squished = False
        name2 = name.replace("Conceptual", "MOF")
        name2 = name2.replace("conceptual", "MOF")
        for c in name2:
            if (legal.find(c) < 0):
                squished = True  # Suppress non name characters
            else:
                if squished:
                    c = c.capitalize()  # Do camel case
                fixedname += c
                squished = False
        if (name != fixedname):
            log(spacer + fixedname + " --------- Made Camel Case ---------")
            uml.setName(fixedname)  # set the new value

    log("Propagate URI " + str(level))
    # Propagate nested URI
    try:
        uri = uml.getURI()
        if ((not uri) or uri == ""):
            # Get URI from owner
            owneruri = uml.getOwningPackage().getURI()
            if ((owneruri) and owneruri > ""):
                newuri = owneruri
                if (owneruri[-1:] != "/"):
                    newuri += "/"
                newuri += fixedname
                log(spacer + newuri + "--------- Appended URI ---------")
                uml.setURI(newuri)  # Set the new value
        else:
            # Change conceptual URI to MOF
            newuri = uri.replace("Conceptual", "MOF")
            newuri = newuri.replace("conceptual", "MOF")
            if (uri != newuri):
                uml.setURI(newuri)
    except:
        # Only for things that have a URI
        uri = ""  # No URI

    # iterate thru the model
    if (uml.hasOwnedElement()):
        # Visit all children
        for e in uml.getOwnedElement().iterator():
            if e.isEditable():  # Only visit places we can edit
                doUML(e, level + 1)  # Visit editable elements
    return 1


project = Application.getInstance().getProject()
model = project.getModel()
log("MOFize Model=" + model.getName())
com.nomagic.magicdraw.openapi.uml.SessionManager.getInstance().createSession("MOFize model")
try:
    doUML(model, 0)
    # delete things that need deleting at end, otherwise iterators are messed up
    man = com.nomagic.magicdraw.openapi.uml.ModelElementsManager.getInstance()
    for uml in deleteme:
        log("Remove " + str(uml))
        man.removeElement(uml)
finally:
    com.nomagic.magicdraw.openapi.uml.SessionManager.getInstance().closeSession()
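For readers who want to try the name-normalization rule outside MagicDraw, here is a minimal standalone sketch of the same camel-casing logic in plain Python 3 (the macro above runs under MagicDraw's Jython, where string.letters still exists); the function name mofize_name is my own, not part of the script.

import string

def mofize_name(name):
    """Rewrite "Conceptual" to "MOF", drop characters outside [A-Za-z0-9],
    and capitalize the letter following each dropped run (camel case)."""
    legal = string.ascii_letters + string.digits
    name = name.replace("Conceptual", "MOF").replace("conceptual", "MOF")
    fixed, squished = "", False
    for c in name:
        if c not in legal:
            squished = True              # suppress non-name characters
        else:
            fixed += c.upper() if squished else c
            squished = False
    return fixed

assert mofize_name("conceptual entity-type") == "MOFEntityType"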
gauravjns/taiga-back
refs/heads/master
tests/integration/resources_permissions/test_userstories_custom_attributes_resource.py
20
# Copyright (C) 2015 Andrey Antukh <[email protected]>
# Copyright (C) 2015 Jesús Espino <[email protected]>
# Copyright (C) 2015 David Barragán <[email protected]>
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>.

from django.core.urlresolvers import reverse

from taiga.base.utils import json
from taiga.projects.custom_attributes import serializers
from taiga.permissions.permissions import (MEMBERS_PERMISSIONS,
                                           ANON_PERMISSIONS,
                                           USER_PERMISSIONS)

from tests import factories as f
from tests.utils import helper_test_http_method

import pytest
pytestmark = pytest.mark.django_db


@pytest.fixture
def data():
    m = type("Models", (object,), {})
    m.registered_user = f.UserFactory.create()
    m.project_member_with_perms = f.UserFactory.create()
    m.project_member_without_perms = f.UserFactory.create()
    m.project_owner = f.UserFactory.create()
    m.other_user = f.UserFactory.create()
    m.superuser = f.UserFactory.create(is_superuser=True)

    m.public_project = f.ProjectFactory(is_private=False,
                                        anon_permissions=list(map(lambda x: x[0], ANON_PERMISSIONS)),
                                        public_permissions=list(map(lambda x: x[0], USER_PERMISSIONS)),
                                        owner=m.project_owner)
    m.private_project1 = f.ProjectFactory(is_private=True,
                                          anon_permissions=list(map(lambda x: x[0], ANON_PERMISSIONS)),
                                          public_permissions=list(map(lambda x: x[0], USER_PERMISSIONS)),
                                          owner=m.project_owner)
    m.private_project2 = f.ProjectFactory(is_private=True,
                                          anon_permissions=[],
                                          public_permissions=[],
                                          owner=m.project_owner)

    m.public_membership = f.MembershipFactory(project=m.public_project,
                                              user=m.project_member_with_perms,
                                              email=m.project_member_with_perms.email,
                                              role__project=m.public_project,
                                              role__permissions=list(map(lambda x: x[0], MEMBERS_PERMISSIONS)))
    m.private_membership1 = f.MembershipFactory(project=m.private_project1,
                                                user=m.project_member_with_perms,
                                                email=m.project_member_with_perms.email,
                                                role__project=m.private_project1,
                                                role__permissions=list(map(lambda x: x[0], MEMBERS_PERMISSIONS)))
    f.MembershipFactory(project=m.private_project1,
                        user=m.project_member_without_perms,
                        email=m.project_member_without_perms.email,
                        role__project=m.private_project1,
                        role__permissions=[])
    m.private_membership2 = f.MembershipFactory(project=m.private_project2,
                                                user=m.project_member_with_perms,
                                                email=m.project_member_with_perms.email,
                                                role__project=m.private_project2,
                                                role__permissions=list(map(lambda x: x[0], MEMBERS_PERMISSIONS)))
    f.MembershipFactory(project=m.private_project2,
                        user=m.project_member_without_perms,
                        email=m.project_member_without_perms.email,
                        role__project=m.private_project2,
                        role__permissions=[])

    f.MembershipFactory(project=m.public_project,
                        user=m.project_owner,
                        is_owner=True)
    f.MembershipFactory(project=m.private_project1,
                        user=m.project_owner,
                        is_owner=True)
    f.MembershipFactory(project=m.private_project2,
                        user=m.project_owner,
                        is_owner=True)

    m.public_userstory_ca = f.UserStoryCustomAttributeFactory(project=m.public_project)
    m.private_userstory_ca1 = f.UserStoryCustomAttributeFactory(project=m.private_project1)
    m.private_userstory_ca2 = f.UserStoryCustomAttributeFactory(project=m.private_project2)

    m.public_user_story = f.UserStoryFactory(project=m.public_project,
                                             status__project=m.public_project)
    m.private_user_story1 = f.UserStoryFactory(project=m.private_project1,
                                               status__project=m.private_project1)
    m.private_user_story2 = f.UserStoryFactory(project=m.private_project2,
                                               status__project=m.private_project2)

    m.public_user_story_cav = m.public_user_story.custom_attributes_values
    m.private_user_story_cav1 = m.private_user_story1.custom_attributes_values
    m.private_user_story_cav2 = m.private_user_story2.custom_attributes_values

    return m


#########################################################
# User Story Custom Attribute
#########################################################

def test_userstory_custom_attribute_retrieve(client, data):
    public_url = reverse('userstory-custom-attributes-detail', kwargs={"pk": data.public_userstory_ca.pk})
    private1_url = reverse('userstory-custom-attributes-detail', kwargs={"pk": data.private_userstory_ca1.pk})
    private2_url = reverse('userstory-custom-attributes-detail', kwargs={"pk": data.private_userstory_ca2.pk})

    users = [
        None,
        data.registered_user,
        data.project_member_without_perms,
        data.project_member_with_perms,
        data.project_owner
    ]

    results = helper_test_http_method(client, 'get', public_url, None, users)
    assert results == [200, 200, 200, 200, 200]
    results = helper_test_http_method(client, 'get', private1_url, None, users)
    assert results == [200, 200, 200, 200, 200]
    results = helper_test_http_method(client, 'get', private2_url, None, users)
    assert results == [401, 403, 403, 200, 200]


def test_userstory_custom_attribute_create(client, data):
    public_url = reverse('userstory-custom-attributes-list')
    private1_url = reverse('userstory-custom-attributes-list')
    private2_url = reverse('userstory-custom-attributes-list')

    users = [
        None,
        data.registered_user,
        data.project_member_without_perms,
        data.project_member_with_perms,
        data.project_owner
    ]

    userstory_ca_data = {"name": "test-new", "project": data.public_project.id}
    userstory_ca_data = json.dumps(userstory_ca_data)
    results = helper_test_http_method(client, 'post', public_url, userstory_ca_data, users)
    assert results == [401, 403, 403, 403, 201]

    userstory_ca_data = {"name": "test-new", "project": data.private_project1.id}
    userstory_ca_data = json.dumps(userstory_ca_data)
    results = helper_test_http_method(client, 'post', private1_url, userstory_ca_data, users)
    assert results == [401, 403, 403, 403, 201]

    userstory_ca_data = {"name": "test-new", "project": data.private_project2.id}
    userstory_ca_data = json.dumps(userstory_ca_data)
    results = helper_test_http_method(client, 'post', private2_url, userstory_ca_data, users)
    assert results == [401, 403, 403, 403, 201]


def test_userstory_custom_attribute_update(client, data):
    public_url = reverse('userstory-custom-attributes-detail', kwargs={"pk": data.public_userstory_ca.pk})
    private1_url = reverse('userstory-custom-attributes-detail', kwargs={"pk": data.private_userstory_ca1.pk})
    private2_url = reverse('userstory-custom-attributes-detail', kwargs={"pk": data.private_userstory_ca2.pk})

    users = [
        None,
        data.registered_user,
        data.project_member_without_perms,
        data.project_member_with_perms,
        data.project_owner
    ]

    userstory_ca_data = serializers.UserStoryCustomAttributeSerializer(data.public_userstory_ca).data
    userstory_ca_data["name"] = "test"
    userstory_ca_data = json.dumps(userstory_ca_data)
    results = helper_test_http_method(client, 'put', public_url, userstory_ca_data, users)
    assert results == [401, 403, 403, 403, 200]

    userstory_ca_data = serializers.UserStoryCustomAttributeSerializer(data.private_userstory_ca1).data
    userstory_ca_data["name"] = "test"
    userstory_ca_data = json.dumps(userstory_ca_data)
    results = helper_test_http_method(client, 'put', private1_url, userstory_ca_data, users)
    assert results == [401, 403, 403, 403, 200]

    userstory_ca_data = serializers.UserStoryCustomAttributeSerializer(data.private_userstory_ca2).data
    userstory_ca_data["name"] = "test"
    userstory_ca_data = json.dumps(userstory_ca_data)
    results = helper_test_http_method(client, 'put', private2_url, userstory_ca_data, users)
    assert results == [401, 403, 403, 403, 200]


def test_userstory_custom_attribute_delete(client, data):
    public_url = reverse('userstory-custom-attributes-detail', kwargs={"pk": data.public_userstory_ca.pk})
    private1_url = reverse('userstory-custom-attributes-detail', kwargs={"pk": data.private_userstory_ca1.pk})
    private2_url = reverse('userstory-custom-attributes-detail', kwargs={"pk": data.private_userstory_ca2.pk})

    users = [
        None,
        data.registered_user,
        data.project_member_without_perms,
        data.project_member_with_perms,
        data.project_owner
    ]

    results = helper_test_http_method(client, 'delete', public_url, None, users)
    assert results == [401, 403, 403, 403, 204]
    results = helper_test_http_method(client, 'delete', private1_url, None, users)
    assert results == [401, 403, 403, 403, 204]
    results = helper_test_http_method(client, 'delete', private2_url, None, users)
    assert results == [401, 403, 403, 403, 204]


def test_userstory_custom_attribute_list(client, data):
    url = reverse('userstory-custom-attributes-list')

    response = client.json.get(url)
    assert len(response.data) == 2
    assert response.status_code == 200

    client.login(data.registered_user)
    response = client.json.get(url)
    assert len(response.data) == 2
    assert response.status_code == 200

    client.login(data.project_member_without_perms)
    response = client.json.get(url)
    assert len(response.data) == 2
    assert response.status_code == 200

    client.login(data.project_member_with_perms)
    response = client.json.get(url)
    assert len(response.data) == 3
    assert response.status_code == 200

    client.login(data.project_owner)
    response = client.json.get(url)
    assert len(response.data) == 3
    assert response.status_code == 200


def test_userstory_custom_attribute_patch(client, data):
    public_url = reverse('userstory-custom-attributes-detail', kwargs={"pk": data.public_userstory_ca.pk})
    private1_url = reverse('userstory-custom-attributes-detail', kwargs={"pk": data.private_userstory_ca1.pk})
    private2_url = reverse('userstory-custom-attributes-detail', kwargs={"pk": data.private_userstory_ca2.pk})

    users = [
        None,
        data.registered_user,
        data.project_member_without_perms,
        data.project_member_with_perms,
        data.project_owner
    ]

    results = helper_test_http_method(client, 'patch', public_url, '{"name": "Test"}', users)
    assert results == [401, 403, 403, 403, 200]
    results = helper_test_http_method(client, 'patch', private1_url, '{"name": "Test"}', users)
    assert results == [401, 403, 403, 403, 200]
    results = helper_test_http_method(client, 'patch', private2_url, '{"name": "Test"}', users)
    assert results == [401, 403, 403, 403, 200]


def test_userstory_custom_attribute_action_bulk_update_order(client, data):
    url = reverse('userstory-custom-attributes-bulk-update-order')

    users = [
        None,
        data.registered_user,
        data.project_member_without_perms,
        data.project_member_with_perms,
        data.project_owner
    ]

    post_data = json.dumps({
        "bulk_userstory_custom_attributes": [(1, 2)],
        "project": data.public_project.pk
    })
    results = helper_test_http_method(client, 'post', url, post_data, users)
    assert results == [401, 403, 403, 403, 204]

    post_data = json.dumps({
        "bulk_userstory_custom_attributes": [(1, 2)],
        "project": data.private_project1.pk
    })
    results = helper_test_http_method(client, 'post', url, post_data, users)
    assert results == [401, 403, 403, 403, 204]

    post_data = json.dumps({
        "bulk_userstory_custom_attributes": [(1, 2)],
        "project": data.private_project2.pk
    })
    results = helper_test_http_method(client, 'post', url, post_data, users)
    assert results == [401, 403, 403, 403, 204]


#########################################################
# User Story Custom Attributes Values
#########################################################

def test_userstory_custom_attributes_values_retrieve(client, data):
    public_url = reverse('userstory-custom-attributes-values-detail',
                         kwargs={"user_story_id": data.public_user_story.pk})
    private_url1 = reverse('userstory-custom-attributes-values-detail',
                           kwargs={"user_story_id": data.private_user_story1.pk})
    private_url2 = reverse('userstory-custom-attributes-values-detail',
                           kwargs={"user_story_id": data.private_user_story2.pk})

    users = [
        None,
        data.registered_user,
        data.project_member_without_perms,
        data.project_member_with_perms,
        data.project_owner
    ]

    results = helper_test_http_method(client, 'get', public_url, None, users)
    assert results == [200, 200, 200, 200, 200]
    results = helper_test_http_method(client, 'get', private_url1, None, users)
    assert results == [200, 200, 200, 200, 200]
    results = helper_test_http_method(client, 'get', private_url2, None, users)
    assert results == [401, 403, 403, 200, 200]


def test_userstory_custom_attributes_values_update(client, data):
    public_url = reverse('userstory-custom-attributes-values-detail',
                         kwargs={"user_story_id": data.public_user_story.pk})
    private_url1 = reverse('userstory-custom-attributes-values-detail',
                           kwargs={"user_story_id": data.private_user_story1.pk})
    private_url2 = reverse('userstory-custom-attributes-values-detail',
                           kwargs={"user_story_id": data.private_user_story2.pk})

    users = [
        None,
        data.registered_user,
        data.project_member_without_perms,
        data.project_member_with_perms,
        data.project_owner
    ]

    user_story_data = serializers.UserStoryCustomAttributesValuesSerializer(data.public_user_story_cav).data
    user_story_data["attributes_values"] = {str(data.public_userstory_ca.pk): "test"}
    user_story_data = json.dumps(user_story_data)
    results = helper_test_http_method(client, 'put', public_url, user_story_data, users)
    assert results == [401, 403, 403, 200, 200]

    user_story_data = serializers.UserStoryCustomAttributesValuesSerializer(data.private_user_story_cav1).data
    user_story_data["attributes_values"] = {str(data.private_userstory_ca1.pk): "test"}
    user_story_data = json.dumps(user_story_data)
    results = helper_test_http_method(client, 'put', private_url1, user_story_data, users)
    assert results == [401, 403, 403, 200, 200]

    user_story_data = serializers.UserStoryCustomAttributesValuesSerializer(data.private_user_story_cav2).data
    user_story_data["attributes_values"] = {str(data.private_userstory_ca2.pk): "test"}
    user_story_data = json.dumps(user_story_data)
    results = helper_test_http_method(client, 'put', private_url2, user_story_data, users)
    assert results == [401, 403, 403, 200, 200]


def test_userstory_custom_attributes_values_patch(client, data):
    public_url = reverse('userstory-custom-attributes-values-detail',
                         kwargs={"user_story_id": data.public_user_story.pk})
    private_url1 = reverse('userstory-custom-attributes-values-detail',
                           kwargs={"user_story_id": data.private_user_story1.pk})
    private_url2 = reverse('userstory-custom-attributes-values-detail',
                           kwargs={"user_story_id": data.private_user_story2.pk})

    users = [
        None,
        data.registered_user,
        data.project_member_without_perms,
        data.project_member_with_perms,
        data.project_owner
    ]

    patch_data = json.dumps({"attributes_values": {str(data.public_userstory_ca.pk): "test"},
                             "version": data.public_user_story.version})
    results = helper_test_http_method(client, 'patch', public_url, patch_data, users)
    assert results == [401, 403, 403, 200, 200]

    patch_data = json.dumps({"attributes_values": {str(data.private_userstory_ca1.pk): "test"},
                             "version": data.private_user_story1.version})
    results = helper_test_http_method(client, 'patch', private_url1, patch_data, users)
    assert results == [401, 403, 403, 200, 200]

    patch_data = json.dumps({"attributes_values": {str(data.private_userstory_ca2.pk): "test"},
                             "version": data.private_user_story2.version})
    results = helper_test_http_method(client, 'patch', private_url2, patch_data, users)
    assert results == [401, 403, 403, 200, 200]
tsunami33/facebook-statistics
refs/heads/master
sample/__init__.py
1
from select_interlocutors import select_interlocutors
from sqlite import fill_database, see_database
from optparse import OptionParser

parser = OptionParser()
parser.add_option("-l", "--list-contact", type="int", dest="l", default=1,
                  help="list l * 18 contacts", metavar="NUMBER")
parser.add_option("-n", "--messages-number", type="int", dest="n", default=1,
                  help="list n * 23 messages", metavar="NUMBER")
parser.add_option("-c", "--contact", type="string", dest="contact", action="store",
                  help="specify a contact to stalk the conversation with", metavar="STRING")
'''
parser.add_option("-l", "--list", type="choice", dest="category", action="store",
                  default="animals", choices=["animals", "expression"],
                  help="choose a sentence category to generate", metavar="STRING")
'''
parser.add_option("-s", "--see", dest="see", action="store_true",
                  help="see database", metavar="BOOLEAN")
parser.add_option("-r", "--reset", dest="reset", action="store_true",
                  help="reset database", metavar="BOOLEAN")
parser.add_option("-d", "--debug", dest="debug", action="store_true",
                  help="print debug messages", metavar="BOOLEAN")

(options, args) = parser.parse_args()

if __name__ == "__main__":
    if options.see:
        see_database(options)
    else:
        user, partner, inbox = select_interlocutors(options)
        fill_database(options, user, partner, inbox)
redhat-openstack/neutron
refs/heads/f22-patches
neutron/db/migration/alembic_migrations/mlnx_init_ops.py
17
# Copyright 2014 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#

# Initial operations for the Mellanox plugin

from alembic import op
import sqlalchemy as sa


def upgrade():
    op.create_table(
        'segmentation_id_allocation',
        sa.Column('physical_network', sa.String(length=64), nullable=False),
        sa.Column('segmentation_id', sa.Integer(), autoincrement=False,
                  nullable=False),
        sa.Column('allocated', sa.Boolean(), nullable=False),
        sa.PrimaryKeyConstraint('physical_network', 'segmentation_id'))

    op.create_table(
        'mlnx_network_bindings',
        sa.Column('network_id', sa.String(length=36), nullable=False),
        sa.Column('network_type', sa.String(length=32), nullable=False),
        sa.Column('physical_network', sa.String(length=64), nullable=True),
        sa.Column('segmentation_id', sa.Integer(), nullable=False),
        sa.ForeignKeyConstraint(['network_id'], ['networks.id'],
                                ondelete='CASCADE'),
        sa.PrimaryKeyConstraint('network_id'))

    op.create_table(
        'port_profile',
        sa.Column('port_id', sa.String(length=36), nullable=False),
        sa.Column('vnic_type', sa.String(length=32), nullable=False),
        sa.ForeignKeyConstraint(['port_id'], ['ports.id'],
                                ondelete='CASCADE'),
        sa.PrimaryKeyConstraint('port_id'))


def downgrade():
    op.drop_table('port_profile')
    op.drop_table('mlnx_network_bindings')
    op.drop_table('segmentation_id_allocation')
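Migration modules like this one are not run directly; Neutron drives them through its neutron-db-manage wrapper around alembic. As a rough sketch of what that wrapper does with alembic's own API (the alembic.ini path here is illustrative, not Neutron's exact layout):

from alembic import command
from alembic.config import Config

# Illustrative path; neutron-db-manage locates the real config itself.
cfg = Config("neutron/db/migration/alembic.ini")
command.upgrade(cfg, "head")    # applies each pending revision's upgrade()
command.downgrade(cfg, "-1")    # reverts the latest revision via downgrade()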
mika76/Wox
refs/heads/master
PythonHome/Lib/site-packages/chardet/eucjpprober.py
2918
######################## BEGIN LICENSE BLOCK ########################
# The Original Code is mozilla.org code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
#   Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301  USA
######################### END LICENSE BLOCK #########################

import sys

from . import constants
from .mbcharsetprober import MultiByteCharSetProber
from .codingstatemachine import CodingStateMachine
from .chardistribution import EUCJPDistributionAnalysis
from .jpcntx import EUCJPContextAnalysis
from .mbcssm import EUCJPSMModel


class EUCJPProber(MultiByteCharSetProber):
    def __init__(self):
        MultiByteCharSetProber.__init__(self)
        self._mCodingSM = CodingStateMachine(EUCJPSMModel)
        self._mDistributionAnalyzer = EUCJPDistributionAnalysis()
        self._mContextAnalyzer = EUCJPContextAnalysis()
        self.reset()

    def reset(self):
        MultiByteCharSetProber.reset(self)
        self._mContextAnalyzer.reset()

    def get_charset_name(self):
        return "EUC-JP"

    def feed(self, aBuf):
        aLen = len(aBuf)
        for i in range(0, aLen):
            # PY3K: aBuf is a byte array, so aBuf[i] is an int, not a byte
            codingState = self._mCodingSM.next_state(aBuf[i])
            if codingState == constants.eError:
                if constants._debug:
                    sys.stderr.write(self.get_charset_name()
                                     + ' prober hit error at byte ' + str(i)
                                     + '\n')
                self._mState = constants.eNotMe
                break
            elif codingState == constants.eItsMe:
                self._mState = constants.eFoundIt
                break
            elif codingState == constants.eStart:
                charLen = self._mCodingSM.get_current_charlen()
                if i == 0:
                    self._mLastChar[1] = aBuf[0]
                    self._mContextAnalyzer.feed(self._mLastChar, charLen)
                    self._mDistributionAnalyzer.feed(self._mLastChar, charLen)
                else:
                    self._mContextAnalyzer.feed(aBuf[i - 1:i + 1], charLen)
                    self._mDistributionAnalyzer.feed(aBuf[i - 1:i + 1],
                                                     charLen)

        self._mLastChar[0] = aBuf[aLen - 1]

        if self.get_state() == constants.eDetecting:
            if (self._mContextAnalyzer.got_enough_data() and
                    (self.get_confidence() > constants.SHORTCUT_THRESHOLD)):
                self._mState = constants.eFoundIt

        return self.get_state()

    def get_confidence(self):
        contxtCf = self._mContextAnalyzer.get_confidence()
        distribCf = self._mDistributionAnalyzer.get_confidence()
        return max(contxtCf, distribCf)
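In normal use this prober is driven by chardet's UniversalDetector rather than called directly, but the feed/get_confidence API defined above can be exercised on its own. A minimal sketch, assuming the bundled package imports as chardet; the sample bytes are my own (EUC-JP encoding of "こんにちは"):

from chardet.eucjpprober import EUCJPProber
from chardet import constants

prober = EUCJPProber()
# Feed raw EUC-JP bytes; the state machine advances one byte at a time.
state = prober.feed(b'\xa4\xb3\xa4\xf3\xa4\xcb\xa4\xc1\xa4\xcf')
print(prober.get_charset_name(), prober.get_confidence())
print(state == constants.eFoundIt)  # True once the prober is confident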
phobson/conda-env
refs/heads/develop
conda_env/installers/__init__.py
12133432
helldorado/ansible
refs/heads/devel
lib/ansible/modules/network/eos/__init__.py
12133432
chugunovyar/factoryForBuild
refs/heads/master
env/lib/python2.7/site-packages/django/db/backends/base/__init__.py
12133432
danieljaouen/ansible
refs/heads/devel
lib/ansible/modules/network/bigswitch/__init__.py
12133432
saisaizhang/Food
refs/heads/master
flask/lib/python2.7/site-packages/pip/_vendor/cachecontrol/__init__.py
29
"""CacheControl import Interface. Make it easy to import from cachecontrol without long namespaces. """ from .wrapper import CacheControl from .adapter import CacheControlAdapter from .controller import CacheController
Dhivyap/ansible
refs/heads/devel
lib/ansible/modules/network/fortios/fortios_switch_controller_quarantine.py
13
#!/usr/bin/python
from __future__ import (absolute_import, division, print_function)
# Copyright 2019 Fortinet, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program.  If not, see <https://www.gnu.org/licenses/>.

__metaclass__ = type

ANSIBLE_METADATA = {'status': ['preview'],
                    'supported_by': 'community',
                    'metadata_version': '1.1'}

DOCUMENTATION = '''
---
module: fortios_switch_controller_quarantine
short_description: Configure FortiSwitch quarantine support in Fortinet's FortiOS and FortiGate.
description:
    - This module is able to configure a FortiGate or FortiOS (FOS) device by allowing the
      user to set and modify switch_controller feature and quarantine category.
      Examples include all parameters and values need to be adjusted to datasources before usage.
      Tested with FOS v6.0.5
version_added: "2.9"
author:
    - Miguel Angel Munoz (@mamunozgonzalez)
    - Nicolas Thomas (@thomnico)
notes:
    - Requires fortiosapi library developed by Fortinet
    - Run as a local_action in your playbook
requirements:
    - fortiosapi>=0.9.8
options:
    host:
        description:
            - FortiOS or FortiGate IP address.
        type: str
        required: false
    username:
        description:
            - FortiOS or FortiGate username.
        type: str
        required: false
    password:
        description:
            - FortiOS or FortiGate password.
        type: str
        default: ""
    vdom:
        description:
            - Virtual domain, among those defined previously. A vdom is a
              virtual instance of the FortiGate that can be configured and
              used as a different unit.
        type: str
        default: root
    https:
        description:
            - Indicates if the requests towards FortiGate must use HTTPS protocol.
        type: bool
        default: true
    ssl_verify:
        description:
            - Ensures FortiGate certificate must be verified by a proper CA.
        type: bool
        default: true
    switch_controller_quarantine:
        description:
            - Configure FortiSwitch quarantine support.
        default: null
        type: dict
        suboptions:
            quarantine:
                description:
                    - Enable/disable quarantine.
                type: str
                choices:
                    - enable
                    - disable
            targets:
                description:
                    - Quarantine MACs.
                type: list
                suboptions:
                    description:
                        description:
                            - Description for the quarantine MAC.
                        type: str
                    entry_id:
                        description:
                            - FSW entry id for the quarantine MAC.
                        type: int
                    mac:
                        description:
                            - Quarantine MAC.
                        required: true
                        type: str
                    tag:
                        description:
                            - Tags for the quarantine MAC.
                        type: list
                        suboptions:
                            tags:
                                description:
                                    - Tag string(eg. string1 string2 string3).
                                required: true
                                type: str
'''

EXAMPLES = '''
- hosts: localhost
  vars:
   host: "192.168.122.40"
   username: "admin"
   password: ""
   vdom: "root"
   ssl_verify: "False"
  tasks:
  - name: Configure FortiSwitch quarantine support.
    fortios_switch_controller_quarantine:
      host:  "{{ host }}"
      username: "{{ username }}"
      password: "{{ password }}"
      vdom:  "{{ vdom }}"
      https: "False"
      switch_controller_quarantine:
        quarantine: "enable"
        targets:
         -
            description: "<your_own_value>"
            entry_id: "6"
            mac: "<your_own_value>"
            tag:
             -
                tags: "<your_own_value>"
'''

RETURN = '''
build:
  description: Build number of the fortigate image
  returned: always
  type: str
  sample: '1547'
http_method:
  description: Last method used to provision the content into FortiGate
  returned: always
  type: str
  sample: 'PUT'
http_status:
  description: Last result given by FortiGate on last operation applied
  returned: always
  type: str
  sample: "200"
mkey:
  description: Master key (id) used in the last call to FortiGate
  returned: success
  type: str
  sample: "id"
name:
  description: Name of the table used to fulfill the request
  returned: always
  type: str
  sample: "urlfilter"
path:
  description: Path of the table used to fulfill the request
  returned: always
  type: str
  sample: "webfilter"
revision:
  description: Internal revision number
  returned: always
  type: str
  sample: "17.0.2.10658"
serial:
  description: Serial number of the unit
  returned: always
  type: str
  sample: "FGVMEVYYQT3AB5352"
status:
  description: Indication of the operation's result
  returned: always
  type: str
  sample: "success"
vdom:
  description: Virtual domain used
  returned: always
  type: str
  sample: "root"
version:
  description: Version of the FortiGate
  returned: always
  type: str
  sample: "v5.6.3"
'''

from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.connection import Connection
from ansible.module_utils.network.fortios.fortios import FortiOSHandler
from ansible.module_utils.network.fortimanager.common import FAIL_SOCKET_MSG


def login(data, fos):
    host = data['host']
    username = data['username']
    password = data['password']
    ssl_verify = data['ssl_verify']

    fos.debug('on')
    if 'https' in data and not data['https']:
        fos.https('off')
    else:
        fos.https('on')

    fos.login(host, username, password, verify=ssl_verify)


def filter_switch_controller_quarantine_data(json):
    option_list = ['quarantine', 'targets']
    dictionary = {}

    for attribute in option_list:
        if attribute in json and json[attribute] is not None:
            dictionary[attribute] = json[attribute]

    return dictionary


def underscore_to_hyphen(data):
    if isinstance(data, list):
        for elem in data:
            elem = underscore_to_hyphen(elem)
    elif isinstance(data, dict):
        new_data = {}
        for k, v in data.items():
            new_data[k.replace('_', '-')] = underscore_to_hyphen(v)
        data = new_data

    return data


def switch_controller_quarantine(data, fos):
    vdom = data['vdom']
    switch_controller_quarantine_data = data['switch_controller_quarantine']
    filtered_data = underscore_to_hyphen(filter_switch_controller_quarantine_data(switch_controller_quarantine_data))

    return fos.set('switch-controller',
                   'quarantine',
                   data=filtered_data,
                   vdom=vdom)


def is_successful_status(status):
    return status['status'] == "success" or \
        status['http_method'] == "DELETE" and status['http_status'] == 404


def fortios_switch_controller(data, fos):

    if data['switch_controller_quarantine']:
        resp = switch_controller_quarantine(data, fos)

    return not is_successful_status(resp), \
        resp['status'] == "success", \
        resp


def main():
    fields = {
        "host": {"required": False, "type": "str"},
        "username": {"required": False, "type": "str"},
        "password": {"required": False, "type": "str", "default": "", "no_log": True},
        "vdom": {"required": False, "type": "str", "default": "root"},
        "https": {"required": False, "type": "bool", "default": True},
        "ssl_verify": {"required": False, "type": "bool", "default": True},
        "switch_controller_quarantine": {
            "required": False, "type": "dict", "default": None,
            "options": {
                "quarantine": {"required": False, "type": "str",
                               "choices": ["enable", "disable"]},
                "targets": {"required": False, "type": "list",
                            "options": {
                                "description": {"required": False, "type": "str"},
                                "entry_id": {"required": False, "type": "int"},
                                "mac": {"required": True, "type": "str"},
                                "tag": {"required": False, "type": "list",
                                        "options": {
                                            "tags": {"required": True, "type": "str"}
                                        }}
                            }}
            }
        }
    }

    module = AnsibleModule(argument_spec=fields,
                           supports_check_mode=False)

    # legacy_mode refers to using fortiosapi instead of HTTPAPI
    legacy_mode = 'host' in module.params and module.params['host'] is not None and \
                  'username' in module.params and module.params['username'] is not None and \
                  'password' in module.params and module.params['password'] is not None

    if not legacy_mode:
        if module._socket_path:
            connection = Connection(module._socket_path)
            fos = FortiOSHandler(connection)

            is_error, has_changed, result = fortios_switch_controller(module.params, fos)
        else:
            module.fail_json(**FAIL_SOCKET_MSG)
    else:
        try:
            from fortiosapi import FortiOSAPI
        except ImportError:
            module.fail_json(msg="fortiosapi module is required")

        fos = FortiOSAPI()

        login(module.params, fos)
        is_error, has_changed, result = fortios_switch_controller(module.params, fos)
        fos.logout()

    if not is_error:
        module.exit_json(changed=has_changed, meta=result)
    else:
        module.fail_json(msg="Error in repo", meta=result)


if __name__ == '__main__':
    main()
thinker0/aurora
refs/heads/master
src/test/python/apache/aurora/client/cli/test_diff_formatter.py
5
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import textwrap
from copy import deepcopy

import pytest
from mock import Mock, call
from pystachio import Empty

from apache.aurora.client.cli import Context
from apache.aurora.client.cli.diff_formatter import DiffFormatter
from apache.aurora.client.cli.jobs import DiffCommand
from apache.aurora.client.cli.options import TaskInstanceKey
from apache.aurora.config import AuroraConfig
from apache.aurora.config.schema.base import Job
from apache.thermos.config.schema_base import MB, Process, Resources, Task

from .util import AuroraClientCommandTest, FakeAuroraCommandContext, mock_verb_options

from gen.apache.aurora.api.constants import ACTIVE_STATES
from gen.apache.aurora.api.ttypes import (
    ConfigGroup,
    Constraint,
    GetJobUpdateDiffResult,
    Range,
    Result,
    TaskQuery
)


class TestDiffFormatter(AuroraClientCommandTest):
  def setUp(self):
    self._fake_context = FakeAuroraCommandContext()
    self._mock_options = mock_verb_options(DiffCommand())
    self._mock_options.instance_spec = TaskInstanceKey(self.TEST_JOBKEY, [0, 1])
    self._fake_context.set_options(self._mock_options)
    self._mock_api = self._fake_context.get_api("west")

  @classmethod
  def get_job_config(self, is_cron=False):
    return AuroraConfig(job=Job(
      cluster='west',
      role='bozo',
      environment='test',
      name='the_job',
      service=True if not is_cron else False,
      cron_schedule='* * * * *' if is_cron else Empty,
      task=Task(
        name='task',
        processes=[Process(cmdline='ls -la', name='process')],
        resources=Resources(cpu=1.0, ram=1024 * MB, disk=1024 * MB)
      ),
      instances=3,
    ))

  @classmethod
  def get_job_update_diff_result(cls, task=None):
    diff = cls.create_simple_success_response()
    if task is None:
      task = cls.create_task_config('foo')
    diff.result = Result(getJobUpdateDiffResult=GetJobUpdateDiffResult(
      add=set([ConfigGroup(
        config=task,
        instances=frozenset([Range(first=10, last=10), Range(first=12, last=14)]))]),
      remove=frozenset(),
      update=frozenset([ConfigGroup(
        config=task,
        instances=frozenset([Range(first=11, last=11)]))]),
      unchanged=frozenset([ConfigGroup(
        config=task,
        instances=frozenset([Range(first=0, last=9)]))])
    ))
    return diff

  @classmethod
  def get_job_update_no_change_diff_result(cls):
    diff = cls.create_simple_success_response()
    task = cls.create_task_config('foo')
    diff.result = Result(getJobUpdateDiffResult=GetJobUpdateDiffResult(
      add=frozenset(),
      remove=frozenset(),
      update=frozenset(),
      unchanged=frozenset([ConfigGroup(
        config=task,
        instances=frozenset([Range(first=0, last=3)]))])
    ))
    return diff

  def test_show_job_update_diff_with_task_diff(self):
    config = self.get_job_config()
    self._fake_context.get_job_config = Mock(return_value=config)
    formatter = DiffFormatter(self._fake_context, config)
    local_task = self.create_scheduled_tasks()[0].assignedTask.task
    local_task.constraints = set([Constraint(name='host'), Constraint(name='rack')])
    self._mock_api.get_job_update_diff.return_value = self.get_job_update_diff_result()

    formatter.show_job_update_diff(self._mock_options.instance_spec.instance, local_task)

    assert self._mock_api.get_job_update_diff.mock_calls == [
      call(config, self._mock_options.instance_spec.instance)
    ]
    assert "\n".join(self._fake_context.get_out()) == textwrap.dedent("""\
      This job update will:
      add instances: [10], [12-14]
      update instances: [11] with diff:\n
      2c2,3
      < 'constraints': None,
      ---
      > 'constraints': [ Constraint(name='host', constraint=None),
      >   Constraint(name='rack', constraint=None)],
      \n
      not change instances: [0-9]""")

  def test_show_job_update_diff_no_diff_out_of_order_constraints(self):
    config = self.get_job_config()
    self._fake_context.get_job_config = Mock(return_value=config)
    formatter = DiffFormatter(self._fake_context, config)
    local_task = self.create_scheduled_tasks()[0].assignedTask.task
    local_task.constraints = set([Constraint(name='host'), Constraint(name='rack')])
    remote_task = deepcopy(local_task)
    remote_task.constraints = set([Constraint(name='rack'), Constraint(name='host')])
    self._mock_api.get_job_update_diff.return_value = self.get_job_update_diff_result(remote_task)

    formatter.show_job_update_diff(self._mock_options.instance_spec.instance, local_task)

    assert self._mock_api.get_job_update_diff.mock_calls == [
      call(config, self._mock_options.instance_spec.instance)
    ]
    assert "\n".join(self._fake_context.get_out()) == textwrap.dedent("""\
      This job update will:
      add instances: [10], [12-14]
      update instances: [11] with diff:\n\n\n
      not change instances: [0-9]""")

  def test_show_job_update_diff_without_task_diff(self):
    config = self.get_job_config()
    self._fake_context.get_job_config = Mock(return_value=config)
    formatter = DiffFormatter(self._fake_context, config)
    self._mock_api.get_job_update_diff.return_value = self.get_job_update_diff_result()

    formatter.show_job_update_diff(self._mock_options.instance_spec.instance)

    assert self._mock_api.get_job_update_diff.mock_calls == [
      call(config, self._mock_options.instance_spec.instance)
    ]
    assert "\n".join(self._fake_context.get_out()) == textwrap.dedent("""\
      This job update will:
      add instances: [10], [12-14]
      update instances: [11]
      not change instances: [0-9]""")

  def test_show_job_update_diff_no_change(self):
    config = self.get_job_config()
    self._fake_context.get_job_config = Mock(return_value=config)
    formatter = DiffFormatter(self._fake_context, config)
    self._mock_api.get_job_update_diff.return_value = self.get_job_update_no_change_diff_result()

    formatter.show_job_update_diff(self._mock_options.instance_spec.instance)

    assert self._mock_api.get_job_update_diff.mock_calls == [
      call(config, self._mock_options.instance_spec.instance)
    ]
    assert "\n".join(self._fake_context.get_out()) == textwrap.dedent("""\
      This job update will:
      not change instances: [0-3]""")

  def test_get_job_update_diff_error(self):
    mock_config = self.get_job_config()
    self._fake_context.get_job_config = Mock(return_value=mock_config)
    formatter = DiffFormatter(self._fake_context, mock_config)
    self._mock_api.get_job_update_diff.return_value = self.create_error_response()

    with pytest.raises(Context.CommandError):
      formatter.show_job_update_diff(self._mock_options.instance_spec.instance)

    assert self._mock_api.get_job_update_diff.mock_calls == [
      call(mock_config, self._mock_options.instance_spec.instance)
    ]
    assert self._fake_context.get_out() == []
    assert self._fake_context.get_err() == ["Error getting diff info from scheduler", "\tWhoops"]

  def test_diff_no_update_details_success(self):
    config = self.get_job_config(is_cron=True)
    self._fake_context.get_job_config = Mock(return_value=config)
    formatter = DiffFormatter(self._fake_context, config)
    self._mock_api.query.return_value = self.create_empty_task_result()
    query = TaskQuery(
      jobKeys=[self.TEST_JOBKEY.to_thrift()],
      statuses=ACTIVE_STATES)
    self._mock_api.build_query.return_value = query
    local_tasks = []

    formatter.diff_no_update_details(local_tasks)
AyoubZahid/odoo
refs/heads/9.0
addons/account/wizard/account_unreconcile.py
47
from openerp import models, api


class AccountUnreconcile(models.TransientModel):
    _name = "account.unreconcile"
    _description = "Account Unreconcile"

    @api.multi
    def trans_unrec(self):
        context = dict(self._context or {})
        if context.get('active_ids', False):
            self.env['account.move.line'].browse(context.get('active_ids')).remove_move_reconcile()
        return {'type': 'ir.actions.act_window_close'}
otdw/ansible-modules-core
refs/heads/devel
system/seboolean.py
152
#!/usr/bin/python

# (c) 2012, Stephen Fromm <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible.  If not, see <http://www.gnu.org/licenses/>.

DOCUMENTATION = '''
---
module: seboolean
short_description: Toggles SELinux booleans.
description:
     - Toggles SELinux booleans.
version_added: "0.7"
options:
  name:
    description:
      - Name of the boolean to configure
    required: true
    default: null
  persistent:
    description:
      - Set to C(yes) if the boolean setting should survive a reboot
    required: false
    default: no
    choices: [ "yes", "no" ]
  state:
    description:
      - Desired boolean value
    required: true
    default: null
    choices: [ 'yes', 'no' ]
notes:
   - Not tested on any debian based system
requirements: [ ]
author: "Stephen Fromm (@sfromm)"
'''

EXAMPLES = '''
# Set (httpd_can_network_connect) flag on and keep it persistent across reboots
- seboolean: name=httpd_can_network_connect state=yes persistent=yes
'''

try:
    import selinux
    HAVE_SELINUX = True
except ImportError:
    HAVE_SELINUX = False

try:
    import semanage
    HAVE_SEMANAGE = True
except ImportError:
    HAVE_SEMANAGE = False


def has_boolean_value(module, name):
    bools = []
    try:
        rc, bools = selinux.security_get_boolean_names()
    except OSError, e:
        module.fail_json(msg="Failed to get list of boolean names")
    if name in bools:
        return True
    else:
        return False


def get_boolean_value(module, name):
    state = 0
    try:
        state = selinux.security_get_boolean_active(name)
    except OSError, e:
        module.fail_json(msg="Failed to determine current state for boolean %s" % name)
    if state == 1:
        return True
    else:
        return False


# The following method implements what setsebool.c does to change
# a boolean and make it persist after reboot.
def semanage_boolean_value(module, name, state):
    rc = 0
    value = 0
    if state:
        value = 1
    handle = semanage.semanage_handle_create()
    if handle is None:
        module.fail_json(msg="Failed to create semanage library handle")
    try:
        managed = semanage.semanage_is_managed(handle)
        if managed < 0:
            module.fail_json(msg="Failed to determine whether policy is managed")
        if managed == 0:
            if os.getuid() == 0:
                module.fail_json(msg="Cannot set persistent booleans without managed policy")
            else:
                module.fail_json(msg="Cannot set persistent booleans; please try as root")
        if semanage.semanage_connect(handle) < 0:
            module.fail_json(msg="Failed to connect to semanage")

        if semanage.semanage_begin_transaction(handle) < 0:
            module.fail_json(msg="Failed to begin semanage transaction")

        rc, sebool = semanage.semanage_bool_create(handle)
        if rc < 0:
            module.fail_json(msg="Failed to create seboolean with semanage")
        if semanage.semanage_bool_set_name(handle, sebool, name) < 0:
            module.fail_json(msg="Failed to set seboolean name with semanage")
        semanage.semanage_bool_set_value(sebool, value)

        rc, boolkey = semanage.semanage_bool_key_extract(handle, sebool)
        if rc < 0:
            module.fail_json(msg="Failed to extract boolean key with semanage")

        if semanage.semanage_bool_modify_local(handle, boolkey, sebool) < 0:
            module.fail_json(msg="Failed to modify boolean key with semanage")

        if semanage.semanage_bool_set_active(handle, boolkey, sebool) < 0:
            module.fail_json(msg="Failed to set boolean key active with semanage")

        semanage.semanage_bool_key_free(boolkey)
        semanage.semanage_bool_free(sebool)

        semanage.semanage_set_reload(handle, 0)
        if semanage.semanage_commit(handle) < 0:
            module.fail_json(msg="Failed to commit changes to semanage")

        semanage.semanage_disconnect(handle)
        semanage.semanage_handle_destroy(handle)
    except Exception, e:
        module.fail_json(msg="Failed to manage policy for boolean %s: %s" % (name, str(e)))
    return True


def set_boolean_value(module, name, state):
    rc = 0
    value = 0
    if state:
        value = 1
    try:
        rc = selinux.security_set_boolean(name, value)
    except OSError, e:
        module.fail_json(msg="Failed to set boolean %s to %s" % (name, value))
    if rc == 0:
        return True
    else:
        return False


def main():
    module = AnsibleModule(
        argument_spec=dict(
            name=dict(required=True),
            persistent=dict(default='no', type='bool'),
            state=dict(required=True, type='bool')
        ),
        supports_check_mode=True
    )

    if not HAVE_SELINUX:
        module.fail_json(msg="This module requires libselinux-python support")

    if not HAVE_SEMANAGE:
        module.fail_json(msg="This module requires libsemanage-python support")

    if not selinux.is_selinux_enabled():
        module.fail_json(msg="SELinux is disabled on this host.")

    name = module.params['name']
    persistent = module.params['persistent']
    state = module.params['state']

    result = {}
    result['name'] = name

    if not has_boolean_value(module, name):
        module.fail_json(msg="SELinux boolean %s does not exist." % name)

    cur_value = get_boolean_value(module, name)

    if cur_value == state:
        result['state'] = cur_value
        result['changed'] = False
        module.exit_json(**result)

    if module.check_mode:
        module.exit_json(changed=True)
    if persistent:
        r = semanage_boolean_value(module, name, state)
    else:
        r = set_boolean_value(module, name, state)

    result['changed'] = r
    if not r:
        # use the requested state here; `value` is not defined in this scope
        module.fail_json(msg="Failed to set boolean %s to %s" % (name, state))
    try:
        selinux.security_commit_booleans()
    except:
        module.fail_json(msg="Failed to commit pending boolean %s value" % name)
    module.exit_json(**result)

# import module snippets
from ansible.module_utils.basic import *
main()
ucoin-io/cutecoin
refs/heads/master
src/sakia/gui/main_window/status_bar/model.py
3
from PyQt5.QtCore import QObject

from sakia.money import Referentials


class StatusBarModel(QObject):
    """
    The model of status bar component
    """

    def __init__(self, parent, app, blockchain_processor):
        """
        The status bar model

        :param parent: the parent QObject
        :param sakia.app.Application app: the app
        :param sakia.data.processors.BlockchainProcessor blockchain_processor: the blockchain processor
        """
        super().__init__(parent)
        self.app = app
        self.blockchain_processor = blockchain_processor

    def referentials(self):
        return Referentials

    def default_referential(self):
        return self.app.parameters.referential

    def current_block(self):
        return self.blockchain_processor.current_buid(self.app.currency)

    def current_time(self):
        time = self.blockchain_processor.time(self.app.currency)
        return self.blockchain_processor.adjusted_ts(self.app.currency, time)
CloudServer/devstack
refs/heads/master
tools/uec/meta.py
71
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

# needed so the print(...) calls below behave as functions under Python 2
from __future__ import print_function

import BaseHTTPServer
import SimpleHTTPServer
import sys


def main(host, port,
         HandlerClass=SimpleHTTPServer.SimpleHTTPRequestHandler,
         ServerClass=BaseHTTPServer.HTTPServer, protocol="HTTP/1.0"):
    """Simple HTTP server that listens on a given address:port."""
    server_address = (host, port)

    HandlerClass.protocol_version = protocol
    httpd = ServerClass(server_address, HandlerClass)

    sa = httpd.socket.getsockname()
    print("Serving HTTP on", sa[0], "port", sa[1], "...")
    httpd.serve_forever()


if __name__ == '__main__':
    if sys.argv[1:]:
        address = sys.argv[1]
    else:
        address = '0.0.0.0'
    if ':' in address:
        host, port = address.split(':')
    else:
        host = address
        port = 8080

    main(host, int(port))
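For reference, a possible invocation under Python 2 (the address is illustrative; the script accepts a bare host, a host:port pair, or no argument at all, defaulting to 0.0.0.0:8080):

$ python tools/uec/meta.py 192.168.1.1:8080
Serving HTTP on 192.168.1.1 port 8080 ...

The second line is the startup message from the print call in main() above.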
richardcs/ansible
refs/heads/devel
lib/ansible/modules/storage/netapp/netapp_e_ldap.py
25
#!/usr/bin/python

# (c) 2018, NetApp, Inc
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)

from __future__ import absolute_import, division, print_function
__metaclass__ = type

ANSIBLE_METADATA = {'metadata_version': '1.1',
                    'status': ['preview'],
                    'supported_by': 'community'}

DOCUMENTATION = '''
---
module: netapp_e_ldap
short_description: NetApp E-Series manage LDAP integration to use for authentication
description:
    - Configure an E-Series system to allow authentication via an LDAP server
version_added: '2.7'
author: Michael Price (@lmprice)
extends_documentation_fragment:
    - netapp.eseries
options:
    state:
        description:
            - Enable/disable LDAP support on the system. Disabling will clear out any existing defined domains.
        choices:
            - present
            - absent
        default: present
    identifier:
        description:
            - This is a unique identifier for the configuration (for cases where there are multiple domains configured).
            - If this is not specified, but I(state=present), we will utilize a default value of 'default'.
    username:
        description:
            - This is the user account that will be used for querying the LDAP server.
            - "Example: CN=MyBindAcct,OU=ServiceAccounts,DC=example,DC=com"
        required: yes
        aliases:
            - bind_username
    password:
        description:
            - This is the password for the bind user account.
        required: yes
        aliases:
            - bind_password
    attributes:
        description:
            - The user attributes that should be considered for the group to role mapping.
            - Typically this is used with something like 'memberOf', and a user's access is tested against group
              membership or lack thereof.
        default: memberOf
    server:
        description:
            - This is the LDAP server url.
            - The connection string should be specified as using the ldap or ldaps protocol along with the port
              information.
        aliases:
            - server_url
        required: yes
    name:
        description:
            - The domain name[s] that will be utilized when authenticating to identify which domain to utilize.
            - Default to use the DNS name of the I(server).
            - The only requirement is that the name[s] be resolvable.
            - "Example: [email protected]"
        required: no
    search_base:
        description:
            - The search base is used to find group memberships of the user.
            - "Example: ou=users,dc=example,dc=com"
        required: yes
    role_mappings:
        description:
            - This is where you specify which groups should have access to what permissions for the
              storage-system.
            - For example, all users in group A will be assigned all 4 available roles, which will allow access
              to all the management functionality of the system (super-user). Those in group B only have the
              storage.monitor role, which will allow only read-only access.
            - This is specified as a mapping of regular expressions to a list of roles. See the examples.
            - The roles that will be assigned to the group/groups matching the provided regex.
            - storage.admin allows users full read/write access to storage objects and operations.
            - storage.monitor allows users read-only access to storage objects and operations.
            - support.admin allows users access to hardware, diagnostic information, the Major Event Log,
              and other critical support-related functionality, but not the storage configuration.
            - security.admin allows users access to authentication/authorization configuration, as well
              as the audit log configuration, and certification management.
        required: yes
    user_attribute:
        description:
            - This is the attribute we will use to match the provided username when a user attempts to
              authenticate.
        default: sAMAccountName
    log_path:
        description:
            - A local path to a file to be used for debug logging
        required: no
notes:
    - Check mode is supported.
    - This module allows you to define one or more LDAP domains identified uniquely by I(identifier) to use for
      authentication. Authorization is determined by I(role_mappings), in that different groups of users may be given
      different (or no), access to certain aspects of the system and API.
    - The local user accounts will still be available if the LDAP server becomes unavailable/inaccessible.
    - Generally, you'll need to get the details of your organization's LDAP server before you'll be able to configure
      the system for using LDAP authentication; every implementation is likely to be very different.
    - This API is currently only supported with the Embedded Web Services API v2.0 and higher, or the Web Services Proxy
      v3.0 and higher.
'''

EXAMPLES = '''
    - name: Disable LDAP authentication
      netapp_e_ldap:
        api_url: "10.1.1.1:8443"
        api_username: "admin"
        api_password: "myPass"
        ssid: "1"
        state: absent

    - name: Remove the 'default' LDAP domain configuration
      netapp_e_ldap:
        state: absent
        identifier: default

    - name: Define a new LDAP domain, utilizing defaults where possible
      netapp_e_ldap:
        state: present
        bind_username: "CN=MyBindAccount,OU=ServiceAccounts,DC=example,DC=com"
        bind_password: "mySecretPass"
        server: "ldap://example.com:389"
        search_base: 'OU=Users,DC=example,DC=com'
        role_mappings:
          ".*dist-dev-storage.*":
            - storage.admin
            - security.admin
            - support.admin
            - storage.monitor
'''

RETURN = """
msg:
    description: Success message
    returned: on success
    type: string
    sample: The ldap settings have been updated.
"""

import json
import logging

try:
    import urlparse
except ImportError:
    import urllib.parse as urlparse

from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.netapp import request, eseries_host_argument_spec
from ansible.module_utils._text import to_native


class Ldap(object):
    NO_CHANGE_MSG = "No changes were necessary."

    def __init__(self):
        argument_spec = eseries_host_argument_spec()
        argument_spec.update(dict(
            state=dict(type='str', required=False, default='present',
                       choices=['present', 'absent']),
            identifier=dict(type='str', required=False, ),
            username=dict(type='str', required=False, aliases=['bind_username']),
            password=dict(type='str', required=False, aliases=['bind_password'], no_log=True),
            name=dict(type='list', required=False, ),
            server=dict(type='str', required=False, aliases=['server_url']),
            search_base=dict(type='str', required=False, ),
            role_mappings=dict(type='dict', required=False, ),
            user_attribute=dict(type='str', required=False, default='sAMAccountName'),
            attributes=dict(type='list', default=['memberOf'], required=False, ),
            log_path=dict(type='str', required=False),
        ))

        required_if = [
            ["state", "present", ["username", "password", "server", "search_base", "role_mappings", ]]
        ]

        self.module = AnsibleModule(argument_spec=argument_spec,
                                    supports_check_mode=True,
                                    required_if=required_if)
        args = self.module.params
        self.ldap = args['state'] == 'present'
        self.identifier = args['identifier']
        self.username = args['username']
        self.password = args['password']
        self.names = args['name']
        self.server = args['server']
        self.search_base = args['search_base']
        self.role_mappings = args['role_mappings']
        self.user_attribute = args['user_attribute']
        self.attributes = args['attributes']

        self.ssid = args['ssid']
        self.url = args['api_url']
        self.creds = dict(url_password=args['api_password'],
                          validate_certs=args['validate_certs'],
                          url_username=args['api_username'],
                          timeout=60)

        self.check_mode = self.module.check_mode

        log_path = args['log_path']

        # logging setup
        self._logger = logging.getLogger(self.__class__.__name__)

        if log_path:
            logging.basicConfig(
                level=logging.DEBUG, filename=log_path, filemode='w',
                format='%(relativeCreated)dms %(levelname)s %(module)s.%(funcName)s:%(lineno)d\n %(message)s')

        if not self.url.endswith('/'):
            self.url += '/'

        self.embedded = None
        self.base_path = None

    def make_configuration(self):
        if not self.identifier:
            self.identifier = 'default'

        if not self.names:
            parts = urlparse.urlparse(self.server)
            netloc = parts.netloc
            if ':' in netloc:
                netloc = netloc.split(':')[0]
            self.names = [netloc]

        roles = list()
        for regex in self.role_mappings:
            for role in self.role_mappings[regex]:
                roles.append(dict(groupRegex=regex,
                                  ignoreCase=True,
                                  name=role))

        domain = dict(id=self.identifier,
                      ldapUrl=self.server,
                      bindLookupUser=dict(user=self.username, password=self.password),
                      roleMapCollection=roles,
                      groupAttributes=self.attributes,
                      names=self.names,
                      searchBase=self.search_base,
                      userAttribute=self.user_attribute,
                      )

        return domain

    def is_embedded(self):
        """Determine whether or not we're using the embedded or proxy implementation of Web Services"""
        if self.embedded is None:
            url = self.url
            try:
                parts = urlparse.urlparse(url)
                parts = parts._replace(path='/devmgr/utils/')
                url = urlparse.urlunparse(parts)

                (rc, result) = request(url + 'about', **self.creds)
                self.embedded = not result['runningAsProxy']
            except Exception as err:
                self._logger.exception("Failed to retrieve the About information.")
                self.module.fail_json(msg="Failed to determine the Web Services implementation type!"
                                          " Array Id [%s]. Error [%s]."
                                          % (self.ssid, to_native(err)))

        return self.embedded

    def get_full_configuration(self):
        try:
            (rc, result) = request(self.url + self.base_path, **self.creds)
            return result
        except Exception as err:
            self._logger.exception("Failed to retrieve the LDAP configuration.")
            self.module.fail_json(msg="Failed to retrieve LDAP configuration! Array Id [%s]. Error [%s]."
                                      % (self.ssid, to_native(err)))

    def get_configuration(self, identifier):
        try:
            (rc, result) = request(self.url + self.base_path + '%s' % (identifier), ignore_errors=True, **self.creds)
            if rc == 200:
                return result
            elif rc == 404:
                return None
            else:
                self.module.fail_json(msg="Failed to retrieve LDAP configuration! Array Id [%s]. Error [%s]."
                                          % (self.ssid, result))
        except Exception as err:
            self._logger.exception("Failed to retrieve the LDAP configuration.")
            self.module.fail_json(msg="Failed to retrieve LDAP configuration! Array Id [%s]. Error [%s]."
                                      % (self.ssid, to_native(err)))

    def update_configuration(self):
        # Define a new domain based on the user input
        domain = self.make_configuration()

        # This is the current list of configurations
        current = self.get_configuration(self.identifier)

        update = current != domain
        msg = "No changes were necessary for [%s]." % self.identifier
        self._logger.info("Is updated: %s", update)
        if update and not self.check_mode:
            msg = "The configuration changes were made for [%s]." % self.identifier
            try:
                if current is None:
                    api = self.base_path + 'addDomain'
                else:
                    api = self.base_path + '%s' % (domain['id'])

                (rc, result) = request(self.url + api, method='POST', data=json.dumps(domain), **self.creds)
            except Exception as err:
                self._logger.exception("Failed to modify the LDAP configuration.")
                self.module.fail_json(msg="Failed to modify LDAP configuration! Array Id [%s]. Error [%s]."
                                          % (self.ssid, to_native(err)))

        return msg, update

    def clear_single_configuration(self, identifier=None):
        if identifier is None:
            identifier = self.identifier

        configuration = self.get_configuration(identifier)
        updated = False
        msg = self.NO_CHANGE_MSG
        if configuration:
            updated = True
            msg = "The LDAP domain configuration for [%s] was cleared." % identifier
            if not self.check_mode:
                try:
                    (rc, result) = request(self.url + self.base_path + '%s' % identifier, method='DELETE', **self.creds)
                except Exception as err:
                    self.module.fail_json(msg="Failed to remove LDAP configuration! Array Id [%s]. Error [%s]."
                                              % (self.ssid, to_native(err)))
        return msg, updated

    def clear_configuration(self):
        configuration = self.get_full_configuration()
        updated = False
        msg = self.NO_CHANGE_MSG
        if configuration['ldapDomains']:
            updated = True
            msg = "The LDAP configuration for all domains was cleared."
            if not self.check_mode:
                try:
                    (rc, result) = request(self.url + self.base_path, method='DELETE', ignore_errors=True, **self.creds)

                    # Older versions of NetApp E-Series restAPI does not possess an API to remove all existing configs
                    if rc == 405:
                        for config in configuration['ldapDomains']:
                            self.clear_single_configuration(config['id'])

                except Exception as err:
                    self.module.fail_json(msg="Failed to clear LDAP configuration! Array Id [%s]. Error [%s]."
                                              % (self.ssid, to_native(err)))
% (self.ssid, to_native(err))) return msg, updated def get_base_path(self): embedded = self.is_embedded() if embedded: return 'storage-systems/%s/ldap/' % self.ssid else: return '/ldap/' def update(self): self.base_path = self.get_base_path() if self.ldap: msg, update = self.update_configuration() elif self.identifier: msg, update = self.clear_single_configuration() else: msg, update = self.clear_configuration() self.module.exit_json(msg=msg, changed=update, ) def __call__(self, *args, **kwargs): self.update() def main(): settings = Ldap() settings() if __name__ == '__main__': main()
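
# --- Illustrative sketch (not part of the original module) ---
# A minimal sketch of how make_configuration() flattens the role_mappings
# option into the roleMapCollection payload sent to the API; the sample
# regex and role list below are assumptions for illustration only.
#
#   role_mappings = {'.*admins.*': ['storage.admin', 'security.admin']}
#   roles = [dict(groupRegex=regex, ignoreCase=True, name=role)
#            for regex, names in role_mappings.items() for role in names]
#   # -> [{'groupRegex': '.*admins.*', 'ignoreCase': True, 'name': 'storage.admin'}, ...]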
waseem18/oh-mainline
refs/heads/master
vendor/packages/Pygments/scripts/check_sources.py
117
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
    Checker for file headers
    ~~~~~~~~~~~~~~~~~~~~~~~~

    Make sure each Python file has a correct file header
    including copyright and license information.

    :copyright: Copyright 2006-2013 by the Pygments team, see AUTHORS.
    :license: BSD, see LICENSE for details.
"""

import sys, os, re
import getopt
import cStringIO
from os.path import join, splitext, abspath


checkers = {}

def checker(*suffixes, **kwds):
    only_pkg = kwds.pop('only_pkg', False)
    def deco(func):
        for suffix in suffixes:
            checkers.setdefault(suffix, []).append(func)
        func.only_pkg = only_pkg
        return func
    return deco


name_mail_re = r'[\w ]+(<.*?>)?'
copyright_re = re.compile(r'^    :copyright: Copyright 2006-2013 by '
                          r'the Pygments team, see AUTHORS\.$', re.UNICODE)
copyright_2_re = re.compile(r'^                %s(, %s)*[,.]$' %
                            (name_mail_re, name_mail_re), re.UNICODE)
coding_re    = re.compile(r'coding[:=]\s*([-\w.]+)')
not_ix_re    = re.compile(r'\bnot\s+\S+?\s+i[sn]\s\S+')
is_const_re  = re.compile(r'if.*?==\s+(None|False|True)\b')

misspellings = ["developement", "adress", "verificate",  # ALLOW-MISSPELLING
                "informations"]                          # ALLOW-MISSPELLING


@checker('.py')
def check_syntax(fn, lines):
    try:
        compile(''.join(lines), fn, "exec")
    except SyntaxError, err:
        yield 0, "not compilable: %s" % err


@checker('.py')
def check_style_and_encoding(fn, lines):
    encoding = 'ascii'
    for lno, line in enumerate(lines):
        if len(line) > 90:
            yield lno+1, "line too long"
        m = not_ix_re.search(line)
        if m:
            yield lno+1, '"' + m.group() + '"'
        if is_const_re.search(line):
            yield lno+1, 'using == None/True/False'
        if lno < 2:
            co = coding_re.search(line)
            if co:
                encoding = co.group(1)
        try:
            line.decode(encoding)
        except UnicodeDecodeError, err:
            yield lno+1, "not decodable: %s\n   Line: %r" % (err, line)
        except LookupError, err:
            yield 0, "unknown encoding: %s" % encoding
            encoding = 'latin1'


@checker('.py', only_pkg=True)
def check_fileheader(fn, lines):
    # line number correction
    c = 1
    if lines[0:1] == ['#!/usr/bin/env python\n']:
        lines = lines[1:]
        c = 2

    llist = []
    docopen = False
    for lno, l in enumerate(lines):
        llist.append(l)
        if lno == 0:
            if l == '# -*- coding: rot13 -*-\n':
                # special-case pony package
                return
            elif l != '# -*- coding: utf-8 -*-\n':
                yield 1, "missing coding declaration"
        elif lno == 1:
            if l != '"""\n' and l != 'r"""\n':
                yield 2, 'missing docstring begin (""")'
            else:
                docopen = True
        elif docopen:
            if l == '"""\n':
                # end of docstring
                if lno <= 4:
                    yield lno+c, "missing module name in docstring"
                break

            if l != "\n" and l[:4] != '    ' and docopen:
                yield lno+c, "missing correct docstring indentation"

            if lno == 2:
                # if not in package, don't check the module name
                modname = fn[:-3].replace('/', '.').replace('.__init__', '')
                while modname:
                    if l.lower()[4:-1] == modname:
                        break
                    modname = '.'.join(modname.split('.')[1:])
                else:
                    yield 3, "wrong module name in docstring heading"
                modnamelen = len(l.strip())
            elif lno == 3:
                if l.strip() != modnamelen * "~":
                    yield 4, "wrong module name underline, should be ~~~...~"
    else:
        yield 0, "missing end and/or start of docstring..."

    # check for copyright and license fields
    license = llist[-2:-1]
    if license != ["    :license: BSD, see LICENSE for details.\n"]:
        yield 0, "no correct license info"

    ci = -3
    copyright = [s.decode('utf-8') for s in llist[ci:ci+1]]
    while copyright and copyright_2_re.match(copyright[0]):
        ci -= 1
        copyright = llist[ci:ci+1]
    if not copyright or not copyright_re.match(copyright[0]):
        yield 0, "no correct copyright info"


@checker('.py', '.html', '.js')
def check_whitespace_and_spelling(fn, lines):
    for lno, line in enumerate(lines):
        if "\t" in line:
            yield lno+1, "OMG TABS!!!1 "
        if line[:-1].rstrip(' \t') != line[:-1]:
            yield lno+1, "trailing whitespace"
        for word in misspellings:
            if word in line and 'ALLOW-MISSPELLING' not in line:
                yield lno+1, '"%s" used' % word


bad_tags = ('<b>', '<i>', '<u>', '<s>', '<strike>',
            '<center>', '<big>', '<small>', '<font')

@checker('.html')
def check_xhtml(fn, lines):
    for lno, line in enumerate(lines):
        for bad_tag in bad_tags:
            if bad_tag in line:
                yield lno+1, "used " + bad_tag


def main(argv):
    try:
        gopts, args = getopt.getopt(argv[1:], "vi:")
    except getopt.GetoptError:
        print "Usage: %s [-v] [-i ignorepath]* [path]" % argv[0]
        return 2
    opts = {}
    for opt, val in gopts:
        if opt == '-i':
            val = abspath(val)
        opts.setdefault(opt, []).append(val)

    if len(args) == 0:
        path = '.'
    elif len(args) == 1:
        path = args[0]
    else:
        print "Usage: %s [-v] [-i ignorepath]* [path]" % argv[0]
        return 2

    verbose = '-v' in opts

    num = 0
    out = cStringIO.StringIO()

    # TODO: replace os.walk run with iteration over output of
    #       `svn list -R`.

    for root, dirs, files in os.walk(path):
        if '.svn' in dirs:
            dirs.remove('.svn')
        if '-i' in opts and abspath(root) in opts['-i']:
            del dirs[:]
            continue
        # XXX: awkward: for the Makefile call: don't check non-package
        #      files for file headers
        in_pocoo_pkg = root.startswith('./pygments')
        for fn in files:

            fn = join(root, fn)
            if fn[:2] == './': fn = fn[2:]

            if '-i' in opts and abspath(fn) in opts['-i']:
                continue

            ext = splitext(fn)[1]
            checkerlist = checkers.get(ext, None)
            if not checkerlist:
                continue

            if verbose:
                print "Checking %s..." % fn

            try:
                f = open(fn, 'r')
                lines = list(f)
            except (IOError, OSError), err:
                print "%s: cannot open: %s" % (fn, err)
                num += 1
                continue

            for checker in checkerlist:
                if not in_pocoo_pkg and checker.only_pkg:
                    continue
                for lno, msg in checker(fn, lines):
                    print >>out, "%s:%d: %s" % (fn, lno, msg)
                    num += 1
    if verbose:
        print
    if num == 0:
        print "No errors found."
    else:
        print out.getvalue().rstrip('\n')
        print "%d error%s found." % (num, num > 1 and "s" or "")
    return int(num > 0)


if __name__ == '__main__':
    sys.exit(main(sys.argv))
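
# --- Illustrative sketch (not part of the original script) ---
# How the @checker decorator above is meant to be used: the suffixes select
# which files a check runs on, and the check yields (line_number, message)
# pairs. The '.rst' suffix and the check body are assumptions for
# illustration only.
#
#   @checker('.rst')
#   def check_final_newline(fn, lines):
#       if lines and not lines[-1].endswith('\n'):
#           yield len(lines), "no newline at end of file"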
yuhui-lin/web_page_classification
refs/heads/master
models/crnn.py
1
"""CNN model class""" import tensorflow as tf # import model import models.cnn ######################################### # FLAGS ######################################### FLAGS = tf.app.flags.FLAGS class CRNN(models.cnn.CNN): """convolutional neural network model. classify web page only based on target html.""" def inference(self, page_batch): """Build the CNN model. Args: page_batch: Sequences returned from inputs_train() or inputs_eval. Returns: Logits. """ self.filter_sizes = [3, 4, 5] self.num_filters = len(self.filter_sizes) self.sequence_length = FLAGS.html_len return self.high_classifier(page_batch, self.cnn)
yize/grunt-tps
refs/heads/master
tasks/lib/python/Lib/python2.7/test/test_dummy_thread.py
127
"""Generic thread tests. Meant to be used by dummy_thread and thread. To allow for different modules to be used, test_main() can be called with the module to use as the thread implementation as its sole argument. """ import dummy_thread as _thread import time import Queue import random import unittest from test import test_support DELAY = 0 # Set > 0 when testing a module other than dummy_thread, such as # the 'thread' module. class LockTests(unittest.TestCase): """Test lock objects.""" def setUp(self): # Create a lock self.lock = _thread.allocate_lock() def test_initlock(self): #Make sure locks start locked self.assertTrue(not self.lock.locked(), "Lock object is not initialized unlocked.") def test_release(self): # Test self.lock.release() self.lock.acquire() self.lock.release() self.assertTrue(not self.lock.locked(), "Lock object did not release properly.") def test_improper_release(self): #Make sure release of an unlocked thread raises _thread.error self.assertRaises(_thread.error, self.lock.release) def test_cond_acquire_success(self): #Make sure the conditional acquiring of the lock works. self.assertTrue(self.lock.acquire(0), "Conditional acquiring of the lock failed.") def test_cond_acquire_fail(self): #Test acquiring locked lock returns False self.lock.acquire(0) self.assertTrue(not self.lock.acquire(0), "Conditional acquiring of a locked lock incorrectly " "succeeded.") def test_uncond_acquire_success(self): #Make sure unconditional acquiring of a lock works. self.lock.acquire() self.assertTrue(self.lock.locked(), "Uncondional locking failed.") def test_uncond_acquire_return_val(self): #Make sure that an unconditional locking returns True. self.assertTrue(self.lock.acquire(1) is True, "Unconditional locking did not return True.") self.assertTrue(self.lock.acquire() is True) def test_uncond_acquire_blocking(self): #Make sure that unconditional acquiring of a locked lock blocks. def delay_unlock(to_unlock, delay): """Hold on to lock for a set amount of time before unlocking.""" time.sleep(delay) to_unlock.release() self.lock.acquire() start_time = int(time.time()) _thread.start_new_thread(delay_unlock,(self.lock, DELAY)) if test_support.verbose: print print "*** Waiting for thread to release the lock "\ "(approx. %s sec.) ***" % DELAY self.lock.acquire() end_time = int(time.time()) if test_support.verbose: print "done" self.assertTrue((end_time - start_time) >= DELAY, "Blocking by unconditional acquiring failed.") class MiscTests(unittest.TestCase): """Miscellaneous tests.""" def test_exit(self): #Make sure _thread.exit() raises SystemExit self.assertRaises(SystemExit, _thread.exit) def test_ident(self): #Test sanity of _thread.get_ident() self.assertIsInstance(_thread.get_ident(), int, "_thread.get_ident() returned a non-integer") self.assertTrue(_thread.get_ident() != 0, "_thread.get_ident() returned 0") def test_LockType(self): #Make sure _thread.LockType is the same type as _thread.allocate_locke() self.assertIsInstance(_thread.allocate_lock(), _thread.LockType, "_thread.LockType is not an instance of what " "is returned by _thread.allocate_lock()") def test_interrupt_main(self): #Calling start_new_thread with a function that executes interrupt_main # should raise KeyboardInterrupt upon completion. def call_interrupt(): _thread.interrupt_main() self.assertRaises(KeyboardInterrupt, _thread.start_new_thread, call_interrupt, tuple()) def test_interrupt_in_main(self): # Make sure that if interrupt_main is called in main threat that # KeyboardInterrupt is raised instantly. 
self.assertRaises(KeyboardInterrupt, _thread.interrupt_main) class ThreadTests(unittest.TestCase): """Test thread creation.""" def test_arg_passing(self): #Make sure that parameter passing works. def arg_tester(queue, arg1=False, arg2=False): """Use to test _thread.start_new_thread() passes args properly.""" queue.put((arg1, arg2)) testing_queue = Queue.Queue(1) _thread.start_new_thread(arg_tester, (testing_queue, True, True)) result = testing_queue.get() self.assertTrue(result[0] and result[1], "Argument passing for thread creation using tuple failed") _thread.start_new_thread(arg_tester, tuple(), {'queue':testing_queue, 'arg1':True, 'arg2':True}) result = testing_queue.get() self.assertTrue(result[0] and result[1], "Argument passing for thread creation using kwargs failed") _thread.start_new_thread(arg_tester, (testing_queue, True), {'arg2':True}) result = testing_queue.get() self.assertTrue(result[0] and result[1], "Argument passing for thread creation using both tuple" " and kwargs failed") def test_multi_creation(self): #Make sure multiple threads can be created. def queue_mark(queue, delay): """Wait for ``delay`` seconds and then put something into ``queue``""" time.sleep(delay) queue.put(_thread.get_ident()) thread_count = 5 testing_queue = Queue.Queue(thread_count) if test_support.verbose: print print "*** Testing multiple thread creation "\ "(will take approx. %s to %s sec.) ***" % (DELAY, thread_count) for count in xrange(thread_count): if DELAY: local_delay = round(random.random(), 1) else: local_delay = 0 _thread.start_new_thread(queue_mark, (testing_queue, local_delay)) time.sleep(DELAY) if test_support.verbose: print 'done' self.assertTrue(testing_queue.qsize() == thread_count, "Not all %s threads executed properly after %s sec." % (thread_count, DELAY)) def test_main(imported_module=None): global _thread, DELAY if imported_module: _thread = imported_module DELAY = 2 if test_support.verbose: print print "*** Using %s as _thread module ***" % _thread test_support.run_unittest(LockTests, MiscTests, ThreadTests) if __name__ == '__main__': test_main()
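
# --- Illustrative sketch (not part of the original module) ---
# As the module docstring notes, the same suite can exercise a real thread
# implementation by passing the module to test_main(); a minimal sketch:
#
#   import thread
#   import test_dummy_thread
#   test_dummy_thread.test_main(thread)  # sets DELAY = 2 and reruns the suite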
40223139/2015cdaa5-12
refs/heads/master
static/Brython3.1.1-20150328-091302/Lib/sre_parse.py
630
#
# Secret Labs' Regular Expression Engine
#
# convert re-style regular expression to sre pattern
#
# Copyright (c) 1998-2001 by Secret Labs AB.  All rights reserved.
#
# See the sre.py file for information on usage and redistribution.
#

"""Internal support module for sre"""

# XXX: show string offset and offending character for all errors

import sys

from sre_constants import *
from _sre import MAXREPEAT

SPECIAL_CHARS = ".\\[{()*+?^$|"
REPEAT_CHARS = "*+?{"

DIGITS = set("0123456789")

OCTDIGITS = set("01234567")
HEXDIGITS = set("0123456789abcdefABCDEF")

WHITESPACE = set(" \t\n\r\v\f")

ESCAPES = {
    r"\a": (LITERAL, ord("\a")),
    r"\b": (LITERAL, ord("\b")),
    r"\f": (LITERAL, ord("\f")),
    r"\n": (LITERAL, ord("\n")),
    r"\r": (LITERAL, ord("\r")),
    r"\t": (LITERAL, ord("\t")),
    r"\v": (LITERAL, ord("\v")),
    r"\\": (LITERAL, ord("\\"))
}

CATEGORIES = {
    r"\A": (AT, AT_BEGINNING_STRING), # start of string
    r"\b": (AT, AT_BOUNDARY),
    r"\B": (AT, AT_NON_BOUNDARY),
    r"\d": (IN, [(CATEGORY, CATEGORY_DIGIT)]),
    r"\D": (IN, [(CATEGORY, CATEGORY_NOT_DIGIT)]),
    r"\s": (IN, [(CATEGORY, CATEGORY_SPACE)]),
    r"\S": (IN, [(CATEGORY, CATEGORY_NOT_SPACE)]),
    r"\w": (IN, [(CATEGORY, CATEGORY_WORD)]),
    r"\W": (IN, [(CATEGORY, CATEGORY_NOT_WORD)]),
    r"\Z": (AT, AT_END_STRING), # end of string
}

FLAGS = {
    # standard flags
    "i": SRE_FLAG_IGNORECASE,
    "L": SRE_FLAG_LOCALE,
    "m": SRE_FLAG_MULTILINE,
    "s": SRE_FLAG_DOTALL,
    "x": SRE_FLAG_VERBOSE,
    # extensions
    "a": SRE_FLAG_ASCII,
    "t": SRE_FLAG_TEMPLATE,
    "u": SRE_FLAG_UNICODE,
}

class Pattern:
    # master pattern object.  keeps track of global attributes
    def __init__(self):
        self.flags = 0
        self.open = []
        self.groups = 1
        self.groupdict = {}
    def opengroup(self, name=None):
        gid = self.groups
        self.groups = gid + 1
        if name is not None:
            ogid = self.groupdict.get(name, None)
            if ogid is not None:
                raise error("redefinition of group name %s as group %d; "
                            "was group %d" % (repr(name), gid, ogid))
            self.groupdict[name] = gid
        self.open.append(gid)
        return gid
    def closegroup(self, gid):
        self.open.remove(gid)
    def checkgroup(self, gid):
        return gid < self.groups and gid not in self.open

class SubPattern:
    # a subpattern, in intermediate form
    def __init__(self, pattern, data=None):
        self.pattern = pattern
        if data is None:
            data = []
        self.data = data
        self.width = None
    def __iter__(self):
        return iter(self.data)
    def dump(self, level=0):
        nl = 1
        seqtypes = (tuple, list)
        for op, av in self.data:
            print(level*"  " + op, end=' '); nl = 0
            if op == "in":
                # member sublanguage
                print(); nl = 1
                for op, a in av:
                    print((level+1)*"  " + op, a)
            elif op == "branch":
                print(); nl = 1
                i = 0
                for a in av[1]:
                    if i > 0:
                        print(level*"  " + "or")
                    a.dump(level+1); nl = 1
                    i = i + 1
            elif isinstance(av, seqtypes):
                for a in av:
                    if isinstance(a, SubPattern):
                        if not nl:
                            print()
                        a.dump(level+1); nl = 1
                    else:
                        print(a, end=' ') ; nl = 0
            else:
                print(av, end=' ') ; nl = 0
        if not nl:
            print()
    def __repr__(self):
        return repr(self.data)
    def __len__(self):
        return len(self.data)
    def __delitem__(self, index):
        del self.data[index]
    def __getitem__(self, index):
        if isinstance(index, slice):
            return SubPattern(self.pattern, self.data[index])
        return self.data[index]
    def __setitem__(self, index, code):
        self.data[index] = code
    def insert(self, index, code):
        self.data.insert(index, code)
    def append(self, code):
        self.data.append(code)
    def getwidth(self):
        # determine the width (min, max) for this subpattern
        if self.width:
            return self.width
        lo = hi = 0
        UNITCODES = (ANY, RANGE, IN, LITERAL, NOT_LITERAL, CATEGORY)
        REPEATCODES = (MIN_REPEAT, MAX_REPEAT)
        for op, av in self.data:
            if op is BRANCH:
                i = sys.maxsize
                j = 0
                for av in av[1]:
                    l, h = av.getwidth()
                    i = min(i, l)
                    j = max(j, h)
                lo = lo + i
                hi = hi + j
            elif op is CALL:
                i, j = av.getwidth()
                lo = lo + i
                hi = hi + j
            elif op is SUBPATTERN:
                i, j = av[1].getwidth()
                lo = lo + i
                hi = hi + j
            elif op in REPEATCODES:
                i, j = av[2].getwidth()
                lo = lo + int(i) * av[0]
                hi = hi + int(j) * av[1]
            elif op in UNITCODES:
                lo = lo + 1
                hi = hi + 1
            elif op == SUCCESS:
                break
        self.width = int(min(lo, sys.maxsize)), int(min(hi, sys.maxsize))
        return self.width

class Tokenizer:
    def __init__(self, string):
        self.istext = isinstance(string, str)
        self.string = string
        self.index = 0
        self.__next()
    def __next(self):
        if self.index >= len(self.string):
            self.next = None
            return
        char = self.string[self.index:self.index+1]
        # Special case for the str8, since indexing returns a integer
        # XXX This is only needed for test_bug_926075 in test_re.py
        if char and not self.istext:
            char = chr(char[0])
        if char == "\\":
            try:
                c = self.string[self.index + 1]
            except IndexError:
                raise error("bogus escape (end of line)")
            if not self.istext:
                c = chr(c)
            char = char + c
        self.index = self.index + len(char)
        self.next = char
    def match(self, char, skip=1):
        if char == self.next:
            if skip:
                self.__next()
            return 1
        return 0
    def get(self):
        this = self.next
        self.__next()
        return this
    def getwhile(self, n, charset):
        result = ''
        for _ in range(n):
            c = self.next
            if c not in charset:
                break
            result += c
            self.__next()
        return result
    def tell(self):
        return self.index, self.next
    def seek(self, index):
        self.index, self.next = index

def isident(char):
    return "a" <= char <= "z" or "A" <= char <= "Z" or char == "_"

def isdigit(char):
    return "0" <= char <= "9"

def isname(name):
    # check that group name is a valid string
    if not isident(name[0]):
        return False
    for char in name[1:]:
        if not isident(char) and not isdigit(char):
            return False
    return True

def _class_escape(source, escape):
    # handle escape code inside character class
    code = ESCAPES.get(escape)
    if code:
        return code
    code = CATEGORIES.get(escape)
    if code and code[0] == IN:
        return code
    try:
        c = escape[1:2]
        if c == "x":
            # hexadecimal escape (exactly two digits)
            escape += source.getwhile(2, HEXDIGITS)
            if len(escape) != 4:
                raise ValueError
            return LITERAL, int(escape[2:], 16) & 0xff
        elif c == "u" and source.istext:
            # unicode escape (exactly four digits)
            escape += source.getwhile(4, HEXDIGITS)
            if len(escape) != 6:
                raise ValueError
            return LITERAL, int(escape[2:], 16)
        elif c == "U" and source.istext:
            # unicode escape (exactly eight digits)
            escape += source.getwhile(8, HEXDIGITS)
            if len(escape) != 10:
                raise ValueError
            c = int(escape[2:], 16)
            chr(c) # raise ValueError for invalid code
            return LITERAL, c
        elif c in OCTDIGITS:
            # octal escape (up to three digits)
            escape += source.getwhile(2, OCTDIGITS)
            return LITERAL, int(escape[1:], 8) & 0xff
        elif c in DIGITS:
            raise ValueError
        if len(escape) == 2:
            return LITERAL, ord(escape[1])
    except ValueError:
        pass
    raise error("bogus escape: %s" % repr(escape))

def _escape(source, escape, state):
    # handle escape code in expression
    code = CATEGORIES.get(escape)
    if code:
        return code
    code = ESCAPES.get(escape)
    if code:
        return code
    try:
        c = escape[1:2]
        if c == "x":
            # hexadecimal escape
            escape += source.getwhile(2, HEXDIGITS)
            if len(escape) != 4:
                raise ValueError
            return LITERAL, int(escape[2:], 16) & 0xff
        elif c == "u" and source.istext:
            # unicode escape (exactly four digits)
            escape += source.getwhile(4, HEXDIGITS)
            if len(escape) != 6:
                raise ValueError
            return LITERAL, int(escape[2:], 16)
        elif c == "U" and source.istext:
            # unicode escape (exactly eight digits)
            escape += source.getwhile(8, HEXDIGITS)
            if len(escape) != 10:
                raise ValueError
            c = int(escape[2:], 16)
            chr(c) # raise ValueError for invalid code
            return LITERAL, c
        elif c == "0":
            # octal escape
            escape += source.getwhile(2, OCTDIGITS)
            return LITERAL, int(escape[1:], 8) & 0xff
        elif c in DIGITS:
            # octal escape *or* decimal group reference (sigh)
            if source.next in DIGITS:
                escape = escape + source.get()
                if (escape[1] in OCTDIGITS and escape[2] in OCTDIGITS and
                    source.next in OCTDIGITS):
                    # got three octal digits; this is an octal escape
                    escape = escape + source.get()
                    return LITERAL, int(escape[1:], 8) & 0xff
            # not an octal escape, so this is a group reference
            group = int(escape[1:])
            if group < state.groups:
                if not state.checkgroup(group):
                    raise error("cannot refer to open group")
                return GROUPREF, group
            raise ValueError
        if len(escape) == 2:
            return LITERAL, ord(escape[1])
    except ValueError:
        pass
    raise error("bogus escape: %s" % repr(escape))

def _parse_sub(source, state, nested=1):
    # parse an alternation: a|b|c
    items = []
    itemsappend = items.append
    sourcematch = source.match
    while 1:
        itemsappend(_parse(source, state))
        if sourcematch("|"):
            continue
        if not nested:
            break
        if not source.next or sourcematch(")", 0):
            break
        else:
            raise error("pattern not properly closed")

    if len(items) == 1:
        return items[0]

    subpattern = SubPattern(state)
    subpatternappend = subpattern.append

    # check if all items share a common prefix
    while 1:
        prefix = None
        for item in items:
            if not item:
                break
            if prefix is None:
                prefix = item[0]
            elif item[0] != prefix:
                break
        else:
            # all subitems start with a common "prefix".
            # move it out of the branch
            for item in items:
                del item[0]
            subpatternappend(prefix)
            continue # check next one
        break

    # check if the branch can be replaced by a character set
    for item in items:
        if len(item) != 1 or item[0][0] != LITERAL:
            break
    else:
        # we can store this as a character set instead of a
        # branch (the compiler may optimize this even more)
        set = []
        setappend = set.append
        for item in items:
            setappend(item[0])
        subpatternappend((IN, set))
        return subpattern

    subpattern.append((BRANCH, (None, items)))
    return subpattern

def _parse_sub_cond(source, state, condgroup):
    item_yes = _parse(source, state)
    if source.match("|"):
        item_no = _parse(source, state)
        if source.match("|"):
            raise error("conditional backref with more than two branches")
    else:
        item_no = None
    if source.next and not source.match(")", 0):
        raise error("pattern not properly closed")
    subpattern = SubPattern(state)
    subpattern.append((GROUPREF_EXISTS, (condgroup, item_yes, item_no)))
    return subpattern

_PATTERNENDERS = set("|)")
_ASSERTCHARS = set("=!<")
_LOOKBEHINDASSERTCHARS = set("=!")
_REPEATCODES = set([MIN_REPEAT, MAX_REPEAT])

def _parse(source, state):
    # parse a simple pattern
    subpattern = SubPattern(state)

    # precompute constants into local variables
    subpatternappend = subpattern.append
    sourceget = source.get
    sourcematch = source.match
    _len = len
    PATTERNENDERS = _PATTERNENDERS
    ASSERTCHARS = _ASSERTCHARS
    LOOKBEHINDASSERTCHARS = _LOOKBEHINDASSERTCHARS
    REPEATCODES = _REPEATCODES

    while 1:

        if source.next in PATTERNENDERS:
            break # end of subpattern
        this = sourceget()
        if this is None:
            break # end of pattern

        if state.flags & SRE_FLAG_VERBOSE:
            # skip whitespace and comments
            if this in WHITESPACE:
                continue
            if this == "#":
                while 1:
                    this = sourceget()
                    if this in (None, "\n"):
                        break
                continue

        if this and this[0] not in SPECIAL_CHARS:
            subpatternappend((LITERAL, ord(this)))

        elif this == "[":
            # character set
            set = []
            setappend = set.append
##          if sourcematch(":"):
##              pass # handle character classes
            if sourcematch("^"):
                setappend((NEGATE, None))
            # check remaining characters
            start = set[:]
            while 1:
                this = sourceget()
                if this == "]" and set != start:
                    break
                elif this and this[0] == "\\":
                    code1 = _class_escape(source, this)
                elif this:
                    code1 = LITERAL, ord(this)
                else:
                    raise error("unexpected end of regular expression")
                if sourcematch("-"):
                    # potential range
                    this = sourceget()
                    if this == "]":
                        if code1[0] is IN:
                            code1 = code1[1][0]
                        setappend(code1)
                        setappend((LITERAL, ord("-")))
                        break
                    elif this:
                        if this[0] == "\\":
                            code2 = _class_escape(source, this)
                        else:
                            code2 = LITERAL, ord(this)
                        if code1[0] != LITERAL or code2[0] != LITERAL:
                            raise error("bad character range")
                        lo = code1[1]
                        hi = code2[1]
                        if hi < lo:
                            raise error("bad character range")
                        setappend((RANGE, (lo, hi)))
                    else:
                        raise error("unexpected end of regular expression")
                else:
                    if code1[0] is IN:
                        code1 = code1[1][0]
                    setappend(code1)

            # XXX: <fl> should move set optimization to compiler!
            if _len(set)==1 and set[0][0] is LITERAL:
                subpatternappend(set[0]) # optimization
            elif _len(set)==2 and set[0][0] is NEGATE and set[1][0] is LITERAL:
                subpatternappend((NOT_LITERAL, set[1][1])) # optimization
            else:
                # XXX: <fl> should add charmap optimization here
                subpatternappend((IN, set))

        elif this and this[0] in REPEAT_CHARS:
            # repeat previous item
            if this == "?":
                min, max = 0, 1
            elif this == "*":
                min, max = 0, MAXREPEAT

            elif this == "+":
                min, max = 1, MAXREPEAT
            elif this == "{":
                if source.next == "}":
                    subpatternappend((LITERAL, ord(this)))
                    continue
                here = source.tell()
                min, max = 0, MAXREPEAT
                lo = hi = ""
                while source.next in DIGITS:
                    lo = lo + source.get()
                if sourcematch(","):
                    while source.next in DIGITS:
                        hi = hi + sourceget()
                else:
                    hi = lo
                if not sourcematch("}"):
                    subpatternappend((LITERAL, ord(this)))
                    source.seek(here)
                    continue
                if lo:
                    min = int(lo)
                    if min >= MAXREPEAT:
                        raise OverflowError("the repetition number is too large")
                if hi:
                    max = int(hi)
                    if max >= MAXREPEAT:
                        raise OverflowError("the repetition number is too large")
                if max < min:
                    raise error("bad repeat interval")
            else:
                raise error("not supported")
            # figure out which item to repeat
            if subpattern:
                item = subpattern[-1:]
            else:
                item = None
            if not item or (_len(item) == 1 and item[0][0] == AT):
                raise error("nothing to repeat")
            if item[0][0] in REPEATCODES:
                raise error("multiple repeat")
            if sourcematch("?"):
                subpattern[-1] = (MIN_REPEAT, (min, max, item))
            else:
                subpattern[-1] = (MAX_REPEAT, (min, max, item))

        elif this == ".":
            subpatternappend((ANY, None))

        elif this == "(":
            group = 1
            name = None
            condgroup = None
            if sourcematch("?"):
                group = 0
                # options
                if sourcematch("P"):
                    # python extensions
                    if sourcematch("<"):
                        # named group: skip forward to end of name
                        name = ""
                        while 1:
                            char = sourceget()
                            if char is None:
                                raise error("unterminated name")
                            if char == ">":
                                break
                            name = name + char
                        group = 1
                        if not name:
                            raise error("missing group name")
                        if not isname(name):
                            raise error("bad character in group name")
                    elif sourcematch("="):
                        # named backreference
                        name = ""
                        while 1:
                            char = sourceget()
                            if char is None:
                                raise error("unterminated name")
                            if char == ")":
                                break
                            name = name + char
                        if not name:
                            raise error("missing group name")
                        if not isname(name):
                            raise error("bad character in group name")
                        gid = state.groupdict.get(name)
                        if gid is None:
                            raise error("unknown group name")
                        subpatternappend((GROUPREF, gid))
                        continue
                    else:
                        char = sourceget()
                        if char is None:
                            raise error("unexpected end of pattern")
                        raise error("unknown specifier: ?P%s" % char)
                elif sourcematch(":"):
                    # non-capturing group
                    group = 2
                elif sourcematch("#"):
                    # comment
                    while 1:
                        if source.next is None or source.next == ")":
                            break
                        sourceget()
                    if not sourcematch(")"):
                        raise error("unbalanced parenthesis")
                    continue
                elif source.next in ASSERTCHARS:
                    # lookahead assertions
                    char = sourceget()
                    dir = 1
                    if char == "<":
                        if source.next not in LOOKBEHINDASSERTCHARS:
                            raise error("syntax error")
                        dir = -1 # lookbehind
                        char = sourceget()
                    p = _parse_sub(source, state)
                    if not sourcematch(")"):
                        raise error("unbalanced parenthesis")
                    if char == "=":
                        subpatternappend((ASSERT, (dir, p)))
                    else:
                        subpatternappend((ASSERT_NOT, (dir, p)))
                    continue
                elif sourcematch("("):
                    # conditional backreference group
                    condname = ""
                    while 1:
                        char = sourceget()
                        if char is None:
                            raise error("unterminated name")
                        if char == ")":
                            break
                        condname = condname + char
                    group = 2
                    if not condname:
                        raise error("missing group name")
                    if isname(condname):
                        condgroup = state.groupdict.get(condname)
                        if condgroup is None:
                            raise error("unknown group name")
                    else:
                        try:
                            condgroup = int(condname)
                        except ValueError:
                            raise error("bad character in group name")
                else:
                    # flags
                    if not source.next in FLAGS:
                        raise error("unexpected end of pattern")
                    while source.next in FLAGS:
                        state.flags = state.flags | FLAGS[sourceget()]
            if group:
                # parse group contents
                if group == 2:
                    # anonymous group
                    group = None
                else:
                    group = state.opengroup(name)
                if condgroup:
                    p = _parse_sub_cond(source, state, condgroup)
                else:
                    p = _parse_sub(source, state)
                if not sourcematch(")"):
                    raise error("unbalanced parenthesis")
                if group is not None:
                    state.closegroup(group)
                subpatternappend((SUBPATTERN, (group, p)))
            else:
                while 1:
                    char = sourceget()
                    if char is None:
                        raise error("unexpected end of pattern")
                    if char == ")":
                        break
                    raise error("unknown extension")

        elif this == "^":
            subpatternappend((AT, AT_BEGINNING))

        elif this == "$":
            subpattern.append((AT, AT_END))

        elif this and this[0] == "\\":
            code = _escape(source, this, state)
            subpatternappend(code)

        else:
            raise error("parser error")

    return subpattern

def fix_flags(src, flags):
    # Check and fix flags according to the type of pattern (str or bytes)
    if isinstance(src, str):
        if not flags & SRE_FLAG_ASCII:
            flags |= SRE_FLAG_UNICODE
        elif flags & SRE_FLAG_UNICODE:
            raise ValueError("ASCII and UNICODE flags are incompatible")
    else:
        if flags & SRE_FLAG_UNICODE:
            raise ValueError("can't use UNICODE flag with a bytes pattern")
    return flags

def parse(str, flags=0, pattern=None):
    # parse 're' pattern into list of (opcode, argument) tuples

    source = Tokenizer(str)

    if pattern is None:
        pattern = Pattern()
    pattern.flags = flags
    pattern.str = str

    p = _parse_sub(source, pattern, 0)
    p.pattern.flags = fix_flags(str, p.pattern.flags)

    tail = source.get()
    if tail == ")":
        raise error("unbalanced parenthesis")
    elif tail:
        raise error("bogus characters at end of regular expression")

    if flags & SRE_FLAG_DEBUG:
        p.dump()

    if not (flags & SRE_FLAG_VERBOSE) and p.pattern.flags & SRE_FLAG_VERBOSE:
        # the VERBOSE flag was switched on inside the pattern.  to be
        # on the safe side, we'll parse the whole thing again...
        return parse(str, p.pattern.flags)

    return p

def parse_template(source, pattern):
    # parse 're' replacement string into list of literals and
    # group references
    s = Tokenizer(source)
    sget = s.get
    p = []
    a = p.append
    def literal(literal, p=p, pappend=a):
        if p and p[-1][0] is LITERAL:
            p[-1] = LITERAL, p[-1][1] + literal
        else:
            pappend((LITERAL, literal))
    sep = source[:0]
    if isinstance(sep, str):
        makechar = chr
    else:
        makechar = chr
    while 1:
        this = sget()
        if this is None:
            break # end of replacement string
        if this and this[0] == "\\":
            # group
            c = this[1:2]
            if c == "g":
                name = ""
                if s.match("<"):
                    while 1:
                        char = sget()
                        if char is None:
                            raise error("unterminated group name")
                        if char == ">":
                            break
                        name = name + char
                if not name:
                    raise error("missing group name")
                try:
                    index = int(name)
                    if index < 0:
                        raise error("negative group number")
                except ValueError:
                    if not isname(name):
                        raise error("bad character in group name")
                    try:
                        index = pattern.groupindex[name]
                    except KeyError:
                        raise IndexError("unknown group name")
                a((MARK, index))
            elif c == "0":
                if s.next in OCTDIGITS:
                    this = this + sget()
                    if s.next in OCTDIGITS:
                        this = this + sget()
                literal(makechar(int(this[1:], 8) & 0xff))
            elif c in DIGITS:
                isoctal = False
                if s.next in DIGITS:
                    this = this + sget()
                    if (c in OCTDIGITS and this[2] in OCTDIGITS and
                        s.next in OCTDIGITS):
                        this = this + sget()
                        isoctal = True
                        literal(makechar(int(this[1:], 8) & 0xff))
                if not isoctal:
                    a((MARK, int(this[1:])))
            else:
                try:
                    this = makechar(ESCAPES[this][1])
                except KeyError:
                    pass
                literal(this)
        else:
            literal(this)
    # convert template to groups and literals lists
    i = 0
    groups = []
    groupsappend = groups.append
    literals = [None] * len(p)
    if isinstance(source, str):
        encode = lambda x: x
    else:
        # The tokenizer implicitly decodes bytes objects as latin-1, we must
        # therefore re-encode the final representation.
        encode = lambda x: x.encode('latin-1')
    for c, s in p:
        if c is MARK:
            groupsappend((i, s))
            # literal[i] is already None
        else:
            literals[i] = encode(s)
        i = i + 1
    return groups, literals

def expand_template(template, match):
    g = match.group
    sep = match.string[:0]
    groups, literals = template
    literals = literals[:]
    try:
        for index, group in groups:
            literals[index] = s = g(group)
            if s is None:
                raise error("unmatched group")
    except IndexError:
        raise error("invalid group reference")
    return sep.join(literals)
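
# --- Illustrative sketch (not part of the original module) ---
# parse() is the internal entry point used by sre_compile; a minimal sketch
# of what it returns (a SubPattern wrapping (opcode, argument) tuples):
#
#   import sre_parse
#   p = sre_parse.parse(r'(ab)+')
#   p.dump()            # pretty-print the parsed (opcode, argument) tree
#   print(p.getwidth()) # (min, max) width of the pattern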
home-assistant/home-assistant
refs/heads/dev
tests/components/zwave_js/test_switch.py
6
"""Test the Z-Wave JS switch platform.""" from zwave_js_server.event import Event from homeassistant.components.switch import DOMAIN, SERVICE_TURN_OFF, SERVICE_TURN_ON from homeassistant.const import STATE_OFF, STATE_ON from .common import SWITCH_ENTITY async def test_switch(hass, hank_binary_switch, integration, client): """Test the switch.""" state = hass.states.get(SWITCH_ENTITY) node = hank_binary_switch assert state assert state.state == "off" # Test turning on await hass.services.async_call( "switch", "turn_on", {"entity_id": SWITCH_ENTITY}, blocking=True ) args = client.async_send_command.call_args[0][0] assert args["command"] == "node.set_value" assert args["nodeId"] == 32 assert args["valueId"] == { "commandClassName": "Binary Switch", "commandClass": 37, "endpoint": 0, "property": "targetValue", "propertyName": "targetValue", "metadata": { "type": "boolean", "readable": True, "writeable": True, "label": "Target value", }, "value": False, } assert args["value"] is True # Test state updates from value updated event event = Event( type="value updated", data={ "source": "node", "event": "value updated", "nodeId": 32, "args": { "commandClassName": "Binary Switch", "commandClass": 37, "endpoint": 0, "property": "currentValue", "newValue": True, "prevValue": False, "propertyName": "currentValue", }, }, ) node.receive_event(event) state = hass.states.get(SWITCH_ENTITY) assert state.state == "on" # Test turning off await hass.services.async_call( "switch", "turn_off", {"entity_id": SWITCH_ENTITY}, blocking=True ) args = client.async_send_command.call_args[0][0] assert args["command"] == "node.set_value" assert args["nodeId"] == 32 assert args["valueId"] == { "commandClassName": "Binary Switch", "commandClass": 37, "endpoint": 0, "property": "targetValue", "propertyName": "targetValue", "metadata": { "type": "boolean", "readable": True, "writeable": True, "label": "Target value", }, "value": False, } assert args["value"] is False async def test_barrier_signaling_switch(hass, gdc_zw062, integration, client): """Test barrier signaling state switch.""" node = gdc_zw062 entity = "switch.aeon_labs_garage_door_controller_gen5_signaling_state_visual" state = hass.states.get(entity) assert state assert state.state == "on" # Test turning off await hass.services.async_call( DOMAIN, SERVICE_TURN_OFF, {"entity_id": entity}, blocking=True ) assert len(client.async_send_command.call_args_list) == 1 args = client.async_send_command.call_args[0][0] assert args["command"] == "node.set_value" assert args["nodeId"] == 12 assert args["value"] == 0 assert args["valueId"] == { "ccVersion": 0, "commandClass": 102, "commandClassName": "Barrier Operator", "endpoint": 0, "metadata": { "label": "Signaling State (Visual)", "max": 255, "min": 0, "readable": True, "states": {"0": "Off", "255": "On"}, "type": "number", "writeable": True, }, "property": "signalingState", "propertyKey": 2, "propertyKeyName": "2", "propertyName": "signalingState", "value": 255, } # state change is optimistic and writes state await hass.async_block_till_done() state = hass.states.get(entity) assert state.state == STATE_OFF client.async_send_command.reset_mock() # Test turning on await hass.services.async_call( DOMAIN, SERVICE_TURN_ON, {"entity_id": entity}, blocking=True ) # Note: the valueId's value is still 255 because we never # received an updated value assert len(client.async_send_command.call_args_list) == 1 args = client.async_send_command.call_args[0][0] assert args["command"] == "node.set_value" assert args["nodeId"] == 12 assert 
args["value"] == 255 assert args["valueId"] == { "ccVersion": 0, "commandClass": 102, "commandClassName": "Barrier Operator", "endpoint": 0, "metadata": { "label": "Signaling State (Visual)", "max": 255, "min": 0, "readable": True, "states": {"0": "Off", "255": "On"}, "type": "number", "writeable": True, }, "property": "signalingState", "propertyKey": 2, "propertyKeyName": "2", "propertyName": "signalingState", "value": 255, } # state change is optimistic and writes state await hass.async_block_till_done() state = hass.states.get(entity) assert state.state == STATE_ON # Received a refresh off event = Event( type="value updated", data={ "source": "node", "event": "value updated", "nodeId": 12, "args": { "commandClassName": "Barrier Operator", "commandClass": 102, "endpoint": 0, "property": "signalingState", "propertyKey": 2, "newValue": 0, "prevValue": 0, "propertyName": "signalingState", "propertyKeyName": "2", }, }, ) node.receive_event(event) state = hass.states.get(entity) assert state.state == STATE_OFF # Received a refresh off event = Event( type="value updated", data={ "source": "node", "event": "value updated", "nodeId": 12, "args": { "commandClassName": "Barrier Operator", "commandClass": 102, "endpoint": 0, "property": "signalingState", "propertyKey": 2, "newValue": 255, "prevValue": 255, "propertyName": "signalingState", "propertyKeyName": "2", }, }, ) node.receive_event(event) state = hass.states.get(entity) assert state.state == STATE_ON
DataDog/integrations-core
refs/heads/master
datadog_checks_dev/datadog_checks/dev/tooling/commands/dep.py
1
# (C) Datadog, Inc. 2018-present
# All rights reserved
# Licensed under a 3-clause BSD style license (see LICENSE)
from collections import defaultdict

import click
from packaging.markers import InvalidMarker, Marker
from packaging.specifiers import SpecifierSet

from ...fs import read_file_lines, write_file_lines
from ..constants import get_agent_requirements
from ..dependencies import read_check_dependencies
from .console import CONTEXT_SETTINGS, abort, echo_failure, echo_info


@click.group(context_settings=CONTEXT_SETTINGS, short_help='Manage dependencies')
def dep():
    pass


@dep.command(context_settings=CONTEXT_SETTINGS, short_help='Pin a dependency for all checks that require it')
@click.argument('package')
@click.argument('version')
@click.option('--marker', '-m', help='Environment marker to use')
def pin(package, version, marker):
    """Pin a dependency for all checks that require it.
    This can also resolve transient dependencies.

    Setting the version to `none` will remove the package.
    You can specify an unlimited number of additional checks to apply the pin for via arguments.
    """
    if marker is not None:
        try:
            marker = Marker(marker)
        except InvalidMarker as e:
            abort(f'Invalid marker: {e}')

    dependencies, errors = read_check_dependencies()

    if errors:
        for error in errors:
            echo_failure(error)

        abort()

    package = package.lower()
    if package not in dependencies:
        abort(f'Unknown package: {package}')

    files_to_update = defaultdict(list)
    files_updated = 0

    versions = dependencies[package]
    for dependency_definitions in versions.values():
        for dependency_definition in dependency_definitions:
            files_to_update[dependency_definition.file_path].append(dependency_definition)

    for file_path, dependency_definitions in sorted(files_to_update.items()):
        old_lines = read_file_lines(file_path)

        new_lines = old_lines.copy()

        for dependency_definition in dependency_definitions:
            requirement = dependency_definition.requirement
            if marker != requirement.marker:
                continue

            requirement.specifier = SpecifierSet(f'=={version}')
            new_lines[dependency_definition.line_number] = f'{requirement}\n'

        if new_lines != old_lines:
            files_updated += 1
            write_file_lines(file_path, new_lines)

    if not files_updated:
        abort('No dependency definitions to update')
    echo_info(f'Files updated: {files_updated}')


@dep.command(
    context_settings=CONTEXT_SETTINGS, short_help="Combine all dependencies for the Agent's static environment"
)
def freeze():
    """Combine all dependencies for the Agent's static environment."""
    dependencies, errors = read_check_dependencies()

    if errors:
        for error in errors:
            echo_failure(error)

        abort()

    static_file = get_agent_requirements()

    echo_info(f'Static file: {static_file}')

    data = sorted(
        (
            (dependency_definition.name, str(dependency_definition.requirement).lower())
            for versions in dependencies.values()
            for dependency_definitions in versions.values()
            for dependency_definition in dependency_definitions
        ),
        key=lambda d: d[0],
    )
    lines = sorted(set(f'{d[1]}\n' for d in data))

    write_file_lines(static_file, lines)
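
# --- Illustrative sketch (not part of the original module) ---
# These click commands are exposed through the `ddev` CLI; assumed
# invocation shapes (package name, version and marker are examples only):
#
#   ddev dep pin requests 2.22.0
#   ddev dep pin requests 2.22.0 -m "python_version < '3.0'"
#   ddev dep freeze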
ehashman/oh-mainline
refs/heads/master
vendor/packages/celery/funtests/setup.py
19
#!/usr/bin/env python
# -*- coding: utf-8 -*-

try:
    from setuptools import setup
    from setuptools.command.install import install
except ImportError:
    from ez_setup import use_setuptools
    use_setuptools()
    from setuptools import setup
    from setuptools.command.install import install

import os
import sys

sys.path.insert(0, os.getcwd())
sys.path.insert(0, os.path.join(os.getcwd(), os.pardir))
import suite


class no_install(install):

    def run(self, *args, **kwargs):
        import sys
        sys.stderr.write("""
------------------------------------------------------
The Celery functional test suite cannot be installed.
------------------------------------------------------

But you can execute the tests by running the command:

    $ python setup.py test

""")


setup(
    name='celery-funtests',
    version="DEV",
    description="Functional test suite for Celery",
    author="Ask Solem",
    author_email="[email protected]",
    url="http://github.com/ask/celery",
    platforms=["any"],
    packages=[],
    data_files=[],
    zip_safe=False,
    cmdclass={"install": no_install},
    test_suite="nose.collector",
    tests_require=[
        "unittest2>=0.4.0",
        "simplejson",
        "nose",
        "pytyrant",
        "redis",
        "pymongo",
    ],
    classifiers=[
        "Operating System :: OS Independent",
        "Programming Language :: Python",
        "License :: OSI Approved :: BSD License",
        "Intended Audience :: Developers",
    ],
    long_description="Do not install this package",
)
ancafarcas/superdesk-core
refs/heads/superdesk-master
tests/publish/newsml_1_2_formatter_tests.py
5
# -*- coding: utf-8; -*-
#
# This file is part of Superdesk.
#
# Copyright 2013, 2014, 2015 Sourcefabric z.u. and contributors.
#
# For the full copyright and license information, please see the
# AUTHORS and LICENSE files distributed with this source code, or
# at https://www.sourcefabric.org/superdesk/license

from unittest import mock
from superdesk.utc import utcnow
from superdesk.tests import TestCase
from superdesk.publish.formatters.newsml_1_2_formatter import NewsML12Formatter
from lxml import etree
import datetime
from superdesk.publish import init_app


@mock.patch('superdesk.publish.subscribers.SubscribersService.generate_sequence_number', lambda self, subscriber: 1)
class Newsml12FormatterTest(TestCase):
    article = {
        '_id': 'urn:localhost.abc',
        'guid': 'urn:localhost.abc',
        'source': 'AAP',
        'anpa_category': [{'qcode': 'a', 'name': 'Australian General News'}],
        'headline': 'This is a test headline',
        'byline': 'joe',
        'slugline': 'slugline',
        'subject': [{'qcode': '02011001'}, {'qcode': '02011002'}],
        'anpa_take_key': 'take_key',
        'unique_id': '1',
        'body_html': '<p>The story body</p>',
        'type': 'text',
        'word_count': '1',
        'priority': 1,
        '_current_version': 5,
        'state': 'published',
        'urgency': 2,
        'pubstatus': 'usable',
        'dateline': {
            'source': 'AAP',
            'text': 'sample dateline',
            'located': {
                'alt_name': '', 'state': 'California', 'city_code': 'Los Angeles', 'city': 'Los Angeles',
                'dateline': 'city', 'country_code': 'US', 'country': 'USA',
                'tz': 'America/Los_Angeles', 'state_code': 'CA'
            }
        },
        'keywords': ['traffic'],
        'abstract': 'sample abstract',
        'place': [
            {'qcode': 'NSW', 'name': 'NSW', 'state': 'New South Wales',
             'country': 'Australia', 'world_region': 'Oceania'}
        ],
        'ednote': 'this is test',
        'body_footer': '<p>call helpline 999 if you are planning to quit smoking</p>',
        'company_codes': [{'name': 'YANCOAL AUSTRALIA LIMITED', 'qcode': 'YAL', 'security_exchange': 'ASX'}]
    }

    preformatted = {
        '_id': 'urn:localhost.123',
        'guid': 'urn:localhost.123',
        'source': 'AAP',
        'anpa_category': [{'qcode': 'a', 'name': 'Australian General News'}],
        'headline': 'This is a test headline',
        'byline': 'joe',
        'slugline': 'slugline',
        'subject': [{'qcode': '02011001'}, {'qcode': '02011002'}],
        'anpa_take_key': 'take_key',
        'unique_id': '1',
        'type': 'preformatted',
        'body_html': 'The story body',
        'word_count': '1',
        '_current_version': 5,
        'state': 'published',
        'urgency': 2,
        'pubstatus': 'usable',
        'dateline': {'text': 'sample dateline'},
        'keywords': ['traffic'],
        'abstract': 'sample abstract',
        'place': [
            {'qcode': 'Australia', 'name': 'Australia', 'state': '',
             'country': 'Australia', 'world_region': 'Oceania'}
        ]
    }

    picture = {
        '_id': 'tag:localhost:2015:cf15b059-b997-4e34-a103-85b8d7ea4ba3',
        'guid': 'tag:localhost:2015:cf15b059-b997-4e34-a103-85b8d7ea4ba3',
        'firstcreated': '2015-09-20T06:12:57.000Z',
        'versioncreated': '2015-09-20T06:14:11.000Z',
        'dateline': {
            'source': 'AAP',
            'date': '2015-08-14T04:45:53.000Z'
        },
        'renditions': {
            'viewImage': {
                'height': 415,
                'href': 'http://localhost:5000/api/upload/55cd72811d41c828e1773786/raw?_schema=http',
                'media': '55cd72811d41c828e1773786',
                'mimetype': 'image/jpeg',
                'width': 640
            },
            'baseImage': {
                'height': 909,
                'href': 'http://localhost:5000/api/upload/55cd72811d41c828e1773782/raw?_schema=http',
                'media': '55cd72811d41c828e1773782',
                'mimetype': 'image/jpeg',
                'width': 1400
            },
            'thumbnail': {
                'height': 120,
                'href': 'http://localhost:5000/api/upload/55cd72811d41c828e1773784/raw?_schema=http',
                'media': '55cd72811d41c828e1773784',
                'mimetype': 'image/jpeg',
                'width': 184
            },
            'original': {
                'height': 2455,
                'href': 'http://localhost:5000/api/upload/55cd72801d41c828e1773762/raw?_schema=http',
                'media': '55cd72801d41c828e1773762',
                'mimetype': 'image/jpeg',
                'width': 3777
            }
        },
        'state': 'published',
        'anpa_category': [{'qcode': 'a', 'name': 'Australian General News'}],
        'source': 'AAP Image',
        '_current_version': 1,
        'original_source': 'AAP Image/AAP',
        'description': 'Federal Education Minister Christopher Pyne launches his new book NO ARCHIVING',
        'type': 'picture',
        'slugline': 'NUS CHRISTOPHER PYNE PROTEST',
        'headline': 'NUS CHRISTOPHER PYNE PROTEST',
        'pubstatus': 'usable',
        'ednote': '',
        'byline': 'TRACEY NEARMY',
        'filemeta': {
            'yresolution': [300, 1],
            'exposuretime': [1, 200],
            'copyright': '                                                      ',
            'scenecapturetype': 0,
            'sensingmethod': 2,
            'fnumber': [14, 5],
            'flashpixversion': '0100',
            'xresolution': [300, 1],
            'resolutionunit': 2,
            'subsectimedigitized': '20',
            'exposureprogram': 1,
            'subsectimeoriginal': '20',
            'make': 'NIKON CORPORATION',
            'focallengthin35mmfilm': 200,
            'scenetype': 1,
            'exifimageheight': 2455,
            'saturation': 0,
            'colorspace': 1,
            'subjectdistancerange': 0,
            'datetime': '2015:07:31 18:55:37',
            'software': 'Photogene for iPad v4.3',
            'flash': 16,
            'focallength': [200, 1],
            'componentsconfiguration': '\u0001\u0002\u0003\u0000',
            'lightsource': 3,
            'artist': ' ',
            'isospeedratings': 2000,
            'whitepoint': [313, 1000],
            'sharpness': 2,
            'exposuremode': 1,
            'meteringmode': 3,
            'compressedbitsperpixel': [4, 1],
            'model': 'NIKON D800E',
            'subsectime': '20',
            'datetimedigitized': '2015:07:31 18:55:37',
            'exifoffset': 406,
            'contrast': 0,
            'whitebalance': 1,
            'exifimagewidth': 3777,
            'datetimeoriginal': '2015:07:31 18:55:37',
            'customrendered': 0,
            'maxaperturevalue': [3, 1],
            'digitalzoomratio': [1, 1],
            'primarychromaticities': [16, 25],
            'length': 8009209,
            'exifversion': '0230',
            'gaincontrol': 2,
            'gamma': [11, 5],
            'filesource': 3
        },
        'language': 'en',
        'mimetype': 'image/jpeg',
        'sign_off': 'mar',
        'unique_id': 573
    }

    video = {
        '_id': 'urn:newsml:localhost:2015-09-20T16:12:57.333001:f3856812-0999-4ed8-b69e-68dcdeb1ed2e',
        'guid': 'tag:localhost:2015:c11e11c4-cdbc-41ef-b939-2b30dd8365fb',
        'language': 'en',
        'family_id': 'urn:newsml:localhost:2015-09-20T16:12:57.333001:f3856812-0999-4ed8-b69e-68dcdeb1ed2e',
        '_current_version': 3,
        'versioncreated': '2015-09-20T06:14:11.000Z',
        'unique_id': 274,
        'renditions': {
            'original': {
                'media': '55fe4e691d41c8cac923ceb2',
                'href': 'http://192.168.220.176:5000/api/upload/55fe4e691d41c8cac923ceb2/raw?_schema=http',
                'mimetype': 'video/mp4'
            }
        },
        'state': 'in_progress',
        'version_creator': '55ee82871d41c86ee1d78c45',
        'sign_off': 'ADM',
        'media': '55fe4e691d41c8cac923ceb2',
        'source': 'AAP',
        'original_source': 'AAP Video/AAP',
        'pubstatus': 'usable',
        'filemeta': {
            'mime_type': 'video/mp4',
            'last_modification': '1904-01-01T00:00:00+00:00',
            'creation_date': '1904-01-01T00:00:00+00:00',
            'height': '270',
            'width': '480',
            'duration': '0:00:10.224000',
            'comment': 'User volume: 100.0%',
            'length': 877869,
            'endian': 'Big endian'
        },
        'event_id': 'tag:localhost:2015:f3ae4441-4721-4987-8265-88d747b6a550',
        'original_creator': '55ee82871d41c86ee1d78c45',
        'expiry': '2016-12-21T14:14:11.000Z',
        'firstcreated': '2015-09-20T06:12:57.000Z',
        '_created': '2015-09-20T06:12:57.000Z',
        'type': 'video',
        'unique_name': '#274',
        'mimetype': 'video/mp4',
        'version': 2,
        'headline': 'test video',
        'description': 'test video',
        'abstract': 'test video',
        'slugline': 'test video keyword',
        'byline': 'test video',
        'subject': [{'qcode': '01001000', 'name': 'archaeology', 'parent': '01000000'}],
        'place': [{'qcode': 'ACT', 'name': 'ACT'}],
        'anpa_category': [{'qcode': 'a', 'name': 'Australian General News'}]
    }

    vocab = [{'_id': 'rightsinfo', 'items': [{'name': 'AAP',
                                              'copyrightHolder': 'copy right holder',
                                              'copyrightNotice': 'copy right notice',
                                              'usageTerms': 'terms'},
                                             {'name': 'default',
                                              'copyrightHolder': 'default copy right holder',
                                              'copyrightNotice': 'default copy right notice',
                                              'usageTerms': 'default terms'}]}]

    package = {
        '_id': 'urn:newsml:localhost:2015-08-12T11:59:58.457029:7e90d257-92f6-406d-9186-95653b211701',
        'guid': 'urn:newsml:localhost:2015-08-12T11:59:58.457029:7e90d257-92f6-406d-9186-95653b211701',
        'type': 'composite',
        '_current_version': 1,
        'groups': [
            {
                'role': 'grpRole:NEP',
                'id': 'root',
                'refs': [{'idRef': 'main'}]
            },
            {
                'role': 'grpRole:main',
                'id': 'main',
                'refs': [
                    {
                        'type': 'text',
                        'renditions': {},
                        'itemClass': 'icls:text',
                        'guid': 'tag:localhost:2015:5838657b-b3ec-4e5a-9b39-36039e16400b',
                        'residRef': 'tag:localhost:2015:5838657b-b3ec-4e5a-9b39-36039e16400b',
                        'location': 'archive',
                        'headline': 'US:US cop sacked over student shooting',
                        'slugline': 'US Police',
                        '_current_version': 4
                    }
                ]
            }
        ],
        'pubstatus': 'usable',
        'state': 'published',
        'marked_for_not_publication': False,
        'dateline': {
            'located': {
                'alt_name': '', 'state': 'California', 'city_code': 'Los Angeles', 'city': 'Los Angeles',
                'dateline': 'city', 'country_code': 'US', 'country': 'USA',
                'tz': 'America/Los_Angeles', 'state_code': 'CA'
            },
            'date': '2015-08-12T01:59:58.000Z',
            'source': 'AAP',
            'text': 'Los Angeles, Aug 11 AAP -'
        },
        'language': 'en',
        'headline': 'Cop sacked over student shooting',
        'source': 'AAP',
        'slugline': 'US Police',
        'anpa_category': [{'name': 'International News', 'qcode': 'I'}],
        'subject': [{'name': 'police', 'parent': '02000000', 'qcode': '02003000'}]
    }

    picture_package = {
        '_id': 'urn:newsml:localhost:2015-08-13T14:07:59.846466:c659e21b-1ea2-48b7-9b35-e971ae9d1e6e',
        'guid': 'urn:newsml:localhost:2015-08-13T14:07:59.846466:c659e21b-1ea2-48b7-9b35-e971ae9d1e6e',
        'language': 'en',
        'pubstatus': 'usable',
        'groups': [
            {
                'refs': [{'idRef': 'main'}],
                'id': 'root',
                'role': 'grpRole:NEP'
            },
            {
                'refs': [
                    {
                        'guid': '20150813001165688150',
                        'headline': 'Prison Riot',
                        'residRef': 'tag:localhost:2015:0c12aa0a-82ef-4c58-a363-c5bd8a368037',
                        'location': 'archive',
                        'type': 'picture',
                        'slugline': 'Prison Riot',
                        'renditions': {
                            'baseImage': {
                                'height': 1400, 'mimetype': 'image/jpeg', 'width': 1120,
                                'href': 'http://localhost:5000/api/upload/55cc03731d41c8cea12b650a/raw?_schema=http',
                                'media': '55cc03731d41c8cea12b650a'
                            },
                            'thumbnail': {
                                'height': 120, 'mimetype': 'image/jpeg', 'width': 96,
                                'href': 'http://localhost:5000/api/upload/55cc03731d41c8cea12b650c/raw?_schema=http',
                                'media': '55cc03731d41c8cea12b650c'
                            },
                            'viewImage': {
                                'height': 640, 'mimetype': 'image/jpeg', 'width': 512,
                                'href': 'http://localhost:5000/api/upload/55cc03731d41c8cea12b650e/raw?_schema=http',
                                'media': '55cc03731d41c8cea12b650e'
                            },
                            'original': {
                                'height': 800, 'mimetype': 'image/jpeg', 'width': 640,
                                'href': 'http://localhost:5000/api/upload/55cc03731d41c8cea12b6508/raw?_schema=http',
                                'media': '55cc03731d41c8cea12b6508'
                            }
                        },
                        'itemClass': 'icls:picture',
                        '_current_version': 4
                    }
                ],
                'id': 'main',
                'role': 'grpRole:main'
            }
        ],
        'type': 'composite',
        'state': 'published',
        'slugline': 'Prison Riot',
        'description': 'This Jan. 21, 2015 photo is of something)',
        'source': 'AAP',
        'headline': 'Prison Riot',
        '_current_version': 1,
        'dateline': {
            'date': '2015-08-13T04:07:59.000Z',
            'source': 'AAP'
        },
        'marked_for_not_publication': False,
        'sign_off': 'mar',
    }

    picture_text_package = {
        '_id': 'urn:newsml:localhost:2015-08-13T14:07:59.846466:c659e21b-1ea2-48b7-9b35-e971ae9d1e6e',
        'guid': 'urn:newsml:localhost:2015-08-13T14:07:59.846466:c659e21b-1ea2-48b7-9b35-e971ae9d1e6e',
        'language': 'en',
        'pubstatus': 'usable',
        'groups': [
            {
                'refs': [{'idRef': 'main'}],
                'id': 'root',
                'role': 'grpRole:NEP'
            },
            {
                'refs': [
                    {
                        'type': 'text',
                        'renditions': {},
                        'itemClass': 'icls:text',
                        'guid': 'tag:localhost:2015:5838657b-b3ec-4e5a-9b39-36039e16400b',
                        'residRef': 'tag:localhost:2015:5838657b-b3ec-4e5a-9b39-36039e16400b',
                        'location': 'archive',
                        'headline': 'US:US cop sacked over student shooting',
                        'slugline': 'US Police',
                        '_current_version': 4
                    },
                    {
                        'guid': '20150813001165688150',
                        'headline': 'Prison Riot',
                        'residRef': 'tag:localhost:2015:0c12aa0a-82ef-4c58-a363-c5bd8a368037',
                        'location': 'archive',
                        'type': 'picture',
                        'slugline': 'Prison Riot',
                        'renditions': {
                            'baseImage': {
                                'height': 1400, 'mimetype': 'image/jpeg', 'width': 1120,
                                'href': 'http://localhost:5000/api/upload/55cc03731d41c8cea12b650a/raw?_schema=http',
                                'media': '55cc03731d41c8cea12b650a'
                            },
                            'thumbnail': {
                                'height': 120, 'mimetype': 'image/jpeg', 'width': 96,
                                'href': 'http://localhost:5000/api/upload/55cc03731d41c8cea12b650c/raw?_schema=http',
                                'media': '55cc03731d41c8cea12b650c'
                            },
                            'viewImage': {
                                'height': 640, 'mimetype': 'image/jpeg', 'width': 512,
                                'href': 'http://localhost:5000/api/upload/55cc03731d41c8cea12b650e/raw?_schema=http',
                                'media': '55cc03731d41c8cea12b650e'
                            },
                            'original': {
                                'height': 800, 'mimetype': 'image/jpeg', 'width': 640,
                                'href': 'http://localhost:5000/api/upload/55cc03731d41c8cea12b6508/raw?_schema=http',
                                'media': '55cc03731d41c8cea12b6508'
                            }
                        },
                        'itemClass': 'icls:picture',
                        '_current_version': 4
                    }
                ],
                'id': 'main',
                'role': 'grpRole:main'
            }
        ],
        'type': 'composite',
        'state': 'published',
        'slugline': 'Prison Riot',
        'description': 'This Jan. 21, 2015 photo is of something)',
        'source': 'AAP',
        'headline': 'Prison Riot',
        '_current_version': 1,
        'dateline': {
            'date': '2015-08-13T04:07:59.000Z',
            'source': 'AAP'
        },
        'marked_for_not_publication': False,
        'sign_off': 'mar',
    }

    picture_text_package_multi_group = {
        '_id': 'urn:newsml:localhost:2015-08-13T14:07:59.846466:c659e21b-1ea2-48b7-9b35-e971ae9d1e6e',
        'guid': 'urn:newsml:localhost:2015-08-13T14:07:59.846466:c659e21b-1ea2-48b7-9b35-e971ae9d1e6e',
        'language': 'en',
        'pubstatus': 'usable',
        'groups': [
            {
                'refs': [{'idRef': 'main'}, {'idRef': 'picture'}],
                'id': 'root',
                'role': 'grpRole:NEP'
            },
            {
                'refs': [
                    {
                        'type': 'text',
                        'renditions': {},
                        'itemClass': 'icls:text',
                        'guid': 'tag:localhost:2015:5838657b-b3ec-4e5a-9b39-36039e16400b',
                        'residRef': 'tag:localhost:2015:5838657b-b3ec-4e5a-9b39-36039e16400b',
                        'location': 'archive',
                        'headline': 'US:US cop sacked over student shooting',
                        'slugline': 'US Police',
                        '_current_version': 4
                    }
                ],
                'id': 'main',
                'role': 'grpRole:main'
            },
            {
                'refs': [
                    {
                        'guid': '20150813001165688150',
                        'headline': 'Prison Riot',
                        'residRef': 'tag:localhost:2015:0c12aa0a-82ef-4c58-a363-c5bd8a368037',
                        'location': 'archive',
                        'type': 'picture',
                        'slugline': 'Prison Riot',
                        'renditions': {
                            'baseImage': {
                                'height': 1400, 'mimetype': 'image/jpeg', 'width': 1120,
                                'href': 'http://localhost:5000/api/upload/55cc03731d41c8cea12b650a/raw?_schema=http',
                                'media': '55cc03731d41c8cea12b650a'
                            },
                            'thumbnail': {
                                'height': 120, 'mimetype': 'image/jpeg', 'width': 96,
                                'href': 'http://localhost:5000/api/upload/55cc03731d41c8cea12b650c/raw?_schema=http',
                                'media': '55cc03731d41c8cea12b650c'
                            },
                            'viewImage': {
                                'height': 640, 'mimetype': 'image/jpeg', 'width': 512,
                                'href': 'http://localhost:5000/api/upload/55cc03731d41c8cea12b650e/raw?_schema=http',
                                'media': '55cc03731d41c8cea12b650e'
                            },
                            'original': {
                                'height': 800, 'mimetype': 'image/jpeg', 'width': 640,
                                'href': 'http://localhost:5000/api/upload/55cc03731d41c8cea12b6508/raw?_schema=http',
                                'media': '55cc03731d41c8cea12b6508'
                            }
                        },
                        'itemClass': 'icls:picture',
                        '_current_version': 4
                    }
                ],
                'id': 'picture',
                'role': 'grpRole:picture'
            }
        ],
        'type': 'composite',
        'state': 'published',
        'slugline': 'Prison Riot',
        'description': 'This Jan.
21, 2015 photo is of something)', 'source': 'AAP', 'headline': 'Prison Riot', '_current_version': 1, 'dateline': { 'date': '2015-08-13T04:07:59.000Z', 'source': 'AAP' }, 'marked_for_not_publication': False, 'sign_off': 'mar', } packaged_articles = [{'_id': 'tag:localhost:2015:5838657b-b3ec-4e5a-9b39-36039e16400b', 'headline': 'package article headline', 'slugline': 'slugline', '_current_version': 4, 'state': 'published', 'pubStatus': 'usable'}, {'_id': 'tag:localhost:2015:0c12aa0a-82ef-4c58-a363-c5bd8a368037', 'headline': 'package article headline', 'slugline': 'slugline', '_current_version': 4, 'state': 'published', 'pubStatus': 'usable'}] now = datetime.datetime(2015, 6, 13, 11, 45, 19, 0) def setUp(self): self.article['state'] = 'published' self._setup_dates([self.article, self.video, self.picture, self.package, self.picture_package, self.preformatted, self.picture_text_package, self.picture_text_package_multi_group]) self.newsml = etree.Element("NewsML") self.formatter = NewsML12Formatter() self.formatter.now = self.now self.formatter.string_now = self.now.strftime('%Y%m%dT%H%M%S+0000') with self.app.app_context(): init_app(self.app) self.app.data.insert('vocabularies', self.vocab) def _setup_dates(self, item_list): for item in item_list: item['firstcreated'] = self.now item['versioncreated'] = self.now def test_format_news_envelope(self): self.formatter._format_news_envelope(self.article, self.newsml, 7) self.assertEqual(self.newsml.find('TransmissionId').text, '7') self.assertEqual(self.newsml.find('DateAndTime').text, '20150613T114519+0000') self.assertEqual(self.newsml.find('Priority').get('FormalName'), '1') newsml = etree.Element("NewsML") self.formatter._format_news_envelope(self.preformatted, newsml, 7) self.assertEqual(newsml.find('Priority').get('FormalName'), '5') def test_format_identification(self): self.formatter._format_identification(self.article, self.newsml) self.assertEqual(self.newsml.find('Identification/NewsIdentifier/ProviderId').text, 'sourcefabric.org') self.assertEqual(self.newsml.find('Identification/NewsIdentifier/DateId').text, '20150613') self.assertEqual(self.newsml.find('Identification/NewsIdentifier/NewsItemId').text, 'urn:localhost.abc') self.assertEqual(self.newsml.find('Identification/NewsIdentifier/RevisionId').get('PreviousRevision'), '0') self.assertEqual(self.newsml.find('Identification/NewsIdentifier/PublicIdentifier').text, 'urn:localhost.abc:5N') self.assertEqual(self.newsml.find('Identification/DateLabel').text, 'Saturday 13 June 2015') def test_format_identification_for_corrections(self): self.article['state'] = 'corrected' self.article['_current_version'] = 7 self.formatter._format_identification(self.article, self.newsml) self.assertEqual(self.newsml.find('Identification/NewsIdentifier/RevisionId').get('PreviousRevision'), '6') self.assertEqual(self.newsml.find('Identification/NewsIdentifier/RevisionId').get('Update'), 'N') self.article['state'] = 'killed' self.formatter._format_identification(self.article, self.newsml) self.assertEqual(self.newsml.find('Identification/NewsIdentifier/RevisionId').get('PreviousRevision'), '6') self.assertEqual(self.newsml.find('Identification/NewsIdentifier/RevisionId').get('Update'), 'N') def test_format_news_management(self): self.formatter._format_news_management(self.article, self.newsml) self.assertEqual(self.newsml.find('NewsManagement/NewsItemType').get('FormalName'), 'News') self.assertEqual(self.newsml.find('NewsManagement/FirstCreated').text, '20150613T114519+0000') 
self.assertEqual(self.newsml.find('NewsManagement/ThisRevisionCreated').text, '20150613T114519+0000') self.assertEqual(self.newsml.find('NewsManagement/Status').get('FormalName'), 'usable') self.assertEqual(self.newsml.find('NewsManagement/Urgency').get('FormalName'), '2') self.assertEqual(self.newsml.find('NewsManagement/Instruction').get('FormalName'), 'Update') def test_format_news_management_for_corrections(self): self.article['state'] = 'corrected' self.formatter._format_news_management(self.article, self.newsml) self.assertEqual(self.newsml.find('NewsManagement/Instruction').get('FormalName'), 'Correction') def test_format_news_component(self): self.formatter._format_news_component(self.article, self.newsml) self.assertEqual(self.newsml.find('NewsComponent/NewsComponent/Role'). get('FormalName'), 'Main') self.assertEqual(self.newsml.find('NewsComponent/NewsComponent/NewsLines/HeadLine'). text, 'This is a test headline') self.assertEqual(self.newsml.find('NewsComponent/NewsComponent/NewsLines/ByLine'). text, 'joe') self.assertEqual(self.newsml.find('NewsComponent/NewsComponent/NewsLines/DateLine'). text, 'sample dateline') self.assertEqual(self.newsml.find('NewsComponent/NewsComponent/NewsLines/CreditLine'). text, 'AAP') self.assertEqual(self.newsml.find('NewsComponent/NewsComponent/NewsLines/KeywordLine'). text, 'slugline') self.assertEqual( self.newsml.findall( 'NewsComponent/NewsComponent/DescriptiveMetadata/SubjectCode/Subject')[0].get('FormalName'), '02011001') self.assertEqual( self.newsml.findall( 'NewsComponent/NewsComponent/DescriptiveMetadata/SubjectCode/Subject')[1].get('FormalName'), '02011002') self.assertEqual(self.newsml.find( 'NewsComponent/NewsComponent/DescriptiveMetadata/Property').get('Value'), 'a') self.assertEqual( self.newsml.findall( 'NewsComponent/NewsComponent/NewsComponent/ContentItem/DataContent')[0].text, 'sample abstract') self.assertEqual(self.newsml.findall( 'NewsComponent/NewsComponent/NewsComponent/ContentItem/DataContent/nitf/body/body.content/p')[0].text, 'The story body') self.assertEqual( self.newsml.findall( 'NewsComponent/NewsComponent/NewsComponent/ContentItem/DataContent/nitf/body/body.content/p')[1].text, 'call helpline 999 if you are planning to quit smoking') self.assertEqual(self.newsml.find('.//NewsLines/NewsLine/NewsLineText').text, 'this is test') company_info = self.newsml.find('NewsComponent/NewsComponent/Metadata/Property[@FormalName="Ticker Symbol"]') self.assertEqual(company_info.attrib['Value'], 'YAL') company_info = self.newsml.find('NewsComponent/NewsComponent/Metadata/Property[@FormalName="Exchange"]') self.assertEqual(company_info.attrib['Value'], 'ASX') def test_format_news_management_for_embargo(self): embargo_ts = (utcnow() + datetime.timedelta(days=2)) doc = self.article.copy() doc['embargo'] = embargo_ts self.formatter._format_news_management(doc, self.newsml) self.assertEqual(self.newsml.find('NewsManagement/NewsItemType').get('FormalName'), 'News') self.assertEqual(self.newsml.find('NewsManagement/FirstCreated').text, '20150613T114519+0000') self.assertEqual(self.newsml.find('NewsManagement/ThisRevisionCreated').text, '20150613T114519+0000') self.assertEqual(self.newsml.find('NewsManagement/Urgency').get('FormalName'), '2') self.assertEqual(self.newsml.find('NewsManagement/Instruction').get('FormalName'), 'Update') self.assertEqual(self.newsml.find('NewsManagement/Status').get('FormalName'), 'Embargoed') self.assertEqual(self.newsml.find('NewsManagement/StatusWillChange/FutureStatus').get('FormalName'), 'usable') 
self.assertEqual(self.newsml.find('NewsManagement/StatusWillChange/DateAndTime').text, embargo_ts.isoformat()) def test_format_place(self): doc = self.article.copy() self.formatter._format_place(doc, self.newsml) self.assertEqual(self.newsml.find( 'Location/Property[@FormalName="CountryArea"]').get('Value'), "New South Wales") self.assertEqual(self.newsml.find('Location/Property[@FormalName="Country"]').get('Value'), "Australia") self.assertEqual(self.newsml.find('Location/Property[@FormalName="WorldRegion"]').get('Value'), "Oceania") def test_format_dateline(self): doc = self.article.copy() self.formatter._format_dateline(doc, self.newsml) self.assertEqual(self.newsml.find('Location/Property[@FormalName="City"]').get('Value'), "Los Angeles") self.assertEqual(self.newsml.find('Location/Property[@FormalName="CountryArea"]').get('Value'), "California") self.assertEqual(self.newsml.find('Location/Property[@FormalName="Country"]').get('Value'), "USA") def test_duration(self): self.assertEqual(self.formatter._get_total_duration(None), 0) self.assertEqual(self.formatter._get_total_duration('dsf'), 0) self.assertEqual(self.formatter._get_total_duration('0:1:0.0000'), 60) self.assertEqual(self.formatter._get_total_duration('0:1:10.0000'), 70) self.assertEqual(self.formatter._get_total_duration('1:1:10.0000'), 3670) def test_format_picture(self): doc = self.picture.copy() seq, xml_str = self.formatter.format(doc, {'name': 'Test Subscriber'})[0] xml = etree.fromstring(xml_str) self.assertEqual(xml.find('NewsItem/NewsComponent/NewsComponent/NewsLines/HeadLine').text, 'NUS CHRISTOPHER PYNE PROTEST') self.assertEqual(xml.find('NewsItem/NewsComponent/NewsComponent/NewsLines/ByLine').text, 'TRACEY NEARMY') self.assertEqual(xml.find('NewsItem/NewsComponent/NewsComponent/NewsLines/CreditLine').text, 'AAP Image/AAP') self.assertEqual(xml.find('NewsItem/NewsComponent/NewsComponent/NewsLines/KeywordLine').text, 'NUS CHRISTOPHER PYNE PROTEST') self.assertEqual(xml.find(('NewsItem/NewsComponent/NewsComponent/DescriptiveMetadata/' 'Property[@FormalName="Category"]')).get('Value'), 'a') for rendition, value in doc.get('renditions').items(): xpath = './/Role[@FormalName="{}"]/../ContentItem'.format(rendition) content_item = xml.find(xpath) self.assertEqual(content_item.get('Href'), value.get('href')) self.assertEqual(content_item.find('MediaType').get('FormalName'), 'Photo') self.assertEqual(content_item.find('Format').get('FormalName'), value.get('mimetype')) self.assertEqual(content_item.find('Characteristics/Property[@FormalName="Width"]').get('Value'), str(value.get('width'))) self.assertEqual(content_item.find('Characteristics/Property[@FormalName="Height"]').get('Value'), str(value.get('height'))) def test_format_video(self): doc = self.video.copy() seq, xml_str = self.formatter.format(doc, {'name': 'Test Subscriber'})[0] xml = etree.fromstring(xml_str) self.assertEqual(xml.find('NewsItem/NewsComponent/NewsComponent/NewsLines/HeadLine').text, 'test video') self.assertEqual(xml.find('NewsItem/NewsComponent/NewsComponent/NewsLines/ByLine').text, 'test video') self.assertEqual(xml.find('NewsItem/NewsComponent/NewsComponent/NewsLines/CreditLine').text, 'AAP Video/AAP') self.assertEqual(xml.find('NewsItem/NewsComponent/NewsComponent/NewsLines/KeywordLine').text, 'test video keyword') for rendition, value in doc.get('renditions').items(): xpath = './/Role[@FormalName="{}"]/../ContentItem'.format(rendition) content_item = xml.find(xpath) self.assertEqual(content_item.get('Href'), value.get('href')) 
self.assertEqual(content_item.find('MediaType').get('FormalName'), 'Video') self.assertEqual(content_item.find('Format').get('FormalName'), value.get('mimetype')) self.assertEqual(content_item.find('Characteristics/Property[@FormalName="Width"]').get('Value'), str(doc.get('filemeta', {}).get('width'))) self.assertEqual(content_item.find('Characteristics/Property[@FormalName="Height"]').get('Value'), str(doc.get('filemeta', {}).get('height'))) self.assertEqual(content_item.find('Characteristics/Property[@FormalName="TotalDuration"]').get('Value'), '10') def test_format_package(self): doc = self.package.copy() seq, xml_str = self.formatter.format(doc, {'name': 'Test Subscriber'})[0] xml = etree.fromstring(xml_str) self.assertEqual(xml.find('.//Role[@FormalName="root"]/../NewsComponent/Role').get('FormalName'), 'grpRole:main') self.assertEqual(xml.find('.//Role[@FormalName="root"]/../NewsComponent/NewsItemRef').get('NewsItem'), 'tag:localhost:2015:5838657b-b3ec-4e5a-9b39-36039e16400b:4N') def test_format_picture_package(self): doc = self.picture_package.copy() seq, xml_str = self.formatter.format(doc, {'name': 'Test Subscriber'})[0] xml = etree.fromstring(xml_str) self.assertEqual(xml.find('.//Role[@FormalName="root"]/../NewsComponent/Role').get('FormalName'), 'grpRole:main') self.assertEqual(xml.find('.//Role[@FormalName="root"]/../NewsComponent/NewsItemRef').get('NewsItem'), 'tag:localhost:2015:0c12aa0a-82ef-4c58-a363-c5bd8a368037:4N') def test_format_picture_text_package(self): doc = self.picture_text_package.copy() seq, xml_str = self.formatter.format(doc, {'name': 'Test Subscriber'})[0] xml = etree.fromstring(xml_str) news_component = xml.find('.//Role[@FormalName="root"]/../NewsComponent') self.assertEqual(news_component.find('Role').get('FormalName'), 'grpRole:main') news_item_refs = news_component.findall('NewsItemRef') self.assertEqual(news_item_refs[0].get('NewsItem'), 'tag:localhost:2015:5838657b-b3ec-4e5a-9b39-36039e16400b:4N') self.assertEqual(news_item_refs[1].get('NewsItem'), 'tag:localhost:2015:0c12aa0a-82ef-4c58-a363-c5bd8a368037:4N')
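# --- Editorial sketch (not part of the original test module above). The
# test_duration method pins down a contract for
# NewsML12Formatter._get_total_duration without showing its body; this
# hedged, stand-alone re-implementation only mirrors the behaviour the
# assertions require: unparseable input yields 0, otherwise an 'H:M:S.ffff'
# string is reduced to whole seconds. The function and check names are
# hypothetical.

def _total_duration_sketch(duration):
    """Return total seconds for an 'H:M:S.ffff' string, or 0 on bad input."""
    try:
        hours, minutes, seconds = duration.split(':')
        return int(hours) * 3600 + int(minutes) * 60 + int(float(seconds))
    except (AttributeError, ValueError):
        # None has no .split, and malformed strings fail to unpack/convert.
        return 0

def _check_total_duration_sketch():
    assert _total_duration_sketch(None) == 0
    assert _total_duration_sketch('dsf') == 0
    assert _total_duration_sketch('0:1:0.0000') == 60
    assert _total_duration_sketch('0:1:10.0000') == 70
    assert _total_duration_sketch('1:1:10.0000') == 3670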
rbharath/deepchem
refs/heads/master
deepchem/data/tests/test_data_loader.py
2
""" Tests for FeaturizedSamples class """ from __future__ import print_function from __future__ import division from __future__ import unicode_literals __author__ = "Bharath Ramsundar" __copyright__ = "Copyright 2016, Stanford University" __license__ = "MIT" import os import unittest import tempfile import shutil import deepchem as dc class TestDataLoader(unittest.TestCase): """ Test DataLoader """ def setUp(self): super(TestDataLoader, self).setUp() self.current_dir = os.path.dirname(os.path.abspath(__file__)) def unlabelled_test(self): input_file = os.path.join(self.current_dir, "../../data/tests/no_labels.csv") featurizer = dc.feat.CircularFingerprint(size=1024) loader = dc.data.CSVLoader( tasks=[], smiles_field="smiles", featurizer=featurizer) loader.featurize(input_file) def scaffold_test_train_valid_test_split(self): """Test of singletask RF ECFP regression API.""" splittype = "scaffold" input_transforms = [] output_transforms = ["normalize"] model_params = {} tasks = ["log-solubility"] task_type = "regression" task_types = {task: task_type for task in tasks} input_file = os.path.join(self.current_dir, "../../models/tests/example.csv") featurizer = dc.feat.CircularFingerprint(size=1024) input_file = os.path.join(self.current_dir, input_file) loader = dc.data.CSVLoader( tasks=tasks, smiles_field="smiles", featurizer=featurizer) dataset = loader.featurize(input_file) # Splits featurized samples into train/test splitter = dc.splits.ScaffoldSplitter() train_dataset, valid_dataset, test_dataset = splitter.train_valid_test_split( dataset) assert len(train_dataset) == 8 assert len(valid_dataset) == 1 assert len(test_dataset) == 1 def scaffold_test_train_test_split(self): """Test of singletask RF ECFP regression API.""" splittype = "scaffold" input_transforms = [] output_transforms = ["normalize"] model_params = {} tasks = ["log-solubility"] task_type = "regression" task_types = {task: task_type for task in tasks} input_file = os.path.join(self.current_dir, "../../models/tests/example.csv") featurizer = dc.feat.CircularFingerprint(size=1024) input_file = os.path.join(self.current_dir, input_file) loader = dc.data.CSVLoader( tasks=tasks, smiles_field="smiles", featurizer=featurizer) dataset = loader.featurize(input_file) # Splits featurized samples into train/test splitter = dc.splits.ScaffoldSplitter() train_dataset, test_dataset = splitter.train_test_split(dataset) assert len(train_dataset) == 8 assert len(test_dataset) == 2 def random_test_train_valid_test_split(self): """Test of singletask RF ECFP regression API.""" input_transforms = [] output_transforms = ["normalize"] model_params = {} tasks = ["log-solubility"] task_type = "regression" task_types = {task: task_type for task in tasks} input_file = os.path.join(self.current_dir, "../../models/tests/example.csv") featurizer = dc.feat.CircularFingerprint(size=1024) input_file = os.path.join(self.current_dir, input_file) loader = dc.data.CSVLoader( tasks=tasks, smiles_field="smiles", featurizer=featurizer) dataset = loader.featurize(input_file) # Splits featurized samples into train/test splitter = dc.splits.RandomSplitter() train_dataset, valid_dataset, test_dataset = splitter.train_valid_test_split( dataset) assert len(train_dataset) == 8 assert len(valid_dataset) == 1 assert len(test_dataset) == 1 def random_test_train_test_split(self): """Test of singletask RF ECFP regression API.""" #splittype = "random" model_params = {} tasks = ["log-solubility"] task_type = "regression" task_types = {task: task_type for task in tasks} input_file = 
os.path.join(self.current_dir, "../../models/tests/example.csv") featurizer = dc.feat.CircularFingerprint(size=1024) loader = dc.data.CSVLoader( tasks=tasks, smiles_field="smiles", featurizer=featurizer) dataset = loader.featurize(input_file) # Splits featurized samples into train/test splitter = dc.splits.RandomSplitter() train_dataset, test_dataset = splitter.train_test_split(dataset) assert len(train_dataset) == 8 assert len(test_dataset) == 2 def test_log_solubility_dataset(self): """Test of loading for simple log-solubility dataset.""" current_dir = os.path.dirname(os.path.realpath(__file__)) input_file = "../../models/tests/example.csv" input_file = os.path.join(current_dir, input_file) tasks = ["log-solubility"] smiles_field = "smiles" loader = dc.data.CSVLoader( tasks=tasks, smiles_field="smiles", featurizer=dc.feat.CircularFingerprint(size=1024)) dataset = loader.featurize(input_file) assert len(dataset) == 10 def test_dataset_move(self): """Test that dataset can be moved and reloaded.""" base_dir = tempfile.mkdtemp() data_dir = os.path.join(base_dir, "data") moved_data_dir = os.path.join(base_dir, "moved_data") dataset_file = os.path.join(self.current_dir, "../../models/tests/example.csv") featurizer = dc.feat.CircularFingerprint(size=1024) tasks = ["log-solubility"] loader = dc.data.CSVLoader( tasks=tasks, smiles_field="smiles", featurizer=featurizer) featurized_dataset = loader.featurize(dataset_file, data_dir) n_dataset = len(featurized_dataset) # Now perform move shutil.move(data_dir, moved_data_dir) moved_featurized_dataset = dc.data.DiskDataset(moved_data_dir) assert len(moved_featurized_dataset) == n_dataset
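# --- Editorial sketch (not part of the original test file). A minimal
# end-to-end view of the load -> featurize -> split pipeline the tests above
# exercise. "solubility.csv" is a hypothetical path; only CSVLoader,
# CircularFingerprint and ScaffoldSplitter, all used above, are assumed.

def load_and_split_sketch(csv_path="solubility.csv"):
  featurizer = dc.feat.CircularFingerprint(size=1024)
  loader = dc.data.CSVLoader(
      tasks=["log-solubility"], smiles_field="smiles", featurizer=featurizer)
  dataset = loader.featurize(csv_path)
  # Default 80/10/10 split grouped by molecular scaffold, as in the tests.
  splitter = dc.splits.ScaffoldSplitter()
  return splitter.train_valid_test_split(dataset)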
tensorflow/tensorflow
refs/heads/master
tensorflow/python/tpu/tensor_tracer_flags.py
5
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ======================================================================== """Utilities to handle tensor tracer parameters.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import os import os.path import re from tensorflow.python.ops import linalg_ops from tensorflow.python.ops import math_ops from tensorflow.python.platform import tf_logging as logging TRACE_MODE_PART_TENSOR = 'part-tensor' TRACE_MODE_FULL_TENSOR = 'full-tensor' TRACE_MODE_FULL_TENSOR_SUMMARY = 'full_tensor_summary' TRACE_MODE_NAN_INF = 'nan-inf' TRACE_MODE_NORM = 'norm' TRACE_MODE_MAX_ABS = 'max-abs' TRACE_MODE_SUMMARY = 'summary' # summary mode to collects a finite set of signatures for each traced tensor, # (such as norm, max, min, mean) and dumps it using tb summaries. # Full tensor mode dumps the whole tensor values for the traced tensors without # any processing on them; using tb summaries. _SUBMODE_BRIEF = 'brief' _SUBMODE_DETAILED = 'detailed' _FLAG_SINGLE_QUOTE_PAT = re.compile(r"\s*--([^=]+)='([^']*)'") _FLAG_DOUBLE_QUOTE_PAT = re.compile(r'\s*--([^=]+)="([^"]*)"') _FLAG_NO_QUOTE_PAT = re.compile(r'\s*--([^=]+)=(\S*)') _FLAG_NO_EQUAL_PAT = re.compile(r'\s*--([^=]+)\s*') FLAGS_ENV_VAR = 'TENSOR_TRACER_FLAGS' FLAG_NAME_ENABLE = 'enable' FLAG_NAME_TRACE_MODE = 'trace_mode' FLAG_NAME_TRACE_SCALAR_OPS = 'trace_scalar' FLAG_NAME_SUBMODE = 'submode' FLAG_NAME_EXCLUDED_OPNAMES = 'excluded_opnames' FLAG_NAME_EXCLUDED_OPTYPES = 'excluded_optypes' FLAG_NAME_INCLUDED_OPNAMES = 'included_opnames' FLAG_NAME_INCLUDED_OPTYPES = 'included_optypes' FLAG_NAME_TRACE_LEVEL = 'trace_level' FLAG_NAME_TRACE_DIR = 'trace_dir' FLAG_NAME_REPORT_FILE = 'report_file' FLAG_NAME_USE_TEST_UNDECLARED_OUTPUTS_DIR = 'use_test_undeclared_outputs_dir' FLAG_NAME_OP_RANGE = 'op_range' # Folder to dump the pre (before tensor tracer updates) and post graphs (after # tensor tracer updates). FLAG_NAME_DUMP_BEFORE_AFTER_GRAPHS = 'dump_graphs' FLAG_NAME_SUMMARY_SIGNATURES = 'signatures' FLAG_NAME_SUMMARY_PER_CORE = 'collect_summary_per_core' FLAG_NAME_TEMP_CACHE_VAR = 'use_temp_cache' FLAG_NAME_INSPECT_TRACE = 'inspect_trace' FLAG_NAME_FINGERPRINT_DIR = 'use_fingerprint_subdirectory' FLAG_FLUSH_SUMMARY = 'flush_summaries' # Flag used in v2 only. 
FLAG_SUMMARY_MODE_TYPE = 'summary_mode' UI_MODE = 'ui' TEXT_MODE = 'text' _OP_RANGE_PAT = re.compile(r'(\d+):(\d+)') _TEST_UNDECLARED_OUTPUTS_DIR_ENV_VAR = 'TEST_UNDECLARED_OUTPUTS_DIR' _TT_DEFAULT_TRACE_LEVEL = 3 _TT_PREFIX = 'tensor_tracer' _TT_NORM = 'norm' _TT_MAX = 'max' _TT_MAX_ABS = 'max-abs' _TT_MIN = 'min' _TT_MEAN = 'mean' _TT_VAR = 'var' _TT_SIZE = 'size' TT_SUMMARY_NORM = '%s_%s' % (_TT_PREFIX, _TT_NORM) TT_SUMMARY_MAX = '%s_%s' % (_TT_PREFIX, _TT_MAX) TT_SUMMARY_MAX_ABS = '%s_%s' % (_TT_PREFIX, _TT_MAX_ABS) TT_SUMMARY_MIN = '%s_%s' % (_TT_PREFIX, _TT_MIN) TT_SUMMARY_MEAN = '%s_%s' % (_TT_PREFIX, _TT_MEAN) TT_SUMMARY_VAR = '%s_%s' % (_TT_PREFIX, _TT_VAR) TT_SUMMARY_SIZE = '%s_%s' % (_TT_PREFIX, _TT_SIZE) TT_SUMMARY_SIGNATURES = (TT_SUMMARY_NORM, TT_SUMMARY_MAX, TT_SUMMARY_MIN, TT_SUMMARY_MEAN, TT_SUMMARY_VAR, TT_SUMMARY_SIZE, TT_SUMMARY_MAX_ABS) class TTParameters(object): """A class that handles the parameters of Tensor Tracer.""" def __init__(self, env=None): if env: self._env = env else: self._env = os.environ self._validate_flag_names() self.trace_mode = self._get_trace_mode() self.submode = self._get_submode() self.trace_dir = self._get_trace_dir() self.report_file_path = self._get_report_filepath() self.op_range = self._get_op_range() self.excluded_opname_re_list = self._flag_value_to_re_list( FLAG_NAME_EXCLUDED_OPNAMES) self.excluded_optype_re_list = self._flag_value_to_re_list( FLAG_NAME_EXCLUDED_OPTYPES) self.included_opname_re_list = self._flag_value_to_re_list( FLAG_NAME_INCLUDED_OPNAMES) self.included_optype_re_list = self._flag_value_to_re_list( FLAG_NAME_INCLUDED_OPTYPES) self.trace_scalar_ops = self.is_flag_on(FLAG_NAME_TRACE_SCALAR_OPS) self.use_compact_trace = self.trace_mode in (TRACE_MODE_NAN_INF, TRACE_MODE_NORM, TRACE_MODE_MAX_ABS, TRACE_MODE_SUMMARY) self.use_temp_cache_var = self.is_flag_on(FLAG_NAME_TEMP_CACHE_VAR) self.inspect_trace = self.is_flag_on(FLAG_NAME_INSPECT_TRACE) self.use_fingerprint_subdir = self.is_flag_on(FLAG_NAME_FINGERPRINT_DIR) _, self.graph_dump_path = self.get_flag_value( FLAG_NAME_DUMP_BEFORE_AFTER_GRAPHS) self.trace_level = self._get_flag_int_value(FLAG_NAME_TRACE_LEVEL, _TT_DEFAULT_TRACE_LEVEL) self.summary_signatures = self._get_summary_signatures() self.collect_summary_per_core = self.is_flag_on(FLAG_NAME_SUMMARY_PER_CORE) self.flush_summaries_with_outside_compile = self.is_flag_on( FLAG_FLUSH_SUMMARY) self.summary_mode = self._get_summary_mode() self._check_flag_errors() def _check_flag_errors(self): if self.trace_mode in (TRACE_MODE_SUMMARY, TRACE_MODE_FULL_TENSOR_SUMMARY): if not self.trace_dir: raise ValueError('trace_dir must be explicitly provided in ' 'TENSOR_TRACER_FLAGS when summary mode is used.') def _get_report_filepath(self): """Sets the path of the output report file.""" found, report_file_path = self.get_flag_value(FLAG_NAME_REPORT_FILE) if found and report_file_path and self.use_test_undeclared_outputs_dir(): if os.path.isabs(report_file_path): raise ValueError('If use_test_undeclared_outputs_dir is set,' 'report_file_path cannot be an absolute path (%s)' %report_file_path) outputs_dir = self._env.get(_TEST_UNDECLARED_OUTPUTS_DIR_ENV_VAR) report_file_path = os.path.join(outputs_dir, report_file_path) return report_file_path def _get_op_range(self): """Sets the index range of the Ops that we will consider tracing.""" found, op_range = self.get_flag_value(FLAG_NAME_OP_RANGE) if not found or not op_range: op_range = (-1, -1) # this means including all ops. 
return op_range match = _OP_RANGE_PAT.match(op_range) if not match: op_range = (-1, -1) # this means including all ops. return op_range op_range = (int(match.group(1)), int(match.group(2))) return op_range def _get_trace_dir(self): found, trace_dir = self.get_flag_value(FLAG_NAME_TRACE_DIR) if found and trace_dir and self.use_test_undeclared_outputs_dir(): raise ValueError( 'Cannot not use --%s and --%s at the same time' % (FLAG_NAME_TRACE_DIR, FLAG_NAME_USE_TEST_UNDECLARED_OUTPUTS_DIR)) if self.use_test_undeclared_outputs_dir(): trace_dir = self._env.get(_TEST_UNDECLARED_OUTPUTS_DIR_ENV_VAR) return trace_dir def _get_trace_mode(self): """Checks if the given trace mode is valid.""" found, trace_mode = self.get_flag_value(FLAG_NAME_TRACE_MODE) if not found or not trace_mode: trace_mode = TRACE_MODE_NORM valid_trace_modes = [ TRACE_MODE_NAN_INF, TRACE_MODE_PART_TENSOR, TRACE_MODE_FULL_TENSOR, TRACE_MODE_NORM, TRACE_MODE_MAX_ABS, TRACE_MODE_SUMMARY, TRACE_MODE_FULL_TENSOR_SUMMARY ] if trace_mode not in valid_trace_modes: raise ValueError('Invalid trace mode "%s" given to the Tensor_Tracer.' 'Valid trace modes are: %s'%(trace_mode, valid_trace_modes)) return trace_mode def is_brief_mode(self): return self.submode == _SUBMODE_BRIEF def _get_submode(self): """Checks if the given submode is valid.""" found, submode = self.get_flag_value(FLAG_NAME_SUBMODE) if not found or not submode: submode = _SUBMODE_DETAILED if not submode: return valid_submodes = [_SUBMODE_DETAILED, _SUBMODE_BRIEF] if submode not in valid_submodes: raise ValueError('Invalid submode "%s" given to the Tensor_Tracer.' 'Valid submodes are: %s'%(submode, valid_submodes)) return submode @staticmethod def match_next_flag(flags, pos): """Returns the match for the next TensorTracer flag. Args: flags: a string that contains the flags. pos: where in flags to start the search. Returns: A pair where the first element is the regular-expression match found and the second element indicates if the match has a value. """ match = _FLAG_DOUBLE_QUOTE_PAT.match(flags, pos) if match: return match, True match = _FLAG_SINGLE_QUOTE_PAT.match(flags, pos) if match: return match, True match = _FLAG_NO_QUOTE_PAT.match(flags, pos) if match: return match, True match = _FLAG_NO_EQUAL_PAT.match(flags, pos) if match: # The flag is found but is not given a value. return match, False # The flag is not found. return None, False def _validate_flag_names(self): """Validates if the TensorTrace flags passed are valid.""" valid_flag_names = [ FLAG_NAME_ENABLE, FLAG_NAME_TRACE_MODE, FLAG_NAME_TRACE_SCALAR_OPS, FLAG_NAME_SUBMODE, FLAG_NAME_EXCLUDED_OPNAMES, FLAG_NAME_EXCLUDED_OPTYPES, FLAG_NAME_INCLUDED_OPNAMES, FLAG_NAME_INCLUDED_OPTYPES, FLAG_NAME_TRACE_DIR, FLAG_NAME_REPORT_FILE, FLAG_NAME_USE_TEST_UNDECLARED_OUTPUTS_DIR, FLAG_NAME_OP_RANGE, FLAG_NAME_DUMP_BEFORE_AFTER_GRAPHS, FLAG_NAME_TRACE_LEVEL, FLAG_NAME_SUMMARY_SIGNATURES, FLAG_NAME_SUMMARY_PER_CORE, FLAG_NAME_TEMP_CACHE_VAR, FLAG_NAME_FINGERPRINT_DIR, FLAG_NAME_INSPECT_TRACE, FLAG_FLUSH_SUMMARY, FLAG_SUMMARY_MODE_TYPE ] tensor_tracer_flags = self._env.get(FLAGS_ENV_VAR) if not tensor_tracer_flags: return pos = 0 while True: match, _ = TTParameters.match_next_flag(tensor_tracer_flags, pos) if not match: break flag_name = match.group(1) if flag_name not in valid_flag_names: raise ValueError( 'The flag name "%s" passed via the environment variable "%s" ' 'is invalid. 
Valid flag names are:' '\n%s' % (flag_name, FLAGS_ENV_VAR, valid_flag_names)) pos = match.end() def _supported_signatures(self): """Returns a tuple of supported signatures.""" return TT_SUMMARY_SIGNATURES def _get_summary_signatures(self): """Verifies and returns the summary signatures. Returns: A dictionary of the signature identifiers {signature: index} that will be computed when trace_mode is summary. """ signatures = self._flag_value_as_list(FLAG_NAME_SUMMARY_SIGNATURES) supported_signatures = self._supported_signatures() tt_signatures = [] for signature in signatures: signature_with_prefix = '%s_%s' % (_TT_PREFIX, signature) if signature in supported_signatures: tt_signatures.append(signature) elif signature_with_prefix in supported_signatures: tt_signatures.append(signature_with_prefix) else: logging.warning('Unknown signature:%s. Supported signatures: %s' % (signature, supported_signatures)) if not tt_signatures: # Default case collects norm and max only. return {TT_SUMMARY_MAX_ABS: 0, TT_SUMMARY_NORM: 1} else: return {signature: idx for idx, signature in enumerate(tt_signatures)} def get_signature_to_agg_fn_map(self): """Returns a map that contains the aggregate function for each signature.""" return {TRACE_MODE_NORM: linalg_ops.norm, TRACE_MODE_MAX_ABS: math_ops.reduce_max, TRACE_MODE_NAN_INF: math_ops.reduce_max, TT_SUMMARY_NORM: linalg_ops.norm, TT_SUMMARY_MAX: math_ops.reduce_max, TT_SUMMARY_MAX_ABS: lambda t, axis=0: math_ops.reduce_max(math_ops.abs(t), # pylint: disable=g-long-lambda axis=axis), TT_SUMMARY_MIN: math_ops.reduce_min, TT_SUMMARY_MEAN: math_ops.reduce_mean, TT_SUMMARY_VAR: math_ops.reduce_max, # Simply reduce max variance. TT_SUMMARY_SIZE: math_ops.reduce_sum} def _flag_value_as_list(self, wanted_flag_name): """Returns the string list of a TensorTracer flag. Args: wanted_flag_name: the name of the flag we are looking for. Returns: The list value of the flag. """ string_value_list = [] found, flag_value = self.get_flag_value(wanted_flag_name) if found: string_value_list = flag_value.split(',') return string_value_list def _flag_value_as_int_list(self, wanted_flag_name): """Returns the integer list of a TensorTracer flag. Args: wanted_flag_name: the name of the flag we are looking for. Returns: the value of the flag. Raises: RuntimeError: If supposedly deadcode is reached. """ int_list = [] found, flag_value = self.get_flag_value(wanted_flag_name) if found and flag_value: try: integer_values = flag_value.split(',') int_list = [int(int_val) for int_val in integer_values] except ValueError: logging.warning('Cannot convert %s to int for flag %s', int_list, wanted_flag_name) return int_list def _get_flag_int_value(self, wanted_flag_name, default_value): """Returns the int value of a TensorTracer flag. Args: wanted_flag_name: the name of the flag we are looking for. default_value: the default value for the flag, if not provided. Returns: the value of the flag. Raises: RuntimeError: If supposedly deadcode is reached. """ flag_int_value = default_value found, flag_value = self.get_flag_value(wanted_flag_name) if found: try: flag_int_value = int(flag_value) except ValueError: logging.warning('Cannot convert %s to int for flag %s' % ( flag_int_value, wanted_flag_name)) return flag_int_value def get_flag_value(self, wanted_flag_name): """Returns the value of a TensorTracer flags. Args: wanted_flag_name: the name of the flag we are looking for. Returns: A pair where the first element indicates if the flag is found and the second element is the value of the flag. 
Raises: RuntimeError: If supposedly deadcode is reached. """ tensor_tracer_flags = self._env.get(FLAGS_ENV_VAR) if not tensor_tracer_flags: return False, None pos = 0 while True: match, has_value = TTParameters.match_next_flag( tensor_tracer_flags, pos) if not match: return False, None flag_name = match.group(1) if has_value: flag_value = match.group(2) else: flag_value = None if flag_name == wanted_flag_name: return True, flag_value pos = match.end() raise RuntimeError('Should not reach here.') def _flag_value_to_re_list(self, flag_name): """Converts list of strings to compiled RE.""" re_list = [] found, flag_value = self.get_flag_value(flag_name) if not found or not flag_value: return re_list list_of_values = flag_value.split(',') for v in list_of_values: r = re.compile(v) re_list.append(r) return re_list def is_flag_on(self, flag_name): """Returns True if the given flag is on.""" found, flag_value = self.get_flag_value(flag_name) if not found: return False if flag_value is None: return True # Depends on the flag value. flag_value = flag_value.lower() enabled = flag_value in ['1', 't', 'true', 'y', 'yes'] return enabled def is_enabled(self): """Returns True if TensorTracer is enabled.""" if self.is_flag_on(FLAG_NAME_ENABLE): logging.debug('Tensor Tracer is enabled with flags %s.', self._env.get(FLAGS_ENV_VAR)) return True else: return False def use_test_undeclared_outputs_dir(self): """Decides the output directory of the report and trace files. Args: None. Returns: True if the output files should be written to the test-undeclared-outputs-directory defined via an env variable. """ return self.is_flag_on(FLAG_NAME_USE_TEST_UNDECLARED_OUTPUTS_DIR) def _get_summary_mode(self): """Returns the summary mode after checking if it is valid.""" found, summary_mode = self.get_flag_value(FLAG_SUMMARY_MODE_TYPE) if not found: summary_mode = UI_MODE valid_summary_modes = [UI_MODE, TEXT_MODE] if summary_mode not in valid_summary_modes: raise ValueError('Invalid summary mode "%s" given to the Tensor_Tracer.' 'Valid submodes are: %s'%(summary_mode, valid_summary_modes)) return summary_mode
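# --- Editorial sketch (not part of the original module). A hedged usage
# example showing how a TENSOR_TRACER_FLAGS string is consumed. Passing an
# explicit `env` dict to TTParameters, as its constructor permits, avoids
# touching os.environ; the flag names and values below all appear in the
# module above.

def _tt_parameters_sketch():
  env = {FLAGS_ENV_VAR: '--enable=1 --trace_mode=norm --trace_dir="/tmp/tt"'}
  params = TTParameters(env=env)
  # --enable=1 is truthy, so tracing is on; the quoted trace_dir value is
  # captured by _FLAG_DOUBLE_QUOTE_PAT without its quotes.
  assert params.is_enabled()
  assert params.trace_mode == TRACE_MODE_NORM
  assert params.trace_dir == '/tmp/tt'
  return params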
Ademan/NumPy-GSoC
refs/heads/master
numpy/distutils/fcompiler/vast.py
94
import os
from numpy.distutils.fcompiler.gnu import GnuFCompiler

compilers = ['VastFCompiler']

class VastFCompiler(GnuFCompiler):
    compiler_type = 'vast'
    compiler_aliases = ()
    description = 'Pacific-Sierra Research Fortran 90 Compiler'
    version_pattern = (r'\s*Pacific-Sierra Research vf90 '
                       r'(Personal|Professional)\s+(?P<version>[^\s]*)')

    # VAST f90 does not support -o with -c, so object files are created in
    # the current directory and then moved to the build directory.
    object_switch = ' && function _mvfile { mv -v `basename $1` $1 ; } && _mvfile '

    executables = {
        'version_cmd': ["vf90", "-v"],
        'compiler_f77': ["g77"],
        'compiler_fix': ["f90", "-Wv,-ya"],
        'compiler_f90': ["f90"],
        'linker_so': ["<F90>"],
        'archiver': ["ar", "-cr"],
        'ranlib': ["ranlib"]
        }
    module_dir_switch = None  # XXX Fix me
    module_include_switch = None  # XXX Fix me

    def find_executables(self):
        pass

    def get_version_cmd(self):
        # The version is reported by the 'v'-prefixed driver (f90 -> vf90).
        f90 = self.compiler_f90[0]
        d, b = os.path.split(f90)
        vf90 = os.path.join(d, 'v' + b)
        return vf90

    def get_flags_arch(self):
        # Architecture flags are borrowed from g77, so temporarily swap in
        # the GNU compiler's version before querying its flags.
        vast_version = self.get_version()
        gnu = GnuFCompiler()
        gnu.customize(None)
        self.version = gnu.get_version()
        opt = GnuFCompiler.get_flags_arch(self)
        self.version = vast_version
        return opt

if __name__ == '__main__':
    from distutils import log
    log.set_verbosity(2)
    from numpy.distutils.fcompiler import new_fcompiler
    compiler = new_fcompiler(compiler='vast')
    compiler.customize()
    print(compiler.get_version())
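# --- Editorial sketch (not part of the original file). An illustration of
# the two VAST-specific quirks above: get_version_cmd prefixes the f90
# basename with 'v', and object_switch splices a shell helper into the
# compile line because vf90 cannot combine -c with -o. The paths below are
# hypothetical.

def _vast_quirks_sketch():
    f90 = '/opt/psr/bin/f90'
    d, b = os.path.split(f90)
    assert os.path.join(d, 'v' + b) == '/opt/psr/bin/vf90'

    obj = 'build/temp/foo.o'
    cmd = 'f90 -c foo.f90' + VastFCompiler.object_switch + obj
    # cmd is now:
    #   f90 -c foo.f90 && function _mvfile { mv -v `basename $1` $1 ; } \
    #   && _mvfile build/temp/foo.o
    # i.e. compile into the cwd, then move foo.o into the build tree.
    return cmd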
franekp/millandict
refs/heads/master
ankidict/thirdparty/urllib2.py
30
"""An extensible library for opening URLs using a variety of protocols The simplest way to use this module is to call the urlopen function, which accepts a string containing a URL or a Request object (described below). It opens the URL and returns the results as file-like object; the returned object has some extra methods described below. The OpenerDirector manages a collection of Handler objects that do all the actual work. Each Handler implements a particular protocol or option. The OpenerDirector is a composite object that invokes the Handlers needed to open the requested URL. For example, the HTTPHandler performs HTTP GET and POST requests and deals with non-error returns. The HTTPRedirectHandler automatically deals with HTTP 301, 302, 303 and 307 redirect errors, and the HTTPDigestAuthHandler deals with digest authentication. urlopen(url, data=None) -- Basic usage is the same as original urllib. pass the url and optionally data to post to an HTTP URL, and get a file-like object back. One difference is that you can also pass a Request instance instead of URL. Raises a URLError (subclass of IOError); for HTTP errors, raises an HTTPError, which can also be treated as a valid response. build_opener -- Function that creates a new OpenerDirector instance. Will install the default handlers. Accepts one or more Handlers as arguments, either instances or Handler classes that it will instantiate. If one of the argument is a subclass of the default handler, the argument will be installed instead of the default. install_opener -- Installs a new opener as the default opener. objects of interest: OpenerDirector -- Sets up the User Agent as the Python-urllib client and manages the Handler classes, while dealing with requests and responses. Request -- An object that encapsulates the state of a request. The state can be as simple as the URL. It can also include extra HTTP headers, e.g. a User-Agent. BaseHandler -- exceptions: URLError -- A subclass of IOError, individual protocols have their own specific subclass. HTTPError -- Also a valid HTTP response, so you can treat an HTTP error as an exceptional event or valid response. internals: BaseHandler and parent _call_chain conventions Example usage: import urllib2 # set up authentication info authinfo = urllib2.HTTPBasicAuthHandler() authinfo.add_password(realm='PDQ Application', uri='https://mahler:8092/site-updates.py', user='klem', passwd='geheim$parole') proxy_support = urllib2.ProxyHandler({"http" : "http://ahad-haam:3128"}) # build a new opener that adds authentication and caching FTP handlers opener = urllib2.build_opener(proxy_support, authinfo, urllib2.CacheFTPHandler) # install it urllib2.install_opener(opener) f = urllib2.urlopen('http://www.python.org/') """ # XXX issues: # If an authentication error handler that tries to perform # authentication for some reason but fails, how should the error be # signalled? The client needs to know the HTTP error code. But if # the handler knows that the problem was, e.g., that it didn't know # that hash algo that requested in the challenge, it would be good to # pass that information along to the client, too. # ftp errors aren't handled cleanly # check digest against correct (i.e. 
non-apache) implementation # Possible extensions: # complex proxies XXX not sure what exactly was meant by this # abstract factory for opener import base64 import hashlib import httplib import mimetools import os import posixpath import random import re import socket import sys import time import urlparse import bisect import warnings try: from cStringIO import StringIO except ImportError: from StringIO import StringIO # check for SSL try: import ssl except ImportError: _have_ssl = False else: _have_ssl = True from urllib import (unwrap, unquote, splittype, splithost, quote, addinfourl, splitport, splittag, toBytes, splitattr, ftpwrapper, splituser, splitpasswd, splitvalue) # support for FileHandler, proxies via environment variables from urllib import localhost, url2pathname, getproxies, proxy_bypass # used in User-Agent header sent __version__ = sys.version[:3] _opener = None def urlopen(url, data=None, timeout=socket._GLOBAL_DEFAULT_TIMEOUT, cafile=None, capath=None, cadefault=False, context=None): global _opener if cafile or capath or cadefault: if context is not None: raise ValueError( "You can't pass both context and any of cafile, capath, and " "cadefault" ) if not _have_ssl: raise ValueError('SSL support not available') context = ssl.create_default_context(purpose=ssl.Purpose.SERVER_AUTH, cafile=cafile, capath=capath) https_handler = HTTPSHandler(context=context) opener = build_opener(https_handler) elif context: https_handler = HTTPSHandler(context=context) opener = build_opener(https_handler) elif _opener is None: _opener = opener = build_opener() else: opener = _opener return opener.open(url, data, timeout) def install_opener(opener): global _opener _opener = opener # do these error classes make sense? # make sure all of the IOError stuff is overridden. we just want to be # subtypes. class URLError(IOError): # URLError is a sub-type of IOError, but it doesn't share any of # the implementation. need to override __init__ and __str__. # It sets self.args for compatibility with other EnvironmentError # subclasses, but args doesn't have the typical format with errno in # slot 0 and strerror in slot 1. This may be better than nothing. def __init__(self, reason): self.args = reason, self.reason = reason def __str__(self): return '<urlopen error %s>' % self.reason class HTTPError(URLError, addinfourl): """Raised when HTTP error occurs, but also acts like non-error return""" __super_init = addinfourl.__init__ def __init__(self, url, code, msg, hdrs, fp): self.code = code self.msg = msg self.hdrs = hdrs self.fp = fp self.filename = url # The addinfourl classes depend on fp being a valid file # object. In some cases, the HTTPError may not have a valid # file object. If this happens, the simplest workaround is to # not initialize the base classes. if fp is not None: self.__super_init(fp, hdrs, url, code) def __str__(self): return 'HTTP Error %s: %s' % (self.code, self.msg) # since URLError specifies a .reason attribute, HTTPError should also # provide this attribute. See issue13211 fo discussion. @property def reason(self): return self.msg def info(self): return self.hdrs # copied from cookielib.py _cut_port_re = re.compile(r":\d+$") def request_host(request): """Return request-host, as defined by RFC 2965. Variation from RFC: returned value is lowercased, for convenient comparison. 
""" url = request.get_full_url() host = urlparse.urlparse(url)[1] if host == "": host = request.get_header("Host", "") # remove port, if present host = _cut_port_re.sub("", host, 1) return host.lower() class Request: def __init__(self, url, data=None, headers={}, origin_req_host=None, unverifiable=False): # unwrap('<URL:type://host/path>') --> 'type://host/path' self.__original = unwrap(url) self.__original, self.__fragment = splittag(self.__original) self.type = None # self.__r_type is what's left after doing the splittype self.host = None self.port = None self._tunnel_host = None self.data = data self.headers = {} for key, value in headers.items(): self.add_header(key, value) self.unredirected_hdrs = {} if origin_req_host is None: origin_req_host = request_host(self) self.origin_req_host = origin_req_host self.unverifiable = unverifiable def __getattr__(self, attr): # XXX this is a fallback mechanism to guard against these # methods getting called in a non-standard order. this may be # too complicated and/or unnecessary. # XXX should the __r_XXX attributes be public? if attr in ('_Request__r_type', '_Request__r_host'): getattr(self, 'get_' + attr[12:])() return self.__dict__[attr] raise AttributeError, attr def get_method(self): if self.has_data(): return "POST" else: return "GET" # XXX these helper methods are lame def add_data(self, data): self.data = data def has_data(self): return self.data is not None def get_data(self): return self.data def get_full_url(self): if self.__fragment: return '%s#%s' % (self.__original, self.__fragment) else: return self.__original def get_type(self): if self.type is None: self.type, self.__r_type = splittype(self.__original) if self.type is None: raise ValueError, "unknown url type: %s" % self.__original return self.type def get_host(self): if self.host is None: self.host, self.__r_host = splithost(self.__r_type) if self.host: self.host = unquote(self.host) return self.host def get_selector(self): return self.__r_host def set_proxy(self, host, type): if self.type == 'https' and not self._tunnel_host: self._tunnel_host = self.host else: self.type = type self.__r_host = self.__original self.host = host def has_proxy(self): return self.__r_host == self.__original def get_origin_req_host(self): return self.origin_req_host def is_unverifiable(self): return self.unverifiable def add_header(self, key, val): # useful for something like authentication self.headers[key.capitalize()] = val def add_unredirected_header(self, key, val): # will not be added to a redirected request self.unredirected_hdrs[key.capitalize()] = val def has_header(self, header_name): return (header_name in self.headers or header_name in self.unredirected_hdrs) def get_header(self, header_name, default=None): return self.headers.get( header_name, self.unredirected_hdrs.get(header_name, default)) def header_items(self): hdrs = self.unredirected_hdrs.copy() hdrs.update(self.headers) return hdrs.items() class OpenerDirector: def __init__(self): client_version = "Python-urllib/%s" % __version__ self.addheaders = [('User-agent', client_version)] # self.handlers is retained only for backward compatibility self.handlers = [] # manage the individual handlers self.handle_open = {} self.handle_error = {} self.process_response = {} self.process_request = {} def add_handler(self, handler): if not hasattr(handler, "add_parent"): raise TypeError("expected BaseHandler instance, got %r" % type(handler)) added = False for meth in dir(handler): if meth in ["redirect_request", "do_open", "proxy_open"]: # oops, 
coincidental match continue i = meth.find("_") protocol = meth[:i] condition = meth[i+1:] if condition.startswith("error"): j = condition.find("_") + i + 1 kind = meth[j+1:] try: kind = int(kind) except ValueError: pass lookup = self.handle_error.get(protocol, {}) self.handle_error[protocol] = lookup elif condition == "open": kind = protocol lookup = self.handle_open elif condition == "response": kind = protocol lookup = self.process_response elif condition == "request": kind = protocol lookup = self.process_request else: continue handlers = lookup.setdefault(kind, []) if handlers: bisect.insort(handlers, handler) else: handlers.append(handler) added = True if added: bisect.insort(self.handlers, handler) handler.add_parent(self) def close(self): # Only exists for backwards compatibility. pass def _call_chain(self, chain, kind, meth_name, *args): # Handlers raise an exception if no one else should try to handle # the request, or return None if they can't but another handler # could. Otherwise, they return the response. handlers = chain.get(kind, ()) for handler in handlers: func = getattr(handler, meth_name) result = func(*args) if result is not None: return result def open(self, fullurl, data=None, timeout=socket._GLOBAL_DEFAULT_TIMEOUT): # accept a URL or a Request object if isinstance(fullurl, basestring): req = Request(fullurl, data) else: req = fullurl if data is not None: req.add_data(data) req.timeout = timeout protocol = req.get_type() # pre-process request meth_name = protocol+"_request" for processor in self.process_request.get(protocol, []): meth = getattr(processor, meth_name) req = meth(req) response = self._open(req, data) # post-process response meth_name = protocol+"_response" for processor in self.process_response.get(protocol, []): meth = getattr(processor, meth_name) response = meth(req, response) return response def _open(self, req, data=None): result = self._call_chain(self.handle_open, 'default', 'default_open', req) if result: return result protocol = req.get_type() result = self._call_chain(self.handle_open, protocol, protocol + '_open', req) if result: return result return self._call_chain(self.handle_open, 'unknown', 'unknown_open', req) def error(self, proto, *args): if proto in ('http', 'https'): # XXX http[s] protocols are special-cased dict = self.handle_error['http'] # https is not different than http proto = args[2] # YUCK! meth_name = 'http_error_%s' % proto http_err = 1 orig_args = args else: dict = self.handle_error meth_name = proto + '_error' http_err = 0 args = (dict, proto, meth_name) + args result = self._call_chain(*args) if result: return result if http_err: args = (dict, 'default', 'http_error_default') + orig_args return self._call_chain(*args) # XXX probably also want an abstract factory that knows when it makes # sense to skip a superclass in favor of a subclass and when it might # make sense to include both def build_opener(*handlers): """Create an opener object from a list of handlers. The opener will use several default handlers, including support for HTTP, FTP and when applicable, HTTPS. If any of the handlers passed as arguments are subclasses of the default handlers, the default handlers will not be used. 
""" import types def isclass(obj): return isinstance(obj, (types.ClassType, type)) opener = OpenerDirector() default_classes = [ProxyHandler, UnknownHandler, HTTPHandler, HTTPDefaultErrorHandler, HTTPRedirectHandler, FTPHandler, FileHandler, HTTPErrorProcessor] if hasattr(httplib, 'HTTPS'): default_classes.append(HTTPSHandler) skip = set() for klass in default_classes: for check in handlers: if isclass(check): if issubclass(check, klass): skip.add(klass) elif isinstance(check, klass): skip.add(klass) for klass in skip: default_classes.remove(klass) for klass in default_classes: opener.add_handler(klass()) for h in handlers: if isclass(h): h = h() opener.add_handler(h) return opener class BaseHandler: handler_order = 500 def add_parent(self, parent): self.parent = parent def close(self): # Only exists for backwards compatibility pass def __lt__(self, other): if not hasattr(other, "handler_order"): # Try to preserve the old behavior of having custom classes # inserted after default ones (works only for custom user # classes which are not aware of handler_order). return True return self.handler_order < other.handler_order class HTTPErrorProcessor(BaseHandler): """Process HTTP error responses.""" handler_order = 1000 # after all other processing def http_response(self, request, response): code, msg, hdrs = response.code, response.msg, response.info() # According to RFC 2616, "2xx" code indicates that the client's # request was successfully received, understood, and accepted. if not (200 <= code < 300): response = self.parent.error( 'http', request, response, code, msg, hdrs) return response https_response = http_response class HTTPDefaultErrorHandler(BaseHandler): def http_error_default(self, req, fp, code, msg, hdrs): raise HTTPError(req.get_full_url(), code, msg, hdrs, fp) class HTTPRedirectHandler(BaseHandler): # maximum number of redirections to any single URL # this is needed because of the state that cookies introduce max_repeats = 4 # maximum total number of redirections (regardless of URL) before # assuming we're in a loop max_redirections = 10 def redirect_request(self, req, fp, code, msg, headers, newurl): """Return a Request or None in response to a redirect. This is called by the http_error_30x methods when a redirection response is received. If a redirection should take place, return a new Request to allow http_error_30x to perform the redirect. Otherwise, raise HTTPError if no-one else should try to handle this url. Return None if you can't but another Handler might. """ m = req.get_method() if (code in (301, 302, 303, 307) and m in ("GET", "HEAD") or code in (301, 302, 303) and m == "POST"): # Strictly (according to RFC 2616), 301 or 302 in response # to a POST MUST NOT cause a redirection without confirmation # from the user (of urllib2, in this case). In practice, # essentially all clients do redirect in this case, so we # do the same. # be conciliant with URIs containing a space newurl = newurl.replace(' ', '%20') newheaders = dict((k,v) for k,v in req.headers.items() if k.lower() not in ("content-length", "content-type") ) return Request(newurl, headers=newheaders, origin_req_host=req.get_origin_req_host(), unverifiable=True) else: raise HTTPError(req.get_full_url(), code, msg, headers, fp) # Implementation note: To avoid the server sending us into an # infinite loop, the request object needs to track what URLs we # have already seen. Do this by adding a handler-specific # attribute to the Request object. 
def http_error_302(self, req, fp, code, msg, headers): # Some servers (incorrectly) return multiple Location headers # (so probably same goes for URI). Use first header. if 'location' in headers: newurl = headers.getheaders('location')[0] elif 'uri' in headers: newurl = headers.getheaders('uri')[0] else: return # fix a possible malformed URL urlparts = urlparse.urlparse(newurl) if not urlparts.path and urlparts.netloc: urlparts = list(urlparts) urlparts[2] = "/" newurl = urlparse.urlunparse(urlparts) newurl = urlparse.urljoin(req.get_full_url(), newurl) # For security reasons we do not allow redirects to protocols # other than HTTP, HTTPS or FTP. newurl_lower = newurl.lower() if not (newurl_lower.startswith('http://') or newurl_lower.startswith('https://') or newurl_lower.startswith('ftp://')): raise HTTPError(newurl, code, msg + " - Redirection to url '%s' is not allowed" % newurl, headers, fp) # XXX Probably want to forget about the state of the current # request, although that might interact poorly with other # handlers that also use handler-specific request attributes new = self.redirect_request(req, fp, code, msg, headers, newurl) if new is None: return # loop detection # .redirect_dict has a key url if url was previously visited. if hasattr(req, 'redirect_dict'): visited = new.redirect_dict = req.redirect_dict if (visited.get(newurl, 0) >= self.max_repeats or len(visited) >= self.max_redirections): raise HTTPError(req.get_full_url(), code, self.inf_msg + msg, headers, fp) else: visited = new.redirect_dict = req.redirect_dict = {} visited[newurl] = visited.get(newurl, 0) + 1 # Don't close the fp until we are sure that we won't use it # with HTTPError. fp.read() fp.close() return self.parent.open(new, timeout=req.timeout) http_error_301 = http_error_303 = http_error_307 = http_error_302 inf_msg = "The HTTP server returned a redirect error that would " \ "lead to an infinite loop.\n" \ "The last 30x error message was:\n" def _parse_proxy(proxy): """Return (scheme, user, password, host/port) given a URL or an authority. If a URL is supplied, it must have an authority (host:port) component. According to RFC 3986, having an authority component means the URL must have two slashes after the scheme: >>> _parse_proxy('file:/ftp.example.com/') Traceback (most recent call last): ValueError: proxy URL with no authority: 'file:/ftp.example.com/' The first three items of the returned tuple may be None. 
Examples of authority parsing: >>> _parse_proxy('proxy.example.com') (None, None, None, 'proxy.example.com') >>> _parse_proxy('proxy.example.com:3128') (None, None, None, 'proxy.example.com:3128') The authority component may optionally include userinfo (assumed to be username:password): >>> _parse_proxy('joe:[email protected]') (None, 'joe', 'password', 'proxy.example.com') >>> _parse_proxy('joe:[email protected]:3128') (None, 'joe', 'password', 'proxy.example.com:3128') Same examples, but with URLs instead: >>> _parse_proxy('http://proxy.example.com/') ('http', None, None, 'proxy.example.com') >>> _parse_proxy('http://proxy.example.com:3128/') ('http', None, None, 'proxy.example.com:3128') >>> _parse_proxy('http://joe:[email protected]/') ('http', 'joe', 'password', 'proxy.example.com') >>> _parse_proxy('http://joe:[email protected]:3128') ('http', 'joe', 'password', 'proxy.example.com:3128') Everything after the authority is ignored: >>> _parse_proxy('ftp://joe:[email protected]/rubbish:3128') ('ftp', 'joe', 'password', 'proxy.example.com') Test for no trailing '/' case: >>> _parse_proxy('http://joe:[email protected]') ('http', 'joe', 'password', 'proxy.example.com') """ scheme, r_scheme = splittype(proxy) if not r_scheme.startswith("/"): # authority scheme = None authority = proxy else: # URL if not r_scheme.startswith("//"): raise ValueError("proxy URL with no authority: %r" % proxy) # We have an authority, so for RFC 3986-compliant URLs (by ss 3. # and 3.3.), path is empty or starts with '/' end = r_scheme.find("/", 2) if end == -1: end = None authority = r_scheme[2:end] userinfo, hostport = splituser(authority) if userinfo is not None: user, password = splitpasswd(userinfo) else: user = password = None return scheme, user, password, hostport class ProxyHandler(BaseHandler): # Proxies must be in front handler_order = 100 def __init__(self, proxies=None): if proxies is None: proxies = getproxies() assert hasattr(proxies, 'has_key'), "proxies must be a mapping" self.proxies = proxies for type, url in proxies.items(): setattr(self, '%s_open' % type, lambda r, proxy=url, type=type, meth=self.proxy_open: \ meth(r, proxy, type)) def proxy_open(self, req, proxy, type): orig_type = req.get_type() proxy_type, user, password, hostport = _parse_proxy(proxy) if proxy_type is None: proxy_type = orig_type if req.host and proxy_bypass(req.host): return None if user and password: user_pass = '%s:%s' % (unquote(user), unquote(password)) creds = base64.b64encode(user_pass).strip() req.add_header('Proxy-authorization', 'Basic ' + creds) hostport = unquote(hostport) req.set_proxy(hostport, proxy_type) if orig_type == proxy_type or orig_type == 'https': # let other handlers take care of it return None else: # need to start over, because the other handlers don't # grok the proxy's URL type # e.g. 
if we have a constructor arg proxies like so: # {'http': 'ftp://proxy.example.com'}, we may end up turning # a request for http://acme.example.com/a into one for # ftp://proxy.example.com/a return self.parent.open(req, timeout=req.timeout) class HTTPPasswordMgr: def __init__(self): self.passwd = {} def add_password(self, realm, uri, user, passwd): # uri could be a single URI or a sequence if isinstance(uri, basestring): uri = [uri] if not realm in self.passwd: self.passwd[realm] = {} for default_port in True, False: reduced_uri = tuple( [self.reduce_uri(u, default_port) for u in uri]) self.passwd[realm][reduced_uri] = (user, passwd) def find_user_password(self, realm, authuri): domains = self.passwd.get(realm, {}) for default_port in True, False: reduced_authuri = self.reduce_uri(authuri, default_port) for uris, authinfo in domains.iteritems(): for uri in uris: if self.is_suburi(uri, reduced_authuri): return authinfo return None, None def reduce_uri(self, uri, default_port=True): """Accept authority or URI and extract only the authority and path.""" # note HTTP URLs do not have a userinfo component parts = urlparse.urlsplit(uri) if parts[1]: # URI scheme = parts[0] authority = parts[1] path = parts[2] or '/' else: # host or host:port scheme = None authority = uri path = '/' host, port = splitport(authority) if default_port and port is None and scheme is not None: dport = {"http": 80, "https": 443, }.get(scheme) if dport is not None: authority = "%s:%d" % (host, dport) return authority, path def is_suburi(self, base, test): """Check if test is below base in a URI tree Both args must be URIs in reduced form. """ if base == test: return True if base[0] != test[0]: return False common = posixpath.commonprefix((base[1], test[1])) if len(common) == len(base[1]): return True return False class HTTPPasswordMgrWithDefaultRealm(HTTPPasswordMgr): def find_user_password(self, realm, authuri): user, password = HTTPPasswordMgr.find_user_password(self, realm, authuri) if user is not None: return user, password return HTTPPasswordMgr.find_user_password(self, None, authuri) class AbstractBasicAuthHandler: # XXX this allows for multiple auth-schemes, but will stupidly pick # the last one with a realm specified. # allow for double- and single-quoted realm values # (single quotes are a violation of the RFC, but appear in the wild) rx = re.compile('(?:.*,)*[ \t]*([^ \t]+)[ \t]+' 'realm=(["\']?)([^"\']*)\\2', re.I) # XXX could pre-emptively send auth info already accepted (RFC 2617, # end of section 2, and section 1.2 immediately after "credentials" # production). 
def __init__(self, password_mgr=None): if password_mgr is None: password_mgr = HTTPPasswordMgr() self.passwd = password_mgr self.add_password = self.passwd.add_password def http_error_auth_reqed(self, authreq, host, req, headers): # host may be an authority (without userinfo) or a URL with an # authority # XXX could be multiple headers authreq = headers.get(authreq, None) if authreq: mo = AbstractBasicAuthHandler.rx.search(authreq) if mo: scheme, quote, realm = mo.groups() if quote not in ['"', "'"]: warnings.warn("Basic Auth Realm was unquoted", UserWarning, 2) if scheme.lower() == 'basic': return self.retry_http_basic_auth(host, req, realm) def retry_http_basic_auth(self, host, req, realm): user, pw = self.passwd.find_user_password(realm, host) if pw is not None: raw = "%s:%s" % (user, pw) auth = 'Basic %s' % base64.b64encode(raw).strip() if req.get_header(self.auth_header, None) == auth: return None req.add_unredirected_header(self.auth_header, auth) return self.parent.open(req, timeout=req.timeout) else: return None class HTTPBasicAuthHandler(AbstractBasicAuthHandler, BaseHandler): auth_header = 'Authorization' def http_error_401(self, req, fp, code, msg, headers): url = req.get_full_url() response = self.http_error_auth_reqed('www-authenticate', url, req, headers) return response class ProxyBasicAuthHandler(AbstractBasicAuthHandler, BaseHandler): auth_header = 'Proxy-authorization' def http_error_407(self, req, fp, code, msg, headers): # http_error_auth_reqed requires that there is no userinfo component in # authority. Assume there isn't one, since urllib2 does not (and # should not, RFC 3986 s. 3.2.1) support requests for URLs containing # userinfo. authority = req.get_host() response = self.http_error_auth_reqed('proxy-authenticate', authority, req, headers) return response def randombytes(n): """Return n random bytes.""" # Use /dev/urandom if it is available. Fall back to random module # if not. It might be worthwhile to extend this function to use # other platform-specific mechanisms for getting random bytes. if os.path.exists("/dev/urandom"): f = open("/dev/urandom") s = f.read(n) f.close() return s else: L = [chr(random.randrange(0, 256)) for i in range(n)] return "".join(L) class AbstractDigestAuthHandler: # Digest authentication is specified in RFC 2617. # XXX The client does not inspect the Authentication-Info header # in a successful response. # XXX It should be possible to test this implementation against # a mock server that just generates a static set of challenges. # XXX qop="auth-int" supports is shaky def __init__(self, passwd=None): if passwd is None: passwd = HTTPPasswordMgr() self.passwd = passwd self.add_password = self.passwd.add_password self.retried = 0 self.nonce_count = 0 self.last_nonce = None def reset_retry_count(self): self.retried = 0 def http_error_auth_reqed(self, auth_header, host, req, headers): authreq = headers.get(auth_header, None) if self.retried > 5: # Don't fail endlessly - if we failed once, we'll probably # fail a second time. Hm. Unless the Password Manager is # prompting for the information. Crap. 
This isn't great # but it's better than the current 'repeat until recursion # depth exceeded' approach <wink> raise HTTPError(req.get_full_url(), 401, "digest auth failed", headers, None) else: self.retried += 1 if authreq: scheme = authreq.split()[0] if scheme.lower() == 'digest': return self.retry_http_digest_auth(req, authreq) def retry_http_digest_auth(self, req, auth): token, challenge = auth.split(' ', 1) chal = parse_keqv_list(parse_http_list(challenge)) auth = self.get_authorization(req, chal) if auth: auth_val = 'Digest %s' % auth if req.headers.get(self.auth_header, None) == auth_val: return None req.add_unredirected_header(self.auth_header, auth_val) resp = self.parent.open(req, timeout=req.timeout) return resp def get_cnonce(self, nonce): # The cnonce-value is an opaque # quoted string value provided by the client and used by both client # and server to avoid chosen plaintext attacks, to provide mutual # authentication, and to provide some message integrity protection. # This isn't a fabulous effort, but it's probably Good Enough. dig = hashlib.sha1("%s:%s:%s:%s" % (self.nonce_count, nonce, time.ctime(), randombytes(8))).hexdigest() return dig[:16] def get_authorization(self, req, chal): try: realm = chal['realm'] nonce = chal['nonce'] qop = chal.get('qop') algorithm = chal.get('algorithm', 'MD5') # mod_digest doesn't send an opaque, even though it isn't # supposed to be optional opaque = chal.get('opaque', None) except KeyError: return None H, KD = self.get_algorithm_impls(algorithm) if H is None: return None user, pw = self.passwd.find_user_password(realm, req.get_full_url()) if user is None: return None # XXX not implemented yet if req.has_data(): entdig = self.get_entity_digest(req.get_data(), chal) else: entdig = None A1 = "%s:%s:%s" % (user, realm, pw) A2 = "%s:%s" % (req.get_method(), # XXX selector: what about proxies and full urls req.get_selector()) if qop == 'auth': if nonce == self.last_nonce: self.nonce_count += 1 else: self.nonce_count = 1 self.last_nonce = nonce ncvalue = '%08x' % self.nonce_count cnonce = self.get_cnonce(nonce) noncebit = "%s:%s:%s:%s:%s" % (nonce, ncvalue, cnonce, qop, H(A2)) respdig = KD(H(A1), noncebit) elif qop is None: respdig = KD(H(A1), "%s:%s" % (nonce, H(A2))) else: # XXX handle auth-int. raise URLError("qop '%s' is not supported." % qop) # XXX should the partial digests be encoded too? base = 'username="%s", realm="%s", nonce="%s", uri="%s", ' \ 'response="%s"' % (user, realm, nonce, req.get_selector(), respdig) if opaque: base += ', opaque="%s"' % opaque if entdig: base += ', digest="%s"' % entdig base += ', algorithm="%s"' % algorithm if qop: base += ', qop=auth, nc=%s, cnonce="%s"' % (ncvalue, cnonce) return base def get_algorithm_impls(self, algorithm): # algorithm should be case-insensitive according to RFC2617 algorithm = algorithm.upper() # lambdas assume digest modules are imported at the top level if algorithm == 'MD5': H = lambda x: hashlib.md5(x).hexdigest() elif algorithm == 'SHA': H = lambda x: hashlib.sha1(x).hexdigest() # XXX MD5-sess else: raise ValueError("Unsupported digest authentication " "algorithm %r" % algorithm.lower()) KD = lambda s, d: H("%s:%s" % (s, d)) return H, KD def get_entity_digest(self, data, chal): # XXX not implemented yet return None class HTTPDigestAuthHandler(BaseHandler, AbstractDigestAuthHandler): """An authentication protocol defined by RFC 2069 Digest authentication improves on basic authentication because it does not transmit passwords in the clear. 
""" auth_header = 'Authorization' handler_order = 490 # before Basic auth def http_error_401(self, req, fp, code, msg, headers): host = urlparse.urlparse(req.get_full_url())[1] retry = self.http_error_auth_reqed('www-authenticate', host, req, headers) self.reset_retry_count() return retry class ProxyDigestAuthHandler(BaseHandler, AbstractDigestAuthHandler): auth_header = 'Proxy-Authorization' handler_order = 490 # before Basic auth def http_error_407(self, req, fp, code, msg, headers): host = req.get_host() retry = self.http_error_auth_reqed('proxy-authenticate', host, req, headers) self.reset_retry_count() return retry class AbstractHTTPHandler(BaseHandler): def __init__(self, debuglevel=0): self._debuglevel = debuglevel def set_http_debuglevel(self, level): self._debuglevel = level def do_request_(self, request): host = request.get_host() if not host: raise URLError('no host given') if request.has_data(): # POST data = request.get_data() if not request.has_header('Content-type'): request.add_unredirected_header( 'Content-type', 'application/x-www-form-urlencoded') if not request.has_header('Content-length'): request.add_unredirected_header( 'Content-length', '%d' % len(data)) sel_host = host if request.has_proxy(): scheme, sel = splittype(request.get_selector()) sel_host, sel_path = splithost(sel) if not request.has_header('Host'): request.add_unredirected_header('Host', sel_host) for name, value in self.parent.addheaders: name = name.capitalize() if not request.has_header(name): request.add_unredirected_header(name, value) return request def do_open(self, http_class, req, **http_conn_args): """Return an addinfourl object for the request, using http_class. http_class must implement the HTTPConnection API from httplib. The addinfourl return value is a file-like object. It also has methods and attributes including: - info(): return a mimetools.Message object for the headers - geturl(): return the original request URL - code: HTTP status code """ host = req.get_host() if not host: raise URLError('no host given') # will parse host:port h = http_class(host, timeout=req.timeout, **http_conn_args) h.set_debuglevel(self._debuglevel) headers = dict(req.unredirected_hdrs) headers.update(dict((k, v) for k, v in req.headers.items() if k not in headers)) # We want to make an HTTP/1.1 request, but the addinfourl # class isn't prepared to deal with a persistent connection. # It will try to read all remaining data from the socket, # which will block while the server waits for the next request. # So make sure the connection gets closed after the (only) # request. headers["Connection"] = "close" headers = dict( (name.title(), val) for name, val in headers.items()) if req._tunnel_host: tunnel_headers = {} proxy_auth_hdr = "Proxy-Authorization" if proxy_auth_hdr in headers: tunnel_headers[proxy_auth_hdr] = headers[proxy_auth_hdr] # Proxy-Authorization should not be sent to origin # server. del headers[proxy_auth_hdr] h.set_tunnel(req._tunnel_host, headers=tunnel_headers) try: h.request(req.get_method(), req.get_selector(), req.data, headers) except socket.error, err: # XXX what error? h.close() raise URLError(err) else: try: r = h.getresponse(buffering=True) except TypeError: # buffering kw not supported r = h.getresponse() # Pick apart the HTTPResponse object to get the addinfourl # object initialized properly. # Wrap the HTTPResponse object in socket's file object adapter # for Windows. That adapter calls recv(), so delegate recv() # to read(). 
This weird wrapping allows the returned object to # have readline() and readlines() methods. # XXX It might be better to extract the read buffering code # out of socket._fileobject() and into a base class. r.recv = r.read fp = socket._fileobject(r, close=True) resp = addinfourl(fp, r.msg, req.get_full_url()) resp.code = r.status resp.msg = r.reason return resp class HTTPHandler(AbstractHTTPHandler): def http_open(self, req): return self.do_open(httplib.HTTPConnection, req) http_request = AbstractHTTPHandler.do_request_ if hasattr(httplib, 'HTTPS'): class HTTPSHandler(AbstractHTTPHandler): def __init__(self, debuglevel=0, context=None): AbstractHTTPHandler.__init__(self, debuglevel) self._context = context def https_open(self, req): return self.do_open(httplib.HTTPSConnection, req, context=self._context) https_request = AbstractHTTPHandler.do_request_ class HTTPCookieProcessor(BaseHandler): def __init__(self, cookiejar=None): import cookielib if cookiejar is None: cookiejar = cookielib.CookieJar() self.cookiejar = cookiejar def http_request(self, request): self.cookiejar.add_cookie_header(request) return request def http_response(self, request, response): self.cookiejar.extract_cookies(response, request) return response https_request = http_request https_response = http_response class UnknownHandler(BaseHandler): def unknown_open(self, req): type = req.get_type() raise URLError('unknown url type: %s' % type) def parse_keqv_list(l): """Parse list of key=value strings where keys are not duplicated.""" parsed = {} for elt in l: k, v = elt.split('=', 1) if v[0] == '"' and v[-1] == '"': v = v[1:-1] parsed[k] = v return parsed def parse_http_list(s): """Parse lists as described by RFC 2068 Section 2. In particular, parse comma-separated lists where the elements of the list may include quoted-strings. A quoted-string could contain a comma. A non-quoted string could have quotes in the middle. Neither commas nor quotes count if they are escaped. Only double-quotes count, not single-quotes. 
""" res = [] part = '' escape = quote = False for cur in s: if escape: part += cur escape = False continue if quote: if cur == '\\': escape = True continue elif cur == '"': quote = False part += cur continue if cur == ',': res.append(part) part = '' continue if cur == '"': quote = True part += cur # append last part if part: res.append(part) return [part.strip() for part in res] def _safe_gethostbyname(host): try: return socket.gethostbyname(host) except socket.gaierror: return None class FileHandler(BaseHandler): # Use local file or FTP depending on form of URL def file_open(self, req): url = req.get_selector() if url[:2] == '//' and url[2:3] != '/' and (req.host and req.host != 'localhost'): req.type = 'ftp' return self.parent.open(req) else: return self.open_local_file(req) # names for the localhost names = None def get_names(self): if FileHandler.names is None: try: FileHandler.names = tuple( socket.gethostbyname_ex('localhost')[2] + socket.gethostbyname_ex(socket.gethostname())[2]) except socket.gaierror: FileHandler.names = (socket.gethostbyname('localhost'),) return FileHandler.names # not entirely sure what the rules are here def open_local_file(self, req): import email.utils import mimetypes host = req.get_host() filename = req.get_selector() localfile = url2pathname(filename) try: stats = os.stat(localfile) size = stats.st_size modified = email.utils.formatdate(stats.st_mtime, usegmt=True) mtype = mimetypes.guess_type(filename)[0] headers = mimetools.Message(StringIO( 'Content-type: %s\nContent-length: %d\nLast-modified: %s\n' % (mtype or 'text/plain', size, modified))) if host: host, port = splitport(host) if not host or \ (not port and _safe_gethostbyname(host) in self.get_names()): if host: origurl = 'file://' + host + filename else: origurl = 'file://' + filename return addinfourl(open(localfile, 'rb'), headers, origurl) except OSError, msg: # urllib2 users shouldn't expect OSErrors coming from urlopen() raise URLError(msg) raise URLError('file not on local host') class FTPHandler(BaseHandler): def ftp_open(self, req): import ftplib import mimetypes host = req.get_host() if not host: raise URLError('ftp error: no host given') host, port = splitport(host) if port is None: port = ftplib.FTP_PORT else: port = int(port) # username/password handling user, host = splituser(host) if user: user, passwd = splitpasswd(user) else: passwd = None host = unquote(host) user = user or '' passwd = passwd or '' try: host = socket.gethostbyname(host) except socket.error, msg: raise URLError(msg) path, attrs = splitattr(req.get_selector()) dirs = path.split('/') dirs = map(unquote, dirs) dirs, file = dirs[:-1], dirs[-1] if dirs and not dirs[0]: dirs = dirs[1:] try: fw = self.connect_ftp(user, passwd, host, port, dirs, req.timeout) type = file and 'I' or 'D' for attr in attrs: attr, value = splitvalue(attr) if attr.lower() == 'type' and \ value in ('a', 'A', 'i', 'I', 'd', 'D'): type = value.upper() fp, retrlen = fw.retrfile(file, type) headers = "" mtype = mimetypes.guess_type(req.get_full_url())[0] if mtype: headers += "Content-type: %s\n" % mtype if retrlen is not None and retrlen >= 0: headers += "Content-length: %d\n" % retrlen sf = StringIO(headers) headers = mimetools.Message(sf) return addinfourl(fp, headers, req.get_full_url()) except ftplib.all_errors, msg: raise URLError, ('ftp error: %s' % msg), sys.exc_info()[2] def connect_ftp(self, user, passwd, host, port, dirs, timeout): fw = ftpwrapper(user, passwd, host, port, dirs, timeout, persistent=False) ## fw.ftp.set_debuglevel(1) return 
fw class CacheFTPHandler(FTPHandler): # XXX would be nice to have pluggable cache strategies # XXX this stuff is definitely not thread safe def __init__(self): self.cache = {} self.timeout = {} self.soonest = 0 self.delay = 60 self.max_conns = 16 def setTimeout(self, t): self.delay = t def setMaxConns(self, m): self.max_conns = m def connect_ftp(self, user, passwd, host, port, dirs, timeout): key = user, host, port, '/'.join(dirs), timeout if key in self.cache: self.timeout[key] = time.time() + self.delay else: self.cache[key] = ftpwrapper(user, passwd, host, port, dirs, timeout) self.timeout[key] = time.time() + self.delay self.check_cache() return self.cache[key] def check_cache(self): # first check for old ones t = time.time() if self.soonest <= t: for k, v in self.timeout.items(): if v < t: self.cache[k].close() del self.cache[k] del self.timeout[k] self.soonest = min(self.timeout.values()) # then check the size if len(self.cache) == self.max_conns: for k, v in self.timeout.items(): if v == self.soonest: del self.cache[k] del self.timeout[k] break self.soonest = min(self.timeout.values()) def clear_cache(self): for conn in self.cache.values(): conn.close() self.cache.clear() self.timeout.clear()
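# Illustrative usage sketch (not part of the module above): wiring these
# handler classes together through an opener, which is what the
# opener-construction body at the top of this excerpt (build_opener in
# urllib2) assembles. The proxy URL and credentials are placeholder
# assumptions.
#
#     import urllib2
#
#     password_mgr = urllib2.HTTPPasswordMgrWithDefaultRealm()
#     password_mgr.add_password(None, 'http://example.com/', 'user', 'secret')
#     opener = urllib2.build_opener(
#         urllib2.ProxyHandler({'http': 'http://proxy.example.com:3128'}),
#         urllib2.HTTPBasicAuthHandler(password_mgr))
#     response = opener.open('http://example.com/protected')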
hallyn/qemu
refs/heads/ubuntu_1.7.0+dfsg-2
scripts/qapi-types.py
54
# # QAPI types generator # # Copyright IBM, Corp. 2011 # # Authors: # Anthony Liguori <[email protected]> # # This work is licensed under the terms of the GNU GPLv2. # See the COPYING.LIB file in the top-level directory. from ordereddict import OrderedDict from qapi import * import sys import os import getopt import errno def generate_fwd_struct(name, members, builtin_type=False): if builtin_type: return mcgen(''' typedef struct %(name)sList { union { %(type)s value; uint64_t padding; }; struct %(name)sList *next; } %(name)sList; ''', type=c_type(name), name=name) return mcgen(''' typedef struct %(name)s %(name)s; typedef struct %(name)sList { union { %(name)s *value; uint64_t padding; }; struct %(name)sList *next; } %(name)sList; ''', name=name) def generate_fwd_enum_struct(name, members): return mcgen(''' typedef struct %(name)sList { union { %(name)s value; uint64_t padding; }; struct %(name)sList *next; } %(name)sList; ''', name=name) def generate_struct_fields(members): ret = '' for argname, argentry, optional, structured in parse_args(members): if optional: ret += mcgen(''' bool has_%(c_name)s; ''', c_name=c_var(argname)) if structured: push_indent() ret += generate_struct({ "field": argname, "data": argentry}) pop_indent() else: ret += mcgen(''' %(c_type)s %(c_name)s; ''', c_type=c_type(argentry), c_name=c_var(argname)) return ret def generate_struct(expr): structname = expr.get('type', "") fieldname = expr.get('field', "") members = expr['data'] base = expr.get('base') ret = mcgen(''' struct %(name)s { ''', name=structname) if base: ret += generate_struct_fields({'base': base}) ret += generate_struct_fields(members) if len(fieldname): fieldname = " " + fieldname ret += mcgen(''' }%(field)s; ''', field=fieldname) return ret def generate_enum_lookup(name, values): ret = mcgen(''' const char *%(name)s_lookup[] = { ''', name=name) i = 0 for value in values: ret += mcgen(''' "%(value)s", ''', value=value) ret += mcgen(''' NULL, }; ''') return ret def generate_enum_name(name): if name.isupper(): return c_fun(name, False) new_name = '' for c in c_fun(name, False): if c.isupper(): new_name += '_' new_name += c return new_name.lstrip('_').upper() def generate_enum(name, values): lookup_decl = mcgen(''' extern const char *%(name)s_lookup[]; ''', name=name) enum_decl = mcgen(''' typedef enum %(name)s { ''', name=name) # append automatically generated _MAX value enum_values = values + [ 'MAX' ] i = 0 for value in enum_values: enum_decl += mcgen(''' %(abbrev)s_%(value)s = %(i)d, ''', abbrev=de_camel_case(name).upper(), value=generate_enum_name(value), i=i) i += 1 enum_decl += mcgen(''' } %(name)s; ''', name=name) return lookup_decl + enum_decl def generate_anon_union_qtypes(expr): name = expr['union'] members = expr['data'] ret = mcgen(''' const int %(name)s_qtypes[QTYPE_MAX] = { ''', name=name) for key in members: qapi_type = members[key] if builtin_type_qtypes.has_key(qapi_type): qtype = builtin_type_qtypes[qapi_type] elif find_struct(qapi_type): qtype = "QTYPE_QDICT" elif find_union(qapi_type): qtype = "QTYPE_QDICT" else: assert False, "Invalid anonymous union member" ret += mcgen(''' [ %(qtype)s ] = %(abbrev)s_KIND_%(enum)s, ''', qtype = qtype, abbrev = de_camel_case(name).upper(), enum = c_fun(de_camel_case(key),False).upper()) ret += mcgen(''' }; ''') return ret def generate_union(expr): name = expr['union'] typeinfo = expr['data'] base = expr.get('base') discriminator = expr.get('discriminator') ret = mcgen(''' struct %(name)s { %(name)sKind kind; union { void *data; ''', name=name) for 
key in typeinfo: ret += mcgen(''' %(c_type)s %(c_name)s; ''', c_type=c_type(typeinfo[key]), c_name=c_fun(key)) ret += mcgen(''' }; ''') if base: base_fields = find_struct(base)['data'] if discriminator: base_fields = base_fields.copy() del base_fields[discriminator] ret += generate_struct_fields(base_fields) else: assert not discriminator ret += mcgen(''' }; ''') if discriminator == {}: ret += mcgen(''' extern const int %(name)s_qtypes[]; ''', name=name) return ret def generate_type_cleanup_decl(name): ret = mcgen(''' void qapi_free_%(type)s(%(c_type)s obj); ''', c_type=c_type(name),type=name) return ret def generate_type_cleanup(name): ret = mcgen(''' void qapi_free_%(type)s(%(c_type)s obj) { QapiDeallocVisitor *md; Visitor *v; if (!obj) { return; } md = qapi_dealloc_visitor_new(); v = qapi_dealloc_get_visitor(md); visit_type_%(type)s(v, &obj, NULL, NULL); qapi_dealloc_visitor_cleanup(md); } ''', c_type=c_type(name),type=name) return ret try: opts, args = getopt.gnu_getopt(sys.argv[1:], "chbp:o:", ["source", "header", "builtins", "prefix=", "output-dir="]) except getopt.GetoptError, err: print str(err) sys.exit(1) output_dir = "" prefix = "" c_file = 'qapi-types.c' h_file = 'qapi-types.h' do_c = False do_h = False do_builtins = False for o, a in opts: if o in ("-p", "--prefix"): prefix = a elif o in ("-o", "--output-dir"): output_dir = a + "/" elif o in ("-c", "--source"): do_c = True elif o in ("-h", "--header"): do_h = True elif o in ("-b", "--builtins"): do_builtins = True if not do_c and not do_h: do_c = True do_h = True c_file = output_dir + prefix + c_file h_file = output_dir + prefix + h_file try: os.makedirs(output_dir) except os.error, e: if e.errno != errno.EEXIST: raise def maybe_open(really, name, opt): if really: return open(name, opt) else: import StringIO return StringIO.StringIO() fdef = maybe_open(do_c, c_file, 'w') fdecl = maybe_open(do_h, h_file, 'w') fdef.write(mcgen(''' /* AUTOMATICALLY GENERATED, DO NOT MODIFY */ /* * deallocation functions for schema-defined QAPI types * * Copyright IBM, Corp. 2011 * * Authors: * Anthony Liguori <[email protected]> * Michael Roth <[email protected]> * * This work is licensed under the terms of the GNU LGPL, version 2.1 or later. * See the COPYING.LIB file in the top-level directory. * */ #include "qapi/dealloc-visitor.h" #include "%(prefix)sqapi-types.h" #include "%(prefix)sqapi-visit.h" ''', prefix=prefix)) fdecl.write(mcgen(''' /* AUTOMATICALLY GENERATED, DO NOT MODIFY */ /* * schema-defined QAPI types * * Copyright IBM, Corp. 2011 * * Authors: * Anthony Liguori <[email protected]> * * This work is licensed under the terms of the GNU LGPL, version 2.1 or later. * See the COPYING.LIB file in the top-level directory. 
* */ #ifndef %(guard)s #define %(guard)s #include <stdbool.h> #include <stdint.h> ''', guard=guardname(h_file))) exprs = parse_schema(sys.stdin) exprs = filter(lambda expr: not expr.has_key('gen'), exprs) fdecl.write(guardstart("QAPI_TYPES_BUILTIN_STRUCT_DECL")) for typename in builtin_types: fdecl.write(generate_fwd_struct(typename, None, builtin_type=True)) fdecl.write(guardend("QAPI_TYPES_BUILTIN_STRUCT_DECL")) for expr in exprs: ret = "\n" if expr.has_key('type'): ret += generate_fwd_struct(expr['type'], expr['data']) elif expr.has_key('enum'): ret += generate_enum(expr['enum'], expr['data']) + "\n" ret += generate_fwd_enum_struct(expr['enum'], expr['data']) fdef.write(generate_enum_lookup(expr['enum'], expr['data'])) elif expr.has_key('union'): ret += generate_fwd_struct(expr['union'], expr['data']) + "\n" ret += generate_enum('%sKind' % expr['union'], expr['data'].keys()) fdef.write(generate_enum_lookup('%sKind' % expr['union'], expr['data'].keys())) if expr.get('discriminator') == {}: fdef.write(generate_anon_union_qtypes(expr)) else: continue fdecl.write(ret) # to avoid header dependency hell, we always generate declarations # for built-in types in our header files and simply guard them fdecl.write(guardstart("QAPI_TYPES_BUILTIN_CLEANUP_DECL")) for typename in builtin_types: fdecl.write(generate_type_cleanup_decl(typename + "List")) fdecl.write(guardend("QAPI_TYPES_BUILTIN_CLEANUP_DECL")) # ...this doesn't work for cases where we link in multiple objects that # have the functions defined, so we use -b option to provide control # over these cases if do_builtins: fdef.write(guardstart("QAPI_TYPES_BUILTIN_CLEANUP_DEF")) for typename in builtin_types: fdef.write(generate_type_cleanup(typename + "List")) fdef.write(guardend("QAPI_TYPES_BUILTIN_CLEANUP_DEF")) for expr in exprs: ret = "\n" if expr.has_key('type'): ret += generate_struct(expr) + "\n" ret += generate_type_cleanup_decl(expr['type'] + "List") fdef.write(generate_type_cleanup(expr['type'] + "List") + "\n") ret += generate_type_cleanup_decl(expr['type']) fdef.write(generate_type_cleanup(expr['type']) + "\n") elif expr.has_key('union'): ret += generate_union(expr) ret += generate_type_cleanup_decl(expr['union'] + "List") fdef.write(generate_type_cleanup(expr['union'] + "List") + "\n") ret += generate_type_cleanup_decl(expr['union']) fdef.write(generate_type_cleanup(expr['union']) + "\n") elif expr.has_key('enum'): ret += generate_type_cleanup_decl(expr['enum'] + "List") fdef.write(generate_type_cleanup(expr['enum'] + "List") + "\n") else: continue fdecl.write(ret) fdecl.write(''' #endif ''') fdecl.flush() fdecl.close() fdef.flush() fdef.close()
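# Illustrative sketch (not part of qapi-types.py): a self-contained rendition
# of the enum-name rule that generate_enum_name() implements above, leaving out
# the c_fun() sanitisation step from the qapi module so the demo stands alone
# (an assumption, since c_fun also maps characters that are invalid in C).
#
#     def demo_enum_name(name):
#         if name.isupper():
#             return name
#         out = ''
#         for c in name:
#             if c.isupper():
#                 out += '_'
#             out += c
#         return out.lstrip('_').upper()
#
#     print demo_enum_name('WatchdogExpirationAction')
#     # -> WATCHDOG_EXPIRATION_ACTION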
thonkify/thonkify
refs/heads/master
src/lib/gcloud/storage/demo/demo.py
8
# Welcome to the gCloud Storage Demo! (hit enter)
# We're going to walk through some of the basics...
# Don't worry though. You don't need to do anything, just keep hitting enter...

# Copyright 2014 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Let's start by importing the demo module and getting a client:
import time

from gcloud import storage
from gcloud.storage import demo

client = storage.Client(project=demo.PROJECT_ID)

# OK, now let's look at all of the buckets...
print(list(client.list_buckets()))  # This might take a second...

# Now let's create a new bucket...
bucket_name = ("bucket-%s" % time.time()).replace(".", "")  # Get rid of dots.
print(bucket_name)
bucket = client.create_bucket(bucket_name)
print(bucket)

# Let's look at all of the buckets again...
print(list(client.list_buckets()))

# How about we create a new blob inside this bucket.
blob = bucket.blob("my-new-file.txt")

# Now let's put some data in there.
blob.upload_from_string("this is some data!")

# ... and we can read that data back again.
print(blob.download_as_string())

# Now let's delete that blob.
print(blob.delete())

# And now that we're done, let's delete that bucket...
print(bucket.delete())

# Alright! That's all!
# Here's an interactive prompt for you now...
kenorb/BitTorrent
refs/heads/master
BTL/asyncexecutor.py
5
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>.
"""
A simple, lightweight asynchronous executor class with nice Java-style
static methods.
"""
from twisted.python.threadpool import ThreadPool


class AsyncExecutor(object):
    """ defaults to minthreads=5, maxthreads=20 """
    pool = ThreadPool(name='AsyncExecutorPool')

    def _execute(self, func, *args, **kwargs):
        # Started lazily so that importing this module does not spin up threads.
        if not self.pool.started:
            self.pool.start()
        self.pool.dispatch(None, func, *args, **kwargs)
    execute = classmethod(_execute)
    stop = pool.stop


def test():
    import random
    import time

    def test(digit):
        print 'Testing %d' % digit
        time.sleep(random.randint(1, 5000) / 1000.0)  # float division: sleep in milliseconds
        print ' finished with test %d' % digit

    for i in xrange(10):
        AsyncExecutor.execute(test, i)
    AsyncExecutor.stop()

if __name__ == '__main__':
    test()
dlacombejr/deepy
refs/heads/master
experiments/initialization_schemes/kaiming_he.py
7
#!/usr/bin/env python
# -*- coding: utf-8 -*-

import os

from util import run
from deepy.utils import KaimingHeInitializer

model_path = os.path.join(os.path.dirname(__file__), "models", "kaiming_he1.gz")

if __name__ == '__main__':
    run(KaimingHeInitializer(), model_path)
undoware/neutron-drive
refs/heads/master
google_appengine/lib/django_1_3/django/conf/locale/zh_CN/__init__.py
12133432
potatolondon/django-nonrel-1-4
refs/heads/master
tests/modeltests/select_related/__init__.py
12133432
350dotorg/Django
refs/heads/master
tests/modeltests/field_subclassing/__init__.py
12133432
doctormo/motte
refs/heads/master
motte/tagcss/__init__.py
12133432
sam-tsai/django-old
refs/heads/master
django/conf/locale/uk/__init__.py
12133432
haiyangd/Gelatin
refs/heads/master
src/Gelatin/parser/Newline.py
2
# Copyright (C) 2010 Samuel Abels.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2, as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
from simpleparse.objectgenerator import Prebuilt
from simpleparse.stt.TextTools import Call, Skip
from util import eat_indent
from Token import Token


class Newline(Token):
    def __call__(self, buffer, start, end):
        # Skip empty lines.
        thestart = start
        try:
            if buffer[thestart] != '\n':
                return thestart
            while buffer[thestart] == '\n':
                thestart += 1
        except IndexError:
            return thestart + 2  # +1/-1 hack  #EOF
        # If the indent of the non-empty line matches, we are done.
        return eat_indent(buffer, thestart, end, self.processor.indent) + 1  # +1/-1 hack

    def table(self):
        table = (None, Call, self), (None, Skip, -1)  # +1/-1 hack
        return Prebuilt(value=table, report=False)
RedBulli/Django_SnookerStats
refs/heads/master
SnookerStats/urls.py
1
from django.conf.urls import patterns, include, url
from tastypie.api import Api
from Snooker.api import StrikeResource, FrameResource, PlayerResource, MatchResource, LeagueResource, TournamentResource
from django.contrib import admin
from Snooker.views import index, client

v1_api = Api(api_name='v1')
v1_api.register(StrikeResource())
v1_api.register(FrameResource())
v1_api.register(PlayerResource())
v1_api.register(MatchResource())
v1_api.register(LeagueResource())
v1_api.register(TournamentResource())

admin.autodiscover()

urlpatterns = patterns('',
    # Examples:
    url(r'^$', index),
    url(r'^client/$', client),
    (r'^api/', include(v1_api.urls)),
    # url(r'^SnookerStats/', include('SnookerStats.foo.urls')),

    # Uncomment the admin/doc line below to enable admin documentation:
    # url(r'^admin/doc/', include('django.contrib.admindocs.urls')),

    # Uncomment the next line to enable the admin:
    url(r'^admin/', include(admin.site.urls)),
)
DarthMaulware/EquationGroupLeaks
refs/heads/master
Leak #5 - Lost In Translation/windows/Resources/Dsz/PyScripts/Lib/dsz/mca/file/cmd/diskspace/type_Result.py
1
# uncompyle6 version 2.9.10
# Python bytecode 2.7 (62211)
# Decompiled from: Python 3.6.0b2 (default, Oct 11 2016, 05:27:10)
# [GCC 6.2.0 20161005]
# Embedded file name: type_Result.py
from types import *

class Result:

    def __init__(self):
        self.__dict__['available'] = 0
        self.__dict__['total'] = 0
        self.__dict__['free'] = 0
        self.__dict__['disk'] = ''

    def __getattr__(self, name):
        if name == 'available':
            return self.__dict__['available']
        if name == 'total':
            return self.__dict__['total']
        if name == 'free':
            return self.__dict__['free']
        if name == 'disk':
            return self.__dict__['disk']
        raise AttributeError("Attribute '%s' not found" % name)

    def __setattr__(self, name, value):
        if name == 'available':
            self.__dict__['available'] = value
        elif name == 'total':
            self.__dict__['total'] = value
        elif name == 'free':
            self.__dict__['free'] = value
        elif name == 'disk':
            self.__dict__['disk'] = value
        else:
            raise AttributeError("Attribute '%s' not found" % name)

    def Marshal(self, mmsg):
        from mcl.object.Message import MarshalMessage
        submsg = MarshalMessage()
        submsg.AddU64(MSG_KEY_RESULT_AVAILABLE, self.__dict__['available'])
        submsg.AddU64(MSG_KEY_RESULT_TOTAL, self.__dict__['total'])
        submsg.AddU64(MSG_KEY_RESULT_FREE, self.__dict__['free'])
        submsg.AddStringUtf8(MSG_KEY_RESULT_DISK, self.__dict__['disk'])
        mmsg.AddMessage(MSG_KEY_RESULT, submsg)

    def Demarshal(self, dmsg, instance=-1):
        import mcl.object.Message
        msgData = dmsg.FindData(MSG_KEY_RESULT, mcl.object.Message.MSG_TYPE_MSG, instance)
        submsg = mcl.object.Message.DemarshalMessage(msgData)
        self.__dict__['available'] = submsg.FindU64(MSG_KEY_RESULT_AVAILABLE)
        self.__dict__['total'] = submsg.FindU64(MSG_KEY_RESULT_TOTAL)
        self.__dict__['free'] = submsg.FindU64(MSG_KEY_RESULT_FREE)
        self.__dict__['disk'] = submsg.FindString(MSG_KEY_RESULT_DISK)
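# Illustrative usage sketch (not part of the decompiled module): exercising the
# attribute protocol defined above. Marshal()/Demarshal() need the mcl runtime
# and the MSG_KEY_* message-key constants, so only plain attribute access is
# shown here.
if __name__ == '__main__':
    r = Result()
    r.available = 1024
    r.disk = 'C:'
    print r.available, r.total, r.free, r.disk
    try:
        r.bogus = 1
    except AttributeError, e:
        print e  # unknown attributes are rejected by __setattr__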
maciek263/django2
refs/heads/master
myvenv/Lib/site-packages/pip/_vendor/html5lib/filters/sanitizer.py
1734
from __future__ import absolute_import, division, unicode_literals

from . import _base
from ..sanitizer import HTMLSanitizerMixin


class Filter(_base.Filter, HTMLSanitizerMixin):
    def __iter__(self):
        for token in _base.Filter.__iter__(self):
            token = self.sanitize_token(token)
            if token:
                yield token
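# Illustrative usage sketch (not part of the vendored module), assuming the
# pre-1.0 html5lib API this copy targets: the serializer's `sanitize` option
# splices this Filter into the token stream between tree walker and output.
#
#     import html5lib
#     from html5lib import treewalkers, serializer
#
#     fragment = html5lib.parseFragment('<p onclick="evil()">hi</p>')
#     walker = treewalkers.getTreeWalker('etree')
#     clean = serializer.HTMLSerializer(sanitize=True).render(walker(fragment))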
barryrobison/arsenalsuite
refs/heads/master
cpp/apps/bach/plugins/gen_thumbs.py
10
import initbach
import bachutil
from Bach import *
from PyQt4.QtCore import *
import os


def listMissingThumbs(width):
    assets = BachAsset.select("exclude=false")
    for asset in assets:
        path = asset.path()
        cachePath = "%s/%s_%sx%s.png" % ("/drd/reference/.thumbnails", path, width, width)
        if os.path.exists(cachePath):
            continue
        if not QFile.exists(QFileInfo(cachePath).absolutePath()):
            QDir().mkpath(QFileInfo(cachePath).absolutePath())
        print bachutil.thumbCommand(path, width)

listMissingThumbs(256)
listMissingThumbs(512)
orlenko/plei
refs/heads/master
mezzanine/core/forms.py
1
from uuid import uuid4 from django import forms from django.forms.extras.widgets import SelectDateWidget from django.utils.safestring import mark_safe from django.utils.translation import ugettext_lazy as _ from mezzanine.conf import settings from mezzanine.core.models import Orderable from django.template.loader import render_to_string from django.forms.util import flatatt from django.utils.html import conditional_escape from bleach.encoding import force_unicode class Html5Mixin(object): """ Mixin for form classes. Adds HTML5 features to forms for client side validation by the browser, like a "required" attribute and "email" and "url" input types. """ def __init__(self, *args, **kwargs): super(Html5Mixin, self).__init__(*args, **kwargs) if hasattr(self, "fields"): # Autofocus first field first_field = self.fields.itervalues().next() first_field.widget.attrs["autofocus"] = "" for name, field in self.fields.items(): if settings.FORMS_USE_HTML5: if isinstance(field, forms.EmailField): self.fields[name].widget.input_type = "email" elif isinstance(field, forms.URLField): self.fields[name].widget.input_type = "url" if field.required: self.fields[name].widget.attrs["required"] = "" _tinymce_js = () if settings.GRAPPELLI_INSTALLED: _tinymce_js = (settings.STATIC_URL + "grappelli/tinymce/jscripts/tiny_mce/tiny_mce.js", settings.TINYMCE_SETUP_JS,) class TinyMceWidget(forms.Textarea): """ Setup the JS files and targetting CSS class for a textarea to use TinyMCE. """ class Media: js = _tinymce_js def __init__(self, *args, **kwargs): super(TinyMceWidget, self).__init__(*args, **kwargs) self.attrs["class"] = "mceEditor" class CKEditorWidget(forms.Textarea): class Media: js = ( settings.STATIC_URL + 'ckeditor/ckeditor.js', settings.STATIC_URL + 'ckeditor_admin_filebrowser.js', ) def render(self, name, value, attrs=None): attrs = attrs or {} final_attrs = self.build_attrs(attrs, name=name) return mark_safe(render_to_string('ckeditor/widget.html', { 'final_attrs': flatatt(final_attrs), 'value': conditional_escape(force_unicode(value)), 'id': final_attrs['id'] })) class OrderWidget(forms.HiddenInput): """ Add up and down arrows for ordering controls next to a hidden form field. """ def render(self, *args, **kwargs): rendered = super(OrderWidget, self).render(*args, **kwargs) arrows = ["<img src='%sadmin/img/admin/arrow-%s.gif' />" % (settings.STATIC_URL, arrow) for arrow in ("up", "down")] arrows = "<span class='ordering'>%s</span>" % "".join(arrows) return rendered + mark_safe(arrows) class DynamicInlineAdminForm(forms.ModelForm): """ Form for ``DynamicInlineAdmin`` that can be collapsed and sorted with drag and drop using ``OrderWidget``. """ class Media: js = ("mezzanine/js/jquery-ui-1.9.1.custom.min.js", "mezzanine/js/admin/dynamic_inline.js",) def __init__(self, *args, **kwargs): super(DynamicInlineAdminForm, self).__init__(*args, **kwargs) if issubclass(self._meta.model, Orderable): self.fields["_order"] = forms.CharField(label=_("Order"), widget=OrderWidget, required=False) class SplitSelectDateTimeWidget(forms.SplitDateTimeWidget): """ Combines Django's ``SelectDateTimeWidget`` and ``SelectDateWidget``. """ def __init__(self, attrs=None, date_format=None, time_format=None): date_widget = SelectDateWidget(attrs=attrs) time_widget = forms.TimeInput(attrs=attrs, format=time_format) forms.MultiWidget.__init__(self, (date_widget, time_widget), attrs) class CheckboxSelectMultiple(forms.CheckboxSelectMultiple): """ Wraps render with a CSS class for styling. 
""" def render(self, *args, **kwargs): rendered = super(CheckboxSelectMultiple, self).render(*args, **kwargs) return mark_safe("<span class='multicheckbox'>%s</span>" % rendered) def get_edit_form(obj, field_names, data=None, files=None): """ Returns the in-line editing form for editing a single model field. """ # Map these form fields to their types defined in the forms app so # we can make use of their custom widgets. from mezzanine.forms import fields widget_overrides = { forms.DateField: fields.DATE, forms.DateTimeField: fields.DATE_TIME, forms.EmailField: fields.EMAIL, } class EditForm(forms.ModelForm): """ In-line editing form for editing a single model field. """ app = forms.CharField(widget=forms.HiddenInput) model = forms.CharField(widget=forms.HiddenInput) id = forms.CharField(widget=forms.HiddenInput) fields = forms.CharField(widget=forms.HiddenInput) class Meta: model = obj.__class__ fields = field_names.split(",") def __init__(self, *args, **kwargs): super(EditForm, self).__init__(*args, **kwargs) self.uuid = str(uuid4()) for f in self.fields.keys(): field_class = self.fields[f].__class__ try: field_type = widget_overrides[field_class] except KeyError: pass else: self.fields[f].widget = fields.WIDGETS[field_type]() css_class = self.fields[f].widget.attrs.get("class", "") css_class += " " + field_class.__name__.lower() self.fields[f].widget.attrs["class"] = css_class self.fields[f].widget.attrs["id"] = "%s-%s" % (f, self.uuid) if settings.FORMS_USE_HTML5 and self.fields[f].required: self.fields[f].widget.attrs["required"] = "" initial = {"app": obj._meta.app_label, "id": obj.id, "fields": field_names, "model": obj._meta.object_name.lower()} return EditForm(instance=obj, initial=initial, data=data, files=files)
zephirefaith/AI_Fall15_Assignments
refs/heads/master
A2/lib/networkx/algorithms/tests/test_boundary.py
43
#!/usr/bin/env python
from nose.tools import *
import networkx as nx
from networkx import convert_node_labels_to_integers as cnlti


class TestBoundary:

    def setUp(self):
        self.null = nx.null_graph()
        self.P10 = cnlti(nx.path_graph(10), first_label=1)
        self.K10 = cnlti(nx.complete_graph(10), first_label=1)

    def test_null_node_boundary(self):
        """null graph has empty node boundaries"""
        null = self.null
        assert_equal(nx.node_boundary(null, []), [])
        assert_equal(nx.node_boundary(null, [], []), [])
        assert_equal(nx.node_boundary(null, [1, 2, 3]), [])
        assert_equal(nx.node_boundary(null, [1, 2, 3], [4, 5, 6]), [])
        assert_equal(nx.node_boundary(null, [1, 2, 3], [3, 4, 5]), [])

    def test_null_edge_boundary(self):
        """null graph has empty edge boundaries"""
        null = self.null
        assert_equal(nx.edge_boundary(null, []), [])
        assert_equal(nx.edge_boundary(null, [], []), [])
        assert_equal(nx.edge_boundary(null, [1, 2, 3]), [])
        assert_equal(nx.edge_boundary(null, [1, 2, 3], [4, 5, 6]), [])
        assert_equal(nx.edge_boundary(null, [1, 2, 3], [3, 4, 5]), [])

    def test_path_node_boundary(self):
        """Check node boundaries in path graph."""
        P10 = self.P10
        assert_equal(nx.node_boundary(P10, []), [])
        assert_equal(nx.node_boundary(P10, [], []), [])
        assert_equal(nx.node_boundary(P10, [1, 2, 3]), [4])
        assert_equal(sorted(nx.node_boundary(P10, [4, 5, 6])), [3, 7])
        assert_equal(sorted(nx.node_boundary(P10, [3, 4, 5, 6, 7])), [2, 8])
        assert_equal(nx.node_boundary(P10, [8, 9, 10]), [7])
        assert_equal(sorted(nx.node_boundary(P10, [4, 5, 6], [9, 10])), [])

    def test_path_edge_boundary(self):
        """Check edge boundaries in path graph."""
        P10 = self.P10
        assert_equal(nx.edge_boundary(P10, []), [])
        assert_equal(nx.edge_boundary(P10, [], []), [])
        assert_equal(nx.edge_boundary(P10, [1, 2, 3]), [(3, 4)])
        assert_equal(sorted(nx.edge_boundary(P10, [4, 5, 6])), [(4, 3), (6, 7)])
        assert_equal(sorted(nx.edge_boundary(P10, [3, 4, 5, 6, 7])), [(3, 2), (7, 8)])
        assert_equal(nx.edge_boundary(P10, [8, 9, 10]), [(8, 7)])
        assert_equal(sorted(nx.edge_boundary(P10, [4, 5, 6], [9, 10])), [])
        assert_equal(nx.edge_boundary(P10, [1, 2, 3], [3, 4, 5]), [(2, 3), (3, 4)])

    def test_k10_node_boundary(self):
        """Check node boundaries in K10"""
        K10 = self.K10
        assert_equal(nx.node_boundary(K10, []), [])
        assert_equal(nx.node_boundary(K10, [], []), [])
        assert_equal(sorted(nx.node_boundary(K10, [1, 2, 3])),
                     [4, 5, 6, 7, 8, 9, 10])
        assert_equal(sorted(nx.node_boundary(K10, [4, 5, 6])),
                     [1, 2, 3, 7, 8, 9, 10])
        assert_equal(sorted(nx.node_boundary(K10, [3, 4, 5, 6, 7])),
                     [1, 2, 8, 9, 10])
        assert_equal(nx.node_boundary(K10, [4, 5, 6], []), [])
        assert_equal(nx.node_boundary(K10, K10), [])
        assert_equal(nx.node_boundary(K10, [1, 2, 3], [3, 4, 5]), [4, 5])

    def test_k10_edge_boundary(self):
        """Check edge boundaries in K10"""
        K10 = self.K10
        assert_equal(nx.edge_boundary(K10, []), [])
        assert_equal(nx.edge_boundary(K10, [], []), [])
        assert_equal(len(nx.edge_boundary(K10, [1, 2, 3])), 21)
        assert_equal(len(nx.edge_boundary(K10, [4, 5, 6, 7])), 24)
        assert_equal(len(nx.edge_boundary(K10, [3, 4, 5, 6, 7])), 25)
        assert_equal(len(nx.edge_boundary(K10, [8, 9, 10])), 21)
        assert_equal(sorted(nx.edge_boundary(K10, [4, 5, 6], [9, 10])),
                     [(4, 9), (4, 10), (5, 9), (5, 10), (6, 9), (6, 10)])
        assert_equal(nx.edge_boundary(K10, [1, 2, 3], [3, 4, 5]),
                     [(1, 3), (1, 4), (1, 5), (2, 3), (2, 4), (2, 5),
                      (3, 4), (3, 5)])

    def test_petersen(self):
        """Check boundaries in the petersen graph

        cheeger(G,k)=min(|bdy(S)|/|S| for |S|=k, 0<k<=|V(G)|/2)
        """
        from itertools import combinations
        P = nx.petersen_graph()

        def cheeger(G, k):
            return min(float(len(nx.node_boundary(G, nn))) / k
                       for nn in combinations(G, k))

        assert_almost_equals(cheeger(P, 1), 3.00, places=2)
        assert_almost_equals(cheeger(P, 2), 2.00, places=2)
        assert_almost_equals(cheeger(P, 3), 1.67, places=2)
        assert_almost_equals(cheeger(P, 4), 1.00, places=2)
        assert_almost_equals(cheeger(P, 5), 0.80, places=2)
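# Illustrative sketch (not part of the test module): the boundary functions on
# a small path graph, mirroring the assertions above. Assumes networkx 1.x,
# where node_boundary() and edge_boundary() return lists.
if __name__ == '__main__':
    G = nx.path_graph(5)                   # nodes 0..4 in a line
    print(nx.node_boundary(G, [0, 1]))     # -> [2]
    print(nx.edge_boundary(G, [0, 1]))     # -> [(1, 2)]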
smartshark/serverSHARK
refs/heads/master
server/settings_template_vagrant.py
1
from .base import *

# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'CHANGEME'

DEBUG = True

LOCALQUEUE = {
    'root_path': '/tmp/servershark/',
    'plugin_installation': os.path.join(BASE_DIR, 'plugin_installations'),
    'plugin_output': os.path.join(BASE_DIR, 'plugin_output'),
    'redis_url': 'redis://localhost:6379',
    'job_queue': 'queue:jobs',
    'result_queue': 'queue:results',
    'timeout': 0,
    'debug': False,
}

HPC = {
    'username': 'xxx',
    'password': 'xxx',
    'host': 'xxx',
    'port': 22,
    'queue': 'xx',
    'tasks_per_node': [],
    'root_path': 'xxx',
    'log_path': 'xxx',
    'ssh_tunnel_username': '',
    'ssh_tunnel_password': '',
    'ssh_tunnel_host': '',
    'ssh_tunnel_port': '',
    'ssh_use_tunnel': '',
    'ssh_key_path': '',
    'cores_per_job': 1,
    'local_log_path': '',
    'hosts_per_job': 1
}

COLLECTION_CONNECTOR_IDENTIFIER = 'LOCALQUEUE'

# Database
# https://docs.djangoproject.com/en/1.9/ref/settings/#databases
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.mysql',
        'NAME': 'servershark',
        'USER': 'root',
        'PASSWORD': 'CHANGEME',
        'HOST': 'localhost',
        'CONN_MAX_AGE': 3500,
    },
    'mongodb': {
        'ENGINE': '',
        'NAME': 'smartshark',
        'USER': 'root',
        'PASSWORD': 'CHANGEME',
        'HOST': 'localhost',
        'PORT': 27017,
        'AUTHENTICATION_DB': 'smartshark',
        'PLUGIN_SCHEMA_COLLECTION': 'plugin_schema',
        'SHARDING': False,
    }
}

# API Key for visualSHARK if used
API_KEY = None

EMAIL_HOST = ''
EMAIL_PORT = 587
EMAIL_HOST_USER = ''
EMAIL_HOST_PASSWORD = ''
EMAIL_USE_TLS = True
NOTIFICATION_RECEIVER = ''
hollerith/schoogle
refs/heads/master
gdata/youtube/__init__.py
297
#!/usr/bin/python # # Copyright (C) 2008 Google Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. __author__ = ('[email protected] (Stephanie Liu)' ', [email protected] (Jochen Hartmann)') import atom import gdata import gdata.media as Media import gdata.geo as Geo YOUTUBE_NAMESPACE = 'http://gdata.youtube.com/schemas/2007' YOUTUBE_FORMAT = '{http://gdata.youtube.com/schemas/2007}format' YOUTUBE_DEVELOPER_TAG_SCHEME = '%s/%s' % (YOUTUBE_NAMESPACE, 'developertags.cat') YOUTUBE_SUBSCRIPTION_TYPE_SCHEME = '%s/%s' % (YOUTUBE_NAMESPACE, 'subscriptiontypes.cat') class Username(atom.AtomBase): """The YouTube Username element""" _tag = 'username' _namespace = YOUTUBE_NAMESPACE class QueryString(atom.AtomBase): """The YouTube QueryString element""" _tag = 'queryString' _namespace = YOUTUBE_NAMESPACE class FirstName(atom.AtomBase): """The YouTube FirstName element""" _tag = 'firstName' _namespace = YOUTUBE_NAMESPACE class LastName(atom.AtomBase): """The YouTube LastName element""" _tag = 'lastName' _namespace = YOUTUBE_NAMESPACE class Age(atom.AtomBase): """The YouTube Age element""" _tag = 'age' _namespace = YOUTUBE_NAMESPACE class Books(atom.AtomBase): """The YouTube Books element""" _tag = 'books' _namespace = YOUTUBE_NAMESPACE class Gender(atom.AtomBase): """The YouTube Gender element""" _tag = 'gender' _namespace = YOUTUBE_NAMESPACE class Company(atom.AtomBase): """The YouTube Company element""" _tag = 'company' _namespace = YOUTUBE_NAMESPACE class Hobbies(atom.AtomBase): """The YouTube Hobbies element""" _tag = 'hobbies' _namespace = YOUTUBE_NAMESPACE class Hometown(atom.AtomBase): """The YouTube Hometown element""" _tag = 'hometown' _namespace = YOUTUBE_NAMESPACE class Location(atom.AtomBase): """The YouTube Location element""" _tag = 'location' _namespace = YOUTUBE_NAMESPACE class Movies(atom.AtomBase): """The YouTube Movies element""" _tag = 'movies' _namespace = YOUTUBE_NAMESPACE class Music(atom.AtomBase): """The YouTube Music element""" _tag = 'music' _namespace = YOUTUBE_NAMESPACE class Occupation(atom.AtomBase): """The YouTube Occupation element""" _tag = 'occupation' _namespace = YOUTUBE_NAMESPACE class School(atom.AtomBase): """The YouTube School element""" _tag = 'school' _namespace = YOUTUBE_NAMESPACE class Relationship(atom.AtomBase): """The YouTube Relationship element""" _tag = 'relationship' _namespace = YOUTUBE_NAMESPACE class Recorded(atom.AtomBase): """The YouTube Recorded element""" _tag = 'recorded' _namespace = YOUTUBE_NAMESPACE class Statistics(atom.AtomBase): """The YouTube Statistics element.""" _tag = 'statistics' _namespace = YOUTUBE_NAMESPACE _attributes = atom.AtomBase._attributes.copy() _attributes['viewCount'] = 'view_count' _attributes['videoWatchCount'] = 'video_watch_count' _attributes['subscriberCount'] = 'subscriber_count' _attributes['lastWebAccess'] = 'last_web_access' _attributes['favoriteCount'] = 'favorite_count' def __init__(self, view_count=None, video_watch_count=None, favorite_count=None, subscriber_count=None, last_web_access=None, extension_elements=None, 
extension_attributes=None, text=None): self.view_count = view_count self.video_watch_count = video_watch_count self.subscriber_count = subscriber_count self.last_web_access = last_web_access self.favorite_count = favorite_count atom.AtomBase.__init__(self, extension_elements=extension_elements, extension_attributes=extension_attributes, text=text) class Status(atom.AtomBase): """The YouTube Status element""" _tag = 'status' _namespace = YOUTUBE_NAMESPACE class Position(atom.AtomBase): """The YouTube Position element. The position in a playlist feed.""" _tag = 'position' _namespace = YOUTUBE_NAMESPACE class Racy(atom.AtomBase): """The YouTube Racy element.""" _tag = 'racy' _namespace = YOUTUBE_NAMESPACE class Description(atom.AtomBase): """The YouTube Description element.""" _tag = 'description' _namespace = YOUTUBE_NAMESPACE class Private(atom.AtomBase): """The YouTube Private element.""" _tag = 'private' _namespace = YOUTUBE_NAMESPACE class NoEmbed(atom.AtomBase): """The YouTube VideoShare element. Whether a video can be embedded or not.""" _tag = 'noembed' _namespace = YOUTUBE_NAMESPACE class Comments(atom.AtomBase): """The GData Comments element""" _tag = 'comments' _namespace = gdata.GDATA_NAMESPACE _children = atom.AtomBase._children.copy() _attributes = atom.AtomBase._attributes.copy() _children['{%s}feedLink' % gdata.GDATA_NAMESPACE] = ('feed_link', [gdata.FeedLink]) def __init__(self, feed_link=None, extension_elements=None, extension_attributes=None, text=None): self.feed_link = feed_link atom.AtomBase.__init__(self, extension_elements=extension_elements, extension_attributes=extension_attributes, text=text) class Rating(atom.AtomBase): """The GData Rating element""" _tag = 'rating' _namespace = gdata.GDATA_NAMESPACE _attributes = atom.AtomBase._attributes.copy() _attributes['min'] = 'min' _attributes['max'] = 'max' _attributes['numRaters'] = 'num_raters' _attributes['average'] = 'average' def __init__(self, min=None, max=None, num_raters=None, average=None, extension_elements=None, extension_attributes=None, text=None): self.min = min self.max = max self.num_raters = num_raters self.average = average atom.AtomBase.__init__(self, extension_elements=extension_elements, extension_attributes=extension_attributes, text=text) class YouTubePlaylistVideoEntry(gdata.GDataEntry): """Represents a YouTubeVideoEntry on a YouTubePlaylist.""" _tag = gdata.GDataEntry._tag _namespace = gdata.GDataEntry._namespace _children = gdata.GDataEntry._children.copy() _attributes = gdata.GDataEntry._attributes.copy() _children['{%s}feedLink' % gdata.GDATA_NAMESPACE] = ('feed_link', [gdata.FeedLink]) _children['{%s}description' % YOUTUBE_NAMESPACE] = ('description', Description) _children['{%s}rating' % gdata.GDATA_NAMESPACE] = ('rating', Rating) _children['{%s}comments' % gdata.GDATA_NAMESPACE] = ('comments', Comments) _children['{%s}statistics' % YOUTUBE_NAMESPACE] = ('statistics', Statistics) _children['{%s}location' % YOUTUBE_NAMESPACE] = ('location', Location) _children['{%s}position' % YOUTUBE_NAMESPACE] = ('position', Position) _children['{%s}group' % gdata.media.MEDIA_NAMESPACE] = ('media', Media.Group) def __init__(self, author=None, category=None, content=None, atom_id=None, link=None, published=None, title=None, updated=None, feed_link=None, description=None, rating=None, comments=None, statistics=None, location=None, position=None, media=None, extension_elements=None, extension_attributes=None): self.feed_link = feed_link self.description = description self.rating = rating self.comments = 
comments self.statistics = statistics self.location = location self.position = position self.media = media gdata.GDataEntry.__init__(self, author=author, category=category, content=content, atom_id=atom_id, link=link, published=published, title=title, updated=updated, extension_elements=extension_elements, extension_attributes=extension_attributes) class YouTubeVideoCommentEntry(gdata.GDataEntry): """Represents a comment on YouTube.""" _tag = gdata.GDataEntry._tag _namespace = gdata.GDataEntry._namespace _children = gdata.GDataEntry._children.copy() _attributes = gdata.GDataEntry._attributes.copy() class YouTubeSubscriptionEntry(gdata.GDataEntry): """Represents a subscription entry on YouTube.""" _tag = gdata.GDataEntry._tag _namespace = gdata.GDataEntry._namespace _children = gdata.GDataEntry._children.copy() _attributes = gdata.GDataEntry._attributes.copy() _children['{%s}username' % YOUTUBE_NAMESPACE] = ('username', Username) _children['{%s}queryString' % YOUTUBE_NAMESPACE] = ( 'query_string', QueryString) _children['{%s}feedLink' % gdata.GDATA_NAMESPACE] = ('feed_link', [gdata.FeedLink]) def __init__(self, author=None, category=None, content=None, atom_id=None, link=None, published=None, title=None, updated=None, username=None, query_string=None, feed_link=None, extension_elements=None, extension_attributes=None): gdata.GDataEntry.__init__(self, author=author, category=category, content=content, atom_id=atom_id, link=link, published=published, title=title, updated=updated) self.username = username self.query_string = query_string self.feed_link = feed_link def GetSubscriptionType(self): """Retrieve the type of this subscription. Returns: A string that is either 'channel, 'query' or 'favorites' """ for category in self.category: if category.scheme == YOUTUBE_SUBSCRIPTION_TYPE_SCHEME: return category.term class YouTubeVideoResponseEntry(gdata.GDataEntry): """Represents a video response. 
""" _tag = gdata.GDataEntry._tag _namespace = gdata.GDataEntry._namespace _children = gdata.GDataEntry._children.copy() _attributes = gdata.GDataEntry._attributes.copy() _children['{%s}rating' % gdata.GDATA_NAMESPACE] = ('rating', Rating) _children['{%s}noembed' % YOUTUBE_NAMESPACE] = ('noembed', NoEmbed) _children['{%s}statistics' % YOUTUBE_NAMESPACE] = ('statistics', Statistics) _children['{%s}racy' % YOUTUBE_NAMESPACE] = ('racy', Racy) _children['{%s}group' % gdata.media.MEDIA_NAMESPACE] = ('media', Media.Group) def __init__(self, author=None, category=None, content=None, atom_id=None, link=None, published=None, title=None, updated=None, rating=None, noembed=None, statistics=None, racy=None, media=None, extension_elements=None, extension_attributes=None): gdata.GDataEntry.__init__(self, author=author, category=category, content=content, atom_id=atom_id, link=link, published=published, title=title, updated=updated) self.rating = rating self.noembed = noembed self.statistics = statistics self.racy = racy self.media = media or Media.Group() class YouTubeContactEntry(gdata.GDataEntry): """Represents a contact entry.""" _tag = gdata.GDataEntry._tag _namespace = gdata.GDataEntry._namespace _children = gdata.GDataEntry._children.copy() _attributes = gdata.GDataEntry._attributes.copy() _children['{%s}username' % YOUTUBE_NAMESPACE] = ('username', Username) _children['{%s}status' % YOUTUBE_NAMESPACE] = ('status', Status) def __init__(self, author=None, category=None, content=None, atom_id=None, link=None, published=None, title=None, updated=None, username=None, status=None, extension_elements=None, extension_attributes=None, text=None): gdata.GDataEntry.__init__(self, author=author, category=category, content=content, atom_id=atom_id, link=link, published=published, title=title, updated=updated) self.username = username self.status = status class YouTubeVideoEntry(gdata.GDataEntry): """Represents a video on YouTube.""" _tag = gdata.GDataEntry._tag _namespace = gdata.GDataEntry._namespace _children = gdata.GDataEntry._children.copy() _attributes = gdata.GDataEntry._attributes.copy() _children['{%s}rating' % gdata.GDATA_NAMESPACE] = ('rating', Rating) _children['{%s}comments' % gdata.GDATA_NAMESPACE] = ('comments', Comments) _children['{%s}noembed' % YOUTUBE_NAMESPACE] = ('noembed', NoEmbed) _children['{%s}statistics' % YOUTUBE_NAMESPACE] = ('statistics', Statistics) _children['{%s}recorded' % YOUTUBE_NAMESPACE] = ('recorded', Recorded) _children['{%s}racy' % YOUTUBE_NAMESPACE] = ('racy', Racy) _children['{%s}group' % gdata.media.MEDIA_NAMESPACE] = ('media', Media.Group) _children['{%s}where' % gdata.geo.GEORSS_NAMESPACE] = ('geo', Geo.Where) def __init__(self, author=None, category=None, content=None, atom_id=None, link=None, published=None, title=None, updated=None, rating=None, noembed=None, statistics=None, racy=None, media=None, geo=None, recorded=None, comments=None, extension_elements=None, extension_attributes=None): self.rating = rating self.noembed = noembed self.statistics = statistics self.racy = racy self.comments = comments self.media = media or Media.Group() self.geo = geo self.recorded = recorded gdata.GDataEntry.__init__(self, author=author, category=category, content=content, atom_id=atom_id, link=link, published=published, title=title, updated=updated, extension_elements=extension_elements, extension_attributes=extension_attributes) def GetSwfUrl(self): """Return the URL for the embeddable Video Returns: URL of the embeddable video """ if self.media.content: for content in 
self.media.content: if content.extension_attributes[YOUTUBE_FORMAT] == '5': return content.url else: return None def AddDeveloperTags(self, developer_tags): """Add a developer tag for this entry. Developer tags can only be set during the initial upload. Arguments: developer_tags: A list of developer tags as strings. Returns: A list of all developer tags for this video entry. """ for tag_text in developer_tags: self.media.category.append(gdata.media.Category( text=tag_text, label=tag_text, scheme=YOUTUBE_DEVELOPER_TAG_SCHEME)) return self.GetDeveloperTags() def GetDeveloperTags(self): """Retrieve developer tags for this video entry.""" developer_tags = [] for category in self.media.category: if category.scheme == YOUTUBE_DEVELOPER_TAG_SCHEME: developer_tags.append(category) if len(developer_tags) > 0: return developer_tags def GetYouTubeCategoryAsString(self): """Convenience method to return the YouTube category as string. YouTubeVideoEntries can contain multiple Category objects with differing schemes. This method returns only the category with the correct scheme, ignoring developer tags. """ for category in self.media.category: if category.scheme != YOUTUBE_DEVELOPER_TAG_SCHEME: return category.text class YouTubeUserEntry(gdata.GDataEntry): """Represents a user on YouTube.""" _tag = gdata.GDataEntry._tag _namespace = gdata.GDataEntry._namespace _children = gdata.GDataEntry._children.copy() _attributes = gdata.GDataEntry._attributes.copy() _children['{%s}username' % YOUTUBE_NAMESPACE] = ('username', Username) _children['{%s}firstName' % YOUTUBE_NAMESPACE] = ('first_name', FirstName) _children['{%s}lastName' % YOUTUBE_NAMESPACE] = ('last_name', LastName) _children['{%s}age' % YOUTUBE_NAMESPACE] = ('age', Age) _children['{%s}books' % YOUTUBE_NAMESPACE] = ('books', Books) _children['{%s}gender' % YOUTUBE_NAMESPACE] = ('gender', Gender) _children['{%s}company' % YOUTUBE_NAMESPACE] = ('company', Company) _children['{%s}description' % YOUTUBE_NAMESPACE] = ('description', Description) _children['{%s}hobbies' % YOUTUBE_NAMESPACE] = ('hobbies', Hobbies) _children['{%s}hometown' % YOUTUBE_NAMESPACE] = ('hometown', Hometown) _children['{%s}location' % YOUTUBE_NAMESPACE] = ('location', Location) _children['{%s}movies' % YOUTUBE_NAMESPACE] = ('movies', Movies) _children['{%s}music' % YOUTUBE_NAMESPACE] = ('music', Music) _children['{%s}occupation' % YOUTUBE_NAMESPACE] = ('occupation', Occupation) _children['{%s}school' % YOUTUBE_NAMESPACE] = ('school', School) _children['{%s}relationship' % YOUTUBE_NAMESPACE] = ('relationship', Relationship) _children['{%s}statistics' % YOUTUBE_NAMESPACE] = ('statistics', Statistics) _children['{%s}feedLink' % gdata.GDATA_NAMESPACE] = ('feed_link', [gdata.FeedLink]) _children['{%s}thumbnail' % gdata.media.MEDIA_NAMESPACE] = ('thumbnail', Media.Thumbnail) def __init__(self, author=None, category=None, content=None, atom_id=None, link=None, published=None, title=None, updated=None, username=None, first_name=None, last_name=None, age=None, books=None, gender=None, company=None, description=None, hobbies=None, hometown=None, location=None, movies=None, music=None, occupation=None, school=None, relationship=None, statistics=None, feed_link=None, extension_elements=None, extension_attributes=None, text=None): self.username = username self.first_name = first_name self.last_name = last_name self.age = age self.books = books self.gender = gender self.company = company self.description = description self.hobbies = hobbies self.hometown = hometown self.location = location 
self.movies = movies self.music = music self.occupation = occupation self.school = school self.relationship = relationship self.statistics = statistics self.feed_link = feed_link gdata.GDataEntry.__init__(self, author=author, category=category, content=content, atom_id=atom_id, link=link, published=published, title=title, updated=updated, extension_elements=extension_elements, extension_attributes=extension_attributes, text=text) class YouTubeVideoFeed(gdata.GDataFeed, gdata.LinkFinder): """Represents a video feed on YouTube.""" _tag = gdata.GDataFeed._tag _namespace = gdata.GDataFeed._namespace _children = gdata.GDataFeed._children.copy() _attributes = gdata.GDataFeed._attributes.copy() _children['{%s}entry' % atom.ATOM_NAMESPACE] = ('entry', [YouTubeVideoEntry]) class YouTubePlaylistEntry(gdata.GDataEntry): """Represents a playlist in YouTube.""" _tag = gdata.GDataEntry._tag _namespace = gdata.GDataEntry._namespace _children = gdata.GDataEntry._children.copy() _attributes = gdata.GDataEntry._attributes.copy() _children['{%s}description' % YOUTUBE_NAMESPACE] = ('description', Description) _children['{%s}private' % YOUTUBE_NAMESPACE] = ('private', Private) _children['{%s}feedLink' % gdata.GDATA_NAMESPACE] = ('feed_link', [gdata.FeedLink]) def __init__(self, author=None, category=None, content=None, atom_id=None, link=None, published=None, title=None, updated=None, private=None, feed_link=None, description=None, extension_elements=None, extension_attributes=None): self.description = description self.private = private self.feed_link = feed_link gdata.GDataEntry.__init__(self, author=author, category=category, content=content, atom_id=atom_id, link=link, published=published, title=title, updated=updated, extension_elements=extension_elements, extension_attributes=extension_attributes) class YouTubePlaylistFeed(gdata.GDataFeed, gdata.LinkFinder): """Represents a feed of a user's playlists """ _tag = gdata.GDataFeed._tag _namespace = gdata.GDataFeed._namespace _children = gdata.GDataFeed._children.copy() _attributes = gdata.GDataFeed._attributes.copy() _children['{%s}entry' % atom.ATOM_NAMESPACE] = ('entry', [YouTubePlaylistEntry]) class YouTubePlaylistVideoFeed(gdata.GDataFeed, gdata.LinkFinder): """Represents a feed of video entry on a playlist.""" _tag = gdata.GDataFeed._tag _namespace = gdata.GDataFeed._namespace _children = gdata.GDataFeed._children.copy() _attributes = gdata.GDataFeed._attributes.copy() _children['{%s}entry' % atom.ATOM_NAMESPACE] = ('entry', [YouTubePlaylistVideoEntry]) class YouTubeContactFeed(gdata.GDataFeed, gdata.LinkFinder): """Represents a feed of a users contacts.""" _tag = gdata.GDataFeed._tag _namespace = gdata.GDataFeed._namespace _children = gdata.GDataFeed._children.copy() _attributes = gdata.GDataFeed._attributes.copy() _children['{%s}entry' % atom.ATOM_NAMESPACE] = ('entry', [YouTubeContactEntry]) class YouTubeSubscriptionFeed(gdata.GDataFeed, gdata.LinkFinder): """Represents a feed of a users subscriptions.""" _tag = gdata.GDataFeed._tag _namespace = gdata.GDataFeed._namespace _children = gdata.GDataFeed._children.copy() _attributes = gdata.GDataFeed._attributes.copy() _children['{%s}entry' % atom.ATOM_NAMESPACE] = ('entry', [YouTubeSubscriptionEntry]) class YouTubeVideoCommentFeed(gdata.GDataFeed, gdata.LinkFinder): """Represents a feed of comments for a video.""" _tag = gdata.GDataFeed._tag _namespace = gdata.GDataFeed._namespace _children = gdata.GDataFeed._children.copy() _attributes = gdata.GDataFeed._attributes.copy() _children['{%s}entry' % 
atom.ATOM_NAMESPACE] = ('entry', [YouTubeVideoCommentEntry]) class YouTubeVideoResponseFeed(gdata.GDataFeed, gdata.LinkFinder): """Represents a feed of video responses.""" _tag = gdata.GDataFeed._tag _namespace = gdata.GDataFeed._namespace _children = gdata.GDataFeed._children.copy() _attributes = gdata.GDataFeed._attributes.copy() _children['{%s}entry' % atom.ATOM_NAMESPACE] = ('entry', [YouTubeVideoResponseEntry]) def YouTubeVideoFeedFromString(xml_string): return atom.CreateClassFromXMLString(YouTubeVideoFeed, xml_string) def YouTubeVideoEntryFromString(xml_string): return atom.CreateClassFromXMLString(YouTubeVideoEntry, xml_string) def YouTubeContactFeedFromString(xml_string): return atom.CreateClassFromXMLString(YouTubeContactFeed, xml_string) def YouTubeContactEntryFromString(xml_string): return atom.CreateClassFromXMLString(YouTubeContactEntry, xml_string) def YouTubeVideoCommentFeedFromString(xml_string): return atom.CreateClassFromXMLString(YouTubeVideoCommentFeed, xml_string) def YouTubeVideoCommentEntryFromString(xml_string): return atom.CreateClassFromXMLString(YouTubeVideoCommentEntry, xml_string) def YouTubeUserFeedFromString(xml_string): return atom.CreateClassFromXMLString(YouTubeVideoFeed, xml_string) def YouTubeUserEntryFromString(xml_string): return atom.CreateClassFromXMLString(YouTubeUserEntry, xml_string) def YouTubePlaylistFeedFromString(xml_string): return atom.CreateClassFromXMLString(YouTubePlaylistFeed, xml_string) def YouTubePlaylistVideoFeedFromString(xml_string): return atom.CreateClassFromXMLString(YouTubePlaylistVideoFeed, xml_string) def YouTubePlaylistEntryFromString(xml_string): return atom.CreateClassFromXMLString(YouTubePlaylistEntry, xml_string) def YouTubePlaylistVideoEntryFromString(xml_string): return atom.CreateClassFromXMLString(YouTubePlaylistVideoEntry, xml_string) def YouTubeSubscriptionFeedFromString(xml_string): return atom.CreateClassFromXMLString(YouTubeSubscriptionFeed, xml_string) def YouTubeSubscriptionEntryFromString(xml_string): return atom.CreateClassFromXMLString(YouTubeSubscriptionEntry, xml_string) def YouTubeVideoResponseFeedFromString(xml_string): return atom.CreateClassFromXMLString(YouTubeVideoResponseFeed, xml_string) def YouTubeVideoResponseEntryFromString(xml_string): return atom.CreateClassFromXMLString(YouTubeVideoResponseEntry, xml_string)
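The FromString helpers above turn raw Atom XML into the typed entry and feed classes, with each registered _children mapping surfacing as a Python attribute. A minimal usage sketch, assuming this module is importable as gdata.youtube; the XML snippet is hand-written for illustration, not a real API response:

import gdata.youtube

ENTRY_XML = """<?xml version='1.0' encoding='UTF-8'?>
<entry xmlns='http://www.w3.org/2005/Atom'
       xmlns:yt='http://gdata.youtube.com/schemas/2007'>
  <title type='text'>Test video</title>
  <yt:statistics viewCount='42' favoriteCount='7'/>
  <yt:noembed/>
</entry>"""

entry = gdata.youtube.YouTubeVideoEntryFromString(ENTRY_XML)
print(entry.title.text)              # 'Test video'
print(entry.statistics.view_count)   # '42' (XML attributes stay strings)
print(entry.noembed is not None)     # True -> embedding is disabled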
Plurk/Solace
refs/heads/master
solace/_openid_auth.py
2
# -*- coding: utf-8 -*- """ solace._openid_auth ~~~~~~~~~~~~~~~~~~~ Implements a simple OpenID driven store. :copyright: (c) 2009 by Plurk Inc., see AUTHORS for more details. :license: BSD, see LICENSE for more details. """ from __future__ import with_statement from time import time from hashlib import sha1 from contextlib import closing from openid.association import Association from openid.store.interface import OpenIDStore from openid.consumer.consumer import Consumer, SUCCESS, CANCEL from openid.consumer import discover from openid.store import nonce from sqlalchemy.orm import scoped_session from sqlalchemy.exceptions import SQLError from werkzeug import redirect from werkzeug.exceptions import NotFound from solace.i18n import _, lazy_gettext from solace.application import url_for from solace.templating import render_template from solace.database import get_engine, session from solace.schema import openid_association, openid_user_nonces from solace.models import User from solace.forms import OpenIDLoginForm, OpenIDRegistrationForm from solace.auth import AuthSystemBase, LoginUnsucessful class SolaceOpenIDStore(OpenIDStore): """Implements the open store for solace using the database.""" def connection(self): return closing(get_engine().connect()) def storeAssociation(self, server_url, association): with self.connection() as con: con.execute(openid_association.insert(), server_url=server_url, handle=association.handle, secret=association.secret.encode('base64'), issued=association.issued, lifetime=association.lifetime, assoc_type=association.assoc_type ) def getAssociation(self, server_url, handle=None): filter = openid_association.c.server_url == server_url if handle is not None: filter &= openid_association.c.handle == handle with self.connection() as con: result = con.execute(openid_association.select(filter)) result_assoc = None for row in result.fetchall(): assoc = Association(row.handle, row.secret.decode('base64'), row.issued, row.lifetime, row.assoc_type) if assoc.getExpiresIn() <= 0: self.removeAssociation(server_url, assoc.handle) else: result_assoc = assoc return result_assoc def removeAssociation(self, server_url, handle): with self.connection() as con: return con.execute(openid_association.delete( (openid_association.c.server_url == server_url) & (openid_association.c.handle == handle) )).rowcount > 0 def useNonce(self, server_url, timestamp, salt): if abs(timestamp - time()) > nonce.SKEW: return False with self.connection() as con: row = con.execute(openid_user_nonces.select( (openid_user_nonces.c.server_url == server_url) & (openid_user_nonces.c.timestamp == timestamp) & (openid_user_nonces.c.salt == salt) )).fetchone() if row is not None: return False con.execute(openid_user_nonces.insert(), server_url=server_url, timestamp=timestamp, salt=salt ) return True def cleanupNonces(self): with self.connection() as con: return con.execute(openid_user_nonces.delete( openid_user_nonces.c.timestamp <= int(time() - nonce.SKEW) )).rowcount def cleanupAssociations(self): with self.connection() as con: return con.execute(openid_association.delete( openid_association.c.issued + openid_association.c.lifetime < int(time()) )).rowcount def getAuthKey(self): return sha1(settings.SECRET_KEY).hexdigest()[:self.AUTH_KEY_LEN] def isDump(self): return False class OpenIDAuth(AuthSystemBase): """Authenticate against openid. Requires the Python OpenID library to be installed. (python-openid). 
""" password_managed_external = True passwordless = True show_register_link = False def register(self, request): # the register link is a complete noop. The actual user registration # on first login happens in the login handling. raise NotFound() def first_login(self, request): """Until the openid information is removed from the session, this view will be use to create the user account based on the openid url. """ identity_url = request.session.get('openid') if identity_url is None: return redirect(url_for('core.login')) if request.is_logged_in: del request.session['openid'] return redirect(request.next_url or url_for('kb.overview')) form = OpenIDRegistrationForm() if request.method == 'POST' and form.validate(): user = User(form['username'], form['email']) user.openid_logins.add(identity_url) self.after_register(request, user) session.commit() del request.session['openid'] self.set_user_checked(request, user) return self.redirect_back(request) return render_template('core/register_openid.html', form=form.as_widget(), identity_url=identity_url) def redirect_back(self, request): return redirect(request.get_redirect_target([ url_for('core.login'), url_for('core.register') ]) or url_for('kb.overview')) def before_login(self, request): if request.args.get('openid_complete') == 'yes': return self.complete_login(request) elif request.args.get('firstlogin') == 'yes': return self.first_login(request) def complete_login(self, request): consumer = Consumer(request.session, SolaceOpenIDStore()) openid_response = consumer.complete(request.args.to_dict(), url_for('core.login', _external=True)) if openid_response.status == SUCCESS: return self.create_or_login(request, openid_response.identity_url) elif openid_response.status == CANCEL: raise LoginUnsucessful(_(u'The request was cancelled')) else: raise LoginUnsucessful(_(u'OpenID authentication error')) def create_or_login(self, request, identity_url): user = User.query.by_openid_login(identity_url).first() # we don't have a user for this openid yet. What we want to do # now is to remember the openid in the session until we have the # user. We're using the session because it is signed. if user is None: request.session['openid'] = identity_url return redirect(url_for('core.login', firstlogin='yes', next=request.next_url)) self.set_user_checked(request, user) return self.redirect_back(request) def set_user_checked(self, request, user): if not user.is_active: raise LoginUnsucessful(_(u'The user is not yet activated.')) if user.is_banned: raise LoginUnsucessful(_(u'The user got banned from the system.')) self.set_user(request, user) def perform_login(self, request, identity_url): try: consumer = Consumer(request.session, SolaceOpenIDStore()) auth_request = consumer.begin(identity_url) except discover.DiscoveryFailure: raise LoginUnsucessful(_(u'The OpenID was invalid')) trust_root = request.host_url redirect_to = url_for('core.login', openid_complete='yes', next=request.next_url, _external=True) return redirect(auth_request.redirectURL(trust_root, redirect_to)) def get_login_form(self): return OpenIDLoginForm() def render_login_template(self, request, form): return render_template('core/login_openid.html', form=form.as_widget())
farodin91/servo
refs/heads/master
tests/wpt/css-tests/tools/wptserve/tests/functional/docroot/test_tuple_3.py
467
def main(request, response):
    return (202, "Giraffe"), [("Content-Type", "text/html"), ("X-Test", "PASS")], "PASS"
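wptserve handlers can return a ((status, reason), headers, body) triple, and the tuple above overrides both the numeric status and the reason phrase. A hedged check of what this handler serves, assuming a local wptserve instance exposing this docroot (the URL is hypothetical):

import requests

# Fetch the handler above; wptserve executes test_tuple_3.py server-side.
resp = requests.get('http://localhost:8000/test_tuple_3.py')
assert resp.status_code == 202           # status from the (202, "Giraffe") tuple
assert resp.reason == 'Giraffe'          # custom reason phrase from the same tuple
assert resp.headers['X-Test'] == 'PASS'  # header list is applied verbatim
assert resp.text == 'PASS'               # body is the third element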
woobe/h2o
refs/heads/master
py/jprobe.py
1
#!/usr/bin/python import random, jenkinsapi, getpass, re, os, argparse, shutil, json, logging, sys import string from jenkinsapi.jenkins import Jenkins # only used when we wanted to see what objects were available (below) from see import see DO_LAST_GOOD = False # using the env variables to force jenkinsapi to use proxy..but after to clear to avoid # problems in other python stuff that uses requests! def clear_env(): # need to set environment variables for proxy server if going to sm box # or clear them if not! if os.environ.get('HTTPS_PROXY'): print "removing HTTPS_PROXY os env variable so requests won't use it" del os.environ['HTTPS_PROXY'] if os.environ.get('HTTP_PROXY'): print "removing HTTP_PROXY os env variable so requests won't use it" del os.environ['HTTP_PROXY'] import sys def my_hook(type, value, traceback): print 'hooked the exception so we can clear env variables' clear_env() print 'Type:', type print 'Value:', value print 'Traceback:', traceback raise Exception sys.excepthook = my_hook parse = argparse.ArgumentParser() group = parse.add_mutually_exclusive_group() group.add_argument('-e', help="job number from a list of ec2 known jobs", type=int, action='store', default=None) group.add_argument('-x', help="job number from a list of 164 known jobs", type=int, action='store', default=None) group.add_argument('-s', help="job number from a list of sm known jobs", type=int, action='store', default=None) group.add_argument('-j', '--jobname', help="jobname. Correct url is found", action='store', default=None) parse.add_argument('-l', '--logging', help="turn on logging.DEBUG msgs to see allUrls used", action='store_true') parse.add_argument('-v', '--verbose', help="dump the last N stdout from the failed jobs", action='store_true') group.add_argument('-c', help="do a hardwired special job copy between jenkins", type=int, action='store', default=None) args = parse.parse_args() # can refer to this by zero-based index with -n 0 or -n 1 etc # or by job name with -j h2o_master_test allowedJobsX = [ 'h2o_master_test', 'h2o_release_tests', 'h2o_release_tests2', 'h2o_release_tests_164', 'h2o_release_tests_c10_only', 'h2o_perf_test', 'h2o_release_Runit', ] allowedJobsE = [ 'h2o.tests.single.jvm', 'h2o.tests.single.jvm.fvec', 'h2o.multi.vm.temporary', 'h2o.tests.ec2.multi.jvm', 'h2o.tests.ec2.multi.jvm.fvec', 'h2o.tests.ec2.hosts', ] allowedJobsS = [ 'sm_testdir_single_jvm', 'sm_testdir_single_jvm_fvec', 'sm_testdir_multi_jvm', 'sm_testdir_hosts', 'sm_test_NN2_mnist', ] allUrls = { 'ec2': 'http://test.0xdata.com', '164': 'http://192.168.1.164:8080', 'sm': 'http://10.71.0.163:8080', } all164Jobs = ['do all', 'h2o_master_test', 'h2o_master_test2', 'h2o_perf_test', 'h2o_private_json_vers_Runit', 'h2o_release_Runit', 'h2o_release_tests', 'h2o_release_tests2', 'h2o_release_tests_164', 'h2o_release_tests_c10_only', 'h2o_release_tests_cdh3', 'h2o_release_tests_cdh4', 'h2o_release_tests_cdh4_yarn', 'h2o_release_tests_cdh5', 'h2o_release_tests_cdh5_yarn', 'h2o_release_tests_hdp1.3', 'h2o_release_tests_hdp2.0.6', 'h2o_release_tests_mapr', 'selenium12'] allEc2Jobs = ['generic.h2o.build.branch', 'h2o.branch.api-dev', 'h2o.branch.cliffc-drf', 'h2o.branch.hilbert', 'h2o.branch.jobs', 'h2o.branch.jobs1', 'h2o.branch.json_versioning', 'h2o.branch.rel-ito', 'h2o.build', 'h2o.build.api-dev', 'h2o.build.gauss', 'h2o.build.godel', 'h2o.build.h2oscala', 'h2o.build.hilbert', 'h2o.build.jobs', 'h2o.build.master', 'h2o.build.rel-ito', 'h2o.build.rel-ivory', 'h2o.build.rel-iwasawa', 'h2o.build.rel-jacobi', 
'h2o.build.rel-jordan', 'h2o.build.rest_api_versioning', 'h2o.build.ux-client', 'h2o.build.va_defaults_renamed', 'h2o.clone', 'h2o.datasets', 'h2o.download.latest', 'h2o.ec2.start', 'h2o.ec2.stop', 'h2o.findbugs', 'h2o.multi.vm.temporary', 'h2o.multi.vm.temporary.cliffc-no-limits', 'h2o.nightly', 'h2o.nightly.1', 'h2o.nightly.cliffc-lock', 'h2o.nightly.ec2', 'h2o.nightly.ec2.cliffc-no-limits', 'h2o.nightly.ec2.erdos', 'h2o.nightly.ec2.hilbert', 'h2o.nightly.ec2.rel-ito', 'h2o.nightly.ec2.rel-jacobi', 'h2o.nightly.ec2.rel-jordan', 'h2o.nightly.fourier', 'h2o.nightly.godel', 'h2o.nightly.multi.vm', 'h2o.nightly.rel-ivory', 'h2o.nightly.rel-iwasawa', 'h2o.nightly.rel-jacobi', 'h2o.nightly.rel-jordan', 'h2o.nightly.va_defaults_renamed', 'h2o.post.push', 'h2o.private.nightly', 'h2o.tests.ec2', 'h2o.tests.ec2.hosts', 'h2o.tests.ec2.multi.jvm', 'h2o.tests.ec2.multi.jvm.fvec', 'h2o.tests.golden', 'h2o.tests.junit', 'h2o.tests.multi.jvm', 'h2o.tests.multi.jvm.fvec', 'h2o.tests.single.jvm', 'h2o.tests.single.jvm.fvec', 'h2o.tests.test'] allSmJobs = [ 'sm_testdir_single_jvm', 'sm_testdir_single_jvm_fvec', 'sm_testdir_multi_jvm', 'sm_testdir_hosts', 'sm_test_NN2_mnist', ] # jenkinsapi: # This library wraps up that interface as more # conventional python objects in order to make many # Jenkins oriented tasks easier to automate. # http://pythonhosted.org//jenkinsapi # https://pypi.python.org/pypi/jenkinsapi # Project source code: github: https://github.com/salimfadhley/jenkinsapi # Project documentation: https://jenkinsapi.readthedocs.org/en/latest/ #************************************************ if args.logging: logging.basicConfig(level=logging.DEBUG) if args.jobname and (args.e or args.x or args.s): raise Exception("Don't use both -j and -x or -e or -s args") # default ec2 0 jobname = None if args.e is not None: if args.e<0 or args.e>(len(allowedJobsE)-1): raise Exception("ec2 job number %s is outside allowed range: 0-%s" % \ (args.e, len(allowedJobsE)-1)) jobname = allowedJobsE[args.e] if args.x is not None: if args.x<0 or args.x>(len(allowedJobsX)-1): raise Exception("0xdata job number %s is outside allowed range: 0-%s" % \ (args.x, len(allowedJobsX)-1)) jobname = allowedJobsX[args.x] if args.s is not None: if args.s<0 or args.s>(len(allowedJobsS)-1): raise Exception("sm job number %s is outside allowed range: 0-%s" % \ (args.s, len(allowedJobsS)-1)) jobname = allowedJobsS[args.s] if args.jobname: if args.jobname not in allowedJobs: raise Exception("%s not in list of legal jobs" % args.jobname) jobname = args.jobname if not (args.jobname or args.x or args.e or args.s): # prompt the user subtract = 0 prefix = "-e" eDone = False xDone = False while not jobname: # allAllowedJobs = allowedJobsE + allowedJobsX + allowedJobsS allAllowedJobs = allowedJobsE + allowedJobsX for j, job in enumerate(allAllowedJobs): # first boundary if not eDone and j==(subtract + len(allowedJobsE)): subtract += len(allowedJobsE) prefix = "-x" eDone = True # second boundary if not xDone and j==(subtract + len(allowedJobsX)): subtract += len(allowedJobsX) prefix = "-s" xDone = True print prefix, j-subtract, " [%s]: %s" % (j, job) userInput = int(raw_input("Enter number (0 to %s): " % (len(allAllowedJobs)-1) )) if userInput >=0 and userInput <= len(allAllowedJobs): jobname = allAllowedJobs[userInput] # defaults if jobname in allEc2Jobs: machine = 'ec2' elif jobname in all164Jobs: machine = '164' elif jobname in allSmJobs: machine = 'sm' print "Setting up proxy server for sm" os.environ['HTTP_PROXY'] = 'http://172.16.0.3:8888' 
os.environ['HTTPS_PROXY'] = 'https://172.16.0.3:8888' else: raise Exception("%s not in lists of known jobs" % jobname) if machine not in allUrls: raise Exception("%s not in allUrls dict" % machine) jenkins_url = allUrls[machine] print "machine:", machine #************************************************ def clean_sandbox(LOG_DIR="sandbox"): if os.path.exists(LOG_DIR): shutil.rmtree(LOG_DIR) # it should have been removed, but on error it might still be there if not os.path.exists(LOG_DIR): os.mkdir(LOG_DIR) return LOG_DIR #************************************************ # get the username/pswd from files in the user's .ec2 dir (don't want cleartext here) # prompt if doesn't exist def login(machine='164'): def getit(k): if not os.path.isfile(k): print "you probably should create this file to avoid typing %s" % k return None else: with open(k) as f: lines = f.read().splitlines() return lines[0] home = os.path.expanduser("~") username = getit(home + '/.ec2/jenkins_user_' + machine) pswd = getit(home + '/.ec2/jenkins_pswd_' + machine) if not username: username = raw_input("Username [%s]: " % getpass.getuser()) if not pswd: pswd = getpass.getpass() return username, pswd #************************************************8 username, password = login(machine) LOG_DIR = clean_sandbox("sandbox") def dump_json(j): return json.dumps(j, sort_keys=True, indent=2) #************************************************8 J = Jenkins(jenkins_url, username, password) print "\nCurrent jobs available at %s" % jenkins_url print J.keys() print "\nChecking this job:", J[jobname] job = J[jobname] print "\nGetting %s job config" % jobname print job.get_config print "\nlast good build:" lgb = job.get_last_good_build() print "\nlast good build revision:" print lgb.get_revision() from jenkinsapi.api import get_latest_complete_build from jenkinsapi.api import get_latest_test_results # print "************************HELLO****************************" # print get_latest_complete_build(jenkins_url, jobname, username=username, password=password) # print "************************HELLO****************************" # get_latest_test_results(jenkinsurl, jobname, username=None, password=None)[source] # search_artifact_by_regexp.py if 1==0: expr = "commands.log" print("testing search_artifact_by_regexp with expression %s") % expr from jenkinsapi.api import search_artifact_by_regexp artifact_regexp = re.compile(expr) # A file name I want. 
result = search_artifact_by_regexp(jenkins_url, jobname, artifact_regexp) print("tested search_artifact_by_regexp", (repr(result))) # print "last_stable_buildnumber", job.get_last_stable_buildnumber() print "last_good_buildnumber", job.get_last_good_buildnumber() # print "last_failed_buildnumber", job.get_last_failed_buildnumber() print "last_buildnumber", job.get_last_buildnumber() if DO_LAST_GOOD: print "Using last_good_buildnumber %s for result set" % job.get_last_good_buildnumber() build = job.get_build(job.get_last_good_buildnumber()) else: print "Using last_buildnumber %s for result set" % job.get_last_buildnumber() build = job.get_build(job.get_last_buildnumber()) af = build.get_artifacts() dict_af = build.get_artifact_dict() # for looking at object in json # import h2o_util # s = h2o_util.json_repr(dict_af, curr_depth=0, max_depth=12) # print dump_json(s) buildstatus = build.get_status() print "build get_status", buildstatus buildname = build.name print "build name", buildname buildnumber = build.get_number() print "build number", buildnumber buildrevision = build.get_revision() print "build revision", buildrevision buildbranch = build.get_revision_branch() print "build revision branch", buildbranch buildduration = build.get_duration() print "build duration", buildduration buildupstream = build.get_upstream_job_name() print "build upstream job name", buildupstream buildgood = build.is_good() print "build is_good", buildgood buildtimestamp = build.get_timestamp() print "build timestamp", buildtimestamp consoleTxt = open(LOG_DIR + '/console.txt', "a") print "getting build console (how to buffer this write?)" print "probably better to figure how to save it as file" c = build.get_console() consoleTxt.write(c) consoleTxt.close() print "build has result set", build.has_resultset() print "build get result set" rs = build.get_resultset() print "build result set name", rs.name # print "build result set items", rs.items() print #**************************************** # print dump_json(item) # print "build result set keys", rs.keys() aTxt = open(LOG_DIR + '/artifacts.txt', "a") # have just a json string in the result set? # rs.items is a generator? 
#**************************************************************************** PRINTALL = False # keep count of status counts # 2014-03-19 07:26:15+00:00 # buildtimestampe is a datetime object see(buildtimestamp) t = buildtimestamp # hour minute hm = "%s_%s" % (t.hour, t.minute) # hour minute second hms = "%s_%s" % (hm, t.second) failName = "%s_%s_%s_%s%s" % ("fail", jobname, buildnumber, hm, ".txt") print "failName:", failName regressName = "%s_%s_%s_%s%s" % ("regress", jobname, buildnumber, hm, ".txt") print "regressName:", regressName fixedName = "%s_%s_%s_%s%s" % ("fixed", jobname, buildnumber, hm, ".txt") print "fixedName:", fixedName stats = {} def fprint (*args): # emulate printing each as string, then join with spaces s = ["%s" % a for a in args] line = " ".join(s) fTxt.write(line + "\n") print line def printStuff(): e1 = "\n******************************************************************************" e2 = "%s %s %s" % (i, jobname, v) fprint(e1) fprint(e2) # print "\n", k, "\n" # print "\n", v, "\n" # to see what you can get # print see(v) # print dir(v) # print vars(v) # .age .className .duration .errorDetails .errorStackTrace .failedSince # .identifier() .name .skipped .skippedMessage .status .stderr .stdout fprint (i, "v.duration", v.duration) fprint (i, "v.errorStackTrace", v.errorStackTrace) fprint (i, "v.failedSince", v.failedSince) if args.verbose: fprint (i, "v.stderr", v.stderr) # lines = v.stdout.splitlines() # keep newlines in the list elements if not v.stdout: fprint ("v.stdout is empty") else: fprint ("len(v.stdout):", len(v.stdout)) # have to fix the \n and \tat in the strings stdout = v.stdout # json string has the actual '\' and 'n' or 'tat' chars stdout = string.replace(stdout,'\\n', '\n'); stdout = string.replace(stdout,'\\tat', '\t'); # don't need double newlines stdout = string.replace(stdout,'\n\n', '\n'); lineList = stdout.splitlines() fprint ("len(lineList):", len(lineList)) num = min(20, len(lineList)) if num!=0: # print i, "Last %s lineList of stdout %s" % (num, "\n".join(lineList[-num])) fprint (i, "Last %s lineList of stdout\n" % num) fprint ("\n".join(lineList[-num:])) else: fprint ("v.stdout is empty") #****************************************************** for i, (k, v) in enumerate(rs.items()): if v.status in stats: stats[v.status] += 1 else: stats[v.status] = 1 # print rs.name e1 = "\n******************************************************************************" e2 = "%s %s %s" % (i, jobname, v) aTxt.write(e1+"\n") aTxt.write(e2+"\n") # only if not PASSED if v.status == 'FAILED': fTxt = open(LOG_DIR + "/" + failName, "a") printStuff() fTxt.close() if v.status == 'REGRESSION': fTxt = open(LOG_DIR + "/" + regressName, "a") printStuff() fTxt.close() if v.status == 'FIXED': fTxt = open(LOG_DIR + "/" + fixedName, "a") printStuff() fTxt.close() if PRINTALL: fprint (i, "k", k) fprint (i, "v", v) fprint (i, "v.errorDetails", v.errorDetails) fprint (i, "v.age", v.age) fprint (i, "v.className", v.className) fprint (i, "v.identifier()", v.identifier()) fprint (i, "v.name", v.name) fprint (i, "v.skipped", v.age) fprint (i, "v.skippedMessage", v.skippedMessage) fprint (i, "v.status", v.status) fprint (i, "v.stdout", v.stdout) #**************************************************************************** # print "dict_af", dict_af if 1==1: for a in af: # print "a.keys():", a.keys() # txt = a.get_data() e = "%s %s %s %s\n" % ("#", a.filename, a.url, "########### artifact saved ####################") # print e, aTxt.write(e+"\n") # get the h2o output from the runit 
runs # a.save_to_dir(LOG_DIR) consoleTxt.close() # print txt # a.save_to_dir('./sandbox') # print txt[0] aTxt.close() print "#***********************************************" print "Build:", buildname print buildtimestamp print "Status:", buildstatus if buildgood: print "Build is good" else: print "Build is bad" print "Build number", buildnumber # print buildrevision print buildbranch print "Duration", buildduration print "Upstream job", buildupstream print "Test summary" for s in stats: print s, stats[s] # rename the sandbox dirname = "%s_%s_%s_%s" % ("sandbox", jobname, buildnumber, hm) if os.path.exists(dirname): shutil.rmtree(dirname) os.rename(LOG_DIR, dirname) print "Results are in", dirname print "#***********************************************" clear_env() # from jenkins.py, we can copy jobs? # def jobs(self): # def get_jobs(self): # def get_jobs_info(self): # def get_job(self, jobname): # def has_job(self, jobname): # def create_job(self, jobname, config_): # Create a job # :param jobname: name of new job, str # :param config: configuration of new job, xml # :return: new Job obj # def copy_job(self, jobname, newjobname): # def build_job(self, jobname, params=None): # Invoke a build by job name # :param jobname: name of exist job, str # :param params: the job params, dict # :return: none # def delete_job(self, jobname): # def rename_job(self, jobname, newjobname): # load config calls get_config? # def load_config(self): # def get_config(self): # '''Returns the config.xml from the job''' # def get_config_xml_url(self): # def update_config(self, config): # def create(self, job_name, config): # Create a job # :param jobname: name of new job, str # :param config: configuration of new job, xml # :return: new Job obj
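The jenkinsapi method list in the closing comments is enough to sketch the cross-server job copy that the -c option alludes to. A hedged version using only the calls documented above; URLs and the job name are placeholders:

from jenkinsapi.jenkins import Jenkins

def copy_job_between_servers(src_url, dst_url, jobname, username, pswd):
    src = Jenkins(src_url, username, pswd)
    dst = Jenkins(dst_url, username, pswd)
    config_xml = src[jobname].get_config()   # raw config.xml of the source job
    if dst.has_job(jobname):
        dst[jobname].update_config(config_xml)
    else:
        dst.create_job(jobname, config_xml)

# copy_job_between_servers('http://192.168.1.164:8080', 'http://test.0xdata.com',
#                          'h2o_master_test', username, password)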
antoinecarme/pyaf
refs/heads/master
tests/artificial/transf_RelativeDifference/trend_Lag1Trend/cycle_7/ar_12/test_artificial_1024_RelativeDifference_Lag1Trend_7_12_100.py
1
import pyaf.Bench.TS_datasets as tsds
import tests.artificial.process_artificial_dataset as art


art.process_dataset(N = 1024 , FREQ = 'D', seed = 0, trendtype = "Lag1Trend", cycle_length = 7, transform = "RelativeDifference", sigma = 0.0, exog_count = 100, ar_order = 12);
jqk6/robomongo
refs/heads/master
src/third-party/mongodb/src/third_party/v8/src/macros.py
26
# Copyright 2006-2009 the V8 project authors. All rights reserved. # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following # disclaimer in the documentation and/or other materials provided # with the distribution. # * Neither the name of Google Inc. nor the names of its # contributors may be used to endorse or promote products derived # from this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # Dictionary that is passed as defines for js2c.py. # Used for defines that must be defined for all native JS files. const NONE = 0; const READ_ONLY = 1; const DONT_ENUM = 2; const DONT_DELETE = 4; # Constants used for getter and setter operations. const GETTER = 0; const SETTER = 1; # These definitions must match the index of the properties in objects.h. const kApiTagOffset = 0; const kApiPropertyListOffset = 1; const kApiSerialNumberOffset = 2; const kApiConstructorOffset = 2; const kApiPrototypeTemplateOffset = 5; const kApiParentTemplateOffset = 6; const kApiFlagOffset = 14; const NO_HINT = 0; const NUMBER_HINT = 1; const STRING_HINT = 2; const kFunctionTag = 0; const kNewObjectTag = 1; # For date.js. const HoursPerDay = 24; const MinutesPerHour = 60; const SecondsPerMinute = 60; const msPerSecond = 1000; const msPerMinute = 60000; const msPerHour = 3600000; const msPerDay = 86400000; const msPerMonth = 2592000000; # For apinatives.js const kUninitialized = -1; const kReadOnlyPrototypeBit = 3; # For FunctionTemplateInfo, matches objects.h # Note: kDayZeroInJulianDay = ToJulianDay(1970, 0, 1). const kInvalidDate = 'Invalid Date'; const kDayZeroInJulianDay = 2440588; const kMonthMask = 0x1e0; const kDayMask = 0x01f; const kYearShift = 9; const kMonthShift = 5; # Limits for parts of the date, so that we support all the dates that # ECMA 262 - 15.9.1.1 requires us to, but at the same time be sure that # the date (days since 1970) is in SMI range. const kMinYear = -1000000; const kMaxYear = 1000000; const kMinMonth = -10000000; const kMaxMonth = 10000000; # Native cache ids. const STRING_TO_REGEXP_CACHE_ID = 0; # Type query macros. # # Note: We have special support for typeof(foo) === 'bar' in the compiler. # It will *not* generate a runtime typeof call for the most important # values of 'bar'. 
macro IS_NULL(arg) = (arg === null); macro IS_NULL_OR_UNDEFINED(arg) = (arg == null); macro IS_UNDEFINED(arg) = (typeof(arg) === 'undefined'); macro IS_NUMBER(arg) = (typeof(arg) === 'number'); macro IS_STRING(arg) = (typeof(arg) === 'string'); macro IS_BOOLEAN(arg) = (typeof(arg) === 'boolean'); macro IS_OBJECT(arg) = (%_IsObject(arg)); macro IS_ARRAY(arg) = (%_IsArray(arg)); macro IS_FUNCTION(arg) = (%_IsFunction(arg)); macro IS_REGEXP(arg) = (%_IsRegExp(arg)); macro IS_SET(arg) = (%_ClassOf(arg) === 'Set'); macro IS_MAP(arg) = (%_ClassOf(arg) === 'Map'); macro IS_WEAKMAP(arg) = (%_ClassOf(arg) === 'WeakMap'); macro IS_DATE(arg) = (%_ClassOf(arg) === 'Date'); macro IS_NUMBER_WRAPPER(arg) = (%_ClassOf(arg) === 'Number'); macro IS_STRING_WRAPPER(arg) = (%_ClassOf(arg) === 'String'); macro IS_BOOLEAN_WRAPPER(arg) = (%_ClassOf(arg) === 'Boolean'); macro IS_ERROR(arg) = (%_ClassOf(arg) === 'Error'); macro IS_SCRIPT(arg) = (%_ClassOf(arg) === 'Script'); macro IS_ARGUMENTS(arg) = (%_ClassOf(arg) === 'Arguments'); macro IS_GLOBAL(arg) = (%_ClassOf(arg) === 'global'); macro IS_UNDETECTABLE(arg) = (%_IsUndetectableObject(arg)); macro FLOOR(arg) = $floor(arg); # Macro for ECMAScript 5 queries of the type: # "Type(O) is object." # This is the same as being either a function or an object in V8 terminology # (including proxies). # In addition, an undetectable object is also included by this. macro IS_SPEC_OBJECT(arg) = (%_IsSpecObject(arg)); # Macro for ECMAScript 5 queries of the type: # "IsCallable(O)" # We assume here that this is the same as being either a function or a function # proxy. That ignores host objects with [[Call]] methods, but in most situations # we cannot handle those anyway. macro IS_SPEC_FUNCTION(arg) = (%_ClassOf(arg) === 'Function'); # Indices in bound function info retrieved by %BoundFunctionGetBindings(...). const kBoundFunctionIndex = 0; const kBoundThisIndex = 1; const kBoundArgumentsStartIndex = 2; # Inline macros. Use %IS_VAR to make sure arg is evaluated only once. macro NUMBER_IS_NAN(arg) = (!%_IsSmi(%IS_VAR(arg)) && !(arg == arg)); macro NUMBER_IS_FINITE(arg) = (%_IsSmi(%IS_VAR(arg)) || ((arg == arg) && (arg != 1/0) && (arg != -1/0))); macro TO_INTEGER(arg) = (%_IsSmi(%IS_VAR(arg)) ? arg : %NumberToInteger(ToNumber(arg))); macro TO_INTEGER_MAP_MINUS_ZERO(arg) = (%_IsSmi(%IS_VAR(arg)) ? arg : %NumberToIntegerMapMinusZero(ToNumber(arg))); macro TO_INT32(arg) = (%_IsSmi(%IS_VAR(arg)) ? arg : (arg >> 0)); macro TO_UINT32(arg) = (arg >>> 0); macro TO_STRING_INLINE(arg) = (IS_STRING(%IS_VAR(arg)) ? arg : NonStringToString(arg)); macro TO_NUMBER_INLINE(arg) = (IS_NUMBER(%IS_VAR(arg)) ? arg : NonNumberToNumber(arg)); macro TO_OBJECT_INLINE(arg) = (IS_SPEC_OBJECT(%IS_VAR(arg)) ? arg : ToObject(arg)); macro JSON_NUMBER_TO_STRING(arg) = ((%_IsSmi(%IS_VAR(arg)) || arg - arg == 0) ? %_NumberToString(arg) : "null"); # Macros implemented in Python. python macro CHAR_CODE(str) = ord(str[1]); # Constants used on an array to implement the properties of the RegExp object. const REGEXP_NUMBER_OF_CAPTURES = 0; const REGEXP_FIRST_CAPTURE = 3; # We can't put macros in macros so we use constants here. # REGEXP_NUMBER_OF_CAPTURES macro NUMBER_OF_CAPTURES(array) = ((array)[0]); # Limit according to ECMA 262 15.9.1.1 const MAX_TIME_MS = 8640000000000000; # Limit which is MAX_TIME_MS + msPerMonth. const MAX_TIME_BEFORE_UTC = 8640002592000000; # Gets the value of a Date object. If arg is not a Date object # a type error is thrown. 
macro CHECK_DATE(arg) = if (%_ClassOf(arg) !== 'Date') ThrowDateTypeError(); macro LOCAL_DATE_VALUE(arg) = (%_DateField(arg, 0) + %_DateField(arg, 21)); macro UTC_DATE_VALUE(arg) = (%_DateField(arg, 0)); macro LOCAL_YEAR(arg) = (%_DateField(arg, 1)); macro LOCAL_MONTH(arg) = (%_DateField(arg, 2)); macro LOCAL_DAY(arg) = (%_DateField(arg, 3)); macro LOCAL_WEEKDAY(arg) = (%_DateField(arg, 4)); macro LOCAL_HOUR(arg) = (%_DateField(arg, 5)); macro LOCAL_MIN(arg) = (%_DateField(arg, 6)); macro LOCAL_SEC(arg) = (%_DateField(arg, 7)); macro LOCAL_MS(arg) = (%_DateField(arg, 8)); macro LOCAL_DAYS(arg) = (%_DateField(arg, 9)); macro LOCAL_TIME_IN_DAY(arg) = (%_DateField(arg, 10)); macro UTC_YEAR(arg) = (%_DateField(arg, 11)); macro UTC_MONTH(arg) = (%_DateField(arg, 12)); macro UTC_DAY(arg) = (%_DateField(arg, 13)); macro UTC_WEEKDAY(arg) = (%_DateField(arg, 14)); macro UTC_HOUR(arg) = (%_DateField(arg, 15)); macro UTC_MIN(arg) = (%_DateField(arg, 16)); macro UTC_SEC(arg) = (%_DateField(arg, 17)); macro UTC_MS(arg) = (%_DateField(arg, 18)); macro UTC_DAYS(arg) = (%_DateField(arg, 19)); macro UTC_TIME_IN_DAY(arg) = (%_DateField(arg, 20)); macro TIMEZONE_OFFSET(arg) = (%_DateField(arg, 21)); macro SET_UTC_DATE_VALUE(arg, value) = (%DateSetValue(arg, value, 1)); macro SET_LOCAL_DATE_VALUE(arg, value) = (%DateSetValue(arg, value, 0)); # Last input and last subject of regexp matches. const LAST_SUBJECT_INDEX = 1; macro LAST_SUBJECT(array) = ((array)[1]); macro LAST_INPUT(array) = ((array)[2]); # REGEXP_FIRST_CAPTURE macro CAPTURE(index) = (3 + (index)); const CAPTURE0 = 3; const CAPTURE1 = 4; # For the regexp capture override array. This has the same # format as the arguments to a function called from # String.prototype.replace. macro OVERRIDE_MATCH(override) = ((override)[0]); macro OVERRIDE_POS(override) = ((override)[(override).length - 2]); macro OVERRIDE_SUBJECT(override) = ((override)[(override).length - 1]); # 1-based so index of 1 returns the first capture macro OVERRIDE_CAPTURE(override, index) = ((override)[(index)]); # PropertyDescriptor return value indices - must match # PropertyDescriptorIndices in runtime.cc. const IS_ACCESSOR_INDEX = 0; const VALUE_INDEX = 1; const GETTER_INDEX = 2; const SETTER_INDEX = 3; const WRITABLE_INDEX = 4; const ENUMERABLE_INDEX = 5; const CONFIGURABLE_INDEX = 6; # For messages.js # Matches Script::Type from objects.h const TYPE_NATIVE = 0; const TYPE_EXTENSION = 1; const TYPE_NORMAL = 2; # Matches Script::CompilationType from objects.h const COMPILATION_TYPE_HOST = 0; const COMPILATION_TYPE_EVAL = 1; const COMPILATION_TYPE_JSON = 2; # Matches Messages::kNoLineNumberInfo from v8.h const kNoLineNumberInfo = 0;
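js2c.py consumes the const and macro lines above as textual substitutions into the native JS sources. A toy sketch of that expansion covering only the const form (the real js2c also handles parameterized macros and the python macro variant):

import re

CONST_RE = re.compile(r'^const\s+(\w+)\s*=\s*([^;]+);')

def read_constants(macro_source):
    # Collect name -> replacement text for every 'const NAME = value;' line.
    consts = {}
    for line in macro_source.splitlines():
        m = CONST_RE.match(line.strip())
        if m:
            consts[m.group(1)] = m.group(2).strip()
    return consts

def expand(js_source, consts):
    # Substitute each constant at word boundaries, leaving other text intact.
    for name, value in consts.items():
        js_source = re.sub(r'\b%s\b' % re.escape(name), value, js_source)
    return js_source

consts = read_constants("const msPerSecond = 1000;\nconst msPerMinute = 60000;")
print(expand("var t = msPerMinute + 30 * msPerSecond;", consts))
# -> var t = 60000 + 30 * 1000;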
girving/tensorflow
refs/heads/master
tensorflow/contrib/slim/python/slim/nets/inception_v1_test.py
25
# Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for nets.inception_v1.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import numpy as np from tensorflow.contrib.framework.python.ops import arg_scope from tensorflow.contrib.framework.python.ops import variables as variables_lib from tensorflow.contrib.slim.python.slim import model_analyzer from tensorflow.contrib.slim.python.slim.nets import inception_v1 from tensorflow.python.framework import dtypes from tensorflow.python.framework import ops from tensorflow.python.ops import array_ops from tensorflow.python.ops import math_ops from tensorflow.python.ops import random_ops from tensorflow.python.ops import variables from tensorflow.python.platform import test class InceptionV1Test(test.TestCase): def testBuildClassificationNetwork(self): batch_size = 5 height, width = 224, 224 num_classes = 1000 inputs = random_ops.random_uniform((batch_size, height, width, 3)) logits, end_points = inception_v1.inception_v1(inputs, num_classes) self.assertTrue(logits.op.name.startswith('InceptionV1/Logits')) self.assertListEqual(logits.get_shape().as_list(), [batch_size, num_classes]) self.assertTrue('Predictions' in end_points) self.assertListEqual(end_points['Predictions'].get_shape().as_list(), [batch_size, num_classes]) def testBuildBaseNetwork(self): batch_size = 5 height, width = 224, 224 inputs = random_ops.random_uniform((batch_size, height, width, 3)) mixed_6c, end_points = inception_v1.inception_v1_base(inputs) self.assertTrue(mixed_6c.op.name.startswith('InceptionV1/Mixed_5c')) self.assertListEqual(mixed_6c.get_shape().as_list(), [batch_size, 7, 7, 1024]) expected_endpoints = [ 'Conv2d_1a_7x7', 'MaxPool_2a_3x3', 'Conv2d_2b_1x1', 'Conv2d_2c_3x3', 'MaxPool_3a_3x3', 'Mixed_3b', 'Mixed_3c', 'MaxPool_4a_3x3', 'Mixed_4b', 'Mixed_4c', 'Mixed_4d', 'Mixed_4e', 'Mixed_4f', 'MaxPool_5a_2x2', 'Mixed_5b', 'Mixed_5c' ] self.assertItemsEqual(end_points.keys(), expected_endpoints) def testBuildOnlyUptoFinalEndpoint(self): batch_size = 5 height, width = 224, 224 endpoints = [ 'Conv2d_1a_7x7', 'MaxPool_2a_3x3', 'Conv2d_2b_1x1', 'Conv2d_2c_3x3', 'MaxPool_3a_3x3', 'Mixed_3b', 'Mixed_3c', 'MaxPool_4a_3x3', 'Mixed_4b', 'Mixed_4c', 'Mixed_4d', 'Mixed_4e', 'Mixed_4f', 'MaxPool_5a_2x2', 'Mixed_5b', 'Mixed_5c' ] for index, endpoint in enumerate(endpoints): with ops.Graph().as_default(): inputs = random_ops.random_uniform((batch_size, height, width, 3)) out_tensor, end_points = inception_v1.inception_v1_base( inputs, final_endpoint=endpoint) self.assertTrue( out_tensor.op.name.startswith('InceptionV1/' + endpoint)) self.assertItemsEqual(endpoints[:index + 1], end_points) def testBuildAndCheckAllEndPointsUptoMixed5c(self): batch_size = 5 height, width = 224, 224 inputs = random_ops.random_uniform((batch_size, height, width, 3)) _, end_points = inception_v1.inception_v1_base( 
inputs, final_endpoint='Mixed_5c') endpoints_shapes = { 'Conv2d_1a_7x7': [5, 112, 112, 64], 'MaxPool_2a_3x3': [5, 56, 56, 64], 'Conv2d_2b_1x1': [5, 56, 56, 64], 'Conv2d_2c_3x3': [5, 56, 56, 192], 'MaxPool_3a_3x3': [5, 28, 28, 192], 'Mixed_3b': [5, 28, 28, 256], 'Mixed_3c': [5, 28, 28, 480], 'MaxPool_4a_3x3': [5, 14, 14, 480], 'Mixed_4b': [5, 14, 14, 512], 'Mixed_4c': [5, 14, 14, 512], 'Mixed_4d': [5, 14, 14, 512], 'Mixed_4e': [5, 14, 14, 528], 'Mixed_4f': [5, 14, 14, 832], 'MaxPool_5a_2x2': [5, 7, 7, 832], 'Mixed_5b': [5, 7, 7, 832], 'Mixed_5c': [5, 7, 7, 1024] } self.assertItemsEqual(endpoints_shapes.keys(), end_points.keys()) for endpoint_name in endpoints_shapes: expected_shape = endpoints_shapes[endpoint_name] self.assertTrue(endpoint_name in end_points) self.assertListEqual(end_points[endpoint_name].get_shape().as_list(), expected_shape) def testModelHasExpectedNumberOfParameters(self): batch_size = 5 height, width = 224, 224 inputs = random_ops.random_uniform((batch_size, height, width, 3)) with arg_scope(inception_v1.inception_v1_arg_scope()): inception_v1.inception_v1_base(inputs) total_params, _ = model_analyzer.analyze_vars( variables_lib.get_model_variables()) self.assertAlmostEqual(5607184, total_params) def testHalfSizeImages(self): batch_size = 5 height, width = 112, 112 inputs = random_ops.random_uniform((batch_size, height, width, 3)) mixed_5c, _ = inception_v1.inception_v1_base(inputs) self.assertTrue(mixed_5c.op.name.startswith('InceptionV1/Mixed_5c')) self.assertListEqual(mixed_5c.get_shape().as_list(), [batch_size, 4, 4, 1024]) def testUnknownImageShape(self): ops.reset_default_graph() batch_size = 2 height, width = 224, 224 num_classes = 1000 input_np = np.random.uniform(0, 1, (batch_size, height, width, 3)) with self.cached_session() as sess: inputs = array_ops.placeholder( dtypes.float32, shape=(batch_size, None, None, 3)) logits, end_points = inception_v1.inception_v1(inputs, num_classes) self.assertTrue(logits.op.name.startswith('InceptionV1/Logits')) self.assertListEqual(logits.get_shape().as_list(), [batch_size, num_classes]) pre_pool = end_points['Mixed_5c'] feed_dict = {inputs: input_np} variables.global_variables_initializer().run() pre_pool_out = sess.run(pre_pool, feed_dict=feed_dict) self.assertListEqual(list(pre_pool_out.shape), [batch_size, 7, 7, 1024]) def testUnknownBatchSize(self): batch_size = 1 height, width = 224, 224 num_classes = 1000 inputs = array_ops.placeholder(dtypes.float32, (None, height, width, 3)) logits, _ = inception_v1.inception_v1(inputs, num_classes) self.assertTrue(logits.op.name.startswith('InceptionV1/Logits')) self.assertListEqual(logits.get_shape().as_list(), [None, num_classes]) images = random_ops.random_uniform((batch_size, height, width, 3)) with self.cached_session() as sess: sess.run(variables.global_variables_initializer()) output = sess.run(logits, {inputs: images.eval()}) self.assertEquals(output.shape, (batch_size, num_classes)) def testEvaluation(self): batch_size = 2 height, width = 224, 224 num_classes = 1000 eval_inputs = random_ops.random_uniform((batch_size, height, width, 3)) logits, _ = inception_v1.inception_v1( eval_inputs, num_classes, is_training=False) predictions = math_ops.argmax(logits, 1) with self.cached_session() as sess: sess.run(variables.global_variables_initializer()) output = sess.run(predictions) self.assertEquals(output.shape, (batch_size,)) def testTrainEvalWithReuse(self): train_batch_size = 5 eval_batch_size = 2 height, width = 224, 224 num_classes = 1000 train_inputs = 
random_ops.random_uniform( (train_batch_size, height, width, 3)) inception_v1.inception_v1(train_inputs, num_classes) eval_inputs = random_ops.random_uniform((eval_batch_size, height, width, 3)) logits, _ = inception_v1.inception_v1(eval_inputs, num_classes, reuse=True) predictions = math_ops.argmax(logits, 1) with self.cached_session() as sess: sess.run(variables.global_variables_initializer()) output = sess.run(predictions) self.assertEquals(output.shape, (eval_batch_size,)) def testLogitsNotSqueezed(self): num_classes = 25 images = random_ops.random_uniform([1, 224, 224, 3]) logits, _ = inception_v1.inception_v1( images, num_classes=num_classes, spatial_squeeze=False) with self.cached_session() as sess: variables.global_variables_initializer().run() logits_out = sess.run(logits) self.assertListEqual(list(logits_out.shape), [1, 1, 1, num_classes]) if __name__ == '__main__': test.main()
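A standalone sketch mirroring testBuildClassificationNetwork above, using the same TF 1.x contrib import path as the test file (shapes follow the tests; this is illustrative, not part of the suite):

import tensorflow as tf
from tensorflow.contrib.slim.python.slim.nets import inception_v1

# Random images stand in for real data; 224x224 is the network's native size.
inputs = tf.random_uniform((5, 224, 224, 3))
logits, end_points = inception_v1.inception_v1(inputs, num_classes=1000)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    probs = sess.run(end_points['Predictions'])
    print(probs.shape)  # (5, 1000)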
serensoner/CouchPotatoServer
refs/heads/develop
couchpotato/core/notifications/pushbullet.py
37
from couchpotato.core.helpers.encoding import toUnicode
from couchpotato.core.helpers.variable import splitString
from couchpotato.core.logger import CPLog
from couchpotato.core.notifications.base import Notification

log = CPLog(__name__)

autoload = 'Pushbullet'


class Pushbullet(Notification):

    url = 'https://api.pushbullet.com/v2/%s'

    def notify(self, message = '', data = None, listener = None):
        if not data: data = {}

        # Get all the device IDs linked to this user
        devices = self.getDevices() or [None]
        successful = 0
        for device in devices:
            response = self.request(
                'pushes',
                device_iden = device,
                type = 'note',
                title = self.default_title,
                body = toUnicode(message)
            )

            if response:
                successful += 1
            else:
                log.error('Unable to push notification to Pushbullet device with ID %s' % device)

        for channel in self.getChannels():
            self.request(
                'pushes',
                channel_tag = channel,
                type = 'note',
                title = self.default_title,
                body = toUnicode(message)
            )

        return successful == len(devices)

    def getDevices(self):
        return splitString(self.conf('devices'))

    def getChannels(self):
        return splitString(self.conf('channels'))

    def request(self, method, **kwargs):
        try:
            headers = {
                'Access-Token': self.conf('api_key')
            }

            if kwargs.get('device_iden') is None:
                try: del kwargs['device_iden']
                except: pass

            return self.getJsonData(self.url % method, cache_timeout = -1, headers = headers, data = kwargs)
        except Exception as ex:
            log.error('Pushbullet request failed')
            log.debug(ex)

        return None


config = [{
    'name': 'pushbullet',
    'groups': [
        {
            'tab': 'notifications',
            'list': 'notification_providers',
            'name': 'pushbullet',
            'options': [
                {
                    'name': 'enabled',
                    'default': 0,
                    'type': 'enabler',
                },
                {
                    'name': 'api_key',
                    'label': 'Access Token',
                    'description': 'Can be found on <a href="https://www.pushbullet.com/account" target="_blank">Account Settings</a>',
                },
                {
                    'name': 'devices',
                    'default': '',
                    'advanced': True,
                    'description': 'IDs of devices to send notifications to, empty = all devices'
                },
                {
                    'name': 'channels',
                    'default': '',
                    'advanced': True,
                    'description': 'IDs of channels to send notifications to, empty = no channels'
                },
                {
                    'name': 'on_snatch',
                    'default': 0,
                    'type': 'bool',
                    'advanced': True,
                    'description': 'Also send message when movie is snatched.',
                },
            ],
        }
    ],
}]
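
# A standalone sketch of the HTTP call this provider wraps, using the
# `requests` library in place of CouchPotato's getJsonData helper (that swap,
# and the PUSHBULLET_TOKEN placeholder, are assumptions for illustration).
# The endpoint, Access-Token header, and payload fields mirror the code above.
import requests

def push_note(token, title, body, device_iden=None):
    payload = {'type': 'note', 'title': title, 'body': body}
    if device_iden:
        payload['device_iden'] = device_iden  # omit to push to all devices
    resp = requests.post(
        'https://api.pushbullet.com/v2/pushes',
        data=payload,                      # form-encoded, like `data = kwargs` above
        headers={'Access-Token': token},
    )
    resp.raise_for_status()
    return resp.json()

# push_note('PUSHBULLET_TOKEN', 'CouchPotato', 'Movie snatched!')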
erikr/django
refs/heads/master
django/conf/locale/es_MX/formats.py
504
# -*- encoding: utf-8 -*-
# This file is distributed under the same license as the Django package.
#
from __future__ import unicode_literals

DATE_FORMAT = r'j \d\e F \d\e Y'
TIME_FORMAT = 'H:i'
DATETIME_FORMAT = r'j \d\e F \d\e Y \a \l\a\s H:i'
YEAR_MONTH_FORMAT = r'F \d\e Y'
MONTH_DAY_FORMAT = r'j \d\e F'
SHORT_DATE_FORMAT = 'd/m/Y'
SHORT_DATETIME_FORMAT = 'd/m/Y H:i'
FIRST_DAY_OF_WEEK = 1  # Monday: ISO 8601

DATE_INPUT_FORMATS = [
    '%d/%m/%Y', '%d/%m/%y',  # '25/10/2006', '25/10/06'
    '%Y%m%d',  # '20061025'
]

DATETIME_INPUT_FORMATS = [
    '%d/%m/%Y %H:%M:%S',
    '%d/%m/%Y %H:%M:%S.%f',
    '%d/%m/%Y %H:%M',
    '%d/%m/%y %H:%M:%S',
    '%d/%m/%y %H:%M:%S.%f',
    '%d/%m/%y %H:%M',
]

DECIMAL_SEPARATOR = '.'  # ',' is also official (less common): NOM-008-SCFI-2002
THOUSAND_SEPARATOR = '\xa0'  # non-breaking space
NUMBER_GROUPING = 3
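
# A small self-contained sketch (with assumed example dates) of how the input
# formats above behave: Django tries each strptime pattern in order until one
# parses. Plain datetime.strptime reproduces that loop without Django.
from datetime import datetime

def _parse_date(value, formats=DATE_INPUT_FORMATS):
    for fmt in formats:
        try:
            return datetime.strptime(value, fmt).date()
        except ValueError:
            continue
    raise ValueError('no format matched %r' % value)

assert _parse_date('25/10/2006') == _parse_date('20061025')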
andreparrish/python-for-android
refs/heads/master
python3-alpha/python3-src/Lib/test/re_tests.py
879
#!/usr/bin/env python3 # -*- mode: python -*- # Re test suite and benchmark suite v1.5 # The 3 possible outcomes for each pattern [SUCCEED, FAIL, SYNTAX_ERROR] = range(3) # Benchmark suite (needs expansion) # # The benchmark suite does not test correctness, just speed. The # first element of each tuple is the regex pattern; the second is a # string to match it against. The benchmarking code will embed the # second string inside several sizes of padding, to test how regex # matching performs on large strings. benchmarks = [ # test common prefix ('Python|Perl', 'Perl'), # Alternation ('(Python|Perl)', 'Perl'), # Grouped alternation ('Python|Perl|Tcl', 'Perl'), # Alternation ('(Python|Perl|Tcl)', 'Perl'), # Grouped alternation ('(Python)\\1', 'PythonPython'), # Backreference ('([0a-z][a-z0-9]*,)+', 'a5,b7,c9,'), # Disable the fastmap optimization ('([a-z][a-z0-9]*,)+', 'a5,b7,c9,'), # A few sets ('Python', 'Python'), # Simple text literal ('.*Python', 'Python'), # Bad text literal ('.*Python.*', 'Python'), # Worse text literal ('.*(Python)', 'Python'), # Bad text literal with grouping ] # Test suite (for verifying correctness) # # The test suite is a list of 5- or 3-tuples. The 5 parts of a # complete tuple are: # element 0: a string containing the pattern # 1: the string to match against the pattern # 2: the expected result (SUCCEED, FAIL, SYNTAX_ERROR) # 3: a string that will be eval()'ed to produce a test string. # This is an arbitrary Python expression; the available # variables are "found" (the whole match), and "g1", "g2", ... # up to "g99" contain the contents of each group, or the # string 'None' if the group wasn't given a value, or the # string 'Error' if the group index was out of range; # also "groups", the return value of m.group() (a tuple). # 4: The expected result of evaluating the expression. # If the two don't match, an error is reported. # # If the regex isn't expected to work, the latter two elements can be omitted. 
tests = [ # Test ?P< and ?P= extensions ('(?P<foo_123', '', SYNTAX_ERROR), # Unterminated group identifier ('(?P<1>a)', '', SYNTAX_ERROR), # Begins with a digit ('(?P<!>a)', '', SYNTAX_ERROR), # Begins with an illegal char ('(?P<foo!>a)', '', SYNTAX_ERROR), # Begins with an illegal char # Same tests, for the ?P= form ('(?P<foo_123>a)(?P=foo_123', 'aa', SYNTAX_ERROR), ('(?P<foo_123>a)(?P=1)', 'aa', SYNTAX_ERROR), ('(?P<foo_123>a)(?P=!)', 'aa', SYNTAX_ERROR), ('(?P<foo_123>a)(?P=foo_124', 'aa', SYNTAX_ERROR), # Backref to undefined group ('(?P<foo_123>a)', 'a', SUCCEED, 'g1', 'a'), ('(?P<foo_123>a)(?P=foo_123)', 'aa', SUCCEED, 'g1', 'a'), # Test octal escapes ('\\1', 'a', SYNTAX_ERROR), # Backreference ('[\\1]', '\1', SUCCEED, 'found', '\1'), # Character ('\\09', chr(0) + '9', SUCCEED, 'found', chr(0) + '9'), ('\\141', 'a', SUCCEED, 'found', 'a'), ('(a)(b)(c)(d)(e)(f)(g)(h)(i)(j)(k)(l)\\119', 'abcdefghijklk9', SUCCEED, 'found+"-"+g11', 'abcdefghijklk9-k'), # Test \0 is handled everywhere (r'\0', '\0', SUCCEED, 'found', '\0'), (r'[\0a]', '\0', SUCCEED, 'found', '\0'), (r'[a\0]', '\0', SUCCEED, 'found', '\0'), (r'[^a\0]', '\0', FAIL), # Test various letter escapes (r'\a[\b]\f\n\r\t\v', '\a\b\f\n\r\t\v', SUCCEED, 'found', '\a\b\f\n\r\t\v'), (r'[\a][\b][\f][\n][\r][\t][\v]', '\a\b\f\n\r\t\v', SUCCEED, 'found', '\a\b\f\n\r\t\v'), # NOTE: not an error under PCRE/PRE: # (r'\u', '', SYNTAX_ERROR), # A Perl escape (r'\c\e\g\h\i\j\k\m\o\p\q\y\z', 'ceghijkmopqyz', SUCCEED, 'found', 'ceghijkmopqyz'), (r'\xff', '\377', SUCCEED, 'found', chr(255)), # new \x semantics (r'\x00ffffffffffffff', '\377', FAIL, 'found', chr(255)), (r'\x00f', '\017', FAIL, 'found', chr(15)), (r'\x00fe', '\376', FAIL, 'found', chr(254)), # (r'\x00ffffffffffffff', '\377', SUCCEED, 'found', chr(255)), # (r'\x00f', '\017', SUCCEED, 'found', chr(15)), # (r'\x00fe', '\376', SUCCEED, 'found', chr(254)), (r"^\w+=(\\[\000-\277]|[^\n\\])*", "SRC=eval.c g.c blah blah blah \\\\\n\tapes.c", SUCCEED, 'found', "SRC=eval.c g.c blah blah blah \\\\"), # Test that . 
only matches \n in DOTALL mode ('a.b', 'acb', SUCCEED, 'found', 'acb'), ('a.b', 'a\nb', FAIL), ('a.*b', 'acc\nccb', FAIL), ('a.{4,5}b', 'acc\nccb', FAIL), ('a.b', 'a\rb', SUCCEED, 'found', 'a\rb'), ('a.b(?s)', 'a\nb', SUCCEED, 'found', 'a\nb'), ('a.*(?s)b', 'acc\nccb', SUCCEED, 'found', 'acc\nccb'), ('(?s)a.{4,5}b', 'acc\nccb', SUCCEED, 'found', 'acc\nccb'), ('(?s)a.b', 'a\nb', SUCCEED, 'found', 'a\nb'), (')', '', SYNTAX_ERROR), # Unmatched right bracket ('', '', SUCCEED, 'found', ''), # Empty pattern ('abc', 'abc', SUCCEED, 'found', 'abc'), ('abc', 'xbc', FAIL), ('abc', 'axc', FAIL), ('abc', 'abx', FAIL), ('abc', 'xabcy', SUCCEED, 'found', 'abc'), ('abc', 'ababc', SUCCEED, 'found', 'abc'), ('ab*c', 'abc', SUCCEED, 'found', 'abc'), ('ab*bc', 'abc', SUCCEED, 'found', 'abc'), ('ab*bc', 'abbc', SUCCEED, 'found', 'abbc'), ('ab*bc', 'abbbbc', SUCCEED, 'found', 'abbbbc'), ('ab+bc', 'abbc', SUCCEED, 'found', 'abbc'), ('ab+bc', 'abc', FAIL), ('ab+bc', 'abq', FAIL), ('ab+bc', 'abbbbc', SUCCEED, 'found', 'abbbbc'), ('ab?bc', 'abbc', SUCCEED, 'found', 'abbc'), ('ab?bc', 'abc', SUCCEED, 'found', 'abc'), ('ab?bc', 'abbbbc', FAIL), ('ab?c', 'abc', SUCCEED, 'found', 'abc'), ('^abc$', 'abc', SUCCEED, 'found', 'abc'), ('^abc$', 'abcc', FAIL), ('^abc', 'abcc', SUCCEED, 'found', 'abc'), ('^abc$', 'aabc', FAIL), ('abc$', 'aabc', SUCCEED, 'found', 'abc'), ('^', 'abc', SUCCEED, 'found+"-"', '-'), ('$', 'abc', SUCCEED, 'found+"-"', '-'), ('a.c', 'abc', SUCCEED, 'found', 'abc'), ('a.c', 'axc', SUCCEED, 'found', 'axc'), ('a.*c', 'axyzc', SUCCEED, 'found', 'axyzc'), ('a.*c', 'axyzd', FAIL), ('a[bc]d', 'abc', FAIL), ('a[bc]d', 'abd', SUCCEED, 'found', 'abd'), ('a[b-d]e', 'abd', FAIL), ('a[b-d]e', 'ace', SUCCEED, 'found', 'ace'), ('a[b-d]', 'aac', SUCCEED, 'found', 'ac'), ('a[-b]', 'a-', SUCCEED, 'found', 'a-'), ('a[\\-b]', 'a-', SUCCEED, 'found', 'a-'), # NOTE: not an error under PCRE/PRE: # ('a[b-]', 'a-', SYNTAX_ERROR), ('a[]b', '-', SYNTAX_ERROR), ('a[', '-', SYNTAX_ERROR), ('a\\', '-', SYNTAX_ERROR), ('abc)', '-', SYNTAX_ERROR), ('(abc', '-', SYNTAX_ERROR), ('a]', 'a]', SUCCEED, 'found', 'a]'), ('a[]]b', 'a]b', SUCCEED, 'found', 'a]b'), ('a[\]]b', 'a]b', SUCCEED, 'found', 'a]b'), ('a[^bc]d', 'aed', SUCCEED, 'found', 'aed'), ('a[^bc]d', 'abd', FAIL), ('a[^-b]c', 'adc', SUCCEED, 'found', 'adc'), ('a[^-b]c', 'a-c', FAIL), ('a[^]b]c', 'a]c', FAIL), ('a[^]b]c', 'adc', SUCCEED, 'found', 'adc'), ('\\ba\\b', 'a-', SUCCEED, '"-"', '-'), ('\\ba\\b', '-a', SUCCEED, '"-"', '-'), ('\\ba\\b', '-a-', SUCCEED, '"-"', '-'), ('\\by\\b', 'xy', FAIL), ('\\by\\b', 'yz', FAIL), ('\\by\\b', 'xyz', FAIL), ('x\\b', 'xyz', FAIL), ('x\\B', 'xyz', SUCCEED, '"-"', '-'), ('\\Bz', 'xyz', SUCCEED, '"-"', '-'), ('z\\B', 'xyz', FAIL), ('\\Bx', 'xyz', FAIL), ('\\Ba\\B', 'a-', FAIL, '"-"', '-'), ('\\Ba\\B', '-a', FAIL, '"-"', '-'), ('\\Ba\\B', '-a-', FAIL, '"-"', '-'), ('\\By\\B', 'xy', FAIL), ('\\By\\B', 'yz', FAIL), ('\\By\\b', 'xy', SUCCEED, '"-"', '-'), ('\\by\\B', 'yz', SUCCEED, '"-"', '-'), ('\\By\\B', 'xyz', SUCCEED, '"-"', '-'), ('ab|cd', 'abc', SUCCEED, 'found', 'ab'), ('ab|cd', 'abcd', SUCCEED, 'found', 'ab'), ('()ef', 'def', SUCCEED, 'found+"-"+g1', 'ef-'), ('$b', 'b', FAIL), ('a\\(b', 'a(b', SUCCEED, 'found+"-"+g1', 'a(b-Error'), ('a\\(*b', 'ab', SUCCEED, 'found', 'ab'), ('a\\(*b', 'a((b', SUCCEED, 'found', 'a((b'), ('a\\\\b', 'a\\b', SUCCEED, 'found', 'a\\b'), ('((a))', 'abc', SUCCEED, 'found+"-"+g1+"-"+g2', 'a-a-a'), ('(a)b(c)', 'abc', SUCCEED, 'found+"-"+g1+"-"+g2', 'abc-a-c'), ('a+b+c', 'aabbabc', SUCCEED, 'found', 'abc'), 
('(a+|b)*', 'ab', SUCCEED, 'found+"-"+g1', 'ab-b'), ('(a+|b)+', 'ab', SUCCEED, 'found+"-"+g1', 'ab-b'), ('(a+|b)?', 'ab', SUCCEED, 'found+"-"+g1', 'a-a'), (')(', '-', SYNTAX_ERROR), ('[^ab]*', 'cde', SUCCEED, 'found', 'cde'), ('abc', '', FAIL), ('a*', '', SUCCEED, 'found', ''), ('a|b|c|d|e', 'e', SUCCEED, 'found', 'e'), ('(a|b|c|d|e)f', 'ef', SUCCEED, 'found+"-"+g1', 'ef-e'), ('abcd*efg', 'abcdefg', SUCCEED, 'found', 'abcdefg'), ('ab*', 'xabyabbbz', SUCCEED, 'found', 'ab'), ('ab*', 'xayabbbz', SUCCEED, 'found', 'a'), ('(ab|cd)e', 'abcde', SUCCEED, 'found+"-"+g1', 'cde-cd'), ('[abhgefdc]ij', 'hij', SUCCEED, 'found', 'hij'), ('^(ab|cd)e', 'abcde', FAIL, 'xg1y', 'xy'), ('(abc|)ef', 'abcdef', SUCCEED, 'found+"-"+g1', 'ef-'), ('(a|b)c*d', 'abcd', SUCCEED, 'found+"-"+g1', 'bcd-b'), ('(ab|ab*)bc', 'abc', SUCCEED, 'found+"-"+g1', 'abc-a'), ('a([bc]*)c*', 'abc', SUCCEED, 'found+"-"+g1', 'abc-bc'), ('a([bc]*)(c*d)', 'abcd', SUCCEED, 'found+"-"+g1+"-"+g2', 'abcd-bc-d'), ('a([bc]+)(c*d)', 'abcd', SUCCEED, 'found+"-"+g1+"-"+g2', 'abcd-bc-d'), ('a([bc]*)(c+d)', 'abcd', SUCCEED, 'found+"-"+g1+"-"+g2', 'abcd-b-cd'), ('a[bcd]*dcdcde', 'adcdcde', SUCCEED, 'found', 'adcdcde'), ('a[bcd]+dcdcde', 'adcdcde', FAIL), ('(ab|a)b*c', 'abc', SUCCEED, 'found+"-"+g1', 'abc-ab'), ('((a)(b)c)(d)', 'abcd', SUCCEED, 'g1+"-"+g2+"-"+g3+"-"+g4', 'abc-a-b-d'), ('[a-zA-Z_][a-zA-Z0-9_]*', 'alpha', SUCCEED, 'found', 'alpha'), ('^a(bc+|b[eh])g|.h$', 'abh', SUCCEED, 'found+"-"+g1', 'bh-None'), ('(bc+d$|ef*g.|h?i(j|k))', 'effgz', SUCCEED, 'found+"-"+g1+"-"+g2', 'effgz-effgz-None'), ('(bc+d$|ef*g.|h?i(j|k))', 'ij', SUCCEED, 'found+"-"+g1+"-"+g2', 'ij-ij-j'), ('(bc+d$|ef*g.|h?i(j|k))', 'effg', FAIL), ('(bc+d$|ef*g.|h?i(j|k))', 'bcdd', FAIL), ('(bc+d$|ef*g.|h?i(j|k))', 'reffgz', SUCCEED, 'found+"-"+g1+"-"+g2', 'effgz-effgz-None'), ('(((((((((a)))))))))', 'a', SUCCEED, 'found', 'a'), ('multiple words of text', 'uh-uh', FAIL), ('multiple words', 'multiple words, yeah', SUCCEED, 'found', 'multiple words'), ('(.*)c(.*)', 'abcde', SUCCEED, 'found+"-"+g1+"-"+g2', 'abcde-ab-de'), ('\\((.*), (.*)\\)', '(a, b)', SUCCEED, 'g2+"-"+g1', 'b-a'), ('[k]', 'ab', FAIL), ('a[-]?c', 'ac', SUCCEED, 'found', 'ac'), ('(abc)\\1', 'abcabc', SUCCEED, 'g1', 'abc'), ('([a-c]*)\\1', 'abcabc', SUCCEED, 'g1', 'abc'), ('^(.+)?B', 'AB', SUCCEED, 'g1', 'A'), ('(a+).\\1$', 'aaaaa', SUCCEED, 'found+"-"+g1', 'aaaaa-aa'), ('^(a+).\\1$', 'aaaa', FAIL), ('(abc)\\1', 'abcabc', SUCCEED, 'found+"-"+g1', 'abcabc-abc'), ('([a-c]+)\\1', 'abcabc', SUCCEED, 'found+"-"+g1', 'abcabc-abc'), ('(a)\\1', 'aa', SUCCEED, 'found+"-"+g1', 'aa-a'), ('(a+)\\1', 'aa', SUCCEED, 'found+"-"+g1', 'aa-a'), ('(a+)+\\1', 'aa', SUCCEED, 'found+"-"+g1', 'aa-a'), ('(a).+\\1', 'aba', SUCCEED, 'found+"-"+g1', 'aba-a'), ('(a)ba*\\1', 'aba', SUCCEED, 'found+"-"+g1', 'aba-a'), ('(aa|a)a\\1$', 'aaa', SUCCEED, 'found+"-"+g1', 'aaa-a'), ('(a|aa)a\\1$', 'aaa', SUCCEED, 'found+"-"+g1', 'aaa-a'), ('(a+)a\\1$', 'aaa', SUCCEED, 'found+"-"+g1', 'aaa-a'), ('([abc]*)\\1', 'abcabc', SUCCEED, 'found+"-"+g1', 'abcabc-abc'), ('(a)(b)c|ab', 'ab', SUCCEED, 'found+"-"+g1+"-"+g2', 'ab-None-None'), ('(a)+x', 'aaax', SUCCEED, 'found+"-"+g1', 'aaax-a'), ('([ac])+x', 'aacx', SUCCEED, 'found+"-"+g1', 'aacx-c'), ('([^/]*/)*sub1/', 'd:msgs/tdir/sub1/trial/away.cpp', SUCCEED, 'found+"-"+g1', 'd:msgs/tdir/sub1/-tdir/'), ('([^.]*)\\.([^:]*):[T ]+(.*)', 'track1.title:TBlah blah blah', SUCCEED, 'found+"-"+g1+"-"+g2+"-"+g3', 'track1.title:TBlah blah blah-track1-title-Blah blah blah'), ('([^N]*N)+', 'abNNxyzN', SUCCEED, 'found+"-"+g1', 
'abNNxyzN-xyzN'), ('([^N]*N)+', 'abNNxyz', SUCCEED, 'found+"-"+g1', 'abNN-N'), ('([abc]*)x', 'abcx', SUCCEED, 'found+"-"+g1', 'abcx-abc'), ('([abc]*)x', 'abc', FAIL), ('([xyz]*)x', 'abcx', SUCCEED, 'found+"-"+g1', 'x-'), ('(a)+b|aac', 'aac', SUCCEED, 'found+"-"+g1', 'aac-None'), # Test symbolic groups ('(?P<i d>aaa)a', 'aaaa', SYNTAX_ERROR), ('(?P<id>aaa)a', 'aaaa', SUCCEED, 'found+"-"+id', 'aaaa-aaa'), ('(?P<id>aa)(?P=id)', 'aaaa', SUCCEED, 'found+"-"+id', 'aaaa-aa'), ('(?P<id>aa)(?P=xd)', 'aaaa', SYNTAX_ERROR), # Test octal escapes/memory references ('\\1', 'a', SYNTAX_ERROR), ('\\09', chr(0) + '9', SUCCEED, 'found', chr(0) + '9'), ('\\141', 'a', SUCCEED, 'found', 'a'), ('(a)(b)(c)(d)(e)(f)(g)(h)(i)(j)(k)(l)\\119', 'abcdefghijklk9', SUCCEED, 'found+"-"+g11', 'abcdefghijklk9-k'), # All tests from Perl ('abc', 'abc', SUCCEED, 'found', 'abc'), ('abc', 'xbc', FAIL), ('abc', 'axc', FAIL), ('abc', 'abx', FAIL), ('abc', 'xabcy', SUCCEED, 'found', 'abc'), ('abc', 'ababc', SUCCEED, 'found', 'abc'), ('ab*c', 'abc', SUCCEED, 'found', 'abc'), ('ab*bc', 'abc', SUCCEED, 'found', 'abc'), ('ab*bc', 'abbc', SUCCEED, 'found', 'abbc'), ('ab*bc', 'abbbbc', SUCCEED, 'found', 'abbbbc'), ('ab{0,}bc', 'abbbbc', SUCCEED, 'found', 'abbbbc'), ('ab+bc', 'abbc', SUCCEED, 'found', 'abbc'), ('ab+bc', 'abc', FAIL), ('ab+bc', 'abq', FAIL), ('ab{1,}bc', 'abq', FAIL), ('ab+bc', 'abbbbc', SUCCEED, 'found', 'abbbbc'), ('ab{1,}bc', 'abbbbc', SUCCEED, 'found', 'abbbbc'), ('ab{1,3}bc', 'abbbbc', SUCCEED, 'found', 'abbbbc'), ('ab{3,4}bc', 'abbbbc', SUCCEED, 'found', 'abbbbc'), ('ab{4,5}bc', 'abbbbc', FAIL), ('ab?bc', 'abbc', SUCCEED, 'found', 'abbc'), ('ab?bc', 'abc', SUCCEED, 'found', 'abc'), ('ab{0,1}bc', 'abc', SUCCEED, 'found', 'abc'), ('ab?bc', 'abbbbc', FAIL), ('ab?c', 'abc', SUCCEED, 'found', 'abc'), ('ab{0,1}c', 'abc', SUCCEED, 'found', 'abc'), ('^abc$', 'abc', SUCCEED, 'found', 'abc'), ('^abc$', 'abcc', FAIL), ('^abc', 'abcc', SUCCEED, 'found', 'abc'), ('^abc$', 'aabc', FAIL), ('abc$', 'aabc', SUCCEED, 'found', 'abc'), ('^', 'abc', SUCCEED, 'found', ''), ('$', 'abc', SUCCEED, 'found', ''), ('a.c', 'abc', SUCCEED, 'found', 'abc'), ('a.c', 'axc', SUCCEED, 'found', 'axc'), ('a.*c', 'axyzc', SUCCEED, 'found', 'axyzc'), ('a.*c', 'axyzd', FAIL), ('a[bc]d', 'abc', FAIL), ('a[bc]d', 'abd', SUCCEED, 'found', 'abd'), ('a[b-d]e', 'abd', FAIL), ('a[b-d]e', 'ace', SUCCEED, 'found', 'ace'), ('a[b-d]', 'aac', SUCCEED, 'found', 'ac'), ('a[-b]', 'a-', SUCCEED, 'found', 'a-'), ('a[b-]', 'a-', SUCCEED, 'found', 'a-'), ('a[b-a]', '-', SYNTAX_ERROR), ('a[]b', '-', SYNTAX_ERROR), ('a[', '-', SYNTAX_ERROR), ('a]', 'a]', SUCCEED, 'found', 'a]'), ('a[]]b', 'a]b', SUCCEED, 'found', 'a]b'), ('a[^bc]d', 'aed', SUCCEED, 'found', 'aed'), ('a[^bc]d', 'abd', FAIL), ('a[^-b]c', 'adc', SUCCEED, 'found', 'adc'), ('a[^-b]c', 'a-c', FAIL), ('a[^]b]c', 'a]c', FAIL), ('a[^]b]c', 'adc', SUCCEED, 'found', 'adc'), ('ab|cd', 'abc', SUCCEED, 'found', 'ab'), ('ab|cd', 'abcd', SUCCEED, 'found', 'ab'), ('()ef', 'def', SUCCEED, 'found+"-"+g1', 'ef-'), ('*a', '-', SYNTAX_ERROR), ('(*)b', '-', SYNTAX_ERROR), ('$b', 'b', FAIL), ('a\\', '-', SYNTAX_ERROR), ('a\\(b', 'a(b', SUCCEED, 'found+"-"+g1', 'a(b-Error'), ('a\\(*b', 'ab', SUCCEED, 'found', 'ab'), ('a\\(*b', 'a((b', SUCCEED, 'found', 'a((b'), ('a\\\\b', 'a\\b', SUCCEED, 'found', 'a\\b'), ('abc)', '-', SYNTAX_ERROR), ('(abc', '-', SYNTAX_ERROR), ('((a))', 'abc', SUCCEED, 'found+"-"+g1+"-"+g2', 'a-a-a'), ('(a)b(c)', 'abc', SUCCEED, 'found+"-"+g1+"-"+g2', 'abc-a-c'), ('a+b+c', 'aabbabc', SUCCEED, 'found', 'abc'), 
('a{1,}b{1,}c', 'aabbabc', SUCCEED, 'found', 'abc'), ('a**', '-', SYNTAX_ERROR), ('a.+?c', 'abcabc', SUCCEED, 'found', 'abc'), ('(a+|b)*', 'ab', SUCCEED, 'found+"-"+g1', 'ab-b'), ('(a+|b){0,}', 'ab', SUCCEED, 'found+"-"+g1', 'ab-b'), ('(a+|b)+', 'ab', SUCCEED, 'found+"-"+g1', 'ab-b'), ('(a+|b){1,}', 'ab', SUCCEED, 'found+"-"+g1', 'ab-b'), ('(a+|b)?', 'ab', SUCCEED, 'found+"-"+g1', 'a-a'), ('(a+|b){0,1}', 'ab', SUCCEED, 'found+"-"+g1', 'a-a'), (')(', '-', SYNTAX_ERROR), ('[^ab]*', 'cde', SUCCEED, 'found', 'cde'), ('abc', '', FAIL), ('a*', '', SUCCEED, 'found', ''), ('([abc])*d', 'abbbcd', SUCCEED, 'found+"-"+g1', 'abbbcd-c'), ('([abc])*bcd', 'abcd', SUCCEED, 'found+"-"+g1', 'abcd-a'), ('a|b|c|d|e', 'e', SUCCEED, 'found', 'e'), ('(a|b|c|d|e)f', 'ef', SUCCEED, 'found+"-"+g1', 'ef-e'), ('abcd*efg', 'abcdefg', SUCCEED, 'found', 'abcdefg'), ('ab*', 'xabyabbbz', SUCCEED, 'found', 'ab'), ('ab*', 'xayabbbz', SUCCEED, 'found', 'a'), ('(ab|cd)e', 'abcde', SUCCEED, 'found+"-"+g1', 'cde-cd'), ('[abhgefdc]ij', 'hij', SUCCEED, 'found', 'hij'), ('^(ab|cd)e', 'abcde', FAIL), ('(abc|)ef', 'abcdef', SUCCEED, 'found+"-"+g1', 'ef-'), ('(a|b)c*d', 'abcd', SUCCEED, 'found+"-"+g1', 'bcd-b'), ('(ab|ab*)bc', 'abc', SUCCEED, 'found+"-"+g1', 'abc-a'), ('a([bc]*)c*', 'abc', SUCCEED, 'found+"-"+g1', 'abc-bc'), ('a([bc]*)(c*d)', 'abcd', SUCCEED, 'found+"-"+g1+"-"+g2', 'abcd-bc-d'), ('a([bc]+)(c*d)', 'abcd', SUCCEED, 'found+"-"+g1+"-"+g2', 'abcd-bc-d'), ('a([bc]*)(c+d)', 'abcd', SUCCEED, 'found+"-"+g1+"-"+g2', 'abcd-b-cd'), ('a[bcd]*dcdcde', 'adcdcde', SUCCEED, 'found', 'adcdcde'), ('a[bcd]+dcdcde', 'adcdcde', FAIL), ('(ab|a)b*c', 'abc', SUCCEED, 'found+"-"+g1', 'abc-ab'), ('((a)(b)c)(d)', 'abcd', SUCCEED, 'g1+"-"+g2+"-"+g3+"-"+g4', 'abc-a-b-d'), ('[a-zA-Z_][a-zA-Z0-9_]*', 'alpha', SUCCEED, 'found', 'alpha'), ('^a(bc+|b[eh])g|.h$', 'abh', SUCCEED, 'found+"-"+g1', 'bh-None'), ('(bc+d$|ef*g.|h?i(j|k))', 'effgz', SUCCEED, 'found+"-"+g1+"-"+g2', 'effgz-effgz-None'), ('(bc+d$|ef*g.|h?i(j|k))', 'ij', SUCCEED, 'found+"-"+g1+"-"+g2', 'ij-ij-j'), ('(bc+d$|ef*g.|h?i(j|k))', 'effg', FAIL), ('(bc+d$|ef*g.|h?i(j|k))', 'bcdd', FAIL), ('(bc+d$|ef*g.|h?i(j|k))', 'reffgz', SUCCEED, 'found+"-"+g1+"-"+g2', 'effgz-effgz-None'), ('((((((((((a))))))))))', 'a', SUCCEED, 'g10', 'a'), ('((((((((((a))))))))))\\10', 'aa', SUCCEED, 'found', 'aa'), # Python does not have the same rules for \\41 so this is a syntax error # ('((((((((((a))))))))))\\41', 'aa', FAIL), # ('((((((((((a))))))))))\\41', 'a!', SUCCEED, 'found', 'a!'), ('((((((((((a))))))))))\\41', '', SYNTAX_ERROR), ('(?i)((((((((((a))))))))))\\41', '', SYNTAX_ERROR), ('(((((((((a)))))))))', 'a', SUCCEED, 'found', 'a'), ('multiple words of text', 'uh-uh', FAIL), ('multiple words', 'multiple words, yeah', SUCCEED, 'found', 'multiple words'), ('(.*)c(.*)', 'abcde', SUCCEED, 'found+"-"+g1+"-"+g2', 'abcde-ab-de'), ('\\((.*), (.*)\\)', '(a, b)', SUCCEED, 'g2+"-"+g1', 'b-a'), ('[k]', 'ab', FAIL), ('a[-]?c', 'ac', SUCCEED, 'found', 'ac'), ('(abc)\\1', 'abcabc', SUCCEED, 'g1', 'abc'), ('([a-c]*)\\1', 'abcabc', SUCCEED, 'g1', 'abc'), ('(?i)abc', 'ABC', SUCCEED, 'found', 'ABC'), ('(?i)abc', 'XBC', FAIL), ('(?i)abc', 'AXC', FAIL), ('(?i)abc', 'ABX', FAIL), ('(?i)abc', 'XABCY', SUCCEED, 'found', 'ABC'), ('(?i)abc', 'ABABC', SUCCEED, 'found', 'ABC'), ('(?i)ab*c', 'ABC', SUCCEED, 'found', 'ABC'), ('(?i)ab*bc', 'ABC', SUCCEED, 'found', 'ABC'), ('(?i)ab*bc', 'ABBC', SUCCEED, 'found', 'ABBC'), ('(?i)ab*?bc', 'ABBBBC', SUCCEED, 'found', 'ABBBBC'), ('(?i)ab{0,}?bc', 'ABBBBC', SUCCEED, 'found', 'ABBBBC'), 
('(?i)ab+?bc', 'ABBC', SUCCEED, 'found', 'ABBC'), ('(?i)ab+bc', 'ABC', FAIL), ('(?i)ab+bc', 'ABQ', FAIL), ('(?i)ab{1,}bc', 'ABQ', FAIL), ('(?i)ab+bc', 'ABBBBC', SUCCEED, 'found', 'ABBBBC'), ('(?i)ab{1,}?bc', 'ABBBBC', SUCCEED, 'found', 'ABBBBC'), ('(?i)ab{1,3}?bc', 'ABBBBC', SUCCEED, 'found', 'ABBBBC'), ('(?i)ab{3,4}?bc', 'ABBBBC', SUCCEED, 'found', 'ABBBBC'), ('(?i)ab{4,5}?bc', 'ABBBBC', FAIL), ('(?i)ab??bc', 'ABBC', SUCCEED, 'found', 'ABBC'), ('(?i)ab??bc', 'ABC', SUCCEED, 'found', 'ABC'), ('(?i)ab{0,1}?bc', 'ABC', SUCCEED, 'found', 'ABC'), ('(?i)ab??bc', 'ABBBBC', FAIL), ('(?i)ab??c', 'ABC', SUCCEED, 'found', 'ABC'), ('(?i)ab{0,1}?c', 'ABC', SUCCEED, 'found', 'ABC'), ('(?i)^abc$', 'ABC', SUCCEED, 'found', 'ABC'), ('(?i)^abc$', 'ABCC', FAIL), ('(?i)^abc', 'ABCC', SUCCEED, 'found', 'ABC'), ('(?i)^abc$', 'AABC', FAIL), ('(?i)abc$', 'AABC', SUCCEED, 'found', 'ABC'), ('(?i)^', 'ABC', SUCCEED, 'found', ''), ('(?i)$', 'ABC', SUCCEED, 'found', ''), ('(?i)a.c', 'ABC', SUCCEED, 'found', 'ABC'), ('(?i)a.c', 'AXC', SUCCEED, 'found', 'AXC'), ('(?i)a.*?c', 'AXYZC', SUCCEED, 'found', 'AXYZC'), ('(?i)a.*c', 'AXYZD', FAIL), ('(?i)a[bc]d', 'ABC', FAIL), ('(?i)a[bc]d', 'ABD', SUCCEED, 'found', 'ABD'), ('(?i)a[b-d]e', 'ABD', FAIL), ('(?i)a[b-d]e', 'ACE', SUCCEED, 'found', 'ACE'), ('(?i)a[b-d]', 'AAC', SUCCEED, 'found', 'AC'), ('(?i)a[-b]', 'A-', SUCCEED, 'found', 'A-'), ('(?i)a[b-]', 'A-', SUCCEED, 'found', 'A-'), ('(?i)a[b-a]', '-', SYNTAX_ERROR), ('(?i)a[]b', '-', SYNTAX_ERROR), ('(?i)a[', '-', SYNTAX_ERROR), ('(?i)a]', 'A]', SUCCEED, 'found', 'A]'), ('(?i)a[]]b', 'A]B', SUCCEED, 'found', 'A]B'), ('(?i)a[^bc]d', 'AED', SUCCEED, 'found', 'AED'), ('(?i)a[^bc]d', 'ABD', FAIL), ('(?i)a[^-b]c', 'ADC', SUCCEED, 'found', 'ADC'), ('(?i)a[^-b]c', 'A-C', FAIL), ('(?i)a[^]b]c', 'A]C', FAIL), ('(?i)a[^]b]c', 'ADC', SUCCEED, 'found', 'ADC'), ('(?i)ab|cd', 'ABC', SUCCEED, 'found', 'AB'), ('(?i)ab|cd', 'ABCD', SUCCEED, 'found', 'AB'), ('(?i)()ef', 'DEF', SUCCEED, 'found+"-"+g1', 'EF-'), ('(?i)*a', '-', SYNTAX_ERROR), ('(?i)(*)b', '-', SYNTAX_ERROR), ('(?i)$b', 'B', FAIL), ('(?i)a\\', '-', SYNTAX_ERROR), ('(?i)a\\(b', 'A(B', SUCCEED, 'found+"-"+g1', 'A(B-Error'), ('(?i)a\\(*b', 'AB', SUCCEED, 'found', 'AB'), ('(?i)a\\(*b', 'A((B', SUCCEED, 'found', 'A((B'), ('(?i)a\\\\b', 'A\\B', SUCCEED, 'found', 'A\\B'), ('(?i)abc)', '-', SYNTAX_ERROR), ('(?i)(abc', '-', SYNTAX_ERROR), ('(?i)((a))', 'ABC', SUCCEED, 'found+"-"+g1+"-"+g2', 'A-A-A'), ('(?i)(a)b(c)', 'ABC', SUCCEED, 'found+"-"+g1+"-"+g2', 'ABC-A-C'), ('(?i)a+b+c', 'AABBABC', SUCCEED, 'found', 'ABC'), ('(?i)a{1,}b{1,}c', 'AABBABC', SUCCEED, 'found', 'ABC'), ('(?i)a**', '-', SYNTAX_ERROR), ('(?i)a.+?c', 'ABCABC', SUCCEED, 'found', 'ABC'), ('(?i)a.*?c', 'ABCABC', SUCCEED, 'found', 'ABC'), ('(?i)a.{0,5}?c', 'ABCABC', SUCCEED, 'found', 'ABC'), ('(?i)(a+|b)*', 'AB', SUCCEED, 'found+"-"+g1', 'AB-B'), ('(?i)(a+|b){0,}', 'AB', SUCCEED, 'found+"-"+g1', 'AB-B'), ('(?i)(a+|b)+', 'AB', SUCCEED, 'found+"-"+g1', 'AB-B'), ('(?i)(a+|b){1,}', 'AB', SUCCEED, 'found+"-"+g1', 'AB-B'), ('(?i)(a+|b)?', 'AB', SUCCEED, 'found+"-"+g1', 'A-A'), ('(?i)(a+|b){0,1}', 'AB', SUCCEED, 'found+"-"+g1', 'A-A'), ('(?i)(a+|b){0,1}?', 'AB', SUCCEED, 'found+"-"+g1', '-None'), ('(?i))(', '-', SYNTAX_ERROR), ('(?i)[^ab]*', 'CDE', SUCCEED, 'found', 'CDE'), ('(?i)abc', '', FAIL), ('(?i)a*', '', SUCCEED, 'found', ''), ('(?i)([abc])*d', 'ABBBCD', SUCCEED, 'found+"-"+g1', 'ABBBCD-C'), ('(?i)([abc])*bcd', 'ABCD', SUCCEED, 'found+"-"+g1', 'ABCD-A'), ('(?i)a|b|c|d|e', 'E', SUCCEED, 'found', 'E'), ('(?i)(a|b|c|d|e)f', 
'EF', SUCCEED, 'found+"-"+g1', 'EF-E'), ('(?i)abcd*efg', 'ABCDEFG', SUCCEED, 'found', 'ABCDEFG'), ('(?i)ab*', 'XABYABBBZ', SUCCEED, 'found', 'AB'), ('(?i)ab*', 'XAYABBBZ', SUCCEED, 'found', 'A'), ('(?i)(ab|cd)e', 'ABCDE', SUCCEED, 'found+"-"+g1', 'CDE-CD'), ('(?i)[abhgefdc]ij', 'HIJ', SUCCEED, 'found', 'HIJ'), ('(?i)^(ab|cd)e', 'ABCDE', FAIL), ('(?i)(abc|)ef', 'ABCDEF', SUCCEED, 'found+"-"+g1', 'EF-'), ('(?i)(a|b)c*d', 'ABCD', SUCCEED, 'found+"-"+g1', 'BCD-B'), ('(?i)(ab|ab*)bc', 'ABC', SUCCEED, 'found+"-"+g1', 'ABC-A'), ('(?i)a([bc]*)c*', 'ABC', SUCCEED, 'found+"-"+g1', 'ABC-BC'), ('(?i)a([bc]*)(c*d)', 'ABCD', SUCCEED, 'found+"-"+g1+"-"+g2', 'ABCD-BC-D'), ('(?i)a([bc]+)(c*d)', 'ABCD', SUCCEED, 'found+"-"+g1+"-"+g2', 'ABCD-BC-D'), ('(?i)a([bc]*)(c+d)', 'ABCD', SUCCEED, 'found+"-"+g1+"-"+g2', 'ABCD-B-CD'), ('(?i)a[bcd]*dcdcde', 'ADCDCDE', SUCCEED, 'found', 'ADCDCDE'), ('(?i)a[bcd]+dcdcde', 'ADCDCDE', FAIL), ('(?i)(ab|a)b*c', 'ABC', SUCCEED, 'found+"-"+g1', 'ABC-AB'), ('(?i)((a)(b)c)(d)', 'ABCD', SUCCEED, 'g1+"-"+g2+"-"+g3+"-"+g4', 'ABC-A-B-D'), ('(?i)[a-zA-Z_][a-zA-Z0-9_]*', 'ALPHA', SUCCEED, 'found', 'ALPHA'), ('(?i)^a(bc+|b[eh])g|.h$', 'ABH', SUCCEED, 'found+"-"+g1', 'BH-None'), ('(?i)(bc+d$|ef*g.|h?i(j|k))', 'EFFGZ', SUCCEED, 'found+"-"+g1+"-"+g2', 'EFFGZ-EFFGZ-None'), ('(?i)(bc+d$|ef*g.|h?i(j|k))', 'IJ', SUCCEED, 'found+"-"+g1+"-"+g2', 'IJ-IJ-J'), ('(?i)(bc+d$|ef*g.|h?i(j|k))', 'EFFG', FAIL), ('(?i)(bc+d$|ef*g.|h?i(j|k))', 'BCDD', FAIL), ('(?i)(bc+d$|ef*g.|h?i(j|k))', 'REFFGZ', SUCCEED, 'found+"-"+g1+"-"+g2', 'EFFGZ-EFFGZ-None'), ('(?i)((((((((((a))))))))))', 'A', SUCCEED, 'g10', 'A'), ('(?i)((((((((((a))))))))))\\10', 'AA', SUCCEED, 'found', 'AA'), #('(?i)((((((((((a))))))))))\\41', 'AA', FAIL), #('(?i)((((((((((a))))))))))\\41', 'A!', SUCCEED, 'found', 'A!'), ('(?i)(((((((((a)))))))))', 'A', SUCCEED, 'found', 'A'), ('(?i)(?:(?:(?:(?:(?:(?:(?:(?:(?:(a))))))))))', 'A', SUCCEED, 'g1', 'A'), ('(?i)(?:(?:(?:(?:(?:(?:(?:(?:(?:(a|b|c))))))))))', 'C', SUCCEED, 'g1', 'C'), ('(?i)multiple words of text', 'UH-UH', FAIL), ('(?i)multiple words', 'MULTIPLE WORDS, YEAH', SUCCEED, 'found', 'MULTIPLE WORDS'), ('(?i)(.*)c(.*)', 'ABCDE', SUCCEED, 'found+"-"+g1+"-"+g2', 'ABCDE-AB-DE'), ('(?i)\\((.*), (.*)\\)', '(A, B)', SUCCEED, 'g2+"-"+g1', 'B-A'), ('(?i)[k]', 'AB', FAIL), # ('(?i)abcd', 'ABCD', SUCCEED, 'found+"-"+\\found+"-"+\\\\found', 'ABCD-$&-\\ABCD'), # ('(?i)a(bc)d', 'ABCD', SUCCEED, 'g1+"-"+\\g1+"-"+\\\\g1', 'BC-$1-\\BC'), ('(?i)a[-]?c', 'AC', SUCCEED, 'found', 'AC'), ('(?i)(abc)\\1', 'ABCABC', SUCCEED, 'g1', 'ABC'), ('(?i)([a-c]*)\\1', 'ABCABC', SUCCEED, 'g1', 'ABC'), ('a(?!b).', 'abad', SUCCEED, 'found', 'ad'), ('a(?=d).', 'abad', SUCCEED, 'found', 'ad'), ('a(?=c|d).', 'abad', SUCCEED, 'found', 'ad'), ('a(?:b|c|d)(.)', 'ace', SUCCEED, 'g1', 'e'), ('a(?:b|c|d)*(.)', 'ace', SUCCEED, 'g1', 'e'), ('a(?:b|c|d)+?(.)', 'ace', SUCCEED, 'g1', 'e'), ('a(?:b|(c|e){1,2}?|d)+?(.)', 'ace', SUCCEED, 'g1 + g2', 'ce'), ('^(.+)?B', 'AB', SUCCEED, 'g1', 'A'), # lookbehind: split by : but not if it is escaped by -. ('(?<!-):(.*?)(?<!-):', 'a:bc-:de:f', SUCCEED, 'g1', 'bc-:de' ), # escaping with \ as we know it ('(?<!\\\):(.*?)(?<!\\\):', 'a:bc\\:de:f', SUCCEED, 'g1', 'bc\\:de' ), # terminating with ' and escaping with ? as in edifact ("(?<!\\?)'(.*?)(?<!\\?)'", "a'bc?'de'f", SUCCEED, 'g1', "bc?'de" ), # Comments using the (?#...) 
syntax ('w(?# comment', 'w', SYNTAX_ERROR), ('w(?# comment 1)xy(?# comment 2)z', 'wxyz', SUCCEED, 'found', 'wxyz'), # Check odd placement of embedded pattern modifiers # not an error under PCRE/PRE: ('w(?i)', 'W', SUCCEED, 'found', 'W'), # ('w(?i)', 'W', SYNTAX_ERROR), # Comments using the x embedded pattern modifier ("""(?x)w# comment 1 x y # comment 2 z""", 'wxyz', SUCCEED, 'found', 'wxyz'), # using the m embedded pattern modifier ('^abc', """jkl abc xyz""", FAIL), ('(?m)^abc', """jkl abc xyz""", SUCCEED, 'found', 'abc'), ('(?m)abc$', """jkl xyzabc 123""", SUCCEED, 'found', 'abc'), # using the s embedded pattern modifier ('a.b', 'a\nb', FAIL), ('(?s)a.b', 'a\nb', SUCCEED, 'found', 'a\nb'), # test \w, etc. both inside and outside character classes ('\\w+', '--ab_cd0123--', SUCCEED, 'found', 'ab_cd0123'), ('[\\w]+', '--ab_cd0123--', SUCCEED, 'found', 'ab_cd0123'), ('\\D+', '1234abc5678', SUCCEED, 'found', 'abc'), ('[\\D]+', '1234abc5678', SUCCEED, 'found', 'abc'), ('[\\da-fA-F]+', '123abc', SUCCEED, 'found', '123abc'), # not an error under PCRE/PRE: # ('[\\d-x]', '-', SYNTAX_ERROR), (r'([\s]*)([\S]*)([\s]*)', ' testing!1972', SUCCEED, 'g3+g2+g1', 'testing!1972 '), (r'(\s*)(\S*)(\s*)', ' testing!1972', SUCCEED, 'g3+g2+g1', 'testing!1972 '), (r'\xff', '\377', SUCCEED, 'found', chr(255)), # new \x semantics (r'\x00ff', '\377', FAIL), # (r'\x00ff', '\377', SUCCEED, 'found', chr(255)), (r'\t\n\v\r\f\a\g', '\t\n\v\r\f\ag', SUCCEED, 'found', '\t\n\v\r\f\ag'), ('\t\n\v\r\f\a\g', '\t\n\v\r\f\ag', SUCCEED, 'found', '\t\n\v\r\f\ag'), (r'\t\n\v\r\f\a', '\t\n\v\r\f\a', SUCCEED, 'found', chr(9)+chr(10)+chr(11)+chr(13)+chr(12)+chr(7)), (r'[\t][\n][\v][\r][\f][\b]', '\t\n\v\r\f\b', SUCCEED, 'found', '\t\n\v\r\f\b'), # # post-1.5.2 additions # xmllib problem (r'(([a-z]+):)?([a-z]+)$', 'smil', SUCCEED, 'g1+"-"+g2+"-"+g3', 'None-None-smil'), # bug 110866: reference to undefined group (r'((.)\1+)', '', SYNTAX_ERROR), # bug 111869: search (PRE/PCRE fails on this one, SRE doesn't) (r'.*d', 'abc\nabd', SUCCEED, 'found', 'abd'), # bug 112468: various expected syntax errors (r'(', '', SYNTAX_ERROR), (r'[\41]', '!', SUCCEED, 'found', '!'), # bug 114033: nothing to repeat (r'(x?)?', 'x', SUCCEED, 'found', 'x'), # bug 115040: rescan if flags are modified inside pattern (r' (?x)foo ', 'foo', SUCCEED, 'found', 'foo'), # bug 115618: negative lookahead (r'(?<!abc)(d.f)', 'abcdefdof', SUCCEED, 'found', 'dof'), # bug 116251: character class bug (r'[\w-]+', 'laser_beam', SUCCEED, 'found', 'laser_beam'), # bug 123769+127259: non-greedy backtracking bug (r'.*?\S *:', 'xx:', SUCCEED, 'found', 'xx:'), (r'a[ ]*?\ (\d+).*', 'a 10', SUCCEED, 'found', 'a 10'), (r'a[ ]*?\ (\d+).*', 'a 10', SUCCEED, 'found', 'a 10'), # bug 127259: \Z shouldn't depend on multiline mode (r'(?ms).*?x\s*\Z(.*)','xx\nx\n', SUCCEED, 'g1', ''), # bug 128899: uppercase literals under the ignorecase flag (r'(?i)M+', 'MMM', SUCCEED, 'found', 'MMM'), (r'(?i)m+', 'MMM', SUCCEED, 'found', 'MMM'), (r'(?i)[M]+', 'MMM', SUCCEED, 'found', 'MMM'), (r'(?i)[m]+', 'MMM', SUCCEED, 'found', 'MMM'), # bug 130748: ^* should be an error (nothing to repeat) (r'^*', '', SYNTAX_ERROR), # bug 133283: minimizing repeat problem (r'"(?:\\"|[^"])*?"', r'"\""', SUCCEED, 'found', r'"\""'), # bug 477728: minimizing repeat problem (r'^.*?$', 'one\ntwo\nthree\n', FAIL), # bug 483789: minimizing repeat problem (r'a[^>]*?b', 'a>b', FAIL), # bug 490573: minimizing repeat problem (r'^a*?$', 'foo', FAIL), # bug 470582: nested groups problem (r'^((a)c)?(ab)$', 'ab', SUCCEED, 'g1+"-"+g2+"-"+g3', 
'None-None-ab'), # another minimizing repeat problem (capturing groups in assertions) ('^([ab]*?)(?=(b)?)c', 'abc', SUCCEED, 'g1+"-"+g2', 'ab-None'), ('^([ab]*?)(?!(b))c', 'abc', SUCCEED, 'g1+"-"+g2', 'ab-None'), ('^([ab]*?)(?<!(a))c', 'abc', SUCCEED, 'g1+"-"+g2', 'ab-None'), ] u = '\N{LATIN CAPITAL LETTER A WITH DIAERESIS}' tests.extend([ # bug 410271: \b broken under locales (r'\b.\b', 'a', SUCCEED, 'found', 'a'), (r'(?u)\b.\b', u, SUCCEED, 'found', u), (r'(?u)\w', u, SUCCEED, 'found', u), ])
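
# A minimal harness (an illustrative sketch, not the stdlib's own test
# runner) showing how one tuple above is exercised: compile and search the
# pattern, then eval the expression with `found`, `groups`, and g1..g99
# bound exactly as the header comments describe.
import re

def run_case(pattern, s, outcome, expr=None, expected=None):
    try:
        m = re.search(pattern, s)
    except re.error:
        return outcome == SYNTAX_ERROR
    if m is None:
        return outcome == FAIL
    if outcome != SUCCEED:
        return False
    scope = {'found': m.group(0), 'groups': m.groups()}
    for i in range(1, 100):
        try:
            g = m.group(i)
            scope['g%d' % i] = 'None' if g is None else g
        except IndexError:
            scope['g%d' % i] = 'Error'
    return eval(expr, scope) == expected

assert run_case('(a)b(c)', 'abc', SUCCEED, 'found+"-"+g1+"-"+g2', 'abc-a-c')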
lysenkoivan/nose-html-reporting
refs/heads/master
tests/test_nose_htmloutput.py
3
from __future__ import print_function

from process_tests import dump_on_error
from process_tests import TestProcess
from process_tests import wait_for_strings

TIMEOUT = 10


def test_sample():
    with TestProcess(
        'coverage', 'run', 'tests/nosetests.py',
        '--verbose',
        '--with-html',
        '--html-file=sample.html',
        'tests/test_sample.py'
    ) as proc:
        with dump_on_error(proc.read):
            wait_for_strings(proc.read, TIMEOUT, 'Ran 9 tests in')
            output = open('sample.html').read()
            assert """<tr>
    <td>test_sample</td>
    <td>1</td>
    <td>1</td>
    <td>1</td>
    <td>4</td>
</tr>""" in output
            assert """<tr>
    <td>test_sample.MainTestCase</td>
    <td>1</td>
    <td>0</td>
    <td>1</td>
    <td>2</td>
</tr>""" in output
            assert """<tr>
    <td>test_sample.FailedSetupTestCase</td>
    <td>0</td>
    <td>0</td>
    <td>0</td>
    <td>1</td>
</tr>""" in output
            assert """<tr>
    <td>test_sample.SecondTestCase</td>
    <td>0</td>
    <td>0</td>
    <td>2</td>
    <td>2</td>
</tr>""" in output
            assert """<tr>
    <td><strong>Total</strong></td>
    <td>2</td>
    <td>1</td>
    <td>4</td>
    <td>9</td>
</tr>""" in output
            assert "<h2>test_sample.MainTestCase (1 failures, 0 errors)</h2>" in output
            assert '<section id="test_sample.MainTestCase:test_b">' in output
            assert '<h3>test_b: <strong>' in output
            assert '<section id="test_sample:test_b">' in output
            assert '<h3>test_b: <strong>' in output
            assert '<li><a class="success">test_a</a></li>' in output
            assert '<li><a class="failed" href="#test_sample.MainTestCase:test_b">test_b</a></li>' in output
            assert '<h2>test_sample (1 failures, 1 errors)</h2>' in output
            assert '<li><a class="success">test_a</a></li>' in output
            assert '<li><a class="failed" href="#test_sample:test_b">test_b</a></li>' in output
            assert "<h2>test_sample.FailedSetupTestCase (0 failures, 1 errors)</h2>" in output
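
# A short sketch (with the table snippet assumed to match the generated
# report) of extracting the summary counts from the totals row, rather than
# matching the literal HTML as the asserts above do:
import re

_report = """<tr>
    <td><strong>Total</strong></td>
    <td>2</td>
    <td>1</td>
    <td>4</td>
    <td>9</td>
</tr>"""

assert [int(n) for n in re.findall(r'<td>(\d+)</td>', _report)] == [2, 1, 4, 9]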
kjagoo/wger_stark
refs/heads/master
wger/core/migrations/0004_auto_20150217_1914.py
3
# -*- coding: utf-8 -*-
from __future__ import unicode_literals

from django.db import models, migrations


class Migration(migrations.Migration):

    dependencies = [
        ('core', '0003_auto_20150217_1554'),
    ]

    operations = [
        migrations.AlterField(
            model_name='userprofile',
            name='num_days_weight_reminder',
            field=models.IntegerField(default=0, verbose_name='Automatic reminders for weight entries', max_length=30, help_text='Number of days after the last weight entry (enter 0 to deactivate)'),
            preserve_default=True,
        ),
    ]
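
# A sketch of the model declaration this AlterField keeps in sync with. The
# UserProfile class body is assumed for illustration (in a real project it
# lives in core/models.py, inside a configured Django project); only the
# field arguments are taken from the migration above. `makemigrations` diffs
# model state against prior migrations and emits AlterField on changes.
#
# class UserProfile(models.Model):
#     num_days_weight_reminder = models.IntegerField(
#         default=0,
#         verbose_name='Automatic reminders for weight entries',
#         max_length=30,  # carried over, but has no effect on IntegerField
#         help_text='Number of days after the last weight entry '
#                   '(enter 0 to deactivate)')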
ammarkhann/FinalSeniorCode
refs/heads/master
lib/python2.7/site-packages/django/views/generic/edit.py
45
import inspect
import re
import warnings

from django.core.exceptions import ImproperlyConfigured
from django.forms import models as model_forms
from django.http import HttpResponseRedirect
from django.utils import six
from django.utils.deprecation import RemovedInDjango110Warning
from django.utils.encoding import force_text
from django.views.generic.base import ContextMixin, TemplateResponseMixin, View
from django.views.generic.detail import (
    BaseDetailView, SingleObjectMixin, SingleObjectTemplateResponseMixin,
)

PERCENT_PLACEHOLDER_REGEX = re.compile(r'%\([^\)]+\)')  # RemovedInDjango110Warning


class FormMixinBase(type):
    def __new__(cls, name, bases, attrs):
        get_form = attrs.get('get_form')
        if get_form and inspect.isfunction(get_form):
            try:
                inspect.getcallargs(get_form, None)
            except TypeError:
                warnings.warn(
                    "`%s.%s.get_form` method must define a default value for "
                    "its `form_class` argument." % (attrs['__module__'], name),
                    RemovedInDjango110Warning, stacklevel=2
                )

                def get_form_with_form_class(self, form_class=None):
                    if form_class is None:
                        form_class = self.get_form_class()
                    return get_form(self, form_class=form_class)
                attrs['get_form'] = get_form_with_form_class
        return super(FormMixinBase, cls).__new__(cls, name, bases, attrs)


class FormMixin(six.with_metaclass(FormMixinBase, ContextMixin)):
    """
    A mixin that provides a way to show and handle a form in a request.
    """

    initial = {}
    form_class = None
    success_url = None
    prefix = None

    def get_initial(self):
        """
        Returns the initial data to use for forms on this view.
        """
        return self.initial.copy()

    def get_prefix(self):
        """
        Returns the prefix to use for forms on this view.
        """
        return self.prefix

    def get_form_class(self):
        """
        Returns the form class to use in this view.
        """
        return self.form_class

    def get_form(self, form_class=None):
        """
        Returns an instance of the form to be used in this view.
        """
        if form_class is None:
            form_class = self.get_form_class()
        return form_class(**self.get_form_kwargs())

    def get_form_kwargs(self):
        """
        Returns the keyword arguments for instantiating the form.
        """
        kwargs = {
            'initial': self.get_initial(),
            'prefix': self.get_prefix(),
        }

        if self.request.method in ('POST', 'PUT'):
            kwargs.update({
                'data': self.request.POST,
                'files': self.request.FILES,
            })
        return kwargs

    def get_success_url(self):
        """
        Returns the supplied success URL.
        """
        if self.success_url:
            # Forcing possible reverse_lazy evaluation
            url = force_text(self.success_url)
        else:
            raise ImproperlyConfigured(
                "No URL to redirect to. Provide a success_url.")
        return url

    def form_valid(self, form):
        """
        If the form is valid, redirect to the supplied URL.
        """
        return HttpResponseRedirect(self.get_success_url())

    def form_invalid(self, form):
        """
        If the form is invalid, re-render the context data with the
        data-filled form and errors.
        """
        return self.render_to_response(self.get_context_data(form=form))


class ModelFormMixin(FormMixin, SingleObjectMixin):
    """
    A mixin that provides a way to show and handle a modelform in a request.
    """

    fields = None

    def get_form_class(self):
        """
        Returns the form class to use in this view.
        """
        if self.fields is not None and self.form_class:
            raise ImproperlyConfigured(
                "Specifying both 'fields' and 'form_class' is not permitted."
            )
        if self.form_class:
            return self.form_class
        else:
            if self.model is not None:
                # If a model has been explicitly provided, use it
                model = self.model
            elif hasattr(self, 'object') and self.object is not None:
                # If this view is operating on a single object, use
                # the class of that object
                model = self.object.__class__
            else:
                # Try to get a queryset and extract the model class
                # from that
                model = self.get_queryset().model

            if self.fields is None:
                raise ImproperlyConfigured(
                    "Using ModelFormMixin (base class of %s) without "
                    "the 'fields' attribute is prohibited." % self.__class__.__name__
                )

            return model_forms.modelform_factory(model, fields=self.fields)

    def get_form_kwargs(self):
        """
        Returns the keyword arguments for instantiating the form.
        """
        kwargs = super(ModelFormMixin, self).get_form_kwargs()
        if hasattr(self, 'object'):
            kwargs.update({'instance': self.object})
        return kwargs

    def get_success_url(self):
        """
        Returns the supplied URL.
        """
        if self.success_url:
            # force_text can be removed with deprecation warning
            self.success_url = force_text(self.success_url)
            if PERCENT_PLACEHOLDER_REGEX.search(self.success_url):
                warnings.warn(
                    "%()s placeholder style in success_url is deprecated. "
                    "Please replace them by the {} Python format syntax.",
                    RemovedInDjango110Warning, stacklevel=2
                )
                url = self.success_url % self.object.__dict__
            else:
                url = self.success_url.format(**self.object.__dict__)
        else:
            try:
                url = self.object.get_absolute_url()
            except AttributeError:
                raise ImproperlyConfigured(
                    "No URL to redirect to. Either provide a url or define"
                    " a get_absolute_url method on the Model.")
        return url

    def form_valid(self, form):
        """
        If the form is valid, save the associated model.
        """
        self.object = form.save()
        return super(ModelFormMixin, self).form_valid(form)


class ProcessFormView(View):
    """
    A mixin that renders a form on GET and processes it on POST.
    """
    def get(self, request, *args, **kwargs):
        """
        Handles GET requests and instantiates a blank version of the form.
        """
        form = self.get_form()
        return self.render_to_response(self.get_context_data(form=form))

    def post(self, request, *args, **kwargs):
        """
        Handles POST requests, instantiating a form instance with the passed
        POST variables and then checking it for validity.
        """
        form = self.get_form()
        if form.is_valid():
            return self.form_valid(form)
        else:
            return self.form_invalid(form)

    # PUT is a valid HTTP verb for creating (with a known URL) or editing an
    # object, note that browsers only support POST for now.
    def put(self, *args, **kwargs):
        return self.post(*args, **kwargs)


class BaseFormView(FormMixin, ProcessFormView):
    """
    A base view for displaying a form.
    """


class FormView(TemplateResponseMixin, BaseFormView):
    """
    A view for displaying a form, and rendering a template response.
    """


class BaseCreateView(ModelFormMixin, ProcessFormView):
    """
    Base view for creating a new object instance.

    Using this base class requires subclassing to provide a response mixin.
    """
    def get(self, request, *args, **kwargs):
        self.object = None
        return super(BaseCreateView, self).get(request, *args, **kwargs)

    def post(self, request, *args, **kwargs):
        self.object = None
        return super(BaseCreateView, self).post(request, *args, **kwargs)


class CreateView(SingleObjectTemplateResponseMixin, BaseCreateView):
    """
    View for creating a new object instance,
    with a response rendered by template.
    """
    template_name_suffix = '_form'


class BaseUpdateView(ModelFormMixin, ProcessFormView):
    """
    Base view for updating an existing object.

    Using this base class requires subclassing to provide a response mixin.
    """
    def get(self, request, *args, **kwargs):
        self.object = self.get_object()
        return super(BaseUpdateView, self).get(request, *args, **kwargs)

    def post(self, request, *args, **kwargs):
        self.object = self.get_object()
        return super(BaseUpdateView, self).post(request, *args, **kwargs)


class UpdateView(SingleObjectTemplateResponseMixin, BaseUpdateView):
    """
    View for updating an object,
    with a response rendered by template.
    """
    template_name_suffix = '_form'


class DeletionMixin(object):
    """
    A mixin providing the ability to delete objects.
    """
    success_url = None

    def delete(self, request, *args, **kwargs):
        """
        Calls the delete() method on the fetched object and then
        redirects to the success URL.
        """
        self.object = self.get_object()
        success_url = self.get_success_url()
        self.object.delete()
        return HttpResponseRedirect(success_url)

    # Add support for browsers which only accept GET and POST for now.
    def post(self, request, *args, **kwargs):
        return self.delete(request, *args, **kwargs)

    def get_success_url(self):
        if self.success_url:
            # force_text can be removed with deprecation warning
            self.success_url = force_text(self.success_url)
            if PERCENT_PLACEHOLDER_REGEX.search(self.success_url):
                warnings.warn(
                    "%()s placeholder style in success_url is deprecated. "
                    "Please replace them by the {} Python format syntax.",
                    RemovedInDjango110Warning, stacklevel=2
                )
                return self.success_url % self.object.__dict__
            else:
                return self.success_url.format(**self.object.__dict__)
        else:
            raise ImproperlyConfigured(
                "No URL to redirect to. Provide a success_url.")


class BaseDeleteView(DeletionMixin, BaseDetailView):
    """
    Base view for deleting an object.

    Using this base class requires subclassing to provide a response mixin.
    """


class DeleteView(SingleObjectTemplateResponseMixin, BaseDeleteView):
    """
    View for deleting an object retrieved with `self.get_object()`,
    with a response rendered by template.
    """
    template_name_suffix = '_confirm_delete'
beeftornado/sentry
refs/heads/master
src/sentry/auth/providers/saml2/generic/apps.py
2
from __future__ import absolute_import

from django.apps import AppConfig


class Config(AppConfig):
    name = "sentry.auth.providers.saml2.generic"

    def ready(self):
        from sentry.auth import register

        from .provider import GenericSAML2Provider

        register("saml2", GenericSAML2Provider)
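
# A toy sketch of the provider-registry pattern the ready() hook relies on.
# This Registry class is an illustration, not Sentry's actual implementation;
# the point is that the imports happen inside ready() so registration runs
# only once Django has finished loading all apps.
class Registry(object):
    def __init__(self):
        self._providers = {}

    def register(self, key, provider_cls):
        self._providers[key] = provider_cls

    def get(self, key):
        return self._providers[key]

_registry = Registry()
_registry.register("saml2", object)  # stand-in for GenericSAML2Provider
assert _registry.get("saml2") is object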
cvogt/ensime-sublime
refs/heads/master
rpc.py
2
from __future__ import unicode_literals import inspect, functools from functools import partial as bind import sexp from sexp import key, sym # ############################# DATA STRUCTURES ############################## class ActiveRecord(object): @classmethod def parse_list(cls, raw): if not raw: return [] if type(raw[0]) == type(key(":key")): m = sexp.sexp_to_key_map(raw) field = ":" + cls.__name__.lower() + "s" return [cls.parse(raw) for raw in (m[field] if field in m else [])] else: return [cls.parse(raw) for raw in raw] @classmethod def parse(cls, raw): """Parse a data type from a raw data structure""" if not raw: return None value_map = sexp.sexp_to_key_map(raw) self = cls() populate = getattr(self, "populate") populate(value_map) return self def unparse(self): raise Exception("abstract method: ActiveRecord.unparse - on " + str(this)) def __str__(self): return str(self.__dict__) class Note(ActiveRecord): def populate(self, m): self.message = m[":msg"] self.file_name = m[":file"] self.severity = m[":severity"] self.start = m[":beg"] self.end = m[":end"] self.line = m[":line"] self.col = m[":col"] class CompletionInfoList(ActiveRecord): @classmethod def create(cls, prefix, completions): self = CompletionInfoList() self.prefix = prefix self.completions = completions return self def populate(self, m): self.prefix = m[":prefix"] self.completions = CompletionInfo.parse_list(m[":completions"]) class CompletionSignature(ActiveRecord): """A completion signature consists of the parameter 'sections' which is a list of name to type) and a 'result' type. n.b. these are user readable rather than programmtic for presentation to the user. # sections: List[List[(String, String)]], # result: String """ def __init__(self, sections, result): self.sections = sections self.result = result @classmethod def from_raw(cls, data): # this hacky is all because () in both false and and empty list # the parser cannot tell, so hack it until we move to jerk sections_raw = data[0] if(data[0] is not False) else [] sections = [] for s in sections_raw: if s == False: sections.append([]) else: sections.append(s) result = data[1] return CompletionSignature(sections, result) def __repr__(self): return 'CompletionSignature("{str(self.sections)}", "{self.result}")'.format(self=self) class CompletionInfo(ActiveRecord): def populate(self, m): self.name = m[":name"] self.signature = CompletionSignature.from_raw(m[":type-sig"]) self.is_callable = bool(m[":is-callable"]) if ":is-callable" in m else False self.type_id = m[":type-id"] self.to_insert = m[":to-insert"] if ":to-insert" in m else None def __repr__(self): return 'CompletionInfo("{self.name}", "{self.signature}", {self.is_callable}, {self.type_id}, ...)'.format( self=self) class SourcePosition(ActiveRecord): def populate(self, m): self.file_name = m[":file"] if ":file" in m else None self.offset = m[":offset"] if ":offset" in m else None self.start = m[":start"] if ":start" in m else None self.end = m[":end"] if ":end" in m else None class SymbolInfo(ActiveRecord): def populate(self, m): self.name = m[":name"] self.type = TypeInfo.parse(m[":type"]) self.decl_pos = SourcePosition.parse(m[":decl-pos"]) if ":decl-pos" in m else None self.is_callable = bool(m[":is-callable"]) if ":is-callable" in m else False self.owner_type_id = m[":owner-type-id"] if ":owner-type-id" in m else None class TypeInfo(ActiveRecord): def populate(self, m): self.name = m[":name"] self.type_id = m[":type-id"] isArrowType = bool(m[":arrow-type"]) if ":arrow-type" in m else False if isArrowType: 
self.arrow_type = True self.result_type = TypeInfo.parse(m[":result-type"]) self.param_sections = ParamSectionInfo.parse_list(m[":param-sections"]) if ":param-sections" in m else [] else: # Basic type self.arrow_type = False self.full_name = m[":full-name"] if ":full-name" in m else None self.decl_as = m[":decl-as"] if ":decl-as" in m else None self.decl_pos = SourcePosition.parse(m[":pos"]) if ":pos" in m else None self.type_args = TypeInfo.parse_list(m[":type-args"]) if ":type-args" in m else [] self.outer_type_id = m[":outer-type-id"] if ":outer-type-id" in m else None self.members = Member.parse_list(m[":members"]) if ":members" in m else [] class SymbolSearchResults(ActiveRecord): # we override parse here because raw contains a List of SymbolSearchResult # typehe ActiveRecord parse method expects raw to contain an object at this point # and calls sexp_to_key_map @classmethod def parse(cls, raw): if not raw: return None self = cls() self.populate(raw) return self def populate(self, m): self.results = SymbolSearchResult.parse_list(m) class SymbolSearchResult(ActiveRecord): def populate(self, m): self.name = m[":name"] self.local_name = m[":local-name"] self.decl_as = m[":decl-as"] if ":decl-as" in m else None self.pos = SourcePosition.parse(m[":pos"]) if ":pos" in m else None class RefactorResult(ActiveRecord): def populate(self, m): self.done = True class Member(ActiveRecord): def populate(self, m): pass class ParamSectionInfo(ActiveRecord): def populate(self, m): self.is_implicit = bool(m[":is-implicit"]) if ":is-implicit" in m else False self.params = Param.parse_list(m[":params"]) if ":params" in m else [] class Param(ActiveRecord): def populate(self, m): pass class DebugEvent(ActiveRecord): def populate(self, m): self.type = str(m[":type"]) if self.type == "output": self.body = m[":body"] elif self.type == "step": self.thread_id = m[":thread-id"] self.thread_name = m[":thread-name"] self.file_name = m[":file"] self.line = m[":line"] elif self.type == "breakpoint": self.thread_id = m[":thread-id"] self.thread_name = m[":thread-name"] self.file_name = m[":file"] self.line = m[":line"] elif self.type == "death": pass elif self.type == "start": pass elif self.type == "disconnect": pass elif self.type == "exception": self.exception_id = m[":exception"] self.thread_id = m[":thread-id"] self.thread_name = m[":thread-name"] self.file_name = m[":file"] self.line = m[":line"] elif self.type == "threadStart": self.thread_id = m[":thread-id"] elif self.type == "threadDeath": self.thread_id = m[":thread-id"] else: raise Exception("unexpected debug event of type " + str(self.type) + ": " + str(m)) class DebugKickoffResult(ActiveRecord): def __nonzero__(self): return not self.error def populate(self, m): status = m[":status"] if status == "success": self.error = False elif status == "error": self.error = True self.code = m[":error-code"] self.details = m[":details"] else: raise Exception("unexpected status: " + str(status)) class DebugBacktrace(ActiveRecord): def populate(self, m): self.frames = DebugStackFrame.parse_list(m[":frames"]) if ":frames" in m else [] self.thread_id = m[":thread-id"] self.thread_name = m[":thread-name"] class SourceFileInfo(ActiveRecord): def populate(self, m): self.file = m[":file"] self.contents = m[":contents"] if ":contents" in m else None self.contents_in = m[":contents-in"] if ":contents-in" in m else None def __init__(self, file_name, contents=None, contents_in=None): self.file = file_name self.contents = contents self.contents_in = contents_in def unparse(self): 
base = [key(":file"), self.file] if self.contents is not None: base.extend([key(":contents"), self.contents]) if self.contents_in is not None: base.extend([key(":contents-in"), self.contents_in]) return [base] class DebugStackFrame(ActiveRecord): def populate(self, m): self.index = m[":index"] self.locals = DebugStackLocal.parse_list(m[":locals"]) if ":locals" in m else [] self.num_args = m[":num-args"] self.class_name = m[":class-name"] self.method_name = m[":method-name"] self.pc_location = DebugSourcePosition.parse(m[":pc-location"]) self.this_object_id = m[":this-object-id"] class DebugSourcePosition(ActiveRecord): def populate(self, m): self.file_name = m[":file"] self.line = m[":line"] class DebugStackLocal(ActiveRecord): def populate(self, m): self.index = m[":index"] self.name = m[":name"] self.summary = m[":summary"] self.type_name = m[":type-name"] class DebugValue(ActiveRecord): def populate(self, m): self.type = m[":val-type"] self.type_name = m[":type-name"] self.length = m[":length"] if ":length" in m else None self.element_type_name = m[":element-type-name"] if ":element-type-name" in m else None self.summary = m[":summary"] if ":summary" in m else None self.object_id = m[":object-id"] if ":object-id" in m else None self.fields = DebugObjectField.parse_list(m[":fields"]) if ":fields" in m else [] if str(self.type) == "null" or str(self.type) == "prim" or str(self.type) == "obj" or str( self.type) == "str" or str(self.type) == "arr": pass else: raise Exception("unexpected debug value of type " + str(self.type) + ": " + str(m)) class DebugObjectField(ActiveRecord): def populate(self, m): self.index = m[":index"] self.name = m[":name"] self.summary = m[":summary"] self.type_name = m[":type-name"] class DebugLocation(ActiveRecord): def populate(self, m): self.type = str(m[":type"]) if self.type == "reference": self.object_id = m[":object-id"] elif self.type == "element": self.object_id = m[":object-id"] self.index = m[":index"] elif self.type == "field": self.object_id = m[":object-id"] self.field = m[":field"] elif self.type == "slot": self.thread_id = m[":thread-id"] self.frame = m[":frame"] self.offset = m[":offset"] else: raise Exception("unexpected debug location of type " + str(self.type) + ": " + str(m)) class DebugLocationReference(DebugLocation): def __init__(self, object_id): self.object_id = object_id def unparse(self): return [[key(":type"), sym("reference"), key(":object-id"), self.object_id]] class DebugLocationElement(DebugLocation): def __init__(self, object_id, index): self.object_id = object_id self.index = index def unparse(self): return [[key(":type"), sym("element"), key(":object-id"), self.object_id, key(":index"), self.index]] class DebugLocationField(DebugLocation): def __init__(self, object_id, field): self.object_id = object_id self.field = field def unparse(self): return [[key(":type"), sym("field"), key(":object-id"), self.object_id, key(":field"), self.field]] class DebugLocationSlot(DebugLocation): def __init__(self, thread_id, frame, offset): self.thread_id = thread_id self.frame = frame self.offset = offset def unparse(self): return [ [key(":type"), sym("slot"), key(":thread-id"), self.thread_id, key(":frame"), self.frame, key(":offset"), self.offset]] # ############################# REMOTE PROCEDURES ############################## def _mk_req(func, *args, **kwargs): if kwargs: raise Exception("kwargs are not supported by the RPC proxy") req = [] def translate_name(name): if name.startswith("_"): name = name[1:] name = name.replace("_", "-") 
return name req.append(sym("swank:" + translate_name(func.__name__))) (spec_args, spec_varargs, spec_keywords, spec_defaults) = inspect.getargspec(func) if spec_varargs: raise Exception("varargs in signature of " + str(func)) if spec_keywords: raise Exception("keywords in signature of " + str(func)) if len(spec_args) != len(args): if len(args) < len(spec_args) and len(args) + len(spec_defaults) >= len(spec_args): # everything is fine. we can use default values for parameters to provide arguments to the call args += spec_defaults[len(spec_defaults) - len(spec_args) + len(args):] else: preamble = "argc mismatch in signature of " + str(func) + ": " expected = "expected " + str(len(spec_args)) + " args " + str(spec_args) + ", " actual = "actual " + str(len(args)) + " args " + str(args) + " with types " + str( map(lambda a: type(a), args)) raise Exception(preamble + expected + actual) for arg in args[1:]: # strip off self if hasattr(arg, "unparse"): argreq = arg.unparse() else: argreq = [arg] req.extend(argreq) return req def async_rpc(*args): parser = args[0] if args else lambda raw: raw def wrapper(func): def wrapped(*args, **kwargs): self = args[0] if callable(args[-1]): on_complete = args[-1] args = args[:-1] else: on_complete = None req = _mk_req(func, *args, **kwargs) def callback(payload): data = parser(payload) if (on_complete): on_complete(data) self.env.controller.client.async_req(req, callback, call_back_into_ui_thread=True) return wrapped return wrapper def sync_rpc(*args): parser = args[0] if args else lambda raw: raw def wrapper(func): def wrapped(*args, **kwargs): self = args[0] req = _mk_req(func, *args, **kwargs) timeout = self.env.settings.get("timeout_" + func.__name__) raw = self.env.controller.client.sync_req(req, timeout=timeout) return parser(raw) return wrapped return wrapper class Rpc(object): def __init__(self, env): self.env = env @sync_rpc() def shutdown_server(self): pass @async_rpc() def typecheck_file(self, file): pass @async_rpc() def patch_source(self, file_name, edits): pass @sync_rpc(CompletionInfoList.parse) def completions(self, file_name, position, max_results, case_sensitive, reload_from_disk): pass @async_rpc(TypeInfo.parse) def type_at_point(self, file_name, position): pass @async_rpc(SymbolInfo.parse) def symbol_at_point(self, file_name, position): pass @async_rpc(SymbolSearchResults.parse_list) def import_suggestions(self, file_name, position, type_names, max_results): pass @async_rpc(RefactorResult.parse) def prepare_refactor(self, procedure_id, refactor_type, parameters, require_confirmation): pass @async_rpc() def debug_set_break(self, file_name, line): pass @async_rpc() def debug_clear_break(self, file_name, line): pass @async_rpc() def debug_clear_all_breaks(self): pass @async_rpc(DebugKickoffResult.parse) def _debug_start(self, command_line): pass @async_rpc(DebugKickoffResult.parse) def _debug_attach(self, host, port): pass def debug_start(self, launch, breakpoints, on_complete=None): def set_breakpoints(breakpoints, status): if status: if breakpoints: self.debug_set_break(breakpoints[0].file_name, breakpoints[0].line, bind(set_breakpoints, breakpoints[1:])) else: if launch.main_class: self._debug_start(launch.command_line, on_complete) elif launch.remote_address: self._debug_attach(launch.remote_host, launch.remote_port, on_complete) else: raise Exception("unsupported launch: " + str(launch)) elif on_complete: on_complete(status) def clear_breakpoints(): def callback(status): if status: set_breakpoints(breakpoints, status) elif on_complete: 
on_complete(status) self.debug_clear_all_breaks(callback) clear_breakpoints() @async_rpc() def debug_stop(self): pass @async_rpc() def debug_step(self, thread_id): pass @async_rpc() def debug_next(self, thread_id): pass @async_rpc() def debug_continue(self, thread_id): pass @sync_rpc(DebugBacktrace.parse) def debug_backtrace(self, thread_id, first_frame=0, num_frames=-1): pass @sync_rpc(DebugValue.parse) def debug_value(self, debug_location): pass @sync_rpc() def debug_to_string(self, thread_id, debug_location): pass
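The RPC proxy above derives each wire command from the Python method name: _mk_req strips a leading underscore and turns the remaining underscores into hyphens before prefixing "swank:". A minimal standalone sketch of that translation, lifted out of _mk_req for illustration:

def translate_name(name):
    # Strip the "private" underscore, then map Python underscores to the
    # Lisp-style hyphens the swank protocol uses.
    if name.startswith("_"):
        name = name[1:]
    return name.replace("_", "-")

for func_name in ("_debug_start", "type_at_point", "completions"):
    print("swank:" + translate_name(func_name))
# -> swank:debug-start, swank:type-at-point, swank:completions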
wwj718/edx-platform
refs/heads/master
common/test/acceptance/tests/lms/test_lms_dashboard.py
24
# -*- coding: utf-8 -*- """ End-to-end tests for the main LMS Dashboard (aka, Student Dashboard). """ import datetime from nose.plugins.attrib import attr from ..helpers import UniqueCourseTest from ...fixtures.course import CourseFixture from ...pages.lms.auto_auth import AutoAuthPage from ...pages.lms.dashboard import DashboardPage DEFAULT_SHORT_DATE_FORMAT = "%b %d, %Y" DEFAULT_DAY_AND_TIME_FORMAT = "%A at %-I%P" class BaseLmsDashboardTest(UniqueCourseTest): """ Base test suite for the LMS Student Dashboard """ def setUp(self): """ Initializes the components (page objects, courses, users) for this test suite """ # Some parameters are provided by the parent setUp() routine, such as the following: # self.course_id, self.course_info, self.unique_id super(BaseLmsDashboardTest, self).setUp() # Load page objects for use by the tests self.dashboard_page = DashboardPage(self.browser) # Configure some aspects of the test course and install the settings into the course self.course_fixture = CourseFixture( self.course_info["org"], self.course_info["number"], self.course_info["run"], self.course_info["display_name"], ) self.course_fixture.add_advanced_settings({ u"social_sharing_url": {u"value": "http://custom/course/url"} }) self.course_fixture.install() # Create the test user, register them for the course, and authenticate self.username = "test_{uuid}".format(uuid=self.unique_id[0:6]) self.email = "{user}@example.com".format(user=self.username) AutoAuthPage( self.browser, username=self.username, email=self.email, course_id=self.course_id ).visit() # Navigate the authenticated, enrolled user to the dashboard page and get testing! self.dashboard_page.visit() class LmsDashboardPageTest(BaseLmsDashboardTest): """ Test suite for the LMS Student Dashboard page """ def setUp(self): super(LmsDashboardPageTest, self).setUp() # now datetime for usage in tests self.now = datetime.datetime.now() def test_dashboard_course_listings(self): """ Perform a general validation of the course listings section """ course_listings = self.dashboard_page.get_course_listings() self.assertEqual(len(course_listings), 1) def test_dashboard_social_sharing_feature(self): """ Validate the behavior of the social sharing feature """ twitter_widget = self.dashboard_page.get_course_social_sharing_widget('twitter') twitter_url = "https://twitter.com/intent/tweet?text=Testing+feature%3A%20http%3A%2F%2Fcustom%2Fcourse%2Furl" self.assertEqual(twitter_widget.attrs('title')[0], 'Share on Twitter') self.assertEqual(twitter_widget.attrs('data-tooltip')[0], 'Share on Twitter') self.assertEqual(twitter_widget.attrs('aria-haspopup')[0], 'true') self.assertEqual(twitter_widget.attrs('aria-expanded')[0], 'false') self.assertEqual(twitter_widget.attrs('target')[0], '_blank') self.assertIn(twitter_url, twitter_widget.attrs('href')[0]) self.assertIn(twitter_url, twitter_widget.attrs('onclick')[0]) facebook_widget = self.dashboard_page.get_course_social_sharing_widget('facebook') facebook_url = "https://www.facebook.com/sharer/sharer.php?u=http%3A%2F%2Fcustom%2Fcourse%2Furl" self.assertEqual(facebook_widget.attrs('title')[0], 'Share on Facebook') self.assertEqual(facebook_widget.attrs('data-tooltip')[0], 'Share on Facebook') self.assertEqual(facebook_widget.attrs('aria-haspopup')[0], 'true') self.assertEqual(facebook_widget.attrs('aria-expanded')[0], 'false') self.assertEqual(facebook_widget.attrs('target')[0], '_blank') self.assertIn(facebook_url, facebook_widget.attrs('href')[0]) self.assertIn(facebook_url, facebook_widget.attrs('onclick')[0]) def 
test_ended_course_date(self): """ Scenario: Course Date should have the format 'Ended - Sep 23, 2015' if the course on student dashboard has ended. As a Student, Given that I have enrolled to a course And the course has ended in the past When I visit dashboard page Then the course date should have the following format "Ended - %b %d, %Y" e.g. "Ended - Sep 23, 2015" """ course_start_date = datetime.datetime(1970, 1, 1) course_end_date = self.now - datetime.timedelta(days=90) self.course_fixture.add_course_details({'start_date': course_start_date, 'end_date': course_end_date}) self.course_fixture.configure_course() end_date = course_end_date.strftime(DEFAULT_SHORT_DATE_FORMAT) expected_course_date = "Ended - {end_date}".format(end_date=end_date) # reload the page for changes to course date changes to appear in dashboard self.dashboard_page.visit() course_date = self.dashboard_page.get_course_date() # Test that proper course date with 'ended' message is displayed if a course has already ended self.assertEqual(course_date, expected_course_date) def test_running_course_date(self): """ Scenario: Course Date should have the format 'Started - Sep 23, 2015' if the course on student dashboard is running. As a Student, Given that I have enrolled to a course And the course has started And the course is in progress When I visit dashboard page Then the course date should have the following format "Started - %b %d, %Y" e.g. "Started - Sep 23, 2015" """ course_start_date = datetime.datetime(1970, 1, 1) course_end_date = self.now + datetime.timedelta(days=90) self.course_fixture.add_course_details({'start_date': course_start_date, 'end_date': course_end_date}) self.course_fixture.configure_course() start_date = course_start_date.strftime(DEFAULT_SHORT_DATE_FORMAT) expected_course_date = "Started - {start_date}".format(start_date=start_date) # reload the page for changes to course date changes to appear in dashboard self.dashboard_page.visit() course_date = self.dashboard_page.get_course_date() # Test that proper course date with 'started' message is displayed if a course is in running state self.assertEqual(course_date, expected_course_date) def test_future_course_date(self): """ Scenario: Course Date should have the format 'Starts - Sep 23, 2015' if the course on student dashboard starts in future. As a Student, Given that I have enrolled to a course And the course starts in future And the course does not start within 5 days When I visit dashboard page Then the course date should have the following format "Starts - %b %d, %Y" e.g. "Starts - Sep 23, 2015" """ course_start_date = self.now + datetime.timedelta(days=30) course_end_date = self.now + datetime.timedelta(days=365) self.course_fixture.add_course_details({'start_date': course_start_date, 'end_date': course_end_date}) self.course_fixture.configure_course() start_date = course_start_date.strftime(DEFAULT_SHORT_DATE_FORMAT) expected_course_date = "Starts - {start_date}".format(start_date=start_date) # reload the page for changes to course date changes to appear in dashboard self.dashboard_page.visit() course_date = self.dashboard_page.get_course_date() # Test that proper course date with 'starts' message is displayed if a course is about to start in future, # and course does not start within 5 days self.assertEqual(course_date, expected_course_date) def test_near_future_course_date(self): """ Scenario: Course Date should have the format 'Starts - Wednesday at 5am UTC' if the course on student dashboard starts within 5 days. 
As a Student, Given that I have enrolled to a course And the course starts within 5 days When I visit dashboard page Then the course date should have the following format "Starts - %A at %-I%P UTC" e.g. "Starts - Wednesday at 5am UTC" """ course_start_date = self.now + datetime.timedelta(days=2) course_end_date = self.now + datetime.timedelta(days=365) self.course_fixture.add_course_details({'start_date': course_start_date, 'end_date': course_end_date}) self.course_fixture.configure_course() start_date = course_start_date.strftime(DEFAULT_DAY_AND_TIME_FORMAT) expected_course_date = "Starts - {start_date} UTC".format(start_date=start_date) # reload the page for changes to course date changes to appear in dashboard self.dashboard_page.visit() course_date = self.dashboard_page.get_course_date() # Test that proper course date with 'starts' message is displayed if a course is about to start in future, # and course starts within 5 days self.assertEqual(course_date, expected_course_date) @attr('a11y') class LmsDashboardA11yTest(BaseLmsDashboardTest): """ Class to test lms student dashboard accessibility. """ def test_dashboard_course_listings_a11y(self): """ Test the accessibility of the course listings """ course_listings = self.dashboard_page.get_course_listings() self.assertEqual(len(course_listings), 1) self.dashboard_page.a11y_audit.config.set_rules({ "ignore": [ 'skip-link', # TODO: AC-179 'link-href', # TODO: AC-238, AC-179 ], }) self.dashboard_page.a11y_audit.check_for_accessibility_errors()
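The expected strings in the scenarios above fall straight out of strftime with the two module-level formats. Note that %-I (hour without zero padding) and %P (lowercase am/pm) are glibc extensions, so this quick check assumes a Linux host, as the suite itself does:

import datetime

DEFAULT_SHORT_DATE_FORMAT = "%b %d, %Y"
DEFAULT_DAY_AND_TIME_FORMAT = "%A at %-I%P"

end = datetime.datetime(2015, 9, 23)
print("Ended - " + end.strftime(DEFAULT_SHORT_DATE_FORMAT))       # Ended - Sep 23, 2015

start = datetime.datetime(2015, 9, 23, 5, 0)                      # a Wednesday
print("Starts - " + start.strftime(DEFAULT_DAY_AND_TIME_FORMAT) + " UTC")
# Starts - Wednesday at 5am UTC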
fingolfin/scummvm
refs/heads/master
devtools/tasmrecover/tasm/proc.py
57
# ScummVM - Graphic Adventure Engine # # ScummVM is the legal property of its developers, whose names # are too numerous to list here. Please refer to the COPYRIGHT # file distributed with this source distribution. # # This program is free software; you can redistribute it and/or # modify it under the terms of the GNU General Public License # as published by the Free Software Foundation; either version 2 # of the License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. # import re import op class proc: last_addr = 0xc000 def __init__(self, name): self.name = name self.calls = [] self.stmts = [] self.labels = set() self.retlabels = set() self.__label_re = re.compile(r'^(\S+):(.*)$') self.offset = proc.last_addr proc.last_addr += 4 def add_label(self, label): self.stmts.append(op.label(label)) self.labels.add(label) def remove_label(self, label): try: self.labels.remove(label) except: pass for i in xrange(len(self.stmts)): if isinstance(self.stmts[i], op.label) and self.stmts[i].name == label: self.stmts[i] = op._nop(None) return def optimize_sequence(self, cls): i = 0 stmts = self.stmts while i < len(stmts): if not isinstance(stmts[i], cls): i += 1 continue if i > 0 and isinstance(stmts[i - 1], op._rep): #skip rep prefixed instructions for now i += 1 continue j = i + 1 while j < len(stmts): if not isinstance(stmts[j], cls): break j = j + 1 n = j - i if n > 1: print "Eliminate consequtive storage instructions at %u-%u" %(i, j) for k in range(i+1,j): stmts[k] = op._nop(None) stmts[i].repeat = n else: i = j i = 0 while i < len(stmts): if not isinstance(stmts[i], op._rep): i += 1 continue if i + 1 >= len(stmts): break if isinstance(stmts[i + 1], cls): stmts[i + 1].repeat = 'cx' stmts[i + 1].clear_cx = True stmts[i] = op._nop(None) i += 1 return def optimize(self, keep_labels=[]): print "optimizing..." #trivial simplifications while len(self.stmts) and isinstance(self.stmts[-1], op.label): print "stripping last label" self.stmts.pop() #mark labels that directly precede a ret for i in range(len(self.stmts)): if not isinstance(self.stmts[i], op.label): continue j = i while j < len(self.stmts) and isinstance(self.stmts[j], (op.label, op._nop)): j += 1 if j == len(self.stmts) or isinstance(self.stmts[j], op._ret): print "Return label: %s" % (self.stmts[i].name,) self.retlabels.add(self.stmts[i].name) #merging push ax pop bx constructs i = 0 while i + 1 < len(self.stmts): a, b = self.stmts[i], self.stmts[i + 1] if isinstance(a, op._push) and isinstance(b, op._pop): ar, br = a.regs, b.regs movs = [] while len(ar) and len(br): src = ar.pop() dst = br.pop(0) movs.append(op._mov2(dst, src)) if len(br) == 0: self.stmts.pop(i + 1) print "merging %d push-pops into movs" %(len(movs)) for m in movs: print "\t%s <- %s" %(m.dst, m.src) self.stmts[i + 1:i + 1] = movs if len(ar) == 0: self.stmts.pop(i) else: i += 1 #eliminating unused labels for s in list(self.stmts): if not isinstance(s, op.label): continue print "checking label %s..." 
%s.name used = s.name in keep_labels if s.name not in self.retlabels: for j in self.stmts: if isinstance(j, op.basejmp) and j.label == s.name: print "used" used = True break if not used: print self.labels self.remove_label(s.name) #removing duplicate rets and rets at end for i in xrange(len(self.stmts)): if isinstance(self.stmts[i], op._ret): j = i+1 while j < len(self.stmts) and isinstance(self.stmts[j], op._nop): j += 1 if j == len(self.stmts) or isinstance(self.stmts[j], op._ret): self.stmts[i] = op._nop(None) self.optimize_sequence(op._stosb); self.optimize_sequence(op._stosw); self.optimize_sequence(op._movsb); self.optimize_sequence(op._movsw); def add(self, stmt): #print stmt comment = stmt.rfind(';') if comment >= 0: stmt = stmt[:comment] stmt = stmt.strip() r = self.__label_re.search(stmt) if r is not None: #label self.add_label(r.group(1).lower()) #print "remains: %s" %r.group(2) stmt = r.group(2).strip() if len(stmt) == 0: return s = stmt.split(None) cmd = s[0] cl = getattr(op, '_' + cmd) arg = " ".join(s[1:]) if len(s) > 1 else str() o = cl(arg) self.stmts.append(o) def __str__(self): r = [] for i in self.stmts: r.append(i.__str__()) return "\n".join(r) def visit(self, visitor, skip = 0): for i in xrange(skip, len(self.stmts)): self.stmts[i].visit(visitor)
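proc.add above recognizes labels by peeling a leading "name:" off each statement after comments are stripped. A small standalone sketch of that parsing step (the input lines are illustrative):

import re

label_re = re.compile(r'^(\S+):(.*)$')

for stmt in ("loop1: mov ax, bx ; copy", "ret"):
    comment = stmt.rfind(';')                  # same comment handling as proc.add
    if comment >= 0:
        stmt = stmt[:comment]
    stmt = stmt.strip()
    r = label_re.search(stmt)
    if r is not None:
        print("label: " + r.group(1).lower())  # -> loop1
        stmt = r.group(2).strip()
    if stmt:
        parts = stmt.split(None)
        print("op: " + parts[0] + " args: " + " ".join(parts[1:]))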
dhoffman34/django
refs/heads/master
django/db/migrations/executor.py
2
from __future__ import unicode_literals from django.db import migrations from django.apps.registry import apps as global_apps from .loader import MigrationLoader from .recorder import MigrationRecorder class MigrationExecutor(object): """ End-to-end migration execution - loads migrations, and runs them up or down to a specified set of targets. """ def __init__(self, connection, progress_callback=None): self.connection = connection self.loader = MigrationLoader(self.connection) self.recorder = MigrationRecorder(self.connection) self.progress_callback = progress_callback def migration_plan(self, targets): """ Given a set of targets, returns a list of (Migration instance, backwards?). """ plan = [] applied = set(self.loader.applied_migrations) for target in targets: # If the target is (app_label, None), that means unmigrate everything if target[1] is None: for root in self.loader.graph.root_nodes(): if root[0] == target[0]: for migration in self.loader.graph.backwards_plan(root): if migration in applied: plan.append((self.loader.graph.nodes[migration], True)) applied.remove(migration) # If the migration is already applied, do backwards mode, # otherwise do forwards mode. elif target in applied: backwards_plan = self.loader.graph.backwards_plan(target)[:-1] # We only do this if the migration is not the most recent one # in its app - that is, another migration with the same app # label is in the backwards plan if any(node[0] == target[0] for node in backwards_plan): for migration in backwards_plan: if migration in applied: plan.append((self.loader.graph.nodes[migration], True)) applied.remove(migration) else: for migration in self.loader.graph.forwards_plan(target): if migration not in applied: plan.append((self.loader.graph.nodes[migration], False)) applied.add(migration) return plan def migrate(self, targets, plan=None, fake=False): """ Migrates the database up to the given targets. """ if plan is None: plan = self.migration_plan(targets) for migration, backwards in plan: if not backwards: self.apply_migration(migration, fake=fake) else: self.unapply_migration(migration, fake=fake) def collect_sql(self, plan): """ Takes a migration plan and returns a list of collected SQL statements that represent the best-efforts version of that plan. """ statements = [] for migration, backwards in plan: with self.connection.schema_editor(collect_sql=True) as schema_editor: project_state = self.loader.project_state((migration.app_label, migration.name), at_end=False) if not backwards: migration.apply(project_state, schema_editor, collect_sql=True) else: migration.unapply(project_state, schema_editor, collect_sql=True) statements.extend(schema_editor.collected_sql) return statements def apply_migration(self, migration, fake=False): """ Runs a migration forwards. 
""" if self.progress_callback: self.progress_callback("apply_start", migration, fake) if not fake: # Test to see if this is an already-applied initial migration if self.detect_soft_applied(migration): fake = True else: # Alright, do it normally with self.connection.schema_editor() as schema_editor: project_state = self.loader.project_state((migration.app_label, migration.name), at_end=False) migration.apply(project_state, schema_editor) # For replacement migrations, record individual statuses if migration.replaces: for app_label, name in migration.replaces: self.recorder.record_applied(app_label, name) else: self.recorder.record_applied(migration.app_label, migration.name) # Report progress if self.progress_callback: self.progress_callback("apply_success", migration, fake) def unapply_migration(self, migration, fake=False): """ Runs a migration backwards. """ if self.progress_callback: self.progress_callback("unapply_start", migration, fake) if not fake: with self.connection.schema_editor() as schema_editor: project_state = self.loader.project_state((migration.app_label, migration.name), at_end=False) migration.unapply(project_state, schema_editor) # For replacement migrations, record individual statuses if migration.replaces: for app_label, name in migration.replaces: self.recorder.record_unapplied(app_label, name) else: self.recorder.record_unapplied(migration.app_label, migration.name) # Report progress if self.progress_callback: self.progress_callback("unapply_success", migration, fake) def detect_soft_applied(self, migration): """ Tests whether a migration has been implicitly applied - that the tables it would create exist. This is intended only for use on initial migrations (as it only looks for CreateModel). """ project_state = self.loader.project_state((migration.app_label, migration.name), at_end=True) apps = project_state.render() found_create_migration = False for operation in migration.operations: if isinstance(operation, migrations.CreateModel): model = apps.get_model(migration.app_label, operation.name) if model._meta.swapped: # We have to fetch the model to test with from the # main app cache, as it's not a direct dependency. model = global_apps.get_model(model._meta.swapped) if model._meta.db_table not in self.connection.introspection.get_table_list(self.connection.cursor()): return False found_create_migration = True # If we get this far and we found at least one CreateModel migration, # the migration is considered implicitly applied. return found_create_migration
amenonsen/ansible
refs/heads/devel
test/units/modules/network/onyx/test_onyx_lldp.py
23
# # (c) 2016 Red Hat Inc. # # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. # Make coding more python3-ish from __future__ import (absolute_import, division, print_function) __metaclass__ = type from units.compat.mock import patch from ansible.modules.network.onyx import onyx_lldp from units.modules.utils import set_module_args from .onyx_module import TestOnyxModule, load_fixture class TestOnyxInterfaceModule(TestOnyxModule): module = onyx_lldp def setUp(self): super(TestOnyxInterfaceModule, self).setUp() self.mock_get_config = patch.object( onyx_lldp.OnyxLldpModule, "_get_lldp_config") self.get_config = self.mock_get_config.start() self.mock_load_config = patch( 'ansible.module_utils.network.onyx.onyx.load_config') self.load_config = self.mock_load_config.start() def tearDown(self): super(TestOnyxInterfaceModule, self).tearDown() self.mock_get_config.stop() self.mock_load_config.stop() def load_fixtures(self, commands=None, transport='cli'): if commands == ['lldp']: self.get_config.return_value = None else: config_file = 'onyx_lldp_show.cfg' self.get_config.return_value = load_fixture(config_file) self.load_config.return_value = None def test_lldp_no_change(self): set_module_args(dict()) self.execute_module(changed=False) def test_lldp_disable(self): set_module_args(dict(state='absent')) commands = ['no lldp'] self.execute_module(changed=True, commands=commands) def test_lldp_enable(self): set_module_args(dict(state='present')) commands = ['lldp'] self.execute_module(changed=True, commands=commands)
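The setup above swaps both the device query (_get_lldp_config) and the config push (load_config) for mocks so the module's command generation can be tested offline. The same patch.object pattern, reduced to a standalone example with a hypothetical class and the stdlib unittest.mock instead of Ansible's compat shim:

from unittest.mock import patch

class Device(object):
    def get_config(self):
        return "lldp"

with patch.object(Device, "get_config", return_value="no lldp") as mocked:
    assert Device().get_config() == "no lldp"  # the stub answers, not the real method
    mocked.assert_called_once_with()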
PaddlePaddle/Paddle
refs/heads/develop
python/paddle/fluid/tests/unittests/test_dist_sparse_load_ps0.py
2
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import print_function import os import unittest import numpy as np import tempfile import shutil from op_test import OpTest, randomize_probability import paddle import paddle.fluid as fluid import paddle.fluid.layers as layers import paddle.distributed.fleet.base.role_maker as role_maker from paddle.distributed.fleet import fleet class SparseLoadOp(unittest.TestCase): """ Test load operator. """ def net(self, emb_array, fc_array): with fluid.unique_name.guard(): dense_input = fluid.data('input', shape=[None, 1], dtype="int64") emb = fluid.layers.embedding( input=dense_input, is_sparse=True, size=[10, 10], param_attr=fluid.ParamAttr( name="embedding", initializer=fluid.initializer.NumpyArrayInitializer( emb_array)), ) fc1 = fluid.layers.fc( input=emb, size=10, act="relu", param_attr=fluid.ParamAttr( name='fc', initializer=fluid.initializer.NumpyArrayInitializer( fc_array))) loss = fluid.layers.reduce_mean(fc1) return loss def save_origin_model(self, emb_array, fc_array): startup_program = fluid.framework.Program() test_program = fluid.framework.Program() with fluid.framework.program_guard(test_program, startup_program): with fluid.unique_name.guard(): loss = self.net(emb_array, fc_array) optimizer = fluid.optimizer.Adam(1e-3) optimizer.minimize(loss) exe = fluid.Executor(fluid.CPUPlace()) exe.run(startup_program) model_path = tempfile.mkdtemp() fluid.io.save_persistables(executor=exe, dirname=model_path) return model_path @unittest.skip(reason="Skip unstable ut, need rewrite with new implement") class TestSparseLoadOpCase1(SparseLoadOp): def test_2ps_0_load(self): # init No.0 server env env = {} env["PADDLE_PSERVERS_IP_PORT_LIST"] = "127.0.0.1:4001,127.0.0.1:4002" env["PADDLE_TRAINERS_NUM"] = str(2) env["TRAINING_ROLE"] = "PSERVER" env["PADDLE_PORT"] = "4001" env["POD_IP"] = "127.0.0.1" for k, v in env.items(): os.environ[k] = str(v) """ array([[0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. 
], [0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1], [0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2], [0.3, 0.3, 0.3, 0.3, 0.3, 0.3, 0.3, 0.3, 0.3, 0.3], [0.4, 0.4, 0.4, 0.4, 0.4, 0.4, 0.4, 0.4, 0.4, 0.4], [0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5], [0.6, 0.6, 0.6, 0.6, 0.6, 0.6, 0.6, 0.6, 0.6, 0.6], [0.7, 0.7, 0.7, 0.7, 0.7, 0.7, 0.7, 0.7, 0.7, 0.7], [0.8, 0.8, 0.8, 0.8, 0.8, 0.8, 0.8, 0.8, 0.8, 0.8], [0.9, 0.9, 0.9, 0.9, 0.9, 0.9, 0.9, 0.9, 0.9, 0.9]]) """ emb_array = np.arange(0, 1, 0.1).repeat(10).reshape(10, 10) fc_array = np.arange(0, 1, 0.1).repeat(10).reshape(10, 10) model_path = self.save_origin_model(emb_array, fc_array) role = role_maker.PaddleCloudRoleMaker() fleet.init(role) loss = self.net(emb_array, fc_array) strategy = paddle.distributed.fleet.DistributedStrategy() strategy.a_sync = True optimizer = fluid.optimizer.Adam(1e-3) optimizer = fleet.distributed_optimizer(optimizer, strategy) optimizer.minimize(loss) fleet.init_server(model_path) fc_w = np.array(fluid.global_scope().find_var("fc").get_tensor()) emb = np.array(fluid.global_scope().find_var("embedding.block0") .get_tensor()) assert fc_w.all() == fc_array.all() assert emb.all() == emb_array[::2].all() shutil.rmtree(model_path) if __name__ == "__main__": paddle.enable_static() unittest.main()
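The commented matrix in the docstring above is exactly what the arange/repeat/reshape chain produces, with every entry of row i equal to i/10; a quick standalone check:

import numpy as np

emb_array = np.arange(0, 1, 0.1).repeat(10).reshape(10, 10)
print(emb_array.shape)                 # (10, 10)
assert np.allclose(emb_array[3], 0.3)  # every entry of row 3 is 0.3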
adityahase/frappe
refs/heads/develop
frappe/integrations/doctype/ldap_group_mapping/__init__.py
12133432
thomasgilgenast/spqr-nonrel
refs/heads/master
django/conf/locale/el/__init__.py
12133432
eonpatapon/nova
refs/heads/master
nova/tests/unit/virt/xenapi/image/__init__.py
12133432
AgustinGattone/FiveLazy
refs/heads/master
misitio/__init__.py
12133432
dwlehman/anaconda
refs/heads/master
pyanaconda/threads.py
4
# # threads.py: anaconda thread management # # Copyright (C) 2012 # Red Hat, Inc. All rights reserved. # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # # Author(s): Chris Lumens <[email protected]> # import logging log = logging.getLogger("anaconda") import threading _WORKER_THREAD_PREFIX = "AnaWorkerThread" class ThreadManager(object): """A singleton class for managing threads and processes. Notes: THE INSTANCE HAS TO BE CREATED IN THE MAIN THREAD! This manager makes one assumption that contradicts python's threading module documentation. In this class, we assume that thread names are unique and meaningful. This is an okay assumption for us to make given that anaconda is only ever going to have a handful of special purpose threads. """ def __init__(self): self._objs = {} self._objs_lock = threading.RLock() self._errors = {} self._main_thread = threading.current_thread() def __call__(self): return self def add(self, obj): """Given a Thread or Process object, add it to the list of known objects and start it. It is assumed that obj.name is unique and descriptive. """ # we need to lock the thread dictionary when adding a new thread, # so that callers can't get & join threads that are not yet started with self._objs_lock: if obj.name in self._objs: raise KeyError("Cannot add thread '%s', a thread with the same name already running" % obj.name) self._objs[obj.name] = obj obj.start() return obj.name def remove(self, name): """Removes a thread from the list of known objects. This should only be called when a thread exits, or there will be no way to get a handle on it. """ with self._objs_lock: self._objs.pop(name) def exists(self, name): """Determine if a thread or process exists with the given name.""" # thread in the ThreadManager only officially exists once started with self._objs_lock: return name in self._objs def get(self, name): """Given an object name, see if it exists and return the object. Return None if no such object exists. Additionally, this method will re-raise any uncaught exception in the thread. """ # without the lock it would be possible to get & join # a thread that was not yet started with self._objs_lock: obj = self._objs.get(name) if obj: self.raise_if_error(name) return obj def wait(self, name): """Wait for the thread to exit and if the thread exited with an error re-raise it here. """ ret_val = True # we don't need a lock here, # because get() acquires it itself try: self.get(name).join() except AttributeError: ret_val = False # - if there is a thread object for the given name, # we join it # - if there is not a thread object for the given name, # we get None, try to join it, suppress the AttributeError # and return immediately self.raise_if_error(name) # return True if we waited for the thread, False otherwise return ret_val def wait_all(self): """Wait for all threads to exit and if there was an error re-raise it. 
""" for name in self._objs.keys(): if self.get(name) == threading.current_thread(): continue log.debug("Waiting for thread %s to exit", name) self.wait(name) if self.any_errors: thread_names = ", ".join(thread_name for thread_name in self._errors.keys() if self._errors[thread_name]) msg = "Unhandled errors from the following threads detected: %s" % thread_names raise RuntimeError(msg) def set_error(self, name, *exc_info): """Set the error data for a thread The exception data is expected to be the tuple from sys.exc_info() """ self._errors[name] = exc_info def get_error(self, name): """Get the error data for a thread using its name """ return self._errors.get(name) @property def any_errors(self): """Return True of there have been any errors in any threads """ return any(self._errors.values()) def raise_if_error(self, name): """If a thread has failed due to an exception, raise it into the main thread and remove it from errors. """ if name not in self._errors: # no errors found for the thread return exc_info = self._errors.pop(name) if exc_info: raise exc_info[0], exc_info[1], exc_info[2] def in_main_thread(self): """Return True if it is run in the main thread.""" cur_thread = threading.current_thread() return cur_thread is self._main_thread @property def running(self): """ Return the number of running threads. :returns: number of running threads :rtype: int """ with self._objs_lock: return len(self._objs) @property def names(self): """ Return the names of the running threads. :returns: list of thread names :rtype: list of strings """ with self._objs_lock: return self._objs.keys() def wait_for_error_threads(self): """ Waits for all threads that caused exceptions. In other words, waits for exception handling (possibly interactive) to be finished. """ for thread_name in self._errors.keys(): thread = self._objs[thread_name] thread.join() class AnacondaThread(threading.Thread): """A threading.Thread subclass that exists only for a couple purposes: (1) Make exceptions that happen in a thread invoke our exception handling code as well. Otherwise, threads will silently die and we are doing a lot of complicated code in them now. (2) Remove themselves from the thread manager when completed. (3) All created threads are made daemonic, which means anaconda will quit when the main process is killed. """ # class-wide dictionary ensuring unique thread names _prefix_thread_counts = dict() def __init__(self, *args, **kwargs): # if neither name nor prefix is given, use the worker prefix if "name" not in kwargs and "prefix" not in kwargs: kwargs["prefix"] = _WORKER_THREAD_PREFIX # if prefix is specified, use it to construct new thread name prefix = kwargs.pop("prefix", None) if prefix: thread_num = self._prefix_thread_counts.get(prefix, 0) + 1 self._prefix_thread_counts[prefix] = thread_num kwargs["name"] = prefix + str(thread_num) if "fatal" in kwargs: self._fatal = kwargs.pop("fatal") else: self._fatal = True threading.Thread.__init__(self, *args, **kwargs) self.daemon = True def run(self, *args, **kwargs): # http://bugs.python.org/issue1230540#msg25696 import sys log.info("Running Thread: %s (%s)", self.name, self.ident) try: threading.Thread.run(self, *args, **kwargs) # pylint: disable=bare-except except: threadMgr.set_error(self.name, *sys.exc_info()) if self._fatal: sys.excepthook(*sys.exc_info()) finally: threadMgr.remove(self.name) log.info("Thread Done: %s (%s)", self.name, self.ident) def initThreading(): """Set up threading for anaconda's use. 
    This method must be called before any GTK or threading code is called,
    or else threads will only run when an event is triggered in the GTK
    main loop. And IT HAS TO BE CALLED IN THE MAIN THREAD.
    """
    global threadMgr
    threadMgr = ThreadManager()

threadMgr = None
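A minimal usage sketch of the pieces above (the worker is hypothetical; it assumes initThreading() has already run in the main thread, as the module requires):

import time

initThreading()

def worker():
    time.sleep(0.1)

name = threadMgr.add(AnacondaThread(prefix="Demo", target=worker))  # -> "Demo1"
threadMgr.wait(name)      # joins the thread and re-raises any error it hit
print(threadMgr.running)  # 0: the thread removed itself on completion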
neep305/swordfish
refs/heads/master
text_analysis/apps.py
1
from django.apps import AppConfig


class TextAnalysisConfig(AppConfig):
    name = 'text_analysis'
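For completeness: a config class like this is activated from INSTALLED_APPS by its dotted path (Django 1.7+); a typical settings entry, assuming the app sits at the project root:

# settings.py (sketch)
INSTALLED_APPS = [
    "django.contrib.contenttypes",
    "django.contrib.auth",
    "text_analysis.apps.TextAnalysisConfig",  # explicit AppConfig path
]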
zhaochao/fuel-main
refs/heads/master
utils/jenkins/report-exporter/spreadsheet.py
7
# Copyright 2015 Mirantis, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from conf import GOOGLE from gdata.spreadsheet import text_db import logging logger = logging.getLogger(__package__) class Document(): def __init__(self): self.gclient = text_db.DatabaseClient( GOOGLE["user"], GOOGLE["password"], ) self.gspreadsheet = self.gclient.GetDatabases( spreadsheet_key=GOOGLE["key"] )[0] def get_page(self, name): tables = self.gspreadsheet.GetTables(name=name) # GetTables by name searches by substring in the table name. # GetTables(name="smth") can return ["smth","smth_else"] # Thus we run additional check for table.name tables = [table for table in tables if table.name == name] if len(tables) == 0: # Create new worksheet logger.debug("Create new worksheet {0}".format(name)) wrksh = self.gspreadsheet.client._GetSpreadsheetsClient().AddWorksheet( title=name, row_count=1, col_count=50, key=self.gspreadsheet.spreadsheet_key, ) table = text_db.Table( name=name, worksheet_entry=wrksh, database_client=self.gspreadsheet.client, spreadsheet_key=self.gspreadsheet.spreadsheet_key ) elif len(tables) == 1: table = tables[0] logger.debug("Use worksheet {0}".format(table.name)) else: logger.error( "There are {0} tables named {1}".format( len(tables), name, ) ) raise return Page(table) class Page(): def __init__(self, table): self.table = table self.table.LookupFields() def build_exists(self, number): records = self.table.FindRecords( "number == {0}".format(number) ) return records def add_build(self, build_record): """Adds build to the table If there is a row with same build id and build number, do nothing. """ build_number = build_record[0][1] if self.build_exists(build_number): logger.debug( "Build {0} is already there".format(build_number) ) return None logger.debug("Create record " "for build {0}".format(build_number)) self.update_columns(build_record) self.table.AddRecord(dict(build_record)) logger.info("Created record " "for build {0}".format(build_number)) def update_columns(self, build_record): """Update table columns If current build has more tests than the previous one we extend the table by appending more columns. """ fields_changed = False fields = self.table.fields for key in [key for key, value in build_record if key not in fields]: fields_changed = True fields.append(key) if fields_changed: logger.debug("New columns: {}".format(fields)) self.table.SetFields(fields) logger.debug("New columns added") return fields
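Page.add_build above expects build_record as an ordered list of (column, value) pairs whose first entry carries the build number (build_record[0][1]), and it dict()s the pairs before inserting the row. A hypothetical record illustrating the shape; every column name past "number" is an assumption:

build_record = [
    ("number", "1542"),      # first pair must be the build number
    ("status", "SUCCESS"),   # hypothetical columns follow
    ("tests_total", "120"),
    ("tests_failed", "3"),
]

row = dict(build_record)     # what add_build() hands to table.AddRecord
assert row["number"] == "1542"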
orekyuu/intellij-community
refs/heads/master
python/testData/refactoring/rename/renameParameter_after.py
83
def method_name(qu):
    while (qu < 10):
        print(qu)
        qu = qu + 1
    return qu
dvdhrm/systemd
refs/heads/master
test/test-network/systemd-networkd-tests.py
1
#!/usr/bin/env python3 # SPDX-License-Identifier: LGPL-2.1+ # systemd-networkd tests import os import re import shutil import signal import socket import subprocess import sys import time import unittest from shutil import copytree network_unit_file_path='/run/systemd/network' networkd_runtime_directory='/run/systemd/netif' networkd_ci_path='/run/networkd-ci' network_sysctl_ipv6_path='/proc/sys/net/ipv6/conf' network_sysctl_ipv4_path='/proc/sys/net/ipv4/conf' dnsmasq_pid_file='/run/networkd-ci/test-test-dnsmasq.pid' dnsmasq_log_file='/run/networkd-ci/test-dnsmasq-log-file' def is_module_available(module_name): lsmod_output = subprocess.check_output('lsmod', universal_newlines=True) module_re = re.compile(r'^{0}\b'.format(re.escape(module_name)), re.MULTILINE) return module_re.search(lsmod_output) or not subprocess.call(["modprobe", module_name]) def expectedFailureIfModuleIsNotAvailable(module_name): def f(func): if not is_module_available(module_name): return unittest.expectedFailure(func) return func return f def expectedFailureIfERSPANModuleIsNotAvailable(): def f(func): rc = subprocess.call(['ip', 'link', 'add', 'dev', 'erspan99', 'type', 'erspan', 'seq', 'key', '30', 'local', '192.168.1.4', 'remote', '192.168.1.1', 'erspan_ver', '1', 'erspan', '123']) if rc == 0: subprocess.call(['ip', 'link', 'del', 'erspan99']) return func else: return unittest.expectedFailure(func) return f def expectedFailureIfRoutingPolicyPortRangeIsNotAvailable(): def f(func): rc = subprocess.call(['ip', 'rule', 'add', 'from', '192.168.100.19', 'sport', '1123-1150', 'dport', '3224-3290', 'table', '7']) if rc == 0: subprocess.call(['ip', 'rule', 'del', 'from', '192.168.100.19', 'sport', '1123-1150', 'dport', '3224-3290', 'table', '7']) return func else: return unittest.expectedFailure(func) return f def expectedFailureIfRoutingPolicyIPProtoIsNotAvailable(): def f(func): rc = subprocess.call(['ip', 'rule', 'add', 'not', 'from', '192.168.100.19', 'ipproto', 'tcp', 'table', '7']) if rc == 0: subprocess.call(['ip', 'rule', 'del', 'not', 'from', '192.168.100.19', 'ipproto', 'tcp', 'table', '7']) return func else: return unittest.expectedFailure(func) return f def setUpModule(): os.makedirs(network_unit_file_path, exist_ok=True) os.makedirs(networkd_ci_path, exist_ok=True) shutil.rmtree(networkd_ci_path) copytree(os.path.join(os.path.dirname(os.path.abspath(__file__)), 'conf'), networkd_ci_path) subprocess.check_call('systemctl stop systemd-networkd.socket', shell=True) def tearDownModule(): shutil.rmtree(networkd_ci_path) subprocess.check_call('systemctl stop systemd-networkd.service', shell=True) subprocess.check_call('systemctl start systemd-networkd.socket', shell=True) subprocess.check_call('systemctl start systemd-networkd.service', shell=True) class Utilities(): def read_link_attr(self, link, dev, attribute): with open(os.path.join(os.path.join(os.path.join('/sys/class/net/', link), dev), attribute)) as f: return f.readline().strip() def read_bridge_port_attr(self, bridge, link, attribute): path_bridge = os.path.join('/sys/devices/virtual/net', bridge) path_port = 'lower_' + link + '/brport' path = os.path.join(path_bridge, path_port) with open(os.path.join(path, attribute)) as f: return f.readline().strip() def link_exits(self, link): return os.path.exists(os.path.join('/sys/class/net', link)) def link_remove(self, links): for link in links: if os.path.exists(os.path.join('/sys/class/net', link)): subprocess.call(['ip', 'link', 'del', 'dev', link]) time.sleep(1) def read_ipv6_sysctl_attr(self, link, attribute): 
with open(os.path.join(os.path.join(network_sysctl_ipv6_path, link), attribute)) as f: return f.readline().strip() def read_ipv4_sysctl_attr(self, link, attribute): with open(os.path.join(os.path.join(network_sysctl_ipv4_path, link), attribute)) as f: return f.readline().strip() def copy_unit_to_networkd_unit_path(self, *units): for unit in units: shutil.copy(os.path.join(networkd_ci_path, unit), network_unit_file_path) if (os.path.exists(os.path.join(networkd_ci_path, unit + '.d'))): copytree(os.path.join(networkd_ci_path, unit + '.d'), os.path.join(network_unit_file_path, unit + '.d')) def remove_unit_from_networkd_path(self, units): for unit in units: if (os.path.exists(os.path.join(network_unit_file_path, unit))): os.remove(os.path.join(network_unit_file_path, unit)) if (os.path.exists(os.path.join(network_unit_file_path, unit + '.d'))): shutil.rmtree(os.path.join(network_unit_file_path, unit + '.d')) def start_dnsmasq(self, additional_options=''): dnsmasq_command = 'dnsmasq -8 /var/run/networkd-ci/test-dnsmasq-log-file --log-queries=extra --log-dhcp --pid-file=/var/run/networkd-ci/test-test-dnsmasq.pid --conf-file=/dev/null --interface=veth-peer --enable-ra --dhcp-range=2600::10,2600::20 --dhcp-range=192.168.5.10,192.168.5.200 -R --dhcp-leasefile=/var/run/networkd-ci/lease --dhcp-option=26,1492 --dhcp-option=option:router,192.168.5.1 --dhcp-option=33,192.168.5.4,192.168.5.5 --port=0 ' + additional_options subprocess.check_call(dnsmasq_command, shell=True) time.sleep(10) def stop_dnsmasq(self, pid_file): if os.path.exists(pid_file): with open(pid_file, 'r') as f: pid = f.read().rstrip(' \t\r\n\0') os.kill(int(pid), signal.SIGTERM) os.remove(pid_file) def search_words_in_dnsmasq_log(self, words, show_all=False): if os.path.exists(dnsmasq_log_file): with open (dnsmasq_log_file) as in_file: contents = in_file.read() if show_all: print(contents) for line in contents.split('\n'): if words in line: in_file.close() print("%s, %s" % (words, line)) return True return False def remove_lease_file(self): if os.path.exists(os.path.join(networkd_ci_path, 'lease')): os.remove(os.path.join(networkd_ci_path, 'lease')) def remove_log_file(self): if os.path.exists(dnsmasq_log_file): os.remove(dnsmasq_log_file) def start_networkd(self): if (os.path.exists(os.path.join(networkd_runtime_directory, 'state'))): subprocess.check_call('systemctl stop systemd-networkd', shell=True) os.remove(os.path.join(networkd_runtime_directory, 'state')) subprocess.check_call('systemctl start systemd-networkd', shell=True) else: subprocess.check_call('systemctl restart systemd-networkd', shell=True) time.sleep(5) print() class NetworkdNetDevTests(unittest.TestCase, Utilities): links =[ '6rdtun99', 'bond99', 'bridge99', 'dropin-test', 'dummy98', 'erspan-test', 'geneve99', 'gretap99', 'gretun99', 'ip6gretap99', 'ip6tnl99', 'ipiptun99', 'ipvlan99', 'isataptun99', 'macvlan99', 'macvtap99', 'sittun99', 'tap99', 'test1', 'tun99', 'vcan99', 'veth99', 'vlan99', 'vrf99', 'vti6tun99', 'vtitun99', 'vxlan99', 'wg98', 'wg99'] units = [ '10-dropin-test.netdev', '11-dummy.netdev', '12-dummy.netdev', '21-macvlan.netdev', '21-macvtap.netdev', '21-vlan.netdev', '21-vlan.network', '25-6rd-tunnel.netdev', '25-bond.netdev', '25-bond-balanced-tlb.netdev', '25-bridge.netdev', '25-erspan-tunnel.netdev', '25-geneve.netdev', '25-gretap-tunnel.netdev', '25-gre-tunnel.netdev', '25-ip6gre-tunnel.netdev', '25-ip6tnl-tunnel.netdev', '25-ipip-tunnel-independent.netdev', '25-ipip-tunnel.netdev', '25-ipvlan.netdev', '25-isatap-tunnel.netdev', 
'25-sit-tunnel.netdev', '25-tap.netdev', '25-tun.netdev', '25-vcan.netdev', '25-veth.netdev', '25-vrf.netdev', '25-vti6-tunnel.netdev', '25-vti-tunnel.netdev', '25-vxlan.netdev', '25-wireguard-23-peers.netdev', '25-wireguard-23-peers.network', '25-wireguard.netdev', '6rd.network', 'gre.network', 'gretap.network', 'gretun.network', 'ip6gretap.network', 'ip6tnl.network', 'ipip.network', 'ipvlan.network', 'isatap.network', 'macvlan.network', 'macvtap.network', 'sit.network', 'vti6.network', 'vti.network', 'vxlan.network'] def setUp(self): self.link_remove(self.links) def tearDown(self): self.link_remove(self.links) self.remove_unit_from_networkd_path(self.units) def test_dropin(self): self.copy_unit_to_networkd_unit_path('10-dropin-test.netdev') self.start_networkd() self.assertTrue(self.link_exits('dropin-test')) output = subprocess.check_output(['ip', 'link', 'show', 'dropin-test']).rstrip().decode('utf-8') print(output) self.assertRegex(output, '00:50:56:c0:00:28') output = subprocess.check_output(['networkctl', 'list']).rstrip().decode('utf-8') self.assertRegex(output, '1 lo ') self.assertRegex(output, 'dropin-test') output = subprocess.check_output(['networkctl', 'list', 'dropin-test']).rstrip().decode('utf-8') self.assertNotRegex(output, '1 lo ') self.assertRegex(output, 'dropin-test') output = subprocess.check_output(['networkctl', 'list', 'dropin-*']).rstrip().decode('utf-8') self.assertNotRegex(output, '1 lo ') self.assertRegex(output, 'dropin-test') output = subprocess.check_output(['networkctl', 'status', 'dropin-*']).rstrip().decode('utf-8') self.assertNotRegex(output, '1: lo ') self.assertRegex(output, 'dropin-test') ret = subprocess.run(['ethtool', '--driver', 'dropin-test'], stdout=subprocess.PIPE, stderr=subprocess.STDOUT) print(ret.stdout.rstrip().decode('utf-8')) if ret.returncode == 0 and re.search('driver: dummy', ret.stdout.rstrip().decode('utf-8')) != None: self.assertRegex(output, 'Driver: dummy') else: print('ethtool does not support driver field at least for dummy interfaces, skipping test for Driver field of networkctl.') def test_bridge(self): self.copy_unit_to_networkd_unit_path('25-bridge.netdev') self.start_networkd() self.assertTrue(self.link_exits('bridge99')) self.assertEqual('900', self.read_link_attr('bridge99', 'bridge', 'hello_time')) self.assertEqual('900', self.read_link_attr('bridge99', 'bridge', 'max_age')) self.assertEqual('900', self.read_link_attr('bridge99', 'bridge','forward_delay')) self.assertEqual('900', self.read_link_attr('bridge99', 'bridge','ageing_time')) self.assertEqual('9', self.read_link_attr('bridge99', 'bridge','priority')) self.assertEqual('1', self.read_link_attr('bridge99', 'bridge','multicast_querier')) self.assertEqual('1', self.read_link_attr('bridge99', 'bridge','multicast_snooping')) self.assertEqual('1', self.read_link_attr('bridge99', 'bridge','stp_state')) def test_bond(self): self.copy_unit_to_networkd_unit_path('25-bond.netdev') self.start_networkd() self.assertTrue(self.link_exits('bond99')) self.assertEqual('802.3ad 4', self.read_link_attr('bond99', 'bonding', 'mode')) self.assertEqual('layer3+4 1', self.read_link_attr('bond99', 'bonding', 'xmit_hash_policy')) self.assertEqual('1000', self.read_link_attr('bond99', 'bonding', 'miimon')) self.assertEqual('fast 1', self.read_link_attr('bond99', 'bonding', 'lacp_rate')) self.assertEqual('2000', self.read_link_attr('bond99', 'bonding', 'updelay')) self.assertEqual('2000', self.read_link_attr('bond99', 'bonding', 'downdelay')) self.assertEqual('4', 
self.read_link_attr('bond99', 'bonding', 'resend_igmp')) self.assertEqual('1', self.read_link_attr('bond99', 'bonding', 'min_links')) self.assertEqual('1218', self.read_link_attr('bond99', 'bonding', 'ad_actor_sys_prio')) self.assertEqual('811', self.read_link_attr('bond99', 'bonding', 'ad_user_port_key')) self.assertEqual('00:11:22:33:44:55', self.read_link_attr('bond99', 'bonding', 'ad_actor_system')) def test_bond_balanced_tlb(self): self.copy_unit_to_networkd_unit_path('25-bond-balanced-tlb.netdev') self.start_networkd() self.assertTrue(self.link_exits('bond99')) self.assertEqual('balance-tlb 5', self.read_link_attr('bond99', 'bonding', 'mode')) self.assertEqual('1', self.read_link_attr('bond99', 'bonding', 'tlb_dynamic_lb')) def test_vlan(self): self.copy_unit_to_networkd_unit_path('21-vlan.netdev', '11-dummy.netdev', '21-vlan.network') self.start_networkd() self.assertTrue(self.link_exits('vlan99')) output = subprocess.check_output(['ip', '-d', 'link', 'show', 'vlan99']).rstrip().decode('utf-8') print(output) self.assertTrue(output, 'REORDER_HDR') self.assertTrue(output, 'LOOSE_BINDING') self.assertTrue(output, 'GVRP') self.assertTrue(output, 'MVRP') self.assertTrue(output, '99') def test_macvtap(self): self.copy_unit_to_networkd_unit_path('21-macvtap.netdev', '11-dummy.netdev', 'macvtap.network') self.start_networkd() self.assertTrue(self.link_exits('macvtap99')) def test_macvlan(self): self.copy_unit_to_networkd_unit_path('21-macvlan.netdev', '11-dummy.netdev', 'macvlan.network') self.start_networkd() self.assertTrue(self.link_exits('macvlan99')) @expectedFailureIfModuleIsNotAvailable('ipvlan') def test_ipvlan(self): self.copy_unit_to_networkd_unit_path('25-ipvlan.netdev', '11-dummy.netdev', 'ipvlan.network') self.start_networkd() self.assertTrue(self.link_exits('ipvlan99')) def test_veth(self): self.copy_unit_to_networkd_unit_path('25-veth.netdev') self.start_networkd() self.assertTrue(self.link_exits('veth99')) def test_dummy(self): self.copy_unit_to_networkd_unit_path('11-dummy.netdev') self.start_networkd() self.assertTrue(self.link_exits('test1')) def test_tun(self): self.copy_unit_to_networkd_unit_path('25-tun.netdev') self.start_networkd() self.assertTrue(self.link_exits('tun99')) def test_tap(self): self.copy_unit_to_networkd_unit_path('25-tap.netdev') self.start_networkd() self.assertTrue(self.link_exits('tap99')) @expectedFailureIfModuleIsNotAvailable('vrf') def test_vrf(self): self.copy_unit_to_networkd_unit_path('25-vrf.netdev') self.start_networkd() self.assertTrue(self.link_exits('vrf99')) @expectedFailureIfModuleIsNotAvailable('vcan') def test_vcan(self): self.copy_unit_to_networkd_unit_path('25-vcan.netdev') self.start_networkd() self.assertTrue(self.link_exits('vcan99')) @expectedFailureIfModuleIsNotAvailable('wireguard') def test_wireguard(self): self.copy_unit_to_networkd_unit_path('25-wireguard.netdev') self.start_networkd() if shutil.which('wg'): subprocess.call('wg') output = subprocess.check_output(['wg', 'show', 'wg99', 'listen-port']).rstrip().decode('utf-8') self.assertTrue(output, '51820') output = subprocess.check_output(['wg', 'show', 'wg99', 'fwmark']).rstrip().decode('utf-8') self.assertTrue(output, '0x4d2') output = subprocess.check_output(['wg', 'show', 'wg99', 'allowed-ips']).rstrip().decode('utf-8') self.assertTrue(output, 'RDf+LSpeEre7YEIKaxg+wbpsNV7du+ktR99uBEtIiCA=\t192.168.26.0/24 fd31:bf08:57cb::/48') output = subprocess.check_output(['wg', 'show', 'wg99', 'persistent-keepalive']).rstrip().decode('utf-8') self.assertTrue(output, 
'RDf+LSpeEre7YEIKaxg+wbpsNV7du+ktR99uBEtIiCA=\t20') output = subprocess.check_output(['wg', 'show', 'wg99', 'endpoints']).rstrip().decode('utf-8') self.assertTrue(output, 'RDf+LSpeEre7YEIKaxg+wbpsNV7du+ktR99uBEtIiCA=\t192.168.27.3:51820') self.assertTrue(self.link_exits('wg99')) @expectedFailureIfModuleIsNotAvailable('wireguard') def test_wireguard_23_peers(self): self.copy_unit_to_networkd_unit_path('25-wireguard-23-peers.netdev', '25-wireguard-23-peers.network') self.start_networkd() if shutil.which('wg'): subprocess.call('wg') self.assertTrue(self.link_exits('wg98')) def test_geneve(self): self.copy_unit_to_networkd_unit_path('25-geneve.netdev') self.start_networkd() self.assertTrue(self.link_exits('geneve99')) output = subprocess.check_output(['ip', '-d', 'link', 'show', 'geneve99']).rstrip().decode('utf-8') print(output) self.assertTrue(output, '192.168.22.1') self.assertTrue(output, '6082') self.assertTrue(output, 'udpcsum') self.assertTrue(output, 'udp6zerocsumrx') def test_ipip_tunnel(self): self.copy_unit_to_networkd_unit_path('12-dummy.netdev', '25-ipip-tunnel.netdev', 'ipip.network') self.start_networkd() self.assertTrue(self.link_exits('dummy98')) self.assertTrue(self.link_exits('ipiptun99')) def test_gre_tunnel(self): self.copy_unit_to_networkd_unit_path('12-dummy.netdev', '25-gre-tunnel.netdev', 'gretun.network') self.start_networkd() self.assertTrue(self.link_exits('dummy98')) self.assertTrue(self.link_exits('gretun99')) def test_gretap_tunnel(self): self.copy_unit_to_networkd_unit_path('12-dummy.netdev', '25-gretap-tunnel.netdev', 'gretap.network') self.start_networkd() self.assertTrue(self.link_exits('dummy98')) self.assertTrue(self.link_exits('gretap99')) def test_ip6gretap_tunnel(self): self.copy_unit_to_networkd_unit_path('12-dummy.netdev', '25-ip6gre-tunnel.netdev', 'ip6gretap.network') self.start_networkd() self.assertTrue(self.link_exits('dummy98')) self.assertTrue(self.link_exits('ip6gretap99')) def test_vti_tunnel(self): self.copy_unit_to_networkd_unit_path('12-dummy.netdev', '25-vti-tunnel.netdev', 'vti.network') self.start_networkd() self.assertTrue(self.link_exits('dummy98')) self.assertTrue(self.link_exits('vtitun99')) def test_vti6_tunnel(self): self.copy_unit_to_networkd_unit_path('12-dummy.netdev', '25-vti6-tunnel.netdev', 'vti6.network') self.start_networkd() self.assertTrue(self.link_exits('dummy98')) self.assertTrue(self.link_exits('vti6tun99')) def test_ip6tnl_tunnel(self): self.copy_unit_to_networkd_unit_path('12-dummy.netdev', '25-ip6tnl-tunnel.netdev', 'ip6tnl.network') self.start_networkd() self.assertTrue(self.link_exits('dummy98')) self.assertTrue(self.link_exits('ip6tnl99')) def test_sit_tunnel(self): self.copy_unit_to_networkd_unit_path('12-dummy.netdev', '25-sit-tunnel.netdev', 'sit.network') self.start_networkd() self.assertTrue(self.link_exits('dummy98')) self.assertTrue(self.link_exits('sittun99')) def test_isatap_tunnel(self): self.copy_unit_to_networkd_unit_path('12-dummy.netdev', '25-isatap-tunnel.netdev', 'isatap.network') self.start_networkd() self.assertTrue(self.link_exits('dummy98')) self.assertTrue(self.link_exits('isataptun99')) output = subprocess.check_output(['ip', '-d', 'link', 'show', 'isataptun99']).rstrip().decode('utf-8') print(output) self.assertRegex(output, "isatap ") def test_6rd_tunnel(self): self.copy_unit_to_networkd_unit_path('12-dummy.netdev', '25-6rd-tunnel.netdev', '6rd.network') self.start_networkd() self.assertTrue(self.link_exits('dummy98')) self.assertTrue(self.link_exits('sittun99')) 
@expectedFailureIfERSPANModuleIsNotAvailable() def test_erspan_tunnel(self): self.copy_unit_to_networkd_unit_path('25-erspan-tunnel.netdev') self.start_networkd() self.assertTrue(self.link_exits('erspan-test')) output = subprocess.check_output(['ip', '-d', 'link', 'show', 'erspan-test']).rstrip().decode('utf-8') print(output) self.assertTrue(output, '172.16.1.200') self.assertTrue(output, '172.16.1.100') self.assertTrue(output, '101') def test_tunnel_independent(self): self.copy_unit_to_networkd_unit_path('25-ipip-tunnel-independent.netdev') self.start_networkd() self.assertTrue(self.link_exits('ipiptun99')) def test_vxlan(self): self.copy_unit_to_networkd_unit_path('25-vxlan.netdev', 'vxlan.network','11-dummy.netdev') self.start_networkd() self.assertTrue(self.link_exits('vxlan99')) output = subprocess.check_output(['ip', '-d', 'link', 'show', 'vxlan99']).rstrip().decode('utf-8') print(output) self.assertRegex(output, "999") self.assertRegex(output, '5555') self.assertRegex(output, 'l2miss') self.assertRegex(output, 'l3miss') self.assertRegex(output, 'udpcsum') self.assertRegex(output, 'udp6zerocsumtx') self.assertRegex(output, 'udp6zerocsumrx') self.assertRegex(output, 'remcsumtx') self.assertRegex(output, 'remcsumrx') self.assertRegex(output, 'gbp') class NetworkdNetWorkTests(unittest.TestCase, Utilities): links = [ 'bond199', 'dummy98', 'dummy99', 'test1'] units = [ '11-dummy.netdev', '12-dummy.netdev', '23-active-slave.network', '23-bond199.network', '23-primary-slave.network', '23-test1-bond199.network', '25-address-link-section.network', '25-address-section-miscellaneous.network', '25-address-section.network', '25-bind-carrier.network', '25-bond-active-backup-slave.netdev', '25-fibrule-invert.network', '25-fibrule-port-range.network', '25-ipv6-address-label-section.network', '25-neighbor-section.network', '25-link-local-addressing-no.network', '25-link-local-addressing-yes.network', '25-link-section-unmanaged.network', '25-route-gateway.network', '25-route-gateway-on-link.network', '25-route-ipv6-src.network', '25-route-reverse-order.network', '25-route-section.network', '25-route-tcp-window-settings.network', '25-route-type.network', '25-sysctl.network', 'configure-without-carrier.network', 'routing-policy-rule.network', 'test-static.network'] def setUp(self): self.link_remove(self.links) def tearDown(self): self.link_remove(self.links) self.remove_unit_from_networkd_path(self.units) def test_static_address(self): self.copy_unit_to_networkd_unit_path('12-dummy.netdev', 'test-static.network') self.start_networkd() self.assertTrue(self.link_exits('dummy98')) output = subprocess.check_output(['networkctl', 'status', 'dummy98']).rstrip().decode('utf-8') print(output) self.assertRegex(output, '192.168.0.15') self.assertRegex(output, '192.168.0.1') self.assertRegex(output, 'routable') def test_configure_without_carrier(self): self.copy_unit_to_networkd_unit_path('configure-without-carrier.network', '11-dummy.netdev') self.start_networkd() self.assertTrue(self.link_exits('test1')) output = subprocess.check_output(['networkctl', 'status', 'test1']).rstrip().decode('utf-8') print(output) self.assertRegex(output, '192.168.0.15') self.assertRegex(output, '192.168.0.1') self.assertRegex(output, 'routable') def test_bond_active_slave(self): self.copy_unit_to_networkd_unit_path('23-active-slave.network', '23-bond199.network', '25-bond-active-backup-slave.netdev', '12-dummy.netdev') self.start_networkd() self.assertTrue(self.link_exits('dummy98')) self.assertTrue(self.link_exits('bond199')) 
output = subprocess.check_output(['ip', '-d', 'link', 'show', 'bond199']).rstrip().decode('utf-8') print(output) self.assertRegex(output, 'active_slave dummy98') def test_bond_primary_slave(self): self.copy_unit_to_networkd_unit_path('23-primary-slave.network', '23-test1-bond199.network', '25-bond-active-backup-slave.netdev', '11-dummy.netdev') self.start_networkd() self.assertTrue(self.link_exits('test1')) self.assertTrue(self.link_exits('bond199')) output = subprocess.check_output(['ip', '-d', 'link', 'show', 'bond199']).rstrip().decode('utf-8') print(output) self.assertRegex(output, 'primary test1') def test_routing_policy_rule(self): self.copy_unit_to_networkd_unit_path('routing-policy-rule.network', '11-dummy.netdev') self.start_networkd() self.assertTrue(self.link_exits('test1')) output = subprocess.check_output(['ip', 'rule']).rstrip().decode('utf-8') print(output) self.assertRegex(output, '111') self.assertRegex(output, 'from 192.168.100.18') self.assertRegex(output, r'tos (?:0x08|throughput)\s') self.assertRegex(output, 'iif test1') self.assertRegex(output, 'oif test1') self.assertRegex(output, 'lookup 7') subprocess.call(['ip', 'rule', 'del', 'table', '7']) @expectedFailureIfRoutingPolicyPortRangeIsNotAvailable() def test_routing_policy_rule_port_range(self): self.copy_unit_to_networkd_unit_path('25-fibrule-port-range.network', '11-dummy.netdev') self.start_networkd() self.assertTrue(self.link_exits('test1')) output = subprocess.check_output(['ip', 'rule']).rstrip().decode('utf-8') print(output) self.assertRegex(output, '111') self.assertRegex(output, 'from 192.168.100.18') self.assertRegex(output, '1123-1150') self.assertRegex(output, '3224-3290') self.assertRegex(output, 'tcp') self.assertRegex(output, 'lookup 7') subprocess.call(['ip', 'rule', 'del', 'table', '7']) @expectedFailureIfRoutingPolicyIPProtoIsNotAvailable() def test_routing_policy_rule_invert(self): self.copy_unit_to_networkd_unit_path('25-fibrule-invert.network', '11-dummy.netdev') self.start_networkd() self.assertTrue(self.link_exits('test1')) output = subprocess.check_output(['ip', 'rule']).rstrip().decode('utf-8') print(output) self.assertRegex(output, '111') self.assertRegex(output, 'not.*?from.*?192.168.100.18') self.assertRegex(output, 'tcp') self.assertRegex(output, 'lookup 7') subprocess.call(['ip', 'rule', 'del', 'table', '7']) def test_address_peer(self): self.copy_unit_to_networkd_unit_path('25-address-section.network', '12-dummy.netdev') self.start_networkd() self.assertTrue(self.link_exits('dummy98')) output = subprocess.check_output(['ip', 'address', 'show', 'dummy98']).rstrip().decode('utf-8') print(output) self.assertRegex(output, 'inet 10.2.3.4 peer 10.2.3.5/16 scope global 32') self.assertRegex(output, 'inet 10.6.7.8/16 brd 10.6.255.255 scope global 33') self.assertRegex(output, 'inet6 2001:db8::20 peer 2001:db8::10/128 scope global') output = subprocess.check_output(['networkctl', 'status', 'dummy98']).rstrip().decode('utf-8') print(output) self.assertRegex(output, 'State: routable \(configured\)') def test_address_preferred_lifetime_zero_ipv6(self): self.copy_unit_to_networkd_unit_path('25-address-section-miscellaneous.network', '12-dummy.netdev') self.start_networkd() self.assertTrue(self.link_exits('dummy98')) output = subprocess.check_output(['ip', 'address', 'show', 'dummy98']).rstrip().decode('utf-8') print(output) self.assertRegex(output, 'inet 10.2.3.4/16 brd 10.2.255.255 scope link deprecated dummy98') self.assertRegex(output, 'inet6 2001:db8:0:f101::1/64 scope global') def 
test_ip_route(self): self.copy_unit_to_networkd_unit_path('25-route-section.network', '12-dummy.netdev') self.start_networkd() self.assertTrue(self.link_exits('dummy98')) output = subprocess.check_output(['ip', 'route', 'list', 'dev', 'dummy98']).rstrip().decode('utf-8') print(output) self.assertRegex(output, '192.168.0.1') self.assertRegex(output, 'static') self.assertRegex(output, '192.168.0.0/24') def test_ip_route_reverse(self): self.copy_unit_to_networkd_unit_path('25-route-reverse-order.network', '12-dummy.netdev') self.start_networkd() self.assertTrue(self.link_exits('dummy98')) output = subprocess.check_output(['ip', '-6', 'route', 'show', 'dev', 'dummy98']).rstrip().decode('utf-8') print(output) self.assertRegex(output, '2001:1234:5:8fff:ff:ff:ff:ff') self.assertRegex(output, '2001:1234:5:8f63::1') def test_ip_route_blackhole_unreachable_prohibit(self): self.copy_unit_to_networkd_unit_path('25-route-type.network', '12-dummy.netdev') self.start_networkd() self.assertTrue(self.link_exits('dummy98')) output = subprocess.check_output(['ip', 'route', 'list']).rstrip().decode('utf-8') print(output) self.assertRegex(output, 'blackhole') self.assertRegex(output, 'unreachable') self.assertRegex(output, 'prohibit') subprocess.call(['ip', 'route', 'del', 'blackhole', '202.54.1.2']) subprocess.call(['ip', 'route', 'del', 'unreachable', '202.54.1.3']) subprocess.call(['ip', 'route', 'del', 'prohibit', '202.54.1.4']) def test_ip_route_tcp_window(self): self.copy_unit_to_networkd_unit_path('25-route-tcp-window-settings.network', '11-dummy.netdev') self.start_networkd() self.assertTrue(self.link_exits('test1')) output = subprocess.check_output(['ip', 'route', 'list']).rstrip().decode('utf-8') print(output) self.assertRegex(output, 'initcwnd 20') self.assertRegex(output, 'initrwnd 30') def test_ip_route_gateway(self): self.copy_unit_to_networkd_unit_path('25-route-gateway.network', '12-dummy.netdev') self.start_networkd() self.assertTrue(self.link_exits('dummy98')) output = subprocess.check_output(['ip', 'route', 'list', 'dev', 'dummy98', 'default']).rstrip().decode('utf-8') print(output) self.assertRegex(output, 'default') self.assertRegex(output, 'via') self.assertRegex(output, '149.10.124.64') self.assertRegex(output, 'proto') self.assertRegex(output, 'static') output = subprocess.check_output(['ip', 'route', 'list', 'dev', 'dummy98', 'src', '149.10.124.58']).rstrip().decode('utf-8') print(output) self.assertRegex(output, '149.10.124.48/28') self.assertRegex(output, 'proto') self.assertRegex(output, 'kernel') self.assertRegex(output, 'scope') self.assertRegex(output, 'link') def test_ip_route_gateway_on_link(self): self.copy_unit_to_networkd_unit_path('25-route-gateway-on-link.network', '12-dummy.netdev') self.start_networkd() self.assertTrue(self.link_exits('dummy98')) output = subprocess.check_output(['ip', 'route', 'list', 'dev', 'dummy98', 'default']).rstrip().decode('utf-8') print(output) self.assertRegex(output, 'default') self.assertRegex(output, 'via') self.assertRegex(output, '149.10.125.65') self.assertRegex(output, 'proto') self.assertRegex(output, 'static') self.assertRegex(output, 'onlink') output = subprocess.check_output(['ip', 'route', 'list', 'dev', 'dummy98', 'src', '149.10.124.58']).rstrip().decode('utf-8') print(output) self.assertRegex(output, '149.10.124.48/28') self.assertRegex(output, 'proto') self.assertRegex(output, 'kernel') self.assertRegex(output, 'scope') self.assertRegex(output, 'link') def test_ip_route_ipv6_src_route(self): # a dummy device does not make the 
addresses go through tentative state, so we # reuse a bond from an earlier test, which does make the addresses go through # tentative state, and do our test on that self.copy_unit_to_networkd_unit_path('23-active-slave.network', '25-route-ipv6-src.network', '25-bond-active-backup-slave.netdev', '12-dummy.netdev') self.start_networkd() self.assertTrue(self.link_exits('dummy98')) self.assertTrue(self.link_exits('bond199')) output = subprocess.check_output(['ip', '-6', 'route', 'list', 'dev', 'bond199']).rstrip().decode('utf-8') print(output) self.assertRegex(output, 'abcd::/16') self.assertRegex(output, 'src') self.assertRegex(output, '2001:1234:56:8f63::2') def test_ip_link_mac_address(self): self.copy_unit_to_networkd_unit_path('25-address-link-section.network', '12-dummy.netdev') self.start_networkd() self.assertTrue(self.link_exits('dummy98')) output = subprocess.check_output(['ip', 'link', 'show', 'dummy98']).rstrip().decode('utf-8') print(output) self.assertRegex(output, '00:01:02:aa:bb:cc') def test_ip_link_unmanaged(self): self.copy_unit_to_networkd_unit_path('25-link-section-unmanaged.network', '12-dummy.netdev') self.start_networkd() self.assertTrue(self.link_exits('dummy98')) output = subprocess.check_output(['networkctl', 'status', 'dummy98']).rstrip().decode('utf-8') print(output) self.assertRegex(output, 'unmanaged') def test_ipv6_address_label(self): self.copy_unit_to_networkd_unit_path('25-ipv6-address-label-section.network', '12-dummy.netdev') self.start_networkd() self.assertTrue(self.link_exits('dummy98')) output = subprocess.check_output(['ip', 'addrlabel', 'list']).rstrip().decode('utf-8') print(output) self.assertRegex(output, '2004:da8:1::/64') def test_ipv6_neighbor(self): self.copy_unit_to_networkd_unit_path('25-neighbor-section.network', '12-dummy.netdev') self.start_networkd() self.assertTrue(self.link_exits('dummy98')) output = subprocess.check_output(['ip', 'neigh', 'list']).rstrip().decode('utf-8') print(output) self.assertRegex(output, '192.168.10.1.*00:00:5e:00:02:65.*PERMANENT') self.assertRegex(output, '2004:da8:1::1.*00:00:5e:00:02:66.*PERMANENT') def test_link_local_addressing(self): self.copy_unit_to_networkd_unit_path('25-link-local-addressing-yes.network', '11-dummy.netdev', '25-link-local-addressing-no.network', '12-dummy.netdev') self.start_networkd() self.assertTrue(self.link_exits('test1')) self.assertTrue(self.link_exits('dummy98')) time.sleep(10) output = subprocess.check_output(['ip', 'address', 'show', 'dev', 'test1']).rstrip().decode('utf-8') print(output) self.assertRegex(output, 'inet .* scope link') self.assertRegex(output, 'inet6 .* scope link') output = subprocess.check_output(['ip', 'address', 'show', 'dev', 'dummy98']).rstrip().decode('utf-8') print(output) self.assertNotRegex(output, 'inet6* .* scope link') output = subprocess.check_output(['networkctl', 'status', 'test1']).rstrip().decode('utf-8') print(output) self.assertRegex(output, 'State: degraded \(configured\)') output = subprocess.check_output(['networkctl', 'status', 'dummy98']).rstrip().decode('utf-8') print(output) self.assertRegex(output, 'State: carrier \(configured\)') ''' Documentation/networking/ip-sysctl.txt addr_gen_mode - INTEGER Defines how link-local and autoconf addresses are generated. 
0: generate address based on EUI64 (default) 1: do no generate a link-local address, use EUI64 for addresses generated from autoconf 2: generate stable privacy addresses, using the secret from stable_secret (RFC7217) 3: generate stable privacy addresses, using a random secret if unset ''' test1_addr_gen_mode = '' if os.path.exists(os.path.join(os.path.join(network_sysctl_ipv6_path, 'test1'), 'stable_secret')): with open(os.path.join(os.path.join(network_sysctl_ipv6_path, 'test1'), 'stable_secret')) as f: try: f.readline() except IOError: # if stable_secret is unset, then EIO is returned test1_addr_gen_mode = '0' else: test1_addr_gen_mode = '2' else: test1_addr_gen_mode = '0' if os.path.exists(os.path.join(os.path.join(network_sysctl_ipv6_path, 'test1'), 'addr_gen_mode')): self.assertEqual(self.read_ipv6_sysctl_attr('test1', 'addr_gen_mode'), '0') if os.path.exists(os.path.join(os.path.join(network_sysctl_ipv6_path, 'dummy98'), 'addr_gen_mode')): self.assertEqual(self.read_ipv6_sysctl_attr('dummy98', 'addr_gen_mode'), '1') def test_sysctl(self): self.copy_unit_to_networkd_unit_path('25-sysctl.network', '12-dummy.netdev') self.start_networkd() self.assertTrue(self.link_exits('dummy98')) self.assertEqual(self.read_ipv6_sysctl_attr('dummy98', 'forwarding'), '1') self.assertEqual(self.read_ipv6_sysctl_attr('dummy98', 'use_tempaddr'), '2') self.assertEqual(self.read_ipv6_sysctl_attr('dummy98', 'dad_transmits'), '3') self.assertEqual(self.read_ipv6_sysctl_attr('dummy98', 'hop_limit'), '5') self.assertEqual(self.read_ipv6_sysctl_attr('dummy98', 'proxy_ndp'), '1') self.assertEqual(self.read_ipv4_sysctl_attr('dummy98', 'forwarding'),'1') self.assertEqual(self.read_ipv4_sysctl_attr('dummy98', 'proxy_arp'), '1') def test_bind_carrier(self): self.copy_unit_to_networkd_unit_path('25-bind-carrier.network', '11-dummy.netdev') self.start_networkd() self.assertTrue(self.link_exits('test1')) self.assertEqual(subprocess.call(['ip', 'link', 'add', 'dummy98', 'type', 'dummy']), 0) self.assertEqual(subprocess.call(['ip', 'link', 'set', 'dummy98', 'up']), 0) time.sleep(2) output = subprocess.check_output(['ip', 'address', 'show', 'test1']).rstrip().decode('utf-8') print(output) self.assertRegex(output, 'UP,LOWER_UP') self.assertRegex(output, 'inet 192.168.10.30/24 brd 192.168.10.255 scope global test1') output = subprocess.check_output(['networkctl', 'status', 'test1']).rstrip().decode('utf-8') self.assertRegex(output, 'State: routable \(configured\)') self.assertEqual(subprocess.call(['ip', 'link', 'add', 'dummy99', 'type', 'dummy']), 0) self.assertEqual(subprocess.call(['ip', 'link', 'set', 'dummy99', 'up']), 0) time.sleep(2) output = subprocess.check_output(['ip', 'address', 'show', 'test1']).rstrip().decode('utf-8') print(output) self.assertRegex(output, 'UP,LOWER_UP') self.assertRegex(output, 'inet 192.168.10.30/24 brd 192.168.10.255 scope global test1') output = subprocess.check_output(['networkctl', 'status', 'test1']).rstrip().decode('utf-8') self.assertRegex(output, 'State: routable \(configured\)') self.assertEqual(subprocess.call(['ip', 'link', 'del', 'dummy98']), 0) time.sleep(2) output = subprocess.check_output(['ip', 'address', 'show', 'test1']).rstrip().decode('utf-8') print(output) self.assertRegex(output, 'UP,LOWER_UP') self.assertRegex(output, 'inet 192.168.10.30/24 brd 192.168.10.255 scope global test1') output = subprocess.check_output(['networkctl', 'status', 'test1']).rstrip().decode('utf-8') self.assertRegex(output, 'State: routable \(configured\)') self.assertEqual(subprocess.call(['ip', 
'link', 'del', 'dummy99']), 0) time.sleep(2) output = subprocess.check_output(['ip', 'address', 'show', 'test1']).rstrip().decode('utf-8') print(output) self.assertNotRegex(output, 'UP,LOWER_UP') self.assertRegex(output, 'DOWN') self.assertNotRegex(output, '192.168.10') output = subprocess.check_output(['networkctl', 'status', 'test1']).rstrip().decode('utf-8') self.assertRegex(output, 'State: off \(configured\)') self.assertEqual(subprocess.call(['ip', 'link', 'add', 'dummy98', 'type', 'dummy']), 0) self.assertEqual(subprocess.call(['ip', 'link', 'set', 'dummy98', 'up']), 0) time.sleep(2) output = subprocess.check_output(['ip', 'address', 'show', 'test1']).rstrip().decode('utf-8') print(output) self.assertRegex(output, 'UP,LOWER_UP') self.assertRegex(output, 'inet 192.168.10.30/24 brd 192.168.10.255 scope global test1') output = subprocess.check_output(['networkctl', 'status', 'test1']).rstrip().decode('utf-8') self.assertRegex(output, 'State: routable \(configured\)') class NetworkdNetWorkBondTests(unittest.TestCase, Utilities): links = [ 'bond99', 'veth99'] units = [ '25-bond.netdev', '25-veth.netdev', 'bond99.network', 'dhcp-server.network', 'veth-bond.network'] def setUp(self): self.link_remove(self.links) def tearDown(self): self.link_remove(self.links) self.remove_unit_from_networkd_path(self.units) def test_bridge_property(self): self.copy_unit_to_networkd_unit_path('25-bond.netdev', '25-veth.netdev', 'bond99.network', 'dhcp-server.network', 'veth-bond.network') self.start_networkd() self.assertTrue(self.link_exits('bond99')) self.assertTrue(self.link_exits('veth99')) self.assertTrue(self.link_exits('veth-peer')) output = subprocess.check_output(['ip', '-d', 'link', 'show', 'veth-peer']).rstrip().decode('utf-8') print(output) self.assertRegex(output, 'UP,LOWER_UP') output = subprocess.check_output(['ip', '-d', 'link', 'show', 'veth99']).rstrip().decode('utf-8') print(output) self.assertRegex(output, 'SLAVE,UP,LOWER_UP') output = subprocess.check_output(['ip', '-d', 'link', 'show', 'bond99']).rstrip().decode('utf-8') print(output) self.assertRegex(output, 'MASTER,UP,LOWER_UP') output = subprocess.check_output(['networkctl', 'status', 'veth-peer']).rstrip().decode('utf-8') print(output) self.assertRegex(output, 'State: routable \(configured\)') output = subprocess.check_output(['networkctl', 'status', 'veth99']).rstrip().decode('utf-8') print(output) self.assertRegex(output, 'State: enslaved \(configured\)') output = subprocess.check_output(['networkctl', 'status', 'bond99']).rstrip().decode('utf-8') print(output) self.assertRegex(output, 'State: routable \(configured\)') self.assertEqual(subprocess.call(['ip', 'link', 'set', 'veth99', 'down']), 0) time.sleep(2) output = subprocess.check_output(['networkctl', 'status', 'veth99']).rstrip().decode('utf-8') print(output) self.assertRegex(output, 'State: off \(configured\)') output = subprocess.check_output(['networkctl', 'status', 'bond99']).rstrip().decode('utf-8') print(output) self.assertRegex(output, 'State: degraded \(configured\)') self.assertEqual(subprocess.call(['ip', 'link', 'set', 'veth99', 'up']), 0) time.sleep(2) output = subprocess.check_output(['networkctl', 'status', 'veth99']).rstrip().decode('utf-8') print(output) self.assertRegex(output, 'State: enslaved \(configured\)') output = subprocess.check_output(['networkctl', 'status', 'bond99']).rstrip().decode('utf-8') print(output) self.assertRegex(output, 'State: routable \(configured\)') class NetworkdNetWorkBridgeTests(unittest.TestCase, Utilities): links = [ 
'bridge99', 'dummy98', 'test1'] units = [ '11-dummy.netdev', '12-dummy.netdev', '26-bridge.netdev', '26-bridge-slave-interface-1.network', '26-bridge-slave-interface-2.network', 'bridge99-ignore-carrier-loss.network', 'bridge99.network'] def setUp(self): self.link_remove(self.links) def tearDown(self): self.link_remove(self.links) self.remove_unit_from_networkd_path(self.units) def test_bridge_property(self): self.copy_unit_to_networkd_unit_path('11-dummy.netdev', '12-dummy.netdev', '26-bridge.netdev', '26-bridge-slave-interface-1.network', '26-bridge-slave-interface-2.network', 'bridge99.network') self.start_networkd() self.assertTrue(self.link_exits('dummy98')) self.assertTrue(self.link_exits('test1')) self.assertTrue(self.link_exits('bridge99')) output = subprocess.check_output(['ip', '-d', 'link', 'show', 'test1']).rstrip().decode('utf-8') print(output) self.assertRegex(output, 'master') self.assertRegex(output, 'bridge') output = subprocess.check_output(['ip', '-d', 'link', 'show', 'dummy98']).rstrip().decode('utf-8') print(output) self.assertRegex(output, 'master') self.assertRegex(output, 'bridge') output = subprocess.check_output(['ip', 'addr', 'show', 'bridge99']).rstrip().decode('utf-8') print(output) self.assertRegex(output, '192.168.0.15') self.assertRegex(output, '192.168.0.1') output = subprocess.check_output(['bridge', '-d', 'link', 'show', 'dummy98']).rstrip().decode('utf-8') print(output) self.assertEqual(self.read_bridge_port_attr('bridge99', 'dummy98', 'hairpin_mode'), '1') self.assertEqual(self.read_bridge_port_attr('bridge99', 'dummy98', 'path_cost'), '400') self.assertEqual(self.read_bridge_port_attr('bridge99', 'dummy98', 'unicast_flood'), '1') self.assertEqual(self.read_bridge_port_attr('bridge99', 'dummy98', 'multicast_fast_leave'), '1') # CONFIG_BRIDGE_IGMP_SNOOPING=y if (os.path.exists('/sys/devices/virtual/net/bridge00/lower_dummy98/brport/multicast_to_unicast')): self.assertEqual(self.read_bridge_port_attr('bridge99', 'dummy98', 'multicast_to_unicast'), '1') self.assertEqual(subprocess.call(['ip', 'address', 'add', '192.168.0.16/24', 'dev', 'bridge99']), 0) time.sleep(1) self.assertEqual(subprocess.call(['ip', 'link', 'del', 'test1']), 0) self.assertEqual(subprocess.call(['ip', 'link', 'del', 'dummy98']), 0) time.sleep(3) output = subprocess.check_output(['ip', 'address', 'show', 'bridge99']).rstrip().decode('utf-8') print(output) self.assertRegex(output, 'NO-CARRIER') self.assertNotRegex(output, '192.168.0.15/24') self.assertNotRegex(output, '192.168.0.16/24') def test_bridge_ignore_carrier_loss(self): self.copy_unit_to_networkd_unit_path('11-dummy.netdev', '12-dummy.netdev', '26-bridge.netdev', '26-bridge-slave-interface-1.network', '26-bridge-slave-interface-2.network', 'bridge99-ignore-carrier-loss.network') self.start_networkd() self.assertTrue(self.link_exits('dummy98')) self.assertTrue(self.link_exits('test1')) self.assertTrue(self.link_exits('bridge99')) self.assertEqual(subprocess.call(['ip', 'address', 'add', '192.168.0.16/24', 'dev', 'bridge99']), 0) time.sleep(1) self.assertEqual(subprocess.call(['ip', 'link', 'del', 'test1']), 0) self.assertEqual(subprocess.call(['ip', 'link', 'del', 'dummy98']), 0) time.sleep(3) output = subprocess.check_output(['ip', 'address', 'show', 'bridge99']).rstrip().decode('utf-8') print(output) self.assertRegex(output, 'NO-CARRIER') self.assertRegex(output, 'inet 192.168.0.15/24 brd 192.168.0.255 scope global bridge99') self.assertRegex(output, 'inet 192.168.0.16/24 scope global secondary bridge99') class 
NetworkdNetWorkLLDPTests(unittest.TestCase, Utilities): links = ['veth99'] units = [ '23-emit-lldp.network', '24-lldp.network', '25-veth.netdev'] def setUp(self): self.link_remove(self.links) def tearDown(self): self.link_remove(self.links) self.remove_unit_from_networkd_path(self.units) def test_lldp(self): self.copy_unit_to_networkd_unit_path('23-emit-lldp.network', '24-lldp.network', '25-veth.netdev') self.start_networkd() self.assertTrue(self.link_exits('veth99')) output = subprocess.check_output(['networkctl', 'lldp']).rstrip().decode('utf-8') print(output) self.assertRegex(output, 'veth-peer') self.assertRegex(output, 'veth99') class NetworkdNetworkRATests(unittest.TestCase, Utilities): links = ['veth99'] units = [ '25-veth.netdev', 'ipv6-prefix.network', 'ipv6-prefix-veth.network'] def setUp(self): self.link_remove(self.links) def tearDown(self): self.link_remove(self.links) self.remove_unit_from_networkd_path(self.units) def test_ipv6_prefix_delegation(self): self.copy_unit_to_networkd_unit_path('25-veth.netdev', 'ipv6-prefix.network', 'ipv6-prefix-veth.network') self.start_networkd() self.assertTrue(self.link_exits('veth99')) output = subprocess.check_output(['networkctl', 'status', 'veth99']).rstrip().decode('utf-8') print(output) self.assertRegex(output, '2002:da8:1:0') class NetworkdNetworkDHCPServerTests(unittest.TestCase, Utilities): links = [ 'dummy98', 'veth99'] units = [ '12-dummy.netdev', '24-search-domain.network', '25-veth.netdev', 'dhcp-client.network', 'dhcp-client-timezone-router.network', 'dhcp-server.network', 'dhcp-server-timezone-router.network'] def setUp(self): self.link_remove(self.links) def tearDown(self): self.link_remove(self.links) self.remove_unit_from_networkd_path(self.units) def test_dhcp_server(self): self.copy_unit_to_networkd_unit_path('25-veth.netdev', 'dhcp-client.network', 'dhcp-server.network') self.start_networkd() self.assertTrue(self.link_exits('veth99')) output = subprocess.check_output(['networkctl', 'status', 'veth99']).rstrip().decode('utf-8') print(output) self.assertRegex(output, '192.168.5.*') self.assertRegex(output, 'Gateway: 192.168.5.1') self.assertRegex(output, 'DNS: 192.168.5.1') self.assertRegex(output, 'NTP: 192.168.5.1') def test_domain(self): self.copy_unit_to_networkd_unit_path('12-dummy.netdev', '24-search-domain.network') self.start_networkd() self.assertTrue(self.link_exits('dummy98')) output = subprocess.check_output(['networkctl', 'status', 'dummy98']).rstrip().decode('utf-8') print(output) self.assertRegex(output, 'Address: 192.168.42.100') self.assertRegex(output, 'DNS: 192.168.42.1') self.assertRegex(output, 'Search Domains: one') def test_emit_router_timezone(self): self.copy_unit_to_networkd_unit_path('25-veth.netdev', 'dhcp-client-timezone-router.network', 'dhcp-server-timezone-router.network') self.start_networkd() self.assertTrue(self.link_exits('veth99')) output = subprocess.check_output(['networkctl', 'status', 'veth99']).rstrip().decode('utf-8') print(output) self.assertRegex(output, 'Gateway: 192.168.5.*') self.assertRegex(output, '192.168.5.*') self.assertRegex(output, 'Europe/Berlin') class NetworkdNetworkDHCPClientTests(unittest.TestCase, Utilities): links = [ 'dummy98', 'veth99', 'vrf99'] units = [ '25-veth.netdev', '25-vrf.netdev', '25-vrf.network', 'dhcp-client-anonymize.network', 'dhcp-client-critical-connection.network', 'dhcp-client-ipv4-dhcp-settings.network', 'dhcp-client-ipv4-only-ipv6-disabled.network', 'dhcp-client-ipv4-only.network', 'dhcp-client-ipv6-only.network', 
'dhcp-client-ipv6-rapid-commit.network', 'dhcp-client-listen-port.network', 'dhcp-client-route-metric.network', 'dhcp-client-route-table.network', 'dhcp-client-vrf.network', 'dhcp-client.network', 'dhcp-server-veth-peer.network', 'dhcp-v4-server-veth-peer.network', 'static.network'] def setUp(self): self.link_remove(self.links) self.stop_dnsmasq(dnsmasq_pid_file) def tearDown(self): self.link_remove(self.links) self.remove_unit_from_networkd_path(self.units) self.stop_dnsmasq(dnsmasq_pid_file) self.remove_lease_file() self.remove_log_file() def test_dhcp_client_ipv6_only(self): self.copy_unit_to_networkd_unit_path('25-veth.netdev', 'dhcp-server-veth-peer.network', 'dhcp-client-ipv6-only.network') self.start_networkd() self.assertTrue(self.link_exits('veth99')) self.start_dnsmasq() output = subprocess.check_output(['networkctl', 'status', 'veth99']).rstrip().decode('utf-8') print(output) self.assertRegex(output, '2600::') self.assertNotRegex(output, '192.168.5') def test_dhcp_client_ipv4_only(self): self.copy_unit_to_networkd_unit_path('25-veth.netdev', 'dhcp-server-veth-peer.network', 'dhcp-client-ipv4-only-ipv6-disabled.network') self.start_networkd() self.assertTrue(self.link_exits('veth99')) self.start_dnsmasq() output = subprocess.check_output(['networkctl', 'status', 'veth99']).rstrip().decode('utf-8') print(output) self.assertNotRegex(output, '2600::') self.assertRegex(output, '192.168.5') def test_dhcp_client_ipv4_ipv6(self): self.copy_unit_to_networkd_unit_path('25-veth.netdev', 'dhcp-server-veth-peer.network', 'dhcp-client-ipv6-only.network', 'dhcp-client-ipv4-only.network') self.start_networkd() self.assertTrue(self.link_exits('veth99')) self.start_dnsmasq() output = subprocess.check_output(['networkctl', 'status', 'veth99']).rstrip().decode('utf-8') print(output) self.assertRegex(output, '2600::') self.assertRegex(output, '192.168.5') def test_dhcp_client_settings(self): self.copy_unit_to_networkd_unit_path('25-veth.netdev', 'dhcp-server-veth-peer.network', 'dhcp-client-ipv4-dhcp-settings.network') self.start_networkd() self.assertTrue(self.link_exits('veth99')) self.start_dnsmasq() print('## ip address show dev veth99') output = subprocess.check_output(['ip', 'address', 'show', 'dev', 'veth99']).rstrip().decode('utf-8') print(output) self.assertRegex(output, '12:34:56:78:9a:bc') self.assertRegex(output, '192.168.5') self.assertRegex(output, '1492') # issue #8726 print('## ip route show table main dev veth99') output = subprocess.check_output(['ip', 'route', 'show', 'table', 'main', 'dev', 'veth99']).rstrip().decode('utf-8') print(output) self.assertNotRegex(output, 'proto dhcp') print('## ip route show table 211 dev veth99') output = subprocess.check_output(['ip', 'route', 'show', 'table', '211', 'dev', 'veth99']).rstrip().decode('utf-8') print(output) self.assertRegex(output, 'default via 192.168.5.1 proto dhcp') self.assertRegex(output, '192.168.5.0/24 via 192.168.5.5 proto dhcp') self.assertRegex(output, '192.168.5.1 proto dhcp scope link') print('## dnsmasq log') self.assertTrue(self.search_words_in_dnsmasq_log('vendor class: SusantVendorTest', True)) self.assertTrue(self.search_words_in_dnsmasq_log('DHCPDISCOVER(veth-peer) 12:34:56:78:9a:bc')) self.assertTrue(self.search_words_in_dnsmasq_log('client provides name: test-hostname')) self.assertTrue(self.search_words_in_dnsmasq_log('26:mtu')) def test_dhcp6_client_settings_rapidcommit_true(self): self.copy_unit_to_networkd_unit_path('25-veth.netdev', 'dhcp-server-veth-peer.network', 'dhcp-client-ipv6-only.network') 
self.start_networkd() self.assertTrue(self.link_exits('veth99')) self.start_dnsmasq() output = subprocess.check_output(['ip', 'address', 'show', 'dev', 'veth99']).rstrip().decode('utf-8') print(output) self.assertRegex(output, '12:34:56:78:9a:bc') self.assertTrue(self.search_words_in_dnsmasq_log('14:rapid-commit', True)) def test_dhcp6_client_settings_rapidcommit_false(self): self.copy_unit_to_networkd_unit_path('25-veth.netdev', 'dhcp-server-veth-peer.network', 'dhcp-client-ipv6-rapid-commit.network') self.start_networkd() self.assertTrue(self.link_exits('veth99')) self.start_dnsmasq() output = subprocess.check_output(['ip', 'address', 'show', 'dev', 'veth99']).rstrip().decode('utf-8') print(output) self.assertRegex(output, '12:34:56:78:9a:bc') self.assertFalse(self.search_words_in_dnsmasq_log('14:rapid-commit', True)) def test_dhcp_client_settings_anonymize(self): self.copy_unit_to_networkd_unit_path('25-veth.netdev', 'dhcp-server-veth-peer.network', 'dhcp-client-anonymize.network') self.start_networkd() self.assertTrue(self.link_exits('veth99')) self.start_dnsmasq() self.assertFalse(self.search_words_in_dnsmasq_log('VendorClassIdentifier=SusantVendorTest', True)) self.assertFalse(self.search_words_in_dnsmasq_log('test-hostname')) self.assertFalse(self.search_words_in_dnsmasq_log('26:mtu')) def test_dhcp_client_listen_port(self): self.copy_unit_to_networkd_unit_path('25-veth.netdev', 'dhcp-server-veth-peer.network', 'dhcp-client-listen-port.network') self.start_networkd() self.assertTrue(self.link_exits('veth99')) self.start_dnsmasq('--dhcp-alternate-port=67,5555') output = subprocess.check_output(['ip', '-4', 'address', 'show', 'dev', 'veth99']).rstrip().decode('utf-8') print(output) self.assertRegex(output, '192.168.5.* dynamic') def test_dhcp_route_table_id(self): self.copy_unit_to_networkd_unit_path('25-veth.netdev', 'dhcp-v4-server-veth-peer.network', 'dhcp-client-route-table.network') self.start_networkd() self.assertTrue(self.link_exits('veth99')) self.start_dnsmasq() output = subprocess.check_output(['ip', 'route', 'show', 'table', '12']).rstrip().decode('utf-8') print(output) self.assertRegex(output, 'veth99 proto dhcp') self.assertRegex(output, '192.168.5.1') def test_dhcp_route_metric(self): self.copy_unit_to_networkd_unit_path('25-veth.netdev', 'dhcp-v4-server-veth-peer.network', 'dhcp-client-route-metric.network') self.start_networkd() self.assertTrue(self.link_exits('veth99')) self.start_dnsmasq() output = subprocess.check_output(['ip', 'route', 'show', 'dev', 'veth99']).rstrip().decode('utf-8') print(output) self.assertRegex(output, 'metric 24') def test_dhcp_route_criticalconnection_true(self): self.copy_unit_to_networkd_unit_path('25-veth.netdev', 'dhcp-v4-server-veth-peer.network', 'dhcp-client-critical-connection.network') self.start_networkd() self.assertTrue(self.link_exits('veth99')) self.start_dnsmasq() output = subprocess.check_output(['networkctl', 'status', 'veth99']).rstrip().decode('utf-8') print(output) self.assertRegex(output, '192.168.5.*') # Stoping dnsmasq as networkd won't be allowed to renew the DHCP lease. 
self.stop_dnsmasq(dnsmasq_pid_file) # Sleep for 120 sec as the dnsmasq minimum lease time can only be set to 120 time.sleep(125) output = subprocess.check_output(['networkctl', 'status', 'veth99']).rstrip().decode('utf-8') print(output) self.assertRegex(output, '192.168.5.*') def test_dhcp_client_reuse_address_as_static(self): self.copy_unit_to_networkd_unit_path('25-veth.netdev', 'dhcp-server-veth-peer.network', 'dhcp-client.network') self.start_networkd() self.assertTrue(self.link_exits('veth99')) self.start_dnsmasq() output = subprocess.check_output(['ip', 'address', 'show', 'dev', 'veth99', 'scope', 'global']).rstrip().decode('utf-8') print(output) self.assertRegex(output, '192.168.5') self.assertRegex(output, '2600::') ipv4_address = re.search('192\.168\.5\.[0-9]*/24', output) ipv6_address = re.search('2600::[0-9a-f:]*/128', output) static_network = '\n'.join(['[Match]', 'Name=veth99', '[Network]', 'IPv6AcceptRA=no', 'Address=' + ipv4_address.group(), 'Address=' + ipv6_address.group()]) print(static_network) self.remove_unit_from_networkd_path(['dhcp-client.network']) with open(os.path.join(network_unit_file_path, 'static.network'), mode='w') as f: f.write(static_network) self.start_networkd() self.assertTrue(self.link_exits('veth99')) output = subprocess.check_output(['ip', '-4', 'address', 'show', 'dev', 'veth99', 'scope', 'global']).rstrip().decode('utf-8') print(output) self.assertRegex(output, '192.168.5') self.assertRegex(output, 'valid_lft forever preferred_lft forever') output = subprocess.check_output(['ip', '-6', 'address', 'show', 'dev', 'veth99', 'scope', 'global']).rstrip().decode('utf-8') print(output) self.assertRegex(output, '2600::') self.assertRegex(output, 'valid_lft forever preferred_lft forever') @expectedFailureIfModuleIsNotAvailable('vrf') def test_dhcp_client_vrf(self): self.copy_unit_to_networkd_unit_path('25-veth.netdev', 'dhcp-server-veth-peer.network', 'dhcp-client-vrf.network', '25-vrf.netdev', '25-vrf.network') self.start_networkd() self.assertTrue(self.link_exits('veth99')) self.assertTrue(self.link_exits('vrf99')) self.start_dnsmasq() print('## ip -d link show dev vrf99') output = subprocess.check_output(['ip', '-d', 'link', 'show', 'dev', 'vrf99']).rstrip().decode('utf-8') print(output) self.assertRegex(output, 'vrf table 42') print('## ip address show vrf vrf99') output_ip_vrf = subprocess.check_output(['ip', 'address', 'show', 'vrf', 'vrf99']).rstrip().decode('utf-8') print(output_ip_vrf) print('## ip address show dev veth99') output = subprocess.check_output(['ip', 'address', 'show', 'dev', 'veth99']).rstrip().decode('utf-8') print(output) self.assertEqual(output, output_ip_vrf) self.assertRegex(output, 'inet 169.254.[0-9]*.[0-9]*/16 brd 169.254.255.255 scope link veth99') self.assertRegex(output, 'inet 192.168.5.[0-9]*/24 brd 192.168.5.255 scope global dynamic veth99') self.assertRegex(output, 'inet6 2600::[0-9a-f]*/128 scope global dynamic noprefixroute') self.assertRegex(output, 'inet6 .* scope link') print('## ip route show vrf vrf99') output = subprocess.check_output(['ip', 'route', 'show', 'vrf', 'vrf99']).rstrip().decode('utf-8') print(output) self.assertRegex(output, 'default via 192.168.5.1 dev veth99 proto dhcp src 192.168.5.') self.assertRegex(output, 'default dev veth99 proto static scope link') self.assertRegex(output, '169.254.0.0/16 dev veth99 proto kernel scope link src 169.254') self.assertRegex(output, '192.168.5.0/24 dev veth99 proto kernel scope link src 192.168.5') self.assertRegex(output, '192.168.5.0/24 via 192.168.5.5 dev 
veth99 proto dhcp') self.assertRegex(output, '192.168.5.1 dev veth99 proto dhcp scope link src 192.168.5') print('## ip route show table main dev veth99') output = subprocess.check_output(['ip', 'route', 'show', 'table', 'main', 'dev', 'veth99']).rstrip().decode('utf-8') print(output) self.assertEqual(output, '') output = subprocess.check_output(['networkctl', 'status', 'vrf99']).rstrip().decode('utf-8') print(output) self.assertRegex(output, 'State: carrier \(configured\)') output = subprocess.check_output(['networkctl', 'status', 'veth99']).rstrip().decode('utf-8') print(output) self.assertRegex(output, 'State: routable \(configured\)') if __name__ == '__main__': unittest.main(testRunner=unittest.TextTestRunner(stream=sys.stdout, verbosity=3))
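The suite above leans on a Utilities mixin whose definition lies outside this excerpt: link_exits() (spelled that way throughout the file), copy_unit_to_networkd_unit_path(), read_ipv6_sysctl_attr(), and friends. A minimal sketch of how such helpers could be implemented, assuming units are staged under /run/systemd/network and links surface under /sys/class/net — both paths are assumptions on my part, not taken from this record:

import os
import shutil

NETWORK_UNIT_PATH = '/run/systemd/network'  # assumed staging directory
TEST_DATA_PATH = os.path.dirname(os.path.abspath(__file__))

def link_exits(link):
    # A netdev is visible once the kernel exposes /sys/class/net/<name>.
    return os.path.exists(os.path.join('/sys/class/net', link))

def copy_unit_to_networkd_unit_path(*units):
    # Stage the named .netdev/.network fixtures where networkd reads them.
    os.makedirs(NETWORK_UNIT_PATH, exist_ok=True)
    for unit in units:
        shutil.copy(os.path.join(TEST_DATA_PATH, unit), NETWORK_UNIT_PATH)

def read_ipv6_sysctl_attr(link, attr):
    # Per-link IPv6 sysctls live under /proc/sys/net/ipv6/conf/<link>/.
    with open(os.path.join('/proc/sys/net/ipv6/conf', link, attr)) as f:
        return f.read().rstrip()

Every test then follows the same rhythm: stage the fixture units, restart networkd, wait for the link to appear, and assert on ip(8) or networkctl output.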
xujb/odoo
refs/heads/8.0
addons/hr_attendance/wizard/hr_attendance_error.py
377
# -*- coding: utf-8 -*-
##############################################################################
#
#    OpenERP, Open Source Management Solution
#    Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
#    This program is free software: you can redistribute it and/or modify
#    it under the terms of the GNU Affero General Public License as
#    published by the Free Software Foundation, either version 3 of the
#    License, or (at your option) any later version.
#
#    This program is distributed in the hope that it will be useful,
#    but WITHOUT ANY WARRANTY; without even the implied warranty of
#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#    GNU Affero General Public License for more details.
#
#    You should have received a copy of the GNU Affero General Public License
#    along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################

import time

from openerp.osv import fields, osv
from openerp.tools.translate import _


class hr_attendance_error(osv.osv_memory):
    _name = 'hr.attendance.error'
    _description = 'Print Error Attendance Report'
    _columns = {
        'init_date': fields.date('Starting Date', required=True),
        'end_date': fields.date('Ending Date', required=True),
        'max_delay': fields.integer('Max. Delay (Min)', required=True)
    }
    _defaults = {
        'init_date': lambda *a: time.strftime('%Y-%m-%d'),
        'end_date': lambda *a: time.strftime('%Y-%m-%d'),
        'max_delay': 120,
    }

    def print_report(self, cr, uid, ids, context=None):
        emp_ids = []
        data_error = self.read(cr, uid, ids, context=context)[0]
        date_from = data_error['init_date']
        date_to = data_error['end_date']
        cr.execute("SELECT id FROM hr_attendance "
                   "WHERE employee_id IN %s "
                   "AND to_char(name,'YYYY-mm-dd')<=%s "
                   "AND to_char(name,'YYYY-mm-dd')>=%s "
                   "AND action IN %s ORDER BY name",
                   (tuple(context['active_ids']), date_to, date_from,
                    tuple(['sign_in', 'sign_out'])))
        attendance_ids = [x[0] for x in cr.fetchall()]
        if not attendance_ids:
            raise osv.except_osv(_('No Data Available!'),
                                 _('No records are found for your selection!'))
        attendance_records = self.pool.get('hr.attendance').browse(
            cr, uid, attendance_ids, context=context)
        for rec in attendance_records:
            if rec.employee_id.id not in emp_ids:
                emp_ids.append(rec.employee_id.id)
        data_error['emp_ids'] = emp_ids
        datas = {
            'ids': [],
            'model': 'hr.employee',
            'form': data_error
        }
        return self.pool['report'].get_action(
            cr, uid, [], 'hr_attendance.report_attendanceerrors',
            data=datas, context=context
        )

# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
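As a usage sketch, a transient wizard like this is normally created and fired from server-side code via the old-style OSV API. The helper below is hypothetical, and the employee ids and date range are invented for illustration; only the wizard model name, fields, and the active_ids requirement come from the module above:

# Hypothetical method on some osv model driving the wizard (old OpenERP API).
def run_attendance_error_report(self, cr, uid, employee_ids, context=None):
    # print_report() reads active_ids from the context, so the key is
    # required; without it the SQL query would raise a KeyError.
    ctx = dict(context or {}, active_ids=employee_ids)
    wizard = self.pool.get('hr.attendance.error')
    wiz_id = wizard.create(cr, uid, {
        'init_date': '2014-01-01',   # illustrative values only
        'end_date': '2014-01-31',
        'max_delay': 120,
    }, context=ctx)
    return wizard.print_report(cr, uid, [wiz_id], context=ctx)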
webu/django-categories
refs/heads/master
doc_src/code_examples/custom_categories6.py
13
class CategoryAdminForm(CategoryBaseAdminForm):
    class Meta:
        model = Category

    def clean_alternate_title(self):
        if self.instance is None or not self.cleaned_data['alternate_title']:
            return self.cleaned_data['name']
        else:
            return self.cleaned_data['alternate_title']
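Django runs each clean_<field> hook during is_valid(), so the override above makes a blank alternate_title fall back to the category's name. A hedged sketch of that behaviour — the field values are illustrative, and a real Category model likely has further required fields that would also need data:

# Illustrative only: exercising the fallback in clean_alternate_title().
form = CategoryAdminForm(data={'name': 'Science', 'alternate_title': ''})
if form.is_valid():
    # alternate_title was left blank, so cleaned_data carries the name.
    assert form.cleaned_data['alternate_title'] == 'Science'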
tawsifkhan/scikit-learn
refs/heads/master
doc/conf.py
210
# -*- coding: utf-8 -*- # # scikit-learn documentation build configuration file, created by # sphinx-quickstart on Fri Jan 8 09:13:42 2010. # # This file is execfile()d with the current directory set to its containing # dir. # # Note that not all possible configuration values are present in this # autogenerated file. # # All configuration values have a default; values that are commented out # serve to show the default. from __future__ import print_function import sys import os from sklearn.externals.six import u # If extensions (or modules to document with autodoc) are in another # directory, add these directories to sys.path here. If the directory # is relative to the documentation root, use os.path.abspath to make it # absolute, like shown here. sys.path.insert(0, os.path.abspath('sphinxext')) from github_link import make_linkcode_resolve # -- General configuration --------------------------------------------------- # Try to override the matplotlib configuration as early as possible try: import gen_rst except: pass # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones. extensions = ['gen_rst', 'sphinx.ext.autodoc', 'sphinx.ext.autosummary', 'sphinx.ext.pngmath', 'numpy_ext.numpydoc', 'sphinx.ext.linkcode', ] autosummary_generate = True autodoc_default_flags = ['members', 'inherited-members'] # Add any paths that contain templates here, relative to this directory. templates_path = ['templates'] # generate autosummary even if no references autosummary_generate = True # The suffix of source filenames. source_suffix = '.rst' # The encoding of source files. #source_encoding = 'utf-8' # Generate the plots for the gallery plot_gallery = True # The master toctree document. master_doc = 'index' # General information about the project. project = u('scikit-learn') copyright = u('2010 - 2014, scikit-learn developers (BSD License)') # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the # built documents. # # The short X.Y version. import sklearn version = sklearn.__version__ # The full version, including alpha/beta/rc tags. release = sklearn.__version__ # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. #language = None # There are two options for replacing |today|: either, you set today to some # non-false value, then it is used: #today = '' # Else, today_fmt is used as the format for a strftime call. #today_fmt = '%B %d, %Y' # List of documents that shouldn't be included in the build. #unused_docs = [] # List of directories, relative to source directory, that shouldn't be # searched for source files. exclude_trees = ['_build', 'templates', 'includes'] # The reST default role (used for this markup: `text`) to use for all # documents. #default_role = None # If true, '()' will be appended to :func: etc. cross-reference text. add_function_parentheses = False # If true, the current module name will be prepended to all description # unit titles (such as .. function::). #add_module_names = True # If true, sectionauthor and moduleauthor directives will be shown in the # output. They are ignored by default. #show_authors = False # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'sphinx' # A list of ignored prefixes for module index sorting. 
#modindex_common_prefix = [] # -- Options for HTML output ------------------------------------------------- # The theme to use for HTML and HTML Help pages. Major themes that come with # Sphinx are currently 'default' and 'sphinxdoc'. html_theme = 'scikit-learn' # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. html_theme_options = {'oldversion': False, 'collapsiblesidebar': True, 'google_analytics': True, 'surveybanner': False, 'sprintbanner': True} # Add any paths that contain custom themes here, relative to this directory. html_theme_path = ['themes'] # The name for this set of Sphinx documents. If None, it defaults to # "<project> v<release> documentation". #html_title = None # A shorter title for the navigation bar. Default is the same as html_title. html_short_title = 'scikit-learn' # The name of an image file (relative to this directory) to place at the top # of the sidebar. html_logo = 'logos/scikit-learn-logo-small.png' # The name of an image file (within the static path) to use as favicon of the # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 # pixels large. html_favicon = 'logos/favicon.ico' # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". html_static_path = ['images'] # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, # using the given strftime format. #html_last_updated_fmt = '%b %d, %Y' # If true, SmartyPants will be used to convert quotes and dashes to # typographically correct entities. #html_use_smartypants = True # Custom sidebar templates, maps document names to template names. #html_sidebars = {} # Additional templates that should be rendered to pages, maps page names to # template names. #html_additional_pages = {} # If false, no module index is generated. html_domain_indices = False # If false, no index is generated. html_use_index = False # If true, the index is split into individual pages for each letter. #html_split_index = False # If true, links to the reST sources are added to the pages. #html_show_sourcelink = True # If true, an OpenSearch description file will be output, and all pages will # contain a <link> tag referring to it. The value of this option must be the # base URL from which the finished HTML is served. #html_use_opensearch = '' # If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml"). #html_file_suffix = '' # Output file base name for HTML help builder. htmlhelp_basename = 'scikit-learndoc' # -- Options for LaTeX output ------------------------------------------------ # The paper size ('letter' or 'a4'). #latex_paper_size = 'letter' # The font size ('10pt', '11pt' or '12pt'). #latex_font_size = '10pt' # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, author, documentclass # [howto/manual]). latex_documents = [('index', 'user_guide.tex', u('scikit-learn user guide'), u('scikit-learn developers'), 'manual'), ] # The name of an image file (relative to this directory) to place at the top of # the title page. latex_logo = "logos/scikit-learn-logo.png" # For "manual" documents, if this is true, then toplevel headings are parts, # not chapters. #latex_use_parts = False # Additional stuff for the LaTeX preamble. 
latex_preamble = r""" \usepackage{amsmath}\usepackage{amsfonts}\usepackage{bm}\usepackage{morefloats} \usepackage{enumitem} \setlistdepth{10} """ # Documents to append as an appendix to all manuals. #latex_appendices = [] # If false, no module index is generated. latex_domain_indices = False trim_doctests_flags = True def generate_example_rst(app, what, name, obj, options, lines): # generate empty examples files, so that we don't get # inclusion errors if there are no examples for a class / module examples_path = os.path.join(app.srcdir, "modules", "generated", "%s.examples" % name) if not os.path.exists(examples_path): # touch file open(examples_path, 'w').close() def setup(app): # to hide/show the prompt in code examples: app.add_javascript('js/copybutton.js') app.connect('autodoc-process-docstring', generate_example_rst) # The following is used by sphinx.ext.linkcode to provide links to github linkcode_resolve = make_linkcode_resolve('sklearn', u'https://github.com/scikit-learn/' 'scikit-learn/blob/{revision}/' '{package}/{path}#L{lineno}')
openstack/cinder
refs/heads/master
cinder/tests/unit/volume/drivers/test_spdk.py
2
# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import copy import json from unittest import mock from os_brick import initiator from os_brick.initiator import connector from oslo_utils import timeutils from oslo_utils import units from cinder import context from cinder import objects from cinder.tests.unit import fake_constants as fake from cinder.tests.unit import fake_volume from cinder.tests.unit import test from cinder.tests.unit import utils as test_utils from cinder import utils from cinder.volume import configuration as conf from cinder.volume.drivers import spdk as spdk_driver BDEVS = [{ "num_blocks": 4096000, "name": "Nvme0n1", "driver_specific": { "nvme": { "trid": { "trtype": "PCIe", "traddr": "0000:00:04.0" }, "ns_data": { "id": 1 }, "pci_address": "0000:00:04.0", "vs": { "nvme_version": "1.1" }, "ctrlr_data": { "firmware_revision": "1.0", "serial_number": "deadbeef", "oacs": { "ns_manage": 0, "security": 0, "firmware": 0, "format": 0 }, "vendor_id": "0x8086", "model_number": "QEMU NVMe Ctrl" }, "csts": { "rdy": 1, "cfs": 0 } } }, "supported_io_types": { "reset": True, "nvme_admin": True, "unmap": False, "read": True, "write_zeroes": False, "write": True, "flush": True, "nvme_io": True }, "claimed": False, "block_size": 512, "product_name": "NVMe disk", "aliases": ["Nvme0n1"] }, { "num_blocks": 8192, "uuid": "70efd305-4e66-49bd-99ff-faeda5c3052d", "aliases": [ "Nvme0n1p0" ], "driver_specific": { "lvol": { "base_bdev": "Nvme0n1", "lvol_store_uuid": "58b17014-d4a1-4f85-9761-093643ed18f1", "thin_provision": False } }, "supported_io_types": { "reset": True, "nvme_admin": False, "unmap": True, "read": True, "write_zeroes": True, "write": True, "flush": False, "nvme_io": False }, "claimed": False, "block_size": 4096, "product_name": "Split Disk", "name": "Nvme0n1p0" }, { "num_blocks": 8192, "uuid": "70efd305-4e66-49bd-99ff-faeda5c3052d", "aliases": [ "Nvme0n1p1" ], "driver_specific": { "lvol": { "base_bdev": "Nvme0n1", "lvol_store_uuid": "58b17014-d4a1-4f85-9761-093643ed18f1", "thin_provision": False } }, "supported_io_types": { "reset": True, "nvme_admin": False, "unmap": True, "read": True, "write_zeroes": True, "write": True, "flush": False, "nvme_io": False }, "claimed": False, "block_size": 4096, "product_name": "Split Disk", "name": "Nvme0n1p1" }, { "num_blocks": 8192, "uuid": "70efd305-4e66-49bd-99ff-faeda5c3052d", "aliases": [ "lvs_test/lvol0" ], "driver_specific": { "lvol": { "base_bdev": "Malloc0", "lvol_store_uuid": "58b17014-d4a1-4f85-9761-093643ed18f1", "thin_provision": False } }, "supported_io_types": { "reset": True, "nvme_admin": False, "unmap": True, "read": True, "write_zeroes": True, "write": True, "flush": False, "nvme_io": False }, "claimed": False, "block_size": 4096, "product_name": "Logical Volume", "name": "58b17014-d4a1-4f85-9761-093643ed18f1_4294967297" }, { "num_blocks": 8192, "uuid": "8dec1964-d533-41df-bea7-40520efdb416", "aliases": [ "lvs_test/lvol1" ], "driver_specific": { "lvol": { "base_bdev": "Malloc0", "lvol_store_uuid": "58b17014-d4a1-4f85-9761-093643ed18f1", 
"thin_provision": True } }, "supported_io_types": { "reset": True, "nvme_admin": False, "unmap": True, "read": True, "write_zeroes": True, "write": True, "flush": False, "nvme_io": False }, "claimed": False, "block_size": 4096, "product_name": "Logical Volume", "name": "58b17014-d4a1-4f85-9761-093643ed18f1_4294967298" }] LVOL_STORES = [{ "uuid": "58b17014-d4a1-4f85-9761-093643ed18f1", "base_bdev": "Nvme0n1", "free_clusters": 5976, "cluster_size": 1048576, "total_data_clusters": 5976, "block_size": 4096, "name": "lvs_test" }] NVMF_SUBSYSTEMS = [{ "listen_addresses": [], "subtype": "Discovery", "nqn": "nqn.2014-08.org.nvmexpress.discovery", "hosts": [], "allow_any_host": True }, { "listen_addresses": [], "subtype": "NVMe", "hosts": [{ "nqn": "nqn.2016-06.io.spdk:init" }], "namespaces": [{ "bdev_name": "Nvme0n1p0", "nsid": 1, "name": "Nvme0n1p0" }], "allow_any_host": False, "serial_number": "SPDK00000000000001", "nqn": "nqn.2016-06.io.spdk:cnode1" }, { "listen_addresses": [], "subtype": "NVMe", "hosts": [], "namespaces": [{ "bdev_name": "Nvme1n1p0", "nsid": 1, "name": "Nvme1n1p0" }], "allow_any_host": True, "serial_number": "SPDK00000000000002", "nqn": "nqn.2016-06.io.spdk:cnode2" }] class Volume(object): def __init__(self): self.size = 1 self.name = "lvol2" class Snapshot(object): def __init__(self): self.name = "snapshot0" self.volume_size = 1 class JSONRPCException(Exception): def __init__(self, message): self.message = message class JSONRPCClient(object): def __init__(self, addr=None, port=None): self.methods = {"bdev_get_bdevs": self.get_bdevs, "bdev_lvol_get_lvstores": self.get_lvol_stores, "bdev_lvol_delete": self.destroy_lvol_bdev, "bdev_lvol_snapshot": self.snapshot_lvol_bdev, "bdev_lvol_clone": self.clone_lvol_bdev, "bdev_lvol_create": self.construct_lvol_bdev, "bdev_lvol_resize": self.resize_lvol_bdev, "nvmf_get_subsystems": self.get_nvmf_subsystems, "construct_nvmf_subsystem": self.construct_nvmf_subsystem, "nvmf_create_subsystem": self.nvmf_subsystem_create, "nvmf_subsystem_add_listener": self.nvmf_subsystem_add_listener, "nvmf_subsystem_add_ns": self.nvmf_subsystem_add_ns, "bdev_lvol_inflate": self.inflate_lvol_bdev} self.bdevs = copy.deepcopy(BDEVS) self.nvmf_subsystems = copy.deepcopy(NVMF_SUBSYSTEMS) self.lvol_stores = copy.deepcopy(LVOL_STORES) def get_bdevs(self, params=None): if params and 'name' in params: for bdev in self.bdevs: for alias in bdev['aliases']: if params['name'] in alias: return json.dumps({"result": [bdev]}) if bdev['name'] == params['name']: return json.dumps({"result": [bdev]}) return json.dumps({"error": "Not found"}) return json.dumps({"result": self.bdevs}) def destroy_lvol_bdev(self, params=None): if 'name' not in params: return json.dumps({}) i = 0 found_bdev = -1 for bdev in self.bdevs: if bdev['name'] == params['name']: found_bdev = i break i += 1 if found_bdev != -1: del self.bdevs[found_bdev] return json.dumps({"result": {}}) def get_lvol_stores(self, params=None): return json.dumps({"result": self.lvol_stores}) def snapshot_lvol_bdev(self, params=None): snapshot = { 'num_blocks': 5376, 'name': '58b17014-d4a1-4f85-9761-093643ed18f2', 'aliases': ['lvs_test/%s' % params['snapshot_name']], 'driver_specific': { 'lvol': { 'base_bdev': u'Malloc0', 'lvol_store_uuid': u'58b17014-d4a1-4f85-9761-093643ed18f1', 'thin_provision': False, 'clones': ['clone0', 'clone1'] } }, 'claimed': False, 'block_size': 4096, 'product_name': 'Logical Volume', 'supported_io_types': { 'reset': True, 'nvme_admin': False, 'unmap': True, 'read': True, 'write_zeroes': True, 
'write': True, 'flush': False, 'nvme_io': False } } self.bdevs.append(snapshot) return json.dumps({"result": [snapshot]}) def clone_lvol_bdev(self, params=None): clone = { 'num_blocks': 7936, 'supported_io_types': { 'reset': True, 'nvme_admin': False, 'unmap': True, 'read': True, 'write_zeroes': True, 'write': True, 'flush': False, 'nvme_io': False }, 'name': '3735a554-0dce-4d13-ba67-597d41186104', 'driver_specific': { 'lvol': { 'base_bdev': 'Malloc0', 'lvol_store_uuid': '58b17014-d4a1-4f85-9761-093643ed18f1', 'thin_provision': False } }, 'block_size': 4096, 'claimed': False, 'aliases': [u'lvs_test/%s' % params['clone_name']], 'product_name': 'Logical Volume', 'uuid': '3735a554-0dce-4d13-ba67-597d41186104' } self.bdevs.append(clone) return json.dumps({"result": [clone]}) def construct_lvol_bdev(self, params=None): lvol_bdev = { "num_blocks": 8192, "uuid": "8dec1964-d533-41df-bea7-40520efdb416", "aliases": [ "lvs_test/%s" % params['lvol_name'] ], "driver_specific": { "lvol": { "base_bdev": "Malloc0", "lvol_store_uuid": "58b17014-d4a1-4f85-9761-093643ed18f1", "thin_provision": True } }, "supported_io_types": { "reset": True, "nvme_admin": False, "unmap": True, "read": True, "write_zeroes": True, "write": True, "flush": False, "nvme_io": False }, "claimed": False, "block_size": 4096, "product_name": "Logical Volume", "name": "58b17014-d4a1-4f85-9761-093643ed18f1_4294967299" } self.bdevs.append(lvol_bdev) return json.dumps({"result": [{}]}) def get_nvmf_subsystems(self, params=None): return json.dumps({"result": self.nvmf_subsystems}) def resize_lvol_bdev(self, params=None): if params: if "name" in params: tmp_bdev = json.loads( self.get_bdevs(params={"name": params['name']}))['result'] if "size" in params: for bdev in self.bdevs: if bdev['name'] == tmp_bdev[0]['name']: bdev['num_blocks'] = params['size'] \ / bdev['block_size'] return json.dumps({"result": {}}) return json.dumps({"error": {}}) def inflate_lvol_bdev(self, params=None): return json.dumps({'result': {}}) def construct_nvmf_subsystem(self, params=None): nvmf_subsystem = { "listen_addresses": [], "subtype": "NVMe", "hosts": [], "namespaces": [{ "bdev_name": "Nvme1n1p0", "nsid": 1, "name": "Nvme1n1p0" }], "allow_any_host": True, "serial_number": params['serial_number'], "nqn": params['nqn'] } self.nvmf_subsystems.append(nvmf_subsystem) return json.dumps({"result": nvmf_subsystem}) def nvmf_subsystem_create(self, params=None): nvmf_subsystem = { "namespaces": [], "nqn": params['nqn'], "serial_number": "S0000000000000000001", "allow_any_host": False, "subtype": "NVMe", "hosts": [], "listen_addresses": [] } self.nvmf_subsystems.append(nvmf_subsystem) return json.dumps({"result": nvmf_subsystem}) def nvmf_subsystem_add_listener(self, params=None): for nvmf_subsystem in self.nvmf_subsystems: if nvmf_subsystem['nqn'] == params['nqn']: nvmf_subsystem['listen_addresses'].append( params['listen_address'] ) return json.dumps({"result": ""}) def nvmf_subsystem_add_ns(self, params=None): for nvmf_subsystem in self.nvmf_subsystems: if nvmf_subsystem['nqn'] == params['nqn']: nvmf_subsystem['namespaces'].append( params['namespace'] ) return json.dumps({"result": ""}) def call(self, method, params=None): req = {} req['jsonrpc'] = '2.0' req['method'] = method req['id'] = 1 if (params): req['params'] = params response = json.loads(self.methods[method](params)) if not response: if method == "kill_instance": return {} msg = "Timeout while waiting for response:" raise JSONRPCException(msg) if 'error' in response: msg = "\n".join(["Got JSON-RPC error 
response", "request:", json.dumps(req, indent=2), "response:", json.dumps(response['error'], indent=2)]) raise JSONRPCException(msg) return response['result'] class SpdkDriverTestCase(test.TestCase): def setUp(self): super(SpdkDriverTestCase, self).setUp() self.configuration = mock.Mock(conf.Configuration) self.configuration.target_helper = "" self.configuration.target_ip_address = "192.168.0.1" self.configuration.target_port = 4420 self.configuration.target_prefix = "nqn.2014-08.io.spdk" self.configuration.nvmet_port_id = "1" self.configuration.nvmet_ns_id = "fake_id" self.configuration.nvmet_subsystem_name = "2014-08.io.spdk" self.configuration.target_protocol = "nvmet_rdma" self.configuration.spdk_rpc_ip = "127.0.0.1" self.configuration.spdk_rpc_port = 8000 self.configuration.spdk_rpc_protocol = "https" mock_safe_get = mock.Mock() mock_safe_get.return_value = 'spdk-nvmeof' self.configuration.safe_get = mock_safe_get self.jsonrpcclient = JSONRPCClient() self.driver = spdk_driver.SPDKDriver(configuration= self.configuration) self._context = context.get_admin_context() self.updated_at = timeutils.utcnow() def test__update_volume_stats(self): with mock.patch.object(self.driver, "_rpc_call", self.jsonrpcclient.call): self.driver._update_volume_stats() self.assertEqual(1, len(self.driver._stats['pools'])) self.assertEqual("lvs_test", self.driver._stats['pools'][0]['pool_name']) self.assertEqual('SPDK', self.driver._stats['volume_backend_name']) self.assertEqual('Open Source', self.driver._stats['vendor_name']) self.assertEqual('NVMe-oF', self.driver._stats['storage_protocol']) self.assertIsNotNone(self.driver._stats['driver_version']) def test__get_spdk_volume_name(self): with mock.patch.object(self.driver, "_rpc_call", self.jsonrpcclient.call): bdev = self.driver._get_spdk_volume_name("lvs_test/lvol0") self.assertEqual('58b17014-d4a1-4f85-9761' '-093643ed18f1_4294967297', bdev) bdev = self.driver._get_spdk_volume_name("Nvme1n1") self.assertIsNone(bdev) def test__get_spdk_lvs_uuid(self): with mock.patch.object(self.driver, "_rpc_call", self.jsonrpcclient.call): bdev = self.driver._rpc_call( "bdev_get_bdevs", params={"name": "lvs_test/lvol0"}) self.assertEqual( bdev[0]['driver_specific']['lvol']['lvol_store_uuid'], self.driver._get_spdk_lvs_uuid( "58b17014-d4a1-4f85-9761-093643ed18f1_4294967297")) self.assertIsNone( self.driver._get_spdk_lvs_uuid("lvs_test/fake")) def test__get_spdk_lvs_free_space(self): with mock.patch.object(self.driver, "_rpc_call", self.jsonrpcclient.call): lvs = self.driver._rpc_call("bdev_lvol_get_lvstores") lvol_store = None for lvol in lvs: if lvol['name'] == "lvs_test": lvol_store = lvol self.assertIsNotNone(lvol_store) free_size = (lvol_store['free_clusters'] * lvol_store['cluster_size'] / units.Gi) self.assertEqual(free_size, self.driver._get_spdk_lvs_free_space( "58b17014-d4a1-4f85-9761-093643ed18f1")) self.assertEqual(0, self.driver._get_spdk_lvs_free_space("fake")) def test__delete_bdev(self): with mock.patch.object(self.driver, "_rpc_call", self.jsonrpcclient.call): self.driver._delete_bdev("lvs_test/lvol1") bdev = self.driver._get_spdk_volume_name("lvs_test/lvol1") self.assertIsNone(bdev) self.driver._delete_bdev("lvs_test/lvol1") bdev = self.driver._get_spdk_volume_name("lvs_test/lvol1") self.assertIsNone(bdev) def test__create_volume(self): with mock.patch.object(self.driver, "_rpc_call", self.jsonrpcclient.call): self.driver._create_volume(Volume()) bdev = self.driver._get_spdk_volume_name("lvs_test/lvol2") self.assertEqual("58b17014-d4a1-4f85-9761" 
"-093643ed18f1_4294967299", bdev) volume_clone = Volume() volume_clone.name = "clone0" self.driver._rpc_call("bdev_lvol_snapshot", params={'snapshot_name': "snapshot0", 'lvol_name': "lvs_test/lvol2"}) bdev = self.driver._get_spdk_volume_name("lvs_test/snapshot0") self.assertEqual("58b17014-d4a1-4f85-9761-093643ed18f2", bdev) snapshot = Snapshot() self.driver._create_volume(volume_clone, snapshot) bdev = self.driver._get_spdk_volume_name("lvs_test/clone0") self.assertEqual("3735a554-0dce-4d13-ba67-597d41186104", bdev) def test_check_for_setup_error(self): with mock.patch.object(self.driver, "_rpc_call", self.jsonrpcclient.call): self.driver.check_for_setup_error() @mock.patch('cinder.db.sqlalchemy.api.volume_get') def test_create_volume(self, volume_get): with mock.patch.object(self.driver, "_rpc_call", self.jsonrpcclient.call): db_volume = fake_volume.fake_db_volume() ctxt = context.get_admin_context() db_volume = objects.Volume._from_db_object(ctxt, objects.Volume(), db_volume) volume_get.return_value = db_volume self.driver.create_volume(db_volume) bdev = self.driver._get_spdk_volume_name("lvs_test/%s" % db_volume.name) self.assertEqual("58b17014-d4a1-4f85-9761" "-093643ed18f1_4294967299", bdev) @mock.patch('cinder.db.sqlalchemy.api.volume_get') def test_delete_volume(self, volume_get): with mock.patch.object(self.driver, "_rpc_call", self.jsonrpcclient.call): db_volume = fake_volume.fake_db_volume() with mock.patch.object(self.driver.target_driver, "_rpc_call", self.jsonrpcclient.call): nqn = "nqn.2016-06.io.spdk:cnode%s" \ % self.driver.target_driver._get_first_free_node() db_volume['provider_id'] = nqn ctxt = context.get_admin_context() db_volume = objects.Volume._from_db_object(ctxt, objects.Volume(), db_volume) volume_get.return_value = db_volume start_bdevs_len = len(self.driver._rpc_call('bdev_get_bdevs')) self.driver.create_volume(db_volume) tmp_bdevs = self.driver._rpc_call('bdev_get_bdevs') self.assertEqual(start_bdevs_len + 1, len(tmp_bdevs)) volume = Volume() volume.name = "lvs_test/%s" % db_volume.name volume_name = self.driver._get_spdk_volume_name(volume.name) self.driver._rpc_call('bdev_lvol_delete', {"name": volume_name}) self.driver.delete_volume(volume) bdev = self.driver._get_spdk_volume_name("lvs_test/%s" % db_volume.name) self.assertIsNone(bdev) tmp_bdevs = self.driver._rpc_call('bdev_get_bdevs') self.assertEqual(start_bdevs_len, len(tmp_bdevs)) def get_volume_stats(self): with mock.patch.object(self.driver, "_rpc_call", self.jsonrpcclient.call): self.driver.get_volume_stats(True) self.driver.get_volume_stats(False) def test_create_volume_from_snapshot(self): with mock.patch.object(self.driver, "_rpc_call", self.jsonrpcclient.call): volume_clone = Volume() volume_clone.name = "clone0" self.driver._rpc_call("bdev_lvol_snapshot", params={'snapshot_name': "snapshot0", 'lvol_name': "lvs_test/lvol2"}) snapshot = Snapshot() self.driver.create_volume_from_snapshot(volume_clone, snapshot) bdev = self.driver._get_spdk_volume_name("lvs_test/clone0") self.assertEqual("3735a554-0dce-4d13-ba67-597d41186104", bdev) @mock.patch('cinder.db.sqlalchemy.api.volume_get') def test_create_snapshot(self, volume_get): with mock.patch.object(self.driver, "_rpc_call", self.jsonrpcclient.call): db_volume = fake_volume.fake_db_volume() db_volume['name'] = "lvs_test/lvol0" ctxt = context.get_admin_context() db_volume = objects.Volume._from_db_object(ctxt, objects.Volume(), db_volume) volume_get.return_value = db_volume snapshot = {} snapshot['volume_id'] = db_volume['id'] snapshot['name'] = 
"snapshot0" snapshot['volume'] = db_volume for bdev in self.jsonrpcclient.bdevs: if bdev['aliases'][-1] == "lvs_test/lvol0": bdev['aliases'].append(db_volume.name) self.driver.create_snapshot(snapshot) bdev = self.driver._get_spdk_volume_name("lvs_test/snapshot0") self.assertEqual("58b17014-d4a1-4f85-9761-093643ed18f2", bdev) def test_delete_snapshot(self): with mock.patch.object(self.driver, "_rpc_call", self.jsonrpcclient.call): snapshot = Snapshot() snapshot.name = "snapshot0" self.driver._rpc_call("bdev_lvol_snapshot", params = {'snapshot_name': snapshot.name}) self.driver.delete_snapshot(snapshot) snapshot = self.driver._get_spdk_volume_name("lvs_test/" + snapshot.name) self.assertIsNone(snapshot) @mock.patch('cinder.db.sqlalchemy.api.volume_get') def test_create_cloned_volume(self, volume_get): with mock.patch.object(self.driver, "_rpc_call", self.jsonrpcclient.call): db_volume = fake_volume.fake_db_volume() db_volume['name'] = "lvs_test/lvol0" db_volume['size'] = 1 ctxt = context.get_admin_context() db_volume = objects.Volume._from_db_object(ctxt, objects.Volume(), db_volume) cloned_volume = Volume() cloned_volume.name = 'lvs_test/cloned_volume' for bdev in self.jsonrpcclient.bdevs: if bdev['aliases'][-1] == "lvs_test/lvol0": bdev['aliases'].append(db_volume.name) self.driver.create_cloned_volume(cloned_volume, db_volume) bdev = self.driver._get_spdk_volume_name("lvs_test/cloned_volume") self.assertEqual("3735a554-0dce-4d13-ba67-597d41186104", bdev) @mock.patch('cinder.db.sqlalchemy.api.volume_get') def test_copy_image_to_volume(self, volume_get): with mock.patch.object(self.driver, "_rpc_call", self.jsonrpcclient.call): db_volume = fake_volume.fake_db_volume() db_volume['provider_location'] = "127.0.0.1:3262 RDMA " \ "2016-06.io.spdk:cnode2" ctxt = context.get_admin_context() db_volume = objects.Volume._from_db_object(ctxt, objects.Volume(), db_volume) volume_get.return_value = db_volume with mock.patch.object(self.driver.target_driver, "_rpc_call", self.jsonrpcclient.call): self.driver.copy_image_to_volume(ctxt, db_volume, None, None) @mock.patch('cinder.db.sqlalchemy.api.volume_get') def test_copy_volume_to_image(self, volume_get): with mock.patch.object(self.driver, "_rpc_call", self.jsonrpcclient.call): provider_location = "127.0.0.1:3262 RDMA 2016-06.io.spdk:cnode2" volume = test_utils.create_volume( self._context, volume_type_id=fake.VOLUME_TYPE_ID, updated_at=self.updated_at, provider_location=provider_location) extra_specs = { 'image_service:store_id': 'fake-store' } test_utils.create_volume_type(self._context.elevated(), id=fake.VOLUME_TYPE_ID, name="test_type", extra_specs=extra_specs) ctxt = context.get_admin_context() volume_get.return_value = volume with mock.patch.object(self.driver.target_driver, "_rpc_call", self.jsonrpcclient.call): self.driver.copy_volume_to_image(ctxt, volume, None, None) def test_extend_volume(self): with mock.patch.object(self.driver, "_rpc_call", self.jsonrpcclient.call): volume = Volume() volume.name = "lvs_test/lvol0" self.driver.extend_volume(volume, 2) bdev = self.driver._rpc_call("bdev_get_bdevs", params={"name": "lvs_test/lvol0"}) self.assertEqual(2 * units.Gi, bdev[0]['num_blocks'] * bdev[0]['block_size']) def test_ensure_export(self): pass def test_create_export(self): with mock.patch.object(self.driver, "_rpc_call", self.jsonrpcclient.call): db_volume = fake_volume.fake_db_volume() db_volume['provider_location'] = "192.168.0.1:4420 rdma " \ "2014-08.io.spdk:cnode2" ctxt = context.get_admin_context() db_volume = 
objects.Volume._from_db_object(ctxt, objects.Volume(), db_volume) with mock.patch.object(self.driver.target_driver, "_rpc_call", self.jsonrpcclient.call): expected_return = { 'provider_location': self.driver.target_driver.get_nvmeof_location( "nqn.%s:cnode%s" % ( self.configuration.nvmet_subsystem_name, self.driver.target_driver._get_first_free_node() ), self.configuration.target_ip_address, self.configuration.target_port, "rdma", self.configuration.nvmet_ns_id ), 'provider_auth': '' } export = self.driver.create_export(ctxt, db_volume, None) self.assertEqual(expected_return, export) def test_remove_export(self): with mock.patch.object(self.driver, "_rpc_call", self.jsonrpcclient.call): db_volume = fake_volume.fake_db_volume() db_volume['provider_location'] = "127.0.0.1:4420 rdma " \ "2016-06.io.spdk:cnode2" ctxt = context.get_admin_context() db_volume = objects.Volume._from_db_object(ctxt, objects.Volume(), db_volume) with mock.patch.object(self.driver.target_driver, "_rpc_call", self.jsonrpcclient.call): self.driver.create_export(ctxt, db_volume, None) self.assertIsNone(self.driver.remove_export(ctxt, db_volume)) def test_initialize_connection(self): with mock.patch.object(self.driver, "_rpc_call", self.jsonrpcclient.call): db_volume = fake_volume.fake_db_volume() db_volume['provider_location'] = "127.0.0.1:3262 RDMA " \ "2016-06.io.spdk:cnode2 1" ctxt = context.get_admin_context() db_volume = objects.Volume._from_db_object(ctxt, objects.Volume(), db_volume) target_connector = \ connector.InitiatorConnector.factory(initiator.NVME, utils.get_root_helper()) self.driver.initialize_connection(db_volume, target_connector) def test_validate_connector(self): mock_connector = {'initiator': 'fake_init'} self.assertTrue(self.driver.validate_connector(mock_connector)) def test_terminate_connection(self): pass
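The test case above exercises every driver entry point against the in-memory JSONRPCClient rather than a live SPDK target: mock.patch.object swaps the driver's _rpc_call for the fake client's call method, which dispatches on the JSON-RPC method name and serves results from the BDEVS/LVOL_STORES/NVMF_SUBSYSTEMS fixtures. A minimal, self-contained sketch of that dispatch-and-patch pattern follows; FakeRpcClient and Driver are illustrative stand-ins, not part of the driver, and unittest.mock is used here where the tests above use the standalone mock package (the idiom is identical).

import json
from unittest import mock


class FakeRpcClient(object):
    """Dispatches JSON-RPC method names to local handlers, like JSONRPCClient above."""

    def __init__(self):
        self.bdevs = [{"name": "lvol0"}]
        self.methods = {"bdev_get_bdevs": self.get_bdevs}

    def get_bdevs(self, params=None):
        # Handlers return a JSON-encoded response, mirroring the wire format.
        return json.dumps({"result": self.bdevs})

    def call(self, method, params=None):
        response = json.loads(self.methods[method](params))
        return response["result"]


class Driver(object):
    def _rpc_call(self, method, params=None):
        raise NotImplementedError("would talk to a real SPDK target")

    def list_bdevs(self):
        return self._rpc_call("bdev_get_bdevs")


driver = Driver()
fake = FakeRpcClient()
# Same patching idiom used in each test method above.
with mock.patch.object(driver, "_rpc_call", fake.call):
    assert driver.list_bdevs() == [{"name": "lvol0"}]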
neumerance/cloudloon2
refs/heads/master
horizon/workflows/base.py
5
# vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright 2012 Nebula, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import copy import inspect import logging from django.core import urlresolvers from django import forms from django.forms.forms import NON_FIELD_ERRORS # noqa from django import template from django.template.defaultfilters import linebreaks # noqa from django.template.defaultfilters import safe # noqa from django.template.defaultfilters import slugify # noqa from django.utils.encoding import force_unicode # noqa from django.utils.importlib import import_module # noqa from django.utils.translation import ugettext_lazy as _ # noqa from horizon import base from horizon import exceptions from horizon.templatetags.horizon import has_permissions # noqa from horizon.utils import html LOG = logging.getLogger(__name__) class WorkflowContext(dict): def __init__(self, workflow, *args, **kwargs): super(WorkflowContext, self).__init__(*args, **kwargs) self._workflow = workflow def __setitem__(self, key, val): super(WorkflowContext, self).__setitem__(key, val) return self._workflow._trigger_handlers(key) def __delitem__(self, key): return self.__setitem__(key, None) def set(self, key, val): return self.__setitem__(key, val) def unset(self, key): return self.__delitem__(key) class ActionMetaclass(forms.forms.DeclarativeFieldsMetaclass): def __new__(mcs, name, bases, attrs): # Pop Meta for later processing opts = attrs.pop("Meta", None) # Create our new class cls = super(ActionMetaclass, mcs).__new__(mcs, name, bases, attrs) # Process options from Meta cls.name = getattr(opts, "name", name) cls.slug = getattr(opts, "slug", slugify(name)) cls.permissions = getattr(opts, "permissions", ()) cls.progress_message = getattr(opts, "progress_message", _("Processing...")) cls.help_text = getattr(opts, "help_text", "") cls.help_text_template = getattr(opts, "help_text_template", None) return cls class Action(forms.Form): """ An ``Action`` represents an atomic logical interaction you can have with the system. This is easier to understand with a conceptual example: in the context of a "launch instance" workflow, actions would include "naming the instance", "selecting an image", and ultimately "launching the instance". Because ``Actions`` are always interactive, they always provide form controls, and thus inherit from Django's ``Form`` class. However, they have some additional intelligence added to them: * ``Actions`` are aware of the permissions required to complete them. * ``Actions`` have a meta-level concept of "help text" which is meant to be displayed in such a way as to give context to the action regardless of where the action is presented in a site or workflow. * ``Actions`` understand how to handle their inputs and produce outputs, much like :class:`~horizon.forms.SelfHandlingForm` does now. ``Action`` classes may define the following attributes in a ``Meta`` class within them: .. attribute:: name The verbose name for this action. Defaults to the name of the class. .. 
attribute:: slug A semi-unique slug for this action. Defaults to the "slugified" name of the class. .. attribute:: permissions A list of permission names which this action requires in order to be completed. Defaults to an empty list (``[]``). .. attribute:: help_text A string of simple help text to be displayed alongside the Action's fields. .. attribute:: help_text_template A path to a template which contains more complex help text to be displayed alongside the Action's fields. In conjunction with :meth:`~horizon.workflows.Action.get_help_text` method you can customize your help text template to display practically anything. """ __metaclass__ = ActionMetaclass def __init__(self, request, context, *args, **kwargs): if request.method == "POST": super(Action, self).__init__(request.POST, initial=context) else: super(Action, self).__init__(initial=context) if not hasattr(self, "handle"): raise AttributeError("The action %s must define a handle method." % self.__class__.__name__) self.request = request self._populate_choices(request, context) self.required_css_class = 'required' def __unicode__(self): return force_unicode(self.name) def __repr__(self): return "<%s: %s>" % (self.__class__.__name__, self.slug) def _populate_choices(self, request, context): for field_name, bound_field in self.fields.items(): meth = getattr(self, "populate_%s_choices" % field_name, None) if meth is not None and callable(meth): bound_field.choices = meth(request, context) def get_help_text(self, extra_context=None): """ Returns the help text for this step. """ text = "" extra_context = extra_context or {} if self.help_text_template: tmpl = template.loader.get_template(self.help_text_template) context = template.RequestContext(self.request, extra_context) text += tmpl.render(context) else: text += linebreaks(force_unicode(self.help_text)) return safe(text) def add_error(self, message): """ Adds an error to the Action's Step based on API issues. """ self._get_errors()[NON_FIELD_ERRORS] = self.error_class([message]) def handle(self, request, context): """ Handles any requisite processing for this action. The method should return either ``None`` or a dictionary of data to be passed to :meth:`~horizon.workflows.Step.contribute`. Returns ``None`` by default, effectively making it a no-op. """ return None class MembershipAction(Action): """ An action that allows a user to add/remove members from a group. Extends the Action class with additional helper methods for membership management. """ def get_default_role_field_name(self): return "default_" + self.slug + "_role" def get_member_field_name(self, role_id): return self.slug + "_role_" + role_id class Step(object): """ A step is a wrapper around an action which defines its context in a workflow. It knows about details such as: * The workflow's context data (data passed from step to step). * The data which must be present in the context to begin this step (the step's dependencies). * The keys which will be added to the context data upon completion of the step. * The connections between this step's fields and changes in the context data (e.g. if that piece of data changes, what needs to be updated in this step). A ``Step`` class has the following attributes: .. attribute:: action The :class:`~horizon.workflows.Action` class which this step wraps. .. attribute:: depends_on A list of context data keys which this step requires in order to begin interaction. .. attribute:: contributes A list of keys which this step will contribute to the workflow's context data.
Optional keys should still be listed, even if their values may be set to ``None``. .. attribute:: connections A dictionary which maps context data key names to lists of callbacks. The callbacks may be functions, dotted python paths to functions which may be imported, or dotted strings beginning with ``"self"`` to indicate methods on the current ``Step`` instance. .. attribute:: before Another ``Step`` class. This optional attribute is used to provide control over workflow ordering when steps are dynamically added to workflows. The workflow mechanism will attempt to place the current step before the step specified in the attribute. .. attribute:: after Another ``Step`` class. This attribute has the same purpose as :meth:`~horizon.workflows.Step.before` except that it will instead attempt to place the current step after the given step. .. attribute:: help_text A string of simple help text which will be prepended to the ``Action`` class' help text if desired. .. attribute:: template_name A path to a template which will be used to render this step. In general the default common template should be used. Default: ``"horizon/common/_workflow_step.html"``. .. attribute:: has_errors A boolean value which indicates whether or not this step has any errors on the action within it or in the scope of the workflow. This attribute will only accurately reflect this status after validation has occurred. .. attribute:: slug Inherited from the ``Action`` class. .. attribute:: name Inherited from the ``Action`` class. .. attribute:: permissions Inherited from the ``Action`` class. """ action_class = None depends_on = () contributes = () connections = None before = None after = None help_text = "" template_name = "horizon/common/_workflow_step.html" def __repr__(self): return "<%s: %s>" % (self.__class__.__name__, self.slug) def __unicode__(self): return force_unicode(self.name) def __init__(self, workflow): super(Step, self).__init__() self.workflow = workflow cls = self.__class__.__name__ if not (self.action_class and issubclass(self.action_class, Action)): raise AttributeError("You must specify an action for %s." % cls) self.slug = self.action_class.slug self.name = self.action_class.name self.permissions = self.action_class.permissions self.has_errors = False self._handlers = {} if self.connections is None: # We want a dict, but don't want to declare a mutable type on the # class directly. self.connections = {} # Gather our connection handlers and make sure they exist. for key, handlers in self.connections.items(): self._handlers[key] = [] # TODO(gabriel): This is a poor substitute for broader handling if not isinstance(handlers, (list, tuple)): raise TypeError("The connection handlers for %s must be a " "list or tuple." % cls) for possible_handler in handlers: if callable(possible_handler): # If it's callable we know the function exists and is valid self._handlers[key].append(possible_handler) continue elif not isinstance(possible_handler, basestring): raise TypeError("Connection handlers must be either " "callables or strings.") bits = possible_handler.split(".") if bits[0] == "self": root = self for bit in bits[1:]: try: root = getattr(root, bit) except AttributeError: raise AttributeError("The connection handler %s " "could not be found on %s." % (possible_handler, cls)) handler = root elif len(bits) == 1: # Import by name from local module not supported raise ValueError("Importing a local function as a string " "is not supported for the connection " "handler %s on %s."
% (possible_handler, cls)) else: # Try a general import module_name = ".".join(bits[:-1]) try: mod = import_module(module_name) handler = getattr(mod, bits[-1]) except ImportError: raise ImportError("Could not import %s from the " "module %s as a connection " "handler on %s." % (bits[-1], module_name, cls)) except AttributeError: raise AttributeError("Could not import %s from the " "module %s as a connection " "handler on %s." % (bits[-1], module_name, cls)) self._handlers[key].append(handler) @property def action(self): if not getattr(self, "_action", None): try: # Hook in the action context customization. workflow_context = dict(self.workflow.context) context = self.prepare_action_context(self.workflow.request, workflow_context) self._action = self.action_class(self.workflow.request, context) except Exception: LOG.exception("Problem instantiating action class.") raise return self._action def prepare_action_context(self, request, context): """ Allows for customization of how the workflow context is passed to the action; this is the reverse of what "contribute" does to make the action outputs sane for the workflow. Changes to the context are not saved globally here. They are localized to the action. Simply returns the unaltered context by default. """ return context def get_id(self): """ Returns the ID for this step. Suitable for use in HTML markup. """ return "%s__%s" % (self.workflow.slug, self.slug) def _verify_contributions(self, context): for key in self.contributes: # Make sure we don't skip steps based on weird behavior of # POST query dicts. field = self.action.fields.get(key, None) if field and field.required and not context.get(key): context.pop(key, None) failed_to_contribute = set(self.contributes) failed_to_contribute -= set(context.keys()) if failed_to_contribute: raise exceptions.WorkflowError("The following expected data was " "not added to the workflow context " "by the step %s: %s." % (self.__class__, failed_to_contribute)) return True def contribute(self, data, context): """ Adds the data listed in ``contributes`` to the workflow's shared context. By default, the context is simply updated with all the data returned by the action. Note that even if the value of one of the ``contributes`` keys is not present (e.g. optional) the key should still be added to the context with a value of ``None``. """ if data: for key in self.contributes: context[key] = data.get(key, None) return context def render(self): """ Renders the step. """ step_template = template.loader.get_template(self.template_name) extra_context = {"form": self.action, "step": self} context = template.RequestContext(self.workflow.request, extra_context) return step_template.render(context) def get_help_text(self): """ Returns the help text for this step. """ text = linebreaks(force_unicode(self.help_text)) text += self.action.get_help_text() return safe(text) def add_error(self, message): """ Adds an error to the Step based on API issues. """ self.action.add_error(message) def has_required_fields(self): """ Returns True if action contains any required fields """ for key in self.contributes: field = self.action.fields.get(key, None) if (field and field.required): return True return False class WorkflowMetaclass(type): def __new__(mcs, name, bases, attrs): super(WorkflowMetaclass, mcs).__new__(mcs, name, bases, attrs) attrs["_cls_registry"] = set([]) return type.__new__(mcs, name, bases, attrs) class UpdateMembersStep(Step): """A step that allows a user to add/remove members from a group. .. 
attribute:: show_roles Set to False to disable the display of the roles dropdown. .. attribute:: available_list_title The title used for the available list column. .. attribute:: members_list_title The title used for the members list column. .. attribute:: no_available_text The placeholder text used when the available list is empty. .. attribute:: no_members_text The placeholder text used when the members list is empty. """ template_name = "horizon/common/_workflow_step_update_members.html" show_roles = True available_list_title = _("All available") members_list_title = _("Members") no_available_text = _("None available.") no_members_text = _("No members.") def get_member_field_name(self, role_id): if issubclass(self.action_class, MembershipAction): return self.action.get_member_field_name(role_id) else: return self.slug + "_role_" + role_id class Workflow(html.HTMLElement): """ A Workflow is a collection of Steps. Its interface is very straightforward, but it is responsible for handling some very important tasks such as: * Handling the injection, removal, and ordering of arbitrary steps. * Determining if the workflow can be completed by a given user at runtime based on all available information. * Dispatching connections between steps to ensure that when context data changes all the applicable callback functions are executed. * Verifying/validating the overall data integrity and subsequently triggering the final method to complete the workflow. The ``Workflow`` class has the following attributes: .. attribute:: name The verbose name for this workflow which will be displayed to the user. Defaults to the class name. .. attribute:: slug The unique slug for this workflow. Required. .. attribute:: steps Read-only access to the final ordered set of step instances for this workflow. .. attribute:: default_steps A list of :class:`~horizon.workflows.Step` classes which serve as the starting point for this workflow's ordered steps. Defaults to an empty list (``[]``). .. attribute:: finalize_button_name The name which will appear on the submit button for the workflow's form. Defaults to ``"Save"``. .. attribute:: success_message A string which will be displayed to the user upon successful completion of the workflow. Defaults to ``"{{ workflow.name }} completed successfully."`` .. attribute:: failure_message A string which will be displayed to the user upon failure to complete the workflow. Defaults to ``"{{ workflow.name }} did not complete."`` .. attribute:: depends_on A roll-up list of all the ``depends_on`` values compiled from the workflow's steps. .. attribute:: contributions A roll-up list of all the ``contributes`` values compiled from the workflow's steps. .. attribute:: template_name Path to the template which should be used to render this workflow. In general the default common template should be used. Default: ``"horizon/common/_workflow.html"``. .. attribute:: entry_point The slug of the step which should initially be active when the workflow is rendered. This can be passed in upon initialization of the workflow, or set anytime after initialization but before calling either ``get_entry_point`` or ``render``. .. attribute:: redirect_param_name The name of a parameter used for tracking the URL to redirect to upon completion of the workflow. Defaults to ``"next"``. .. attribute:: object The object (if any) which this workflow relates to. In the case of a workflow which creates a new resource the object would be the created resource after the relevant creation steps have been undertaken.
In the case of a workflow which updates a resource it would be the resource being updated after it has been retrieved. """ __metaclass__ = WorkflowMetaclass slug = None default_steps = () template_name = "horizon/common/_workflow.html" finalize_button_name = _("Save") success_message = _("%s completed successfully.") failure_message = _("%s did not complete.") redirect_param_name = "next" multipart = False _registerable_class = Step def __unicode__(self): return self.name def __repr__(self): return "<%s: %s>" % (self.__class__.__name__, self.slug) def __init__(self, request=None, context_seed=None, entry_point=None, *args, **kwargs): super(Workflow, self).__init__(*args, **kwargs) if self.slug is None: raise AttributeError("The workflow %s must have a slug." % self.__class__.__name__) self.name = getattr(self, "name", self.__class__.__name__) self.request = request self.depends_on = set([]) self.contributions = set([]) self.entry_point = entry_point self.object = None # Put together our steps in order. Note that we pre-register # non-default steps so that we can identify them and subsequently # insert them in order correctly. self._registry = dict([(step_class, step_class(self)) for step_class in self.__class__._cls_registry if step_class not in self.default_steps]) self._gather_steps() # Determine all the context data we need to end up with. for step in self.steps: self.depends_on = self.depends_on | set(step.depends_on) self.contributions = self.contributions | set(step.contributes) # Initialize our context. For ease we can preseed it with a # regular dictionary. This should happen after steps have been # registered and ordered. self.context = WorkflowContext(self) context_seed = context_seed or {} clean_seed = dict([(key, val) for key, val in context_seed.items() if key in self.contributions | self.depends_on]) self.context_seed = clean_seed self.context.update(clean_seed) if request and request.method == "POST": for step in self.steps: valid = step.action.is_valid() # Be sure to use the CLEANED data if the workflow is valid. if valid: data = step.action.cleaned_data else: data = request.POST self.context = step.contribute(data, self.context) @property def steps(self): if getattr(self, "_ordered_steps", None) is None: self._gather_steps() return self._ordered_steps def get_step(self, slug): """ Returns the instantiated step matching the given slug. """ for step in self.steps: if step.slug == slug: return step def _gather_steps(self): ordered_step_classes = self._order_steps() for default_step in self.default_steps: self.register(default_step) self._registry[default_step] = default_step(self) self._ordered_steps = [self._registry[step_class] for step_class in ordered_step_classes if has_permissions(self.request.user, self._registry[step_class])] def _order_steps(self): steps = list(copy.copy(self.default_steps)) additional = self._registry.keys() for step in additional: try: min_pos = steps.index(step.after) except ValueError: min_pos = 0 try: max_pos = steps.index(step.before) except ValueError: max_pos = len(steps) if min_pos > max_pos: raise exceptions.WorkflowError("The step %(new)s can't be " "placed between the steps " "%(after)s and %(before)s; the " "step %(before)s comes before " "%(after)s." % {"new": step, "after": step.after, "before": step.before}) steps.insert(max_pos, step) return steps def get_entry_point(self): """ Returns the slug of the step which the workflow should begin on. This method takes into account both already-available data and errors within the steps.
""" # If we have a valid specified entry point, use it. if self.entry_point: if self.get_step(self.entry_point): return self.entry_point # Otherwise fall back to calculating the appropriate entry point. for step in self.steps: if step.has_errors: return step.slug try: step._verify_contributions(self.context) except exceptions.WorkflowError: return step.slug # If nothing else, just return the first step. return self.steps[0].slug def _trigger_handlers(self, key): responses = [] handlers = [(step.slug, f) for step in self.steps for f in step._handlers.get(key, [])] for slug, handler in handlers: responses.append((slug, handler(self.request, self.context))) return responses @classmethod def register(cls, step_class): """ Registers a :class:`~horizon.workflows.Step` with the workflow. """ if not inspect.isclass(step_class): raise ValueError('Only classes may be registered.') elif not issubclass(step_class, cls._registerable_class): raise ValueError('Only %s classes or subclasses may be registered.' % cls._registerable_class.__name__) if step_class in cls._cls_registry: return False else: cls._cls_registry.add(step_class) return True @classmethod def unregister(cls, step_class): """ Unregisters a :class:`~horizon.workflows.Step` from the workflow. """ try: cls._cls_registry.remove(step_class) except KeyError: raise base.NotRegistered('%s is not registered' % cls) return cls._unregister(step_class) def validate(self, context): """ Hook for custom context data validation. Should return a boolean value or raise :class:`~horizon.exceptions.WorkflowValidationError`. """ return True def is_valid(self): """ Verified that all required data is present in the context and calls the ``validate`` method to allow for finer-grained checks on the context data. """ missing = self.depends_on - set(self.context.keys()) if missing: raise exceptions.WorkflowValidationError( "Unable to complete the workflow. The values %s are " "required but not present." % ", ".join(missing)) # Validate each step. Cycle through all of them to catch all errors # in one pass before returning. steps_valid = True for step in self.steps: if not step.action.is_valid(): steps_valid = False step.has_errors = True if not steps_valid: return steps_valid return self.validate(self.context) def finalize(self): """ Finalizes a workflow by running through all the actions in order and calling their ``handle`` methods. Returns ``True`` on full success, or ``False`` for a partial success, e.g. there were non-critical errors. (If it failed completely the function wouldn't return.) """ partial = False for step in self.steps: try: data = step.action.handle(self.request, self.context) if data is True or data is None: continue elif data is False: partial = True else: self.context = step.contribute(data or {}, self.context) except Exception: partial = True exceptions.handle(self.request) if not self.handle(self.request, self.context): partial = True return not partial def handle(self, request, context): """ Handles any final processing for this workflow. Should return a boolean value indicating success. """ return True def get_success_url(self): """ Returns a URL to redirect the user to upon completion. By default it will attempt to parse a ``success_url`` attribute on the workflow, which can take the form of a reversible URL pattern name, or a standard HTTP URL. 
""" try: return urlresolvers.reverse(self.success_url) except urlresolvers.NoReverseMatch: return self.success_url def format_status_message(self, message): """ Hook to allow customization of the message returned to the user upon successful or unsuccessful completion of the workflow. By default it simply inserts the workflow's name into the message string. """ if "%s" in message: return message % self.name else: return message def render(self): """ Renders the workflow. """ workflow_template = template.loader.get_template(self.template_name) extra_context = {"workflow": self} if self.request.is_ajax(): extra_context['modal'] = True context = template.RequestContext(self.request, extra_context) return workflow_template.render(context) def get_absolute_url(self): """ Returns the canonical URL for this workflow. This is used for the POST action attribute on the form element wrapping the workflow. For convenience it defaults to the value of ``request.get_full_path()`` with any query string stripped off, e.g. the path at which the workflow was requested. """ return self.request.get_full_path().partition('?')[0] def add_error_to_step(self, message, slug): """ Adds an error to the workflow's Step with the specifed slug based on API issues. This is useful when you wish for API errors to appear as errors on the form rather than using the messages framework. """ step = self.get_step(slug) if step: step.add_error(message)
abaditsegay/arangodb
refs/heads/devel
3rdParty/V8-4.3.61/third_party/python_26/Lib/tabnanny.py
394
#! /usr/bin/env python """The Tab Nanny despises ambiguous indentation. She knows no mercy. tabnanny -- Detection of ambiguous indentation For the time being this module is intended to be called as a script. However it is possible to import it into an IDE and use the function check() described below. Warning: The API provided by this module is likely to change in future releases; such changes may not be backward compatible. """ # Released to the public domain, by Tim Peters, 15 April 1998. # XXX Note: this is now a standard library module. # XXX The API needs to undergo changes however; the current code is too # XXX script-like. This will be addressed later. __version__ = "6" import os import sys import getopt import tokenize if not hasattr(tokenize, 'NL'): raise ValueError("tokenize.NL doesn't exist -- tokenize module too old") __all__ = ["check", "NannyNag", "process_tokens"] verbose = 0 filename_only = 0 def errprint(*args): sep = "" for arg in args: sys.stderr.write(sep + str(arg)) sep = " " sys.stderr.write("\n") def main(): global verbose, filename_only try: opts, args = getopt.getopt(sys.argv[1:], "qv") except getopt.error, msg: errprint(msg) return for o, a in opts: if o == '-q': filename_only = filename_only + 1 if o == '-v': verbose = verbose + 1 if not args: errprint("Usage:", sys.argv[0], "[-v] file_or_directory ...") return for arg in args: check(arg) class NannyNag(Exception): """ Raised by tokeneater() if detecting an ambiguous indent. Captured and handled in check(). """ def __init__(self, lineno, msg, line): self.lineno, self.msg, self.line = lineno, msg, line def get_lineno(self): return self.lineno def get_msg(self): return self.msg def get_line(self): return self.line def check(file): """check(file_or_dir) If file_or_dir is a directory and not a symbolic link, then recursively descend the directory tree named by file_or_dir, checking all .py files along the way. If file_or_dir is an ordinary Python source file, it is checked for whitespace related problems. The diagnostic messages are written to standard output using the print statement. """ if os.path.isdir(file) and not os.path.islink(file): if verbose: print "%r: listing directory" % (file,) names = os.listdir(file) for name in names: fullname = os.path.join(file, name) if (os.path.isdir(fullname) and not os.path.islink(fullname) or os.path.normcase(name[-3:]) == ".py"): check(fullname) return try: f = open(file) except IOError, msg: errprint("%r: I/O Error: %s" % (file, msg)) return if verbose > 1: print "checking %r ..." % file try: process_tokens(tokenize.generate_tokens(f.readline)) except tokenize.TokenError, msg: errprint("%r: Token Error: %s" % (file, msg)) return except IndentationError, msg: errprint("%r: Indentation Error: %s" % (file, msg)) return except NannyNag, nag: badline = nag.get_lineno() line = nag.get_line() if verbose: print "%r: *** Line %d: trouble in tab city! ***" % (file, badline) print "offending line: %r" % (line,) print nag.get_msg() else: if ' ' in file: file = '"' + file + '"' if filename_only: print file else: print file, badline, repr(line) return if verbose: print "%r: Clean bill of health." 
% (file,) class Whitespace: # the characters used for space and tab S, T = ' \t' # members: # raw # the original string # n # the number of leading whitespace characters in raw # nt # the number of tabs in raw[:n] # norm # the normal form as a pair (count, trailing), where: # count # a tuple such that raw[:n] contains count[i] # instances of S * i + T # trailing # the number of trailing spaces in raw[:n] # It's A Theorem that m.indent_level(t) == # n.indent_level(t) for all t >= 1 iff m.norm == n.norm. # is_simple # true iff raw[:n] is of the form (T*)(S*) def __init__(self, ws): self.raw = ws S, T = Whitespace.S, Whitespace.T count = [] b = n = nt = 0 for ch in self.raw: if ch == S: n = n + 1 b = b + 1 elif ch == T: n = n + 1 nt = nt + 1 if b >= len(count): count = count + [0] * (b - len(count) + 1) count[b] = count[b] + 1 b = 0 else: break self.n = n self.nt = nt self.norm = tuple(count), b self.is_simple = len(count) <= 1 # return length of longest contiguous run of spaces (whether or not # preceding a tab) def longest_run_of_spaces(self): count, trailing = self.norm return max(len(count)-1, trailing) def indent_level(self, tabsize): # count, il = self.norm # for i in range(len(count)): # if count[i]: # il = il + (i/tabsize + 1)*tabsize * count[i] # return il # quicker: # il = trailing + sum (i/ts + 1)*ts*count[i] = # trailing + ts * sum (i/ts + 1)*count[i] = # trailing + ts * sum i/ts*count[i] + count[i] = # trailing + ts * [(sum i/ts*count[i]) + (sum count[i])] = # trailing + ts * [(sum i/ts*count[i]) + num_tabs] # and note that i/ts*count[i] is 0 when i < ts count, trailing = self.norm il = 0 for i in range(tabsize, len(count)): il = il + i/tabsize * count[i] return trailing + tabsize * (il + self.nt) # return true iff self.indent_level(t) == other.indent_level(t) # for all t >= 1 def equal(self, other): return self.norm == other.norm # return a list of tuples (ts, i1, i2) such that # i1 == self.indent_level(ts) != other.indent_level(ts) == i2. # Intended to be used after not self.equal(other) is known, in which # case it will return at least one witnessing tab size. def not_equal_witness(self, other): n = max(self.longest_run_of_spaces(), other.longest_run_of_spaces()) + 1 a = [] for ts in range(1, n+1): if self.indent_level(ts) != other.indent_level(ts): a.append( (ts, self.indent_level(ts), other.indent_level(ts)) ) return a # Return True iff self.indent_level(t) < other.indent_level(t) # for all t >= 1. # The algorithm is due to Vincent Broman. # Easy to prove it's correct. # XXXpost that. # Trivial to prove n is sharp (consider T vs ST). # Unknown whether there's a faster general way. I suspected so at # first, but no longer. # For the special (but common!) case where M and N are both of the # form (T*)(S*), M.less(N) iff M.len() < N.len() and # M.num_tabs() <= N.num_tabs(). Proof is easy but kinda long-winded. # XXXwrite that up. # Note that M is of the form (T*)(S*) iff len(M.norm[0]) <= 1. def less(self, other): if self.n >= other.n: return False if self.is_simple and other.is_simple: return self.nt <= other.nt n = max(self.longest_run_of_spaces(), other.longest_run_of_spaces()) + 1 # the self.n >= other.n test already did it for ts=1 for ts in range(2, n+1): if self.indent_level(ts) >= other.indent_level(ts): return False return True # return a list of tuples (ts, i1, i2) such that # i1 == self.indent_level(ts) >= other.indent_level(ts) == i2. # Intended to be used after not self.less(other) is known, in which # case it will return at least one witnessing tab size. 
def not_less_witness(self, other): n = max(self.longest_run_of_spaces(), other.longest_run_of_spaces()) + 1 a = [] for ts in range(1, n+1): if self.indent_level(ts) >= other.indent_level(ts): a.append( (ts, self.indent_level(ts), other.indent_level(ts)) ) return a def format_witnesses(w): firsts = map(lambda tup: str(tup[0]), w) prefix = "at tab size" if len(w) > 1: prefix = prefix + "s" return prefix + " " + ', '.join(firsts) def process_tokens(tokens): INDENT = tokenize.INDENT DEDENT = tokenize.DEDENT NEWLINE = tokenize.NEWLINE JUNK = tokenize.COMMENT, tokenize.NL indents = [Whitespace("")] check_equal = 0 for (type, token, start, end, line) in tokens: if type == NEWLINE: # a program statement, or ENDMARKER, will eventually follow, # after some (possibly empty) run of tokens of the form # (NL | COMMENT)* (INDENT | DEDENT+)? # If an INDENT appears, setting check_equal is wrong, and will # be undone when we see the INDENT. check_equal = 1 elif type == INDENT: check_equal = 0 thisguy = Whitespace(token) if not indents[-1].less(thisguy): witness = indents[-1].not_less_witness(thisguy) msg = "indent not greater e.g. " + format_witnesses(witness) raise NannyNag(start[0], msg, line) indents.append(thisguy) elif type == DEDENT: # there's nothing we need to check here! what's important is # that when the run of DEDENTs ends, the indentation of the # program statement (or ENDMARKER) that triggered the run is # equal to what's left at the top of the indents stack # Ouch! This assert triggers if the last line of the source # is indented *and* lacks a newline -- then DEDENTs pop out # of thin air. # assert check_equal # else no earlier NEWLINE, or an earlier INDENT check_equal = 1 del indents[-1] elif check_equal and type not in JUNK: # this is the first "real token" following a NEWLINE, so it # must be the first token of the next program statement, or an # ENDMARKER; the "line" argument exposes the leading whitespace # for this statement; in the case of ENDMARKER, line is an empty # string, so will properly match the empty string with which the # "indents" stack was seeded check_equal = 0 thisguy = Whitespace(line) if not indents[-1].equal(thisguy): witness = indents[-1].not_equal_witness(thisguy) msg = "indent not equal e.g. " + format_witnesses(witness) raise NannyNag(start[0], msg, line) if __name__ == '__main__': main()
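As the module docstring notes, tabnanny is meant to be run as a script but can also be imported and driven through check(). A quick sketch (Python 2, to match the module; the paths are placeholders):

import tabnanny

tabnanny.verbose = 1               # print per-file diagnostics
tabnanny.check('some_module.py')   # check a single source file
tabnanny.check('some_package')     # recursively check .py files in a tree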
szlaozhu/twister-core
refs/heads/master
libtorrent/dht_flood.py
15
#! /usr/bin/env python

import socket
import sys
from types import StringType, IntType, LongType, DictType, ListType, TupleType
import random

port = int(sys.argv[1])

# from BitTorrent 4.3.0
def encode_bencached(x, r):
    r.append(x.bencoded)

def encode_int(x, r):
    r.extend(('i', str(x), 'e'))

def encode_string(x, r):
    r.extend((str(len(x)), ':', x))

def encode_list(x, r):
    r.append('l')
    for i in x:
        encode_func[type(i)](i, r)
    r.append('e')

def encode_dict(x, r):
    r.append('d')
    ilist = x.items()
    ilist.sort()
    for k, v in ilist:
        r.extend((str(len(k)), ':', k))
        encode_func[type(v)](v, r)
    r.append('e')

encode_func = {}
encode_func[IntType] = encode_int
encode_func[LongType] = encode_int
encode_func[StringType] = encode_string
encode_func[ListType] = encode_list
encode_func[TupleType] = encode_list
encode_func[DictType] = encode_dict

def bencode(x):
    r = []
    encode_func[type(x)](x, r)
    return ''.join(r)

def send_dht_message(msg):
    s.sendto(bencode(msg), 0, ('127.0.0.1', port))

def random_key():
    ret = ''
    for i in range(0, 20):
        ret += chr(random.randint(0, 255))
    return ret

s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
node_id = '1' * 20
query = 'getPeers'

print 'test random info-hashes'
for i in xrange(1, 30000):
    send_dht_message({'x': {'id': node_id, 'infoHash': random_key()}, 'q': query, 'z': 'q', 't': '%d' % i})

print 'test random peer-ids'
for i in xrange(1, 30000):
    send_dht_message({'x': {'id': random_key(), 'infoHash': random_key()}, 'q': query, 'z': 'q', 't': '%d' % i})
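The hand-rolled encoder above implements standard bencoding (integers as i<n>e, strings as <len>:<bytes>, lists as l...e, dicts as d...e with keys sorted), so the envelope of one flood message serializes as shown below (Python 2, reusing the bencode defined above; the message is a trimmed-down illustration):

# 'd' + '1:q' '8:getPeers' + '1:t' '1:1' + 'e' -> keys emitted in sorted order
print bencode({'q': 'getPeers', 't': '1'})
# prints: d1:q8:getPeers1:t1:1e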
rdhyee/osf.io
refs/heads/develop
website/addons/forward/tests/test_utils.py
15
# -*- coding: utf-8 -*- """Tests for website.addons.forward.utils.""" from nose.tools import * # PEP8 asserts from tests.base import OsfTestCase from website.addons.forward.tests.factories import ForwardSettingsFactory from website.addons.forward import utils class TestUtils(OsfTestCase): def test_serialize_settings(self): node_settings = ForwardSettingsFactory() serialized = utils.serialize_settings(node_settings) assert_equal( serialized, { 'url': node_settings.url, 'label': node_settings.label, } ) def test_settings_complete_true(self): node_settings = ForwardSettingsFactory() assert_true(utils.settings_complete(node_settings)) def test_settings_complete_true_no_redirect(self): """Regression test: Model can be complete when `redirect_bool` is False. """ node_settings = ForwardSettingsFactory(redirect_bool=False) assert_true(utils.settings_complete(node_settings)) def test_settings_complete_false(self): node_settings = ForwardSettingsFactory(url=None) assert_false(utils.settings_complete(node_settings))
darmaa/odoo
refs/heads/master
addons/l10n_be/wizard/l10n_be_vat_intra.py
33
# -*- coding: utf-8 -*- ############################################################################## # # OpenERP, Open Source Management Solution # Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>). # # Adapted by Noviat to # - make the 'mand_id' field optional # - support Noviat tax code scheme # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## import time import base64 from openerp.osv import fields, osv from openerp.tools.translate import _ from openerp.report import report_sxw class partner_vat_intra(osv.osv_memory): """ Partner Vat Intra """ _name = "partner.vat.intra" _description = 'Partner VAT Intra' def _get_xml_data(self, cr, uid, context=None): if context.get('file_save', False): return base64.encodestring(context['file_save'].encode('utf8')) return '' def _get_europe_country(self, cursor, user, context=None): return self.pool.get('res.country').search(cursor, user, [('code', 'in', ['AT', 'BG', 'CY', 'CZ', 'DK', 'EE', 'FI', 'FR', 'DE', 'GR', 'HU', 'IE', 'IT', 'LV', 'LT', 'LU', 'MT', 'NL', 'PL', 'PT', 'RO', 'SK', 'SI', 'ES', 'SE', 'GB'])]) _columns = { 'name': fields.char('File Name', size=32), 'period_code': fields.char('Period Code',size = 6,required = True, help = '''This is where you have to set the period code for the intracom declaration using the format: ppyyyy PP can stand for a month: from '01' to '12'. PP can stand for a trimester: '31','32','33','34' The first figure means that it is a trimester, The second figure identifies the trimester. PP can stand for a complete fiscal year: '00'. YYYY stands for the year (4 positions).
''' ), 'period_ids': fields.many2many('account.period', 'account_period_rel', 'acc_id', 'period_id', 'Period (s)', help = 'Select here the period(s) you want to include in your intracom declaration'), 'tax_code_id': fields.many2one('account.tax.code', 'Company', domain=[('parent_id', '=', False)], help="Keep empty to use the user's company", required=True), 'test_xml': fields.boolean('Test XML file', help="Sets the XML output as test file"), 'mand_id' : fields.char('Reference', size=14, help="Reference given by the Representative of the sending company."), 'msg': fields.text('File created', size=14, readonly=True), 'no_vat': fields.text('Partner With No VAT', size=14, readonly=True, help="The Partner whose VAT number is not defined and they are not included in XML File."), 'file_save' : fields.binary('Save File', readonly=True), 'country_ids': fields.many2many('res.country', 'vat_country_rel', 'vat_id', 'country_id', 'European Countries'), 'comments': fields.text('Comments'), } def _get_tax_code(self, cr, uid, context=None): obj_tax_code = self.pool.get('account.tax.code') obj_user = self.pool.get('res.users') company_id = obj_user.browse(cr, uid, uid, context=context).company_id.id tax_code_ids = obj_tax_code.search(cr, uid, [('company_id', '=', company_id), ('parent_id', '=', False)], context=context) return tax_code_ids and tax_code_ids[0] or False _defaults = { 'country_ids': _get_europe_country, 'file_save': _get_xml_data, 'name': 'vat_intra.xml', 'tax_code_id': _get_tax_code, } def _get_datas(self, cr, uid, ids, context=None): """Collects required data for the VAT intra XML :param ids: id of wizard. :return: dict of all data to be used to generate xml for Partner VAT Intra. :rtype: dict """ if context is None: context = {} obj_user = self.pool.get('res.users') obj_sequence = self.pool.get('ir.sequence') obj_partner = self.pool.get('res.partner') xmldict = {} post_code = street = city = country = data_clientinfo = '' seq = amount_sum = 0 wiz_data = self.browse(cr, uid, ids[0], context=context) comments = wiz_data.comments if wiz_data.tax_code_id: data_company = wiz_data.tax_code_id.company_id else: data_company = obj_user.browse(cr, uid, uid, context=context).company_id # Get Company vat company_vat = data_company.partner_id.vat if not company_vat: raise osv.except_osv(_('Insufficient Data!'),_('No VAT number associated with your company.')) company_vat = company_vat.replace(' ','').upper() issued_by = company_vat[:2] if len(wiz_data.period_code) != 6: raise osv.except_osv(_('Error!'), _('Period code is not valid.')) if not wiz_data.period_ids: raise osv.except_osv(_('Insufficient Data!'),_('Please select at least one Period.')) p_id_list = obj_partner.search(cr, uid, [('vat','!=',False)], context=context) if not p_id_list: raise osv.except_osv(_('Insufficient Data!'),_('No partner has a VAT number associated with him.')) seq_declarantnum = obj_sequence.get(cr, uid, 'declarantnum') dnum = company_vat[2:] + seq_declarantnum[-4:] addr = obj_partner.address_get(cr, uid, [data_company.partner_id.id], ['invoice']) email = data_company.partner_id.email or '' phone = data_company.partner_id.phone or '' if addr.get('invoice',False): ads = obj_partner.browse(cr, uid, [addr['invoice']])[0] city = (ads.city or '') post_code = (ads.zip or '') if ads.street: street = ads.street if ads.street2: street += ' ' street += ads.street2 if ads.country_id: country = ads.country_id.code if not country: country = company_vat[:2] if not email: raise osv.except_osv(_('Insufficient Data!'),_('No email address
associated with the company.')) if not phone: raise osv.except_osv(_('Insufficient Data!'),_('No phone associated with the company.')) xmldict.update({ 'company_name': data_company.name, 'company_vat': company_vat, 'vatnum': company_vat[2:], 'mand_id': wiz_data.mand_id, 'sender_date': str(time.strftime('%Y-%m-%d')), 'street': street, 'city': city, 'post_code': post_code, 'country': country, 'email': email, 'phone': phone.replace('/','').replace('.','').replace('(','').replace(')','').replace(' ',''), 'period': wiz_data.period_code, 'clientlist': [], 'comments': comments, 'issued_by': issued_by, }) #tax code 44: services #tax code 46L: normal good deliveries #tax code 46T: ABC good deliveries #tax code 48xxx: credit note on tax code xxx codes = ('44', '46L', '46T', '48s44', '48s46L', '48s46T') cr.execute('''SELECT p.name As partner_name, l.partner_id AS partner_id, p.vat AS vat, (CASE WHEN t.code = '48s44' THEN '44' WHEN t.code = '48s46L' THEN '46L' WHEN t.code = '48s46T' THEN '46T' ELSE t.code END) AS intra_code, SUM(CASE WHEN t.code in ('48s44','48s46L','48s46T') THEN -l.tax_amount ELSE l.tax_amount END) AS amount FROM account_move_line l LEFT JOIN account_tax_code t ON (l.tax_code_id = t.id) LEFT JOIN res_partner p ON (l.partner_id = p.id) WHERE t.code IN %s AND l.period_id IN %s AND t.company_id = %s GROUP BY p.name, l.partner_id, p.vat, intra_code''', (codes, tuple([p.id for p in wiz_data.period_ids]), data_company.id)) p_count = 0 for row in cr.dictfetchall(): if not row['vat']: row['vat'] = '' p_count += 1 seq += 1 amt = row['amount'] or 0.0 amount_sum += amt intra_code = row['intra_code'] == '44' and 'S' or (row['intra_code'] == '46L' and 'L' or (row['intra_code'] == '46T' and 'T' or '')) xmldict['clientlist'].append({ 'partner_name': row['partner_name'], 'seq': seq, 'vatnum': row['vat'][2:].replace(' ','').upper(), 'vat': row['vat'], 'country': row['vat'][:2], 'amount': round(amt,2), 'intra_code': row['intra_code'], 'code': intra_code}) xmldict.update({'dnum': dnum, 'clientnbr': str(seq), 'amountsum': round(amount_sum,2), 'partner_wo_vat': p_count}) return xmldict def create_xml(self, cursor, user, ids, context=None): """Creates the XML that is to be exported and sent to the state for partner VAT intra. :return: Value for next action. :rtype: dict """ mod_obj = self.pool.get('ir.model.data') xml_data = self._get_datas(cursor, user, ids, context=context) month_quarter = xml_data['period'][:2] year = xml_data['period'][2:] data_file = '' # Can't we do this by etree?
data_head = """<?xml version="1.0" encoding="ISO-8859-1"?> <ns2:IntraConsignment xmlns="http://www.minfin.fgov.be/InputCommon" xmlns:ns2="http://www.minfin.fgov.be/IntraConsignment" IntraListingsNbr="1"> <ns2:Representative> <RepresentativeID identificationType="NVAT" issuedBy="%(issued_by)s">%(company_vat)s</RepresentativeID> <Name>%(company_name)s</Name> <Street>%(street)s</Street> <PostCode>%(post_code)s</PostCode> <City>%(city)s</City> <CountryCode>%(country)s</CountryCode> <EmailAddress>%(email)s</EmailAddress> <Phone>%(phone)s</Phone> </ns2:Representative>""" % (xml_data) if xml_data['mand_id']: data_head += '\n\t\t<ns2:RepresentativeReference>%(mand_id)s</ns2:RepresentativeReference>' % (xml_data) data_comp_period = '\n\t\t<ns2:Declarant>\n\t\t\t<VATNumber>%(vatnum)s</VATNumber>\n\t\t\t<Name>%(company_name)s</Name>\n\t\t\t<Street>%(street)s</Street>\n\t\t\t<PostCode>%(post_code)s</PostCode>\n\t\t\t<City>%(city)s</City>\n\t\t\t<CountryCode>%(country)s</CountryCode>\n\t\t\t<EmailAddress>%(email)s</EmailAddress>\n\t\t\t<Phone>%(phone)s</Phone>\n\t\t</ns2:Declarant>' % (xml_data) if month_quarter.startswith('3'): data_comp_period += '\n\t\t<ns2:Period>\n\t\t\t<ns2:Quarter>'+month_quarter[1]+'</ns2:Quarter> \n\t\t\t<ns2:Year>'+year+'</ns2:Year>\n\t\t</ns2:Period>' elif month_quarter.startswith('0') and month_quarter.endswith('0'): data_comp_period+= '\n\t\t<ns2:Period>\n\t\t\t<ns2:Year>'+year+'</ns2:Year>\n\t\t</ns2:Period>' else: data_comp_period += '\n\t\t<ns2:Period>\n\t\t\t<ns2:Month>'+month_quarter+'</ns2:Month> \n\t\t\t<ns2:Year>'+year+'</ns2:Year>\n\t\t</ns2:Period>' data_clientinfo = '' for client in xml_data['clientlist']: if not client['vatnum']: raise osv.except_osv(_('Insufficient Data!'),_('No vat number defined for %s.') % client['partner_name']) data_clientinfo +='\n\t\t<ns2:IntraClient SequenceNumber="%(seq)s">\n\t\t\t<ns2:CompanyVATNumber issuedBy="%(country)s">%(vatnum)s</ns2:CompanyVATNumber>\n\t\t\t<ns2:Code>%(code)s</ns2:Code>\n\t\t\t<ns2:Amount>%(amount).2f</ns2:Amount>\n\t\t</ns2:IntraClient>' % (client) data_decl = '\n\t<ns2:IntraListing SequenceNumber="1" ClientsNbr="%(clientnbr)s" DeclarantReference="%(dnum)s" AmountSum="%(amountsum).2f">' % (xml_data) data_file += data_head + data_decl + data_comp_period + data_clientinfo + '\n\t\t<ns2:Comment>%(comments)s</ns2:Comment>\n\t</ns2:IntraListing>\n</ns2:IntraConsignment>' % (xml_data) context['file_save'] = data_file model_data_ids = mod_obj.search(cursor, user,[('model','=','ir.ui.view'),('name','=','view_vat_intra_save')], context=context) resource_id = mod_obj.read(cursor, user, model_data_ids, fields=['res_id'], context=context)[0]['res_id'] return { 'name': _('Save'), 'context': context, 'view_type': 'form', 'view_mode': 'form', 'res_model': 'partner.vat.intra', 'views': [(resource_id,'form')], 'view_id': 'view_vat_intra_save', 'type': 'ir.actions.act_window', 'target': 'new', } def preview(self, cr, uid, ids, context=None): xml_data = self._get_datas(cr, uid, ids, context=context) datas = { 'ids': [], 'model': 'partner.vat.intra', 'form': xml_data } return self.pool['report'].get_action( cr, uid, [], 'l10n_be.report_l10nvatintraprint', data=datas, context=context ) class vat_intra_print(report_sxw.rml_parse): def __init__(self, cr, uid, name, context): super(vat_intra_print, self).__init__(cr, uid, name, context=context) self.localcontext.update({ 'time': time, }) class wrapped_vat_intra_print(osv.AbstractModel): _name = 'report.l10n_be.report_l10nvatintraprint' _inherit = 'report.abstract_report' _template = 
'l10n_be.report_l10nvatintraprint' _wrapped_report_class = vat_intra_print # vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
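The trailing "Can't we do this by etree?" comment in create_xml() above is a fair question. A minimal sketch using the standard library's xml.etree.ElementTree follows; it covers only the Representative block, and the xml_data dict is assumed to carry the same keys _get_datas() produces above.

import xml.etree.ElementTree as ET

NS2 = 'http://www.minfin.fgov.be/IntraConsignment'

def build_intra_xml(xml_data):
    # Register the ns2 prefix so serialization matches the string template above.
    ET.register_namespace('ns2', NS2)
    root = ET.Element('{%s}IntraConsignment' % NS2, IntraListingsNbr='1')
    rep = ET.SubElement(root, '{%s}Representative' % NS2)
    rep_id = ET.SubElement(rep, 'RepresentativeID',
                           identificationType='NVAT',
                           issuedBy=xml_data['issued_by'])
    rep_id.text = xml_data['company_vat']
    for tag, key in (('Name', 'company_name'), ('Street', 'street'),
                     ('PostCode', 'post_code'), ('City', 'city'),
                     ('CountryCode', 'country'), ('EmailAddress', 'email'),
                     ('Phone', 'phone')):
        ET.SubElement(rep, tag).text = xml_data[key]
    # The Declarant, Period, IntraListing and IntraClient elements would be
    # appended the same way, mirroring the string templates above.
    return ET.tostring(root, encoding='ISO-8859-1')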
seek-ai/esengine
refs/heads/master
esengine/utils/validation.py
2
# coding: utf-8 from esengine.exceptions import ClientError def validate_client(es): """ A valid ES client is an interface which must implement at least the public methods "index", "search" and "get", preferably an elasticsearch.Elasticsearch() instance. :param es: client instance to validate :return: None """ if not es: raise ClientError("ES client cannot be NoneType") try: if not callable(es.index) or not callable(es.search) or \ not callable(es.get): raise ClientError( "index, search or get interface is not callable" ) except AttributeError as e: raise ClientError(str(e)) class FieldValidator(object): def __init__(self): self.validation = [] def validate_value(self, field, value): pass def validate_item(self, field, item): pass def __call__(self, field, value): self.validate_value(field, value) if field._multi: for item in value: self.validate_item(field, item) return self.validation
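A minimal usage sketch for validate_client, assuming the function above is in scope; FakeClient is a hypothetical stub used only to illustrate the accepted interface.

class FakeClient(object):
    # Any object exposing callable index(), search() and get() passes.
    def index(self, *args, **kwargs): pass
    def search(self, *args, **kwargs): pass
    def get(self, *args, **kwargs): pass

validate_client(FakeClient())    # passes silently
# validate_client(None)          # raises ClientError: client cannot be NoneType
# validate_client(object())      # raises ClientError: interface is missing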
overtherain/scriptfile
refs/heads/master
software/googleAppEngine/lib/django_1_3/tests/regressiontests/forms/localflavor/ie.py
89
from django.contrib.localflavor.ie.forms import IECountySelect from utils import LocalFlavorTestCase class IELocalFlavorTests(LocalFlavorTestCase): def test_IECountySelect(self): f = IECountySelect() out = u'''<select name="counties"> <option value="antrim">Antrim</option> <option value="armagh">Armagh</option> <option value="carlow">Carlow</option> <option value="cavan">Cavan</option> <option value="clare">Clare</option> <option value="cork">Cork</option> <option value="derry">Derry</option> <option value="donegal">Donegal</option> <option value="down">Down</option> <option value="dublin" selected="selected">Dublin</option> <option value="fermanagh">Fermanagh</option> <option value="galway">Galway</option> <option value="kerry">Kerry</option> <option value="kildare">Kildare</option> <option value="kilkenny">Kilkenny</option> <option value="laois">Laois</option> <option value="leitrim">Leitrim</option> <option value="limerick">Limerick</option> <option value="longford">Longford</option> <option value="louth">Louth</option> <option value="mayo">Mayo</option> <option value="meath">Meath</option> <option value="monaghan">Monaghan</option> <option value="offaly">Offaly</option> <option value="roscommon">Roscommon</option> <option value="sligo">Sligo</option> <option value="tipperary">Tipperary</option> <option value="tyrone">Tyrone</option> <option value="waterford">Waterford</option> <option value="westmeath">Westmeath</option> <option value="wexford">Wexford</option> <option value="wicklow">Wicklow</option> </select>''' self.assertEqual(f.render('counties', 'dublin'), out)
fgaudin/aemanager
refs/heads/master
accounts/forms.py
1
from django.forms import ModelForm from accounts.models import Expense, Invoice, InvoiceRow, INVOICE_STATE_PAID, \ MAX_INVOICE_ID from django import forms from django.utils.translation import ugettext_lazy as _ class ExpenseForm(ModelForm): amount = forms.DecimalField(max_digits=12, decimal_places=2, label=_('Amount'), localize=True) class Meta: model = Expense exclude = ['owner', 'uuid'] def __init__(self, *args, **kwargs): super(ExpenseForm, self).__init__(*args, **kwargs) self.fields['date'].widget.attrs['class'] = 'date' class InvoiceForm(ModelForm): invoice_id = forms.IntegerField(max_value=MAX_INVOICE_ID, label=_('Invoice id'), help_text=_('An integer less than or equal to %d. Must be sequential.') % (MAX_INVOICE_ID)) penalty_rate = forms.DecimalField(max_digits=4, decimal_places=2, label=_('Penalty rate'), localize=True, required=False, help_text=_('at least three times the <a href="%(french_rate)s">legal rate</a> or, by default, the <a href="%(ecb_rate)s">rate applied by the European Central Bank</a> to its most recent refinancing operation plus 10 points') % {'french_rate': 'http://www.minefe.gouv.fr/directions_services/dgtpe/taux/taux_legal.php', 'ecb_rate': 'http://fr.global-rates.com/taux-de-interets/banques-centrales/banque-centrale-europeenne/taux-de-bce.aspx'}) class Meta: model = Invoice exclude = ['owner', 'uuid', 'proposal', 'amount'] def __init__(self, *args, **kwargs): super(InvoiceForm, self).__init__(*args, **kwargs) self.fields['edition_date'].widget.attrs['class'] = 'date' self.fields['payment_date'].widget.attrs['class'] = 'date' self.fields['paid_date'].widget.attrs['class'] = 'date' self.fields['execution_begin_date'].widget.attrs['class'] = 'date' self.fields['execution_end_date'].widget.attrs['class'] = 'date' self.fields['penalty_date'].widget.attrs['class'] = 'date' self.fields['footer_note'].widget.attrs['size'] = '90' def clean(self): super(InvoiceForm, self).clean() cleaned_data = self.cleaned_data state = cleaned_data.get("state") paid_date = cleaned_data.get("paid_date") if state == INVOICE_STATE_PAID and not paid_date: msg = _('This field is required since invoice state is set to "paid".') self._errors["paid_date"] = self.error_class([msg]) del cleaned_data["paid_date"] payment_type = cleaned_data.get('payment_type') if state == INVOICE_STATE_PAID and not payment_type: msg = _('This field is required since invoice state is set to "paid".') self._errors["payment_type"] = self.error_class([msg]) del cleaned_data["payment_type"] return cleaned_data class InvoiceRowForm(ModelForm): quantity = forms.DecimalField(max_digits=6, decimal_places=2, label=_('Quantity'), localize=True) unit_price = forms.DecimalField(max_digits=12, decimal_places=2, label=_('Unit price'), localize=True) class Meta: model = InvoiceRow exclude = ['owner', 'uuid'] def __init__(self, *args, **kwargs): super(InvoiceRowForm, self).__init__(*args, **kwargs) self.fields['label'].widget.attrs['class'] = 'label-field' self.fields['proposal'].widget.attrs['class'] = 'proposal-field' self.fields['balance_payments'].widget.attrs['class'] = 'balance-payments-field' self.fields['category'].widget.attrs['class'] = 'category-field' self.fields['quantity'].widget.attrs['class'] = 'quantity-field' self.fields['unit_price'].widget.attrs['class'] = 'unit-price-field' self.fields['vat_rate'].widget.attrs['class'] = 'vat-rate-field' self.fields['detail'].widget.attrs['class'] = 'row-detail'
brianv0/sqlalchemy
refs/heads/master
lib/sqlalchemy/orm/interfaces.py
51
# orm/interfaces.py # Copyright (C) 2005-2015 the SQLAlchemy authors and contributors # <see AUTHORS file> # # This module is part of SQLAlchemy and is released under # the MIT License: http://www.opensource.org/licenses/mit-license.php """ Contains various base classes used throughout the ORM. Defines some key base classes prominent within the internals, as well as the now-deprecated ORM extension classes. Other than the deprecated extensions, this module and the classes within are mostly private, though some attributes are exposed when inspecting mappings. """ from __future__ import absolute_import from .. import util from ..sql import operators from .base import (ONETOMANY, MANYTOONE, MANYTOMANY, EXT_CONTINUE, EXT_STOP, NOT_EXTENSION) from .base import (InspectionAttr, InspectionAttrInfo, _MappedAttribute) import collections from .. import inspect # imported later MapperExtension = SessionExtension = AttributeExtension = None __all__ = ( 'AttributeExtension', 'EXT_CONTINUE', 'EXT_STOP', 'ONETOMANY', 'MANYTOMANY', 'MANYTOONE', 'NOT_EXTENSION', 'LoaderStrategy', 'MapperExtension', 'MapperOption', 'MapperProperty', 'PropComparator', 'SessionExtension', 'StrategizedProperty', ) class MapperProperty(_MappedAttribute, InspectionAttr, util.MemoizedSlots): """Represent a particular class attribute mapped by :class:`.Mapper`. The most common occurrences of :class:`.MapperProperty` are the mapped :class:`.Column`, which is represented in a mapping as an instance of :class:`.ColumnProperty`, and a reference to another class produced by :func:`.relationship`, represented in the mapping as an instance of :class:`.RelationshipProperty`. """ __slots__ = ( '_configure_started', '_configure_finished', 'parent', 'key', 'info' ) cascade = frozenset() """The set of 'cascade' attribute names. This collection is checked before the 'cascade_iterator' method is called. The collection typically only applies to a RelationshipProperty. """ is_property = True """Part of the InspectionAttr interface; states this object is a mapper property. """ def _memoized_attr_info(self): """Info dictionary associated with the object, allowing user-defined data to be associated with this :class:`.InspectionAttr`. The dictionary is generated when first accessed. Alternatively, it can be specified as a constructor argument to the :func:`.column_property`, :func:`.relationship`, or :func:`.composite` functions. .. versionadded:: 0.8 Added support for .info to all :class:`.MapperProperty` subclasses. .. versionchanged:: 1.0.0 :attr:`.MapperProperty.info` is also available on extension types via the :attr:`.InspectionAttrInfo.info` attribute, so that it can apply to a wider variety of ORM and extension constructs. .. seealso:: :attr:`.QueryableAttribute.info` :attr:`.SchemaItem.info` """ return {} def setup(self, context, entity, path, adapter, **kwargs): """Called by Query for the purposes of constructing a SQL statement. Each MapperProperty associated with the target mapper processes the statement referenced by the query context, adding columns and/or criterion as appropriate. """ def create_row_processor(self, context, path, mapper, result, adapter, populators): """Produce row processing functions and append to the given set of populators lists. """ def cascade_iterator(self, type_, state, visited_instances=None, halt_on=None): """Iterate through instances related to the given instance for a particular 'cascade', starting with this MapperProperty. Return an iterator of 3-tuples (instance, mapper, state).
Note that the 'cascade' collection on this MapperProperty is checked first for the given type before cascade_iterator is called. This method typically only applies to RelationshipProperty. """ return iter(()) def set_parent(self, parent, init): """Set the parent mapper that references this MapperProperty. This method is overridden by some subclasses to perform extra setup when the mapper is first known. """ self.parent = parent def instrument_class(self, mapper): """Hook called by the Mapper to the property to initiate instrumentation of the class attribute managed by this MapperProperty. The MapperProperty here will typically call out to the attributes module to set up an InstrumentedAttribute. This step is the first of two steps to set up an InstrumentedAttribute, and is called early in the mapper setup process. The second step is typically the init_class_attribute step, called from StrategizedProperty via the post_instrument_class() hook. This step assigns additional state to the InstrumentedAttribute (specifically the "impl") which has been determined after the MapperProperty has determined what kind of persistence management it needs to do (e.g. scalar, object, collection, etc). """ def __init__(self): self._configure_started = False self._configure_finished = False def init(self): """Called after all mappers are created to assemble relationships between mappers and perform other post-mapper-creation initialization steps. """ self._configure_started = True self.do_init() self._configure_finished = True @property def class_attribute(self): """Return the class-bound descriptor corresponding to this :class:`.MapperProperty`. This is basically a ``getattr()`` call:: return getattr(self.parent.class_, self.key) I.e. if this :class:`.MapperProperty` were named ``addresses``, and the class to which it is mapped is ``User``, this sequence is possible:: >>> from sqlalchemy import inspect >>> mapper = inspect(User) >>> addresses_property = mapper.attrs.addresses >>> addresses_property.class_attribute is User.addresses True >>> User.addresses.property is addresses_property True """ return getattr(self.parent.class_, self.key) def do_init(self): """Perform subclass-specific initialization post-mapper-creation steps. This is a template method called by the ``MapperProperty`` object's init() method. """ def post_instrument_class(self, mapper): """Perform instrumentation adjustments that need to occur after init() has completed. The given Mapper is the Mapper invoking the operation, which may not be the same Mapper as self.parent in an inheritance scenario; however, Mapper will always at least be a sub-mapper of self.parent. This method is typically used by StrategizedProperty, which delegates it to LoaderStrategy.init_class_attribute() to perform final setup on the class-bound InstrumentedAttribute. """ def merge(self, session, source_state, source_dict, dest_state, dest_dict, load, _recursive): """Merge the attribute represented by this ``MapperProperty`` from source to destination object. """ def __repr__(self): return '<%s at 0x%x; %s>' % ( self.__class__.__name__, id(self), getattr(self, 'key', 'no key')) class PropComparator(operators.ColumnOperators): """Defines SQL operators for :class:`.MapperProperty` objects. SQLAlchemy allows for operators to be redefined at both the Core and ORM level. :class:`.PropComparator` is the base class of operator redefinition for ORM-level operations, including those of :class:`.ColumnProperty`, :class:`.RelationshipProperty`, and :class:`.CompositeProperty`. 
.. note:: With the advent of Hybrid properties introduced in SQLAlchemy 0.7, as well as Core-level operator redefinition in SQLAlchemy 0.8, the use case for user-defined :class:`.PropComparator` instances is extremely rare. See :ref:`hybrids_toplevel` as well as :ref:`types_operators`. User-defined subclasses of :class:`.PropComparator` may be created. The built-in Python comparison and math operator methods, such as :meth:`.operators.ColumnOperators.__eq__`, :meth:`.operators.ColumnOperators.__lt__`, and :meth:`.operators.ColumnOperators.__add__`, can be overridden to provide new operator behavior. The custom :class:`.PropComparator` is passed to the :class:`.MapperProperty` instance via the ``comparator_factory`` argument. In each case, the appropriate subclass of :class:`.PropComparator` should be used:: # definition of custom PropComparator subclasses from sqlalchemy.orm.properties import \\ ColumnProperty,\\ CompositeProperty,\\ RelationshipProperty class MyColumnComparator(ColumnProperty.Comparator): def __eq__(self, other): return self.__clause_element__() == other class MyRelationshipComparator(RelationshipProperty.Comparator): def any(self, expression): "define the 'any' operation" # ... class MyCompositeComparator(CompositeProperty.Comparator): def __gt__(self, other): "redefine the 'greater than' operation" return sql.and_(*[a>b for a, b in zip(self.__clause_element__().clauses, other.__composite_values__())]) # application of custom PropComparator subclasses from sqlalchemy.orm import column_property, relationship, composite from sqlalchemy import Column, String class SomeMappedClass(Base): some_column = column_property(Column("some_column", String), comparator_factory=MyColumnComparator) some_relationship = relationship(SomeOtherClass, comparator_factory=MyRelationshipComparator) some_composite = composite( Column("a", String), Column("b", String), comparator_factory=MyCompositeComparator ) Note that for column-level operator redefinition, it's usually simpler to define the operators at the Core level, using the :attr:`.TypeEngine.comparator_factory` attribute. See :ref:`types_operators` for more detail. See also: :class:`.ColumnProperty.Comparator` :class:`.RelationshipProperty.Comparator` :class:`.CompositeProperty.Comparator` :class:`.ColumnOperators` :ref:`types_operators` :attr:`.TypeEngine.comparator_factory` """ __slots__ = 'prop', 'property', '_parententity', '_adapt_to_entity' def __init__(self, prop, parentmapper, adapt_to_entity=None): self.prop = self.property = prop self._parententity = adapt_to_entity or parentmapper self._adapt_to_entity = adapt_to_entity def __clause_element__(self): raise NotImplementedError("%r" % self) def _query_clause_element(self): return self.__clause_element__() def adapt_to_entity(self, adapt_to_entity): """Return a copy of this PropComparator which will use the given :class:`.AliasedInsp` to produce corresponding expressions. """ return self.__class__(self.prop, self._parententity, adapt_to_entity) @property def _parentmapper(self): """legacy; this is renamed to _parententity to be compatible with QueryableAttribute.""" return inspect(self._parententity).mapper @property def adapter(self): """Produce a callable that adapts column expressions to suit an aliased version of this comparator. 
""" if self._adapt_to_entity is None: return None else: return self._adapt_to_entity._adapt_element @property def info(self): return self.property.info @staticmethod def any_op(a, b, **kwargs): return a.any(b, **kwargs) @staticmethod def has_op(a, b, **kwargs): return a.has(b, **kwargs) @staticmethod def of_type_op(a, class_): return a.of_type(class_) def of_type(self, class_): """Redefine this object in terms of a polymorphic subclass. Returns a new PropComparator from which further criterion can be evaluated. e.g.:: query.join(Company.employees.of_type(Engineer)).\\ filter(Engineer.name=='foo') :param \class_: a class or mapper indicating that criterion will be against this specific subclass. """ return self.operate(PropComparator.of_type_op, class_) def any(self, criterion=None, **kwargs): """Return true if this collection contains any member that meets the given criterion. The usual implementation of ``any()`` is :meth:`.RelationshipProperty.Comparator.any`. :param criterion: an optional ClauseElement formulated against the member class' table or attributes. :param \**kwargs: key/value pairs corresponding to member class attribute names which will be compared via equality to the corresponding values. """ return self.operate(PropComparator.any_op, criterion, **kwargs) def has(self, criterion=None, **kwargs): """Return true if this element references a member which meets the given criterion. The usual implementation of ``has()`` is :meth:`.RelationshipProperty.Comparator.has`. :param criterion: an optional ClauseElement formulated against the member class' table or attributes. :param \**kwargs: key/value pairs corresponding to member class attribute names which will be compared via equality to the corresponding values. """ return self.operate(PropComparator.has_op, criterion, **kwargs) class StrategizedProperty(MapperProperty): """A MapperProperty which uses selectable strategies to affect loading behavior. There is a single strategy selected by default. Alternate strategies can be selected at Query time through the usage of ``StrategizedOption`` objects via the Query.options() method. The mechanics of StrategizedProperty are used for every Query invocation for every mapped attribute participating in that Query, to determine first how the attribute will be rendered in SQL and secondly how the attribute will retrieve a value from a result row and apply it to a mapped object. The routines here are very performance-critical. """ __slots__ = '_strategies', 'strategy' strategy_wildcard_key = None def _get_context_loader(self, context, path): load = None # use EntityRegistry.__getitem__()->PropRegistry here so # that the path is stated in terms of our base search_path = dict.__getitem__(path, self) # search among: exact match, "attr.*", "default" strategy # if any. 
for path_key in ( search_path._loader_key, search_path._wildcard_path_loader_key, search_path._default_path_loader_key ): if path_key in context.attributes: load = context.attributes[path_key] break return load def _get_strategy(self, key): try: return self._strategies[key] except KeyError: cls = self._strategy_lookup(*key) self._strategies[key] = self._strategies[ cls] = strategy = cls(self) return strategy def _get_strategy_by_cls(self, cls): return self._get_strategy(cls._strategy_keys[0]) def setup( self, context, entity, path, adapter, **kwargs): loader = self._get_context_loader(context, path) if loader and loader.strategy: strat = self._get_strategy(loader.strategy) else: strat = self.strategy strat.setup_query(context, entity, path, loader, adapter, **kwargs) def create_row_processor( self, context, path, mapper, result, adapter, populators): loader = self._get_context_loader(context, path) if loader and loader.strategy: strat = self._get_strategy(loader.strategy) else: strat = self.strategy strat.create_row_processor( context, path, loader, mapper, result, adapter, populators) def do_init(self): self._strategies = {} self.strategy = self._get_strategy_by_cls(self.strategy_class) def post_instrument_class(self, mapper): if not self.parent.non_primary and \ not mapper.class_manager._attr_has_impl(self.key): self.strategy.init_class_attribute(mapper) _all_strategies = collections.defaultdict(dict) @classmethod def strategy_for(cls, **kw): def decorate(dec_cls): # ensure each subclass of the strategy has its # own _strategy_keys collection if '_strategy_keys' not in dec_cls.__dict__: dec_cls._strategy_keys = [] key = tuple(sorted(kw.items())) cls._all_strategies[cls][key] = dec_cls dec_cls._strategy_keys.append(key) return dec_cls return decorate @classmethod def _strategy_lookup(cls, *key): for prop_cls in cls.__mro__: if prop_cls in cls._all_strategies: strategies = cls._all_strategies[prop_cls] try: return strategies[key] except KeyError: pass raise Exception("can't locate strategy for %s %s" % (cls, key)) class MapperOption(object): """Describe a modification to a Query.""" propagate_to_loaders = False """if True, indicate this option should be carried along to "secondary" Query objects produced during lazy loads or refresh operations. """ def process_query(self, query): """Apply a modification to the given :class:`.Query`.""" def process_query_conditionally(self, query): """same as process_query(), except that this option may not apply to the given query. This is typically used during a lazy load or scalar refresh operation to propagate options stated in the original Query to the new Query being used for the load. It occurs for those options that specify propagate_to_loaders=True. """ self.process_query(query) class LoaderStrategy(object): """Describe the loading behavior of a StrategizedProperty object. The ``LoaderStrategy`` interacts with the querying process in three ways: * it controls the configuration of the ``InstrumentedAttribute`` placed on a class to handle the behavior of the attribute. this may involve setting up class-level callable functions to fire off a select operation when the attribute is first accessed (i.e. a lazy load) * it processes the ``QueryContext`` at statement construction time, where it can modify the SQL statement that is being produced. For example, simple column attributes will add their represented column to the list of selected columns, a joined eager loader may establish join clauses to add to the statement. 
* It produces "row processor" functions at result fetching time. These "row processor" functions populate a particular attribute on a particular mapped instance. """ __slots__ = 'parent_property', 'is_class_level', 'parent', 'key' def __init__(self, parent): self.parent_property = parent self.is_class_level = False self.parent = self.parent_property.parent self.key = self.parent_property.key def init_class_attribute(self, mapper): pass def setup_query(self, context, entity, path, loadopt, adapter, **kwargs): """Establish column and other state for a given QueryContext. This method fulfills the contract specified by MapperProperty.setup(). StrategizedProperty delegates its setup() method directly to this method. """ def create_row_processor(self, context, path, loadopt, mapper, result, adapter, populators): """Establish row processing functions for a given QueryContext. This method fulfills the contract specified by MapperProperty.create_row_processor(). StrategizedProperty delegates its create_row_processor() method directly to this method. """ def __str__(self): return str(self.parent_property)
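The strategy_for()/_strategy_lookup() pair above implements a class-keyed registry: each strategy registers itself under a tuple of sorted keyword arguments, and lookup walks the property class's MRO so subclasses inherit registrations. A stripped-down, self-contained sketch of the same pattern follows; all names here are illustrative, not SQLAlchemy API.

import collections

class Prop(object):
    _all_strategies = collections.defaultdict(dict)

    @classmethod
    def strategy_for(cls, **kw):
        def decorate(dec_cls):
            # register under a hashable, order-independent key
            key = tuple(sorted(kw.items()))
            cls._all_strategies[cls][key] = dec_cls
            return dec_cls
        return decorate

    @classmethod
    def _strategy_lookup(cls, *key):
        # walk the MRO so subclasses see strategies registered on bases
        for prop_cls in cls.__mro__:
            if prop_cls in cls._all_strategies:
                try:
                    return cls._all_strategies[prop_cls][key]
                except KeyError:
                    pass
        raise Exception("can't locate strategy for %s %s" % (cls, key))

@Prop.strategy_for(lazy='select')
class SelectLoader(object):
    pass

assert Prop._strategy_lookup(('lazy', 'select')) is SelectLoader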
osh/gnuradio
refs/heads/master
docs/sphinx/hieroglyph/test/test_comments.py
72
import unittest from hieroglyph.hieroglyph import parse_hieroglyph_text from hieroglyph.errors import HieroglyphError class CommentTests(unittest.TestCase): def test_comment1(self): source = """Fetches rows from a Bigtable. This is a continuation of the opening paragraph. Retrieves rows pertaining to the given keys from the Table instance represented by big_table. Silly things may happen if other_silly_variable is not None. Args: big_table: An open Bigtable Table instance. keys: A sequence of strings representing the key of each table row to fetch. other_silly_variable (str): Another optional variable, that has a much longer name than the other args, and which does nothing. Returns: A dict mapping keys to the corresponding table row data fetched. Each row is represented as a tuple of strings. For example: {'Serak': ('Rigel VII', 'Preparer'), 'Zim': ('Irk', 'Invader'), 'Lrrr': ('Omicron Persei 8', 'Emperor')} If a key from the keys argument is missing from the dictionary, then that row was not found in the table. Raises: IOError: An error occurred accessing the bigtable.Table object. """ expected = """ Fetches rows from a Bigtable. This is a continuation of the opening paragraph. Retrieves rows pertaining to the given keys from the Table instance represented by big_table. Silly things may happen if other_silly_variable is not None. :param big_table: An open Bigtable Table instance. :param keys: A sequence of strings representing the key of each table row to fetch. :param other_silly_variable: Another optional variable, that has a much longer name than the other args, and which does nothing. :type other_silly_variable: str :returns: A dict mapping keys to the corresponding table row data fetched. Each row is represented as a tuple of strings. For example: {'Serak': ('Rigel VII', 'Preparer'), 'Zim': ('Irk', 'Invader'), 'Lrrr': ('Omicron Persei 8', 'Emperor')} If a key from the keys argument is missing from the dictionary, then that row was not found in the table. :raises: IOError - An error occurred accessing the bigtable.Table object. """ source_lines = source.splitlines() actual_lines = parse_hieroglyph_text(source_lines) expected_lines = expected.splitlines() self.assertEqual(len(actual_lines), len(expected_lines)) for actual_line, result_line in zip(actual_lines, expected_lines): if len(actual_line.strip()) == 0: self.assertTrue(len(result_line.strip()) == 0) else: self.assertEqual(actual_line, result_line) def test_comment2(self): source = """Determine if all elements in the source sequence satisfy a condition. All of the source sequence will be consumed. Note: This method uses immediate execution. Args: predicate: An optional single argument function used to test each elements. If omitted, the bool() function is used resulting in the elements being tested directly. Returns: True if all elements in the sequence meet the predicate condition, otherwise False. Raises: ValueError: If the Queryable is closed() TypeError: If predicate is not callable. """ expected = """Determine if all elements in the source sequence satisfy a condition. All of the source sequence will be consumed. .. note:: This method uses immediate execution. :param predicate: An optional single argument function used to test each elements. If omitted, the bool() function is used resulting in the elements being tested directly. :returns: True if all elements in the sequence meet the predicate condition, otherwise False. :raises: * ValueError - If the Queryable is closed() * TypeError - If predicate is not callable. 
""" source_lines = source.splitlines() actual_lines = parse_hieroglyph_text(source_lines) expected_lines = expected.splitlines() self.assertEqual(len(actual_lines), len(expected_lines)) for actual_line, result_line in zip(actual_lines, expected_lines): if len(actual_line.strip()) == 0: self.assertTrue(len(result_line.strip()) == 0) else: self.assertEqual(actual_line, result_line) def test_comment3(self): source = """Determine if all elements in the source sequence satisfy a condition. All of the source sequence will be consumed. Note: This method uses immediate execution. Args: predicate: An optional single argument function used to test each elements. If omitted, the bool() function is used resulting in the elements being tested directly. Returns: True if all elements in the sequence meet the predicate condition, otherwise False. Raises: ValueError: If the Queryable is closed() TypeError: If predicate is not callable. """ expected = """Determine if all elements in the source sequence satisfy a condition. All of the source sequence will be consumed. .. note:: This method uses immediate execution. :param predicate: An optional single argument function used to test each elements. If omitted, the bool() function is used resulting in the elements being tested directly. :returns: True if all elements in the sequence meet the predicate condition, otherwise False. :raises: * ValueError - If the Queryable is closed() * TypeError - If predicate is not callable. """ source_lines = source.splitlines() actual_lines = parse_hieroglyph_text(source_lines) expected_lines = expected.splitlines() self.assertEqual(len(actual_lines), len(expected_lines)) for actual_line, result_line in zip(actual_lines, expected_lines): if len(actual_line.strip()) == 0: self.assertTrue(len(result_line.strip()) == 0) else: self.assertEqual(actual_line, result_line) def test_comment4(self): source_lines = [u'Determine if all elements in the source sequence satisfy a condition.', u'', u'All of the source sequence will be consumed.', u'', u'Note: This method uses immediate execution.', u'', u'Args:', u' predicate: An optional single argument function used to test each', u' elements. If omitted, the bool() function is used resulting in', u' the elements being tested directly.', u'', u'Returns:', u' True if all elements in the sequence meet the predicate condition,', u' otherwise False.', u'', u'Raises:', u' ValueError: If the Queryable is closed()', u' TypeError: If predicate is not callable.', u''] expected = """Determine if all elements in the source sequence satisfy a condition. All of the source sequence will be consumed. .. note:: This method uses immediate execution. :param predicate: An optional single argument function used to test each elements. If omitted, the bool() function is used resulting in the elements being tested directly. :returns: True if all elements in the sequence meet the predicate condition, otherwise False. :raises: * ValueError - If the Queryable is closed() * TypeError - If predicate is not callable. 
""" actual_lines = parse_hieroglyph_text(source_lines) expected_lines = expected.splitlines() self.assertEqual(len(actual_lines), len(expected_lines)) for actual_line, result_line in zip(actual_lines, expected_lines): if len(actual_line.strip()) == 0: self.assertTrue(len(result_line.strip()) == 0) else: self.assertEqual(actual_line, result_line) def test_comment5(self): source_lines = [u'An empty Queryable.', u'', u'Note: The same empty instance will be returned each time.', u'', u'Returns: A Queryable over an empty sequence.', u''] expected = """An empty Queryable. .. note:: The same empty instance will be returned each time. :returns: A Queryable over an empty sequence. """ actual_lines = parse_hieroglyph_text(source_lines) expected_lines = expected.splitlines() self.assertEqual(len(actual_lines), len(expected_lines)) for actual_line, result_line in zip(actual_lines, expected_lines): if len(actual_line.strip()) == 0: self.assertTrue(len(result_line.strip()) == 0) else: self.assertEqual(actual_line, result_line) def test_comment6(self): source_lines = [u'A convenience factory for creating Records.', u'', u'Args:', u' **kwargs: Each keyword argument will be used to initialise an', u' attribute with the same name as the argument and the given', u' value.', u'', u'Returns:', u' A Record which has a named attribute for each of the keyword arguments.', u''] expected = """A convenience factory for creating Records. :param \*\*kwargs: Each keyword argument will be used to initialise an attribute with the same name as the argument and the given value. :returns: A Record which has a named attribute for each of the keyword arguments. """ actual_lines = parse_hieroglyph_text(source_lines) expected_lines = expected.splitlines() self.assertEqual(len(actual_lines), len(expected_lines)) for actual_line, result_line in zip(actual_lines, expected_lines): if len(actual_line.strip()) == 0: self.assertTrue(len(result_line.strip()) == 0) else: self.assertEqual(actual_line, result_line) def test_comment7(self): source = """Projects each element of a sequence to an intermediate new sequence, flattens the resulting sequences into one sequence and optionally transforms the flattened sequence using a selector function. Note: This method uses deferred execution. Args: collection_selector: A unary function mapping each element of the source iterable into an intermediate sequence. The single argument of the collection_selector is the value of an element from the source sequence. The return value should be an iterable derived from that element value. The default collection_selector, which is the identity function, assumes that each element of the source sequence is itself iterable. result_selector: An optional unary function mapping the elements in the flattened intermediate sequence to corresponding elements of the result sequence. The single argument of the result_selector is the value of an element from the flattened intermediate sequence. The return value should be the corresponding value in the result sequence. The default result_selector is the identity function. Returns: A Queryable over a generated sequence whose elements are the result of applying the one-to-many collection_selector to each element of the source sequence, concatenating the results into an intermediate sequence, and then mapping each of those elements through the result_selector into the result sequence. Raises: ValueError: If this Queryable has been closed. TypeError: If either collection_selector or result_selector are not callable. 
""" expected = """ Projects each element of a sequence to an intermediate new sequence, flattens the resulting sequences into one sequence and optionally transforms the flattened sequence using a selector function. .. note:: This method uses deferred execution. :param collection_selector: A unary function mapping each element of the source iterable into an intermediate sequence. The single argument of the collection_selector is the value of an element from the source sequence. The return value should be an iterable derived from that element value. The default collection_selector, which is the identity function, assumes that each element of the source sequence is itself iterable. :param result_selector: An optional unary function mapping the elements in the flattened intermediate sequence to corresponding elements of the result sequence. The single argument of the result_selector is the value of an element from the flattened intermediate sequence. The return value should be the corresponding value in the result sequence. The default result_selector is the identity function. :returns: A Queryable over a generated sequence whose elements are the result of applying the one-to-many collection_selector to each element of the source sequence, concatenating the results into an intermediate sequence, and then mapping each of those elements through the result_selector into the result sequence. :raises: * ValueError - If this Queryable has been closed. * TypeError - If either collection_selector or result_selector are not callable. """ source_lines = source.splitlines() actual_lines = parse_hieroglyph_text(source_lines) expected_lines = expected.splitlines() self.assertEqual(len(actual_lines), len(expected_lines)) for actual_line, result_line in zip(actual_lines, expected_lines): if len(actual_line.strip()) == 0: self.assertTrue(len(result_line.strip()) == 0) else: self.assertEqual(actual_line, result_line) def test_comment8(self): source = """A convenience factory for creating Records. Args: **kwargs: Each keyword argument will be used to initialise an attribute with the same name as the argument and the given value. Returns: A Record which has a named attribute for each of the keyword arguments. """ expected = """A convenience factory for creating Records. :param \*\*kwargs: Each keyword argument will be used to initialise an attribute with the same name as the argument and the given value. :returns: A Record which has a named attribute for each of the keyword arguments. """ source_lines = source.splitlines() actual_lines = parse_hieroglyph_text(source_lines) expected_lines = expected.splitlines() self.assertEqual(len(actual_lines), len(expected_lines)) for actual_line, result_line in zip(actual_lines, expected_lines): if len(actual_line.strip()) == 0: self.assertTrue(len(result_line.strip()) == 0) else: self.assertEqual(actual_line, result_line) def test_comment9(self): source_lines = [u'Parse a single line of a tree to determine depth and node.', u'', u'Args:', u' This line is missing an argument name.', u' ', u'Returns:', u' A 2-tuple containing the tree 0 based tree depth as the first', u' element and the node description as the second element.', u'', u'Raises:', u' ValueError: If line does not have the expected form.', u''] self.assertRaises(HieroglyphError, lambda: parse_hieroglyph_text(source_lines)) def test_comment10(self): source = """ Execute the command described by concatenating the string function arguments with the p4 -s global scripting flag and return the results in a dictionary. 
For example, to run the command:: p4 -s fstat -T depotFile foo.h call:: p4('fstat', '-T', 'depotFile', 'foo.h') Args: args: The arguments to the p4 command as a list of objects which will be converted to strings. Returns: A dictionary of lists where each key in the dictionary is the field name from the command output, and each value is a list of output lines in order. Raises: PerforceError: If the command could not be run or if the command reported an error. """ expected = """ Execute the command described by concatenating the string function arguments with the p4 -s global scripting flag and return the results in a dictionary. For example, to run the command:: p4 -s fstat -T depotFile foo.h call:: p4('fstat', '-T', 'depotFile', 'foo.h') :param args: The arguments to the p4 command as a list of objects which will be converted to strings. :returns: A dictionary of lists where each key in the dictionary is the field name from the command output, and each value is a list of output lines in order. :raises: PerforceError - If the command could not be run or if the command reported an error. """ source_lines = source.splitlines() actual_lines = parse_hieroglyph_text(source_lines) expected_lines = expected.splitlines() self.assertEqual(len(actual_lines), len(expected_lines)) for actual_line, result_line in zip(actual_lines, expected_lines): if len(actual_line.strip()) == 0: self.assertTrue(len(result_line.strip()) == 0) else: self.assertEqual(actual_line, result_line) def test_comment11(self): source = """Projects each element of a sequence to an intermediate new sequence, flattens the resulting sequences into one sequence and optionally transforms the flattened sequence using a selector function. Warning: This method may explode at short notice. Args: collection_selector: A unary function mapping each element of the source iterable into an intermediate sequence. The single argument of the collection_selector is the value of an element from the source sequence. The return value should be an iterable derived from that element value. The default collection_selector, which is the identity function, assumes that each element of the source sequence is itself iterable. result_selector: An optional unary function mapping the elements in the flattened intermediate sequence to corresponding elements of the result sequence. The single argument of the result_selector is the value of an element from the flattened intermediate sequence. The return value should be the corresponding value in the result sequence. The default result_selector is the identity function. Returns: A Queryable over a generated sequence whose elements are the result of applying the one-to-many collection_selector to each element of the source sequence, concatenating the results into an intermediate sequence, and then mapping each of those elements through the result_selector into the result sequence. Raises: ValueError: If this Queryable has been closed. TypeError: If either collection_selector or result_selector are not callable. """ expected = """ Projects each element of a sequence to an intermediate new sequence, flattens the resulting sequences into one sequence and optionally transforms the flattened sequence using a selector function. .. warning:: This method may explode at short notice. :param collection_selector: A unary function mapping each element of the source iterable into an intermediate sequence. The single argument of the collection_selector is the value of an element from the source sequence. 
The return value should be an iterable derived from that element value. The default collection_selector, which is the identity function, assumes that each element of the source sequence is itself iterable. :param result_selector: An optional unary function mapping the elements in the flattened intermediate sequence to corresponding elements of the result sequence. The single argument of the result_selector is the value of an element from the flattened intermediate sequence. The return value should be the corresponding value in the result sequence. The default result_selector is the identity function. :returns: A Queryable over a generated sequence whose elements are the result of applying the one-to-many collection_selector to each element of the source sequence, concatenating the results into an intermediate sequence, and then mapping each of those elements through the result_selector into the result sequence. :raises: * ValueError - If this Queryable has been closed. * TypeError - If either collection_selector or result_selector are not callable. """ source_lines = source.splitlines() actual_lines = parse_hieroglyph_text(source_lines) expected_lines = expected.splitlines() self.assertEqual(len(actual_lines), len(expected_lines)) for actual_line, result_line in zip(actual_lines, expected_lines): if len(actual_line.strip()) == 0: self.assertTrue(len(result_line.strip()) == 0) else: self.assertEqual(actual_line, result_line) def test_comment12(self): source = """Determine if all elements in the source sequence satisfy a condition. All of the source sequence will be consumed. Note: This method uses immediate execution. Args: predicate: An optional single argument function used to test each elements. If omitted, the bool() function is used resulting in the elements being tested directly. Returns: True if all elements in the sequence meet the predicate condition, otherwise False. Raises: This is not a proper exception description """ source_lines = source.splitlines() self.assertRaises(HieroglyphError, lambda: parse_hieroglyph_text(source_lines))
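Assuming the hieroglyph package from this repository is importable, the parser the tests above exercise can also be driven directly; the docstring below is illustrative only.

from hieroglyph.hieroglyph import parse_hieroglyph_text

source = """Add two numbers.

Args:
    a: The first operand.
    b: The second operand.

Returns:
    The sum of a and b.
"""
for line in parse_hieroglyph_text(source.splitlines()):
    print(line)
# Expected to emit Sphinx-style fields such as ':param a: ...' and
# ':returns: ...', matching the expected strings in the tests above.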
NCI-Cloud/horizon
refs/heads/nci/kilo
horizon/conf/__init__.py
77
# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import copy from django.utils.functional import empty # noqa from django.utils.functional import LazyObject # noqa class LazySettings(LazyObject): def _setup(self, name=None): from django.conf import settings from horizon.conf.default import HORIZON_CONFIG as DEFAULT_CONFIG # noqa HORIZON_CONFIG = copy.copy(DEFAULT_CONFIG) HORIZON_CONFIG.update(settings.HORIZON_CONFIG) # Ensure we always have our exception configuration... for exc_category in ['unauthorized', 'not_found', 'recoverable']: if exc_category not in HORIZON_CONFIG['exceptions']: default_exc_config = DEFAULT_CONFIG['exceptions'][exc_category] HORIZON_CONFIG['exceptions'][exc_category] = default_exc_config # Ensure our password validator always exists... if 'regex' not in HORIZON_CONFIG['password_validator']: default_pw_regex = DEFAULT_CONFIG['password_validator']['regex'] HORIZON_CONFIG['password_validator']['regex'] = default_pw_regex if 'help_text' not in HORIZON_CONFIG['password_validator']: default_pw_help = DEFAULT_CONFIG['password_validator']['help_text'] HORIZON_CONFIG['password_validator']['help_text'] = default_pw_help self._wrapped = HORIZON_CONFIG def __getitem__(self, name, fallback=None): if self._wrapped is empty: self._setup(name) return self._wrapped.get(name, fallback) HORIZON_CONFIG = LazySettings()
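The LazySettings class above defers touching django.conf.settings until the first key lookup. A dependency-free sketch of the same deferred-setup pattern (names are illustrative, not Horizon API):

class LazyConfig(object):
    def __init__(self):
        self._wrapped = None

    def _setup(self):
        # stands in for "import django settings and merge the defaults"
        self._wrapped = {'exceptions': {'unauthorized': (), 'not_found': (),
                                        'recoverable': ()}}

    def __getitem__(self, name, fallback=None):
        if self._wrapped is None:
            self._setup()
        return self._wrapped.get(name, fallback)

CONFIG = LazyConfig()
print(CONFIG['exceptions'])   # first access triggers _setup()
print(CONFIG['missing'])      # -> None, mirroring the fallback above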
xq262144/hue
refs/heads/master
desktop/core/ext-py/tablib-0.10.0/tablib/packages/openpyxl3/shared/date_time.py
55
# file openpyxl/shared/date_time.py # Copyright (c) 2010 openpyxl # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN # THE SOFTWARE. # # @license: http://www.opensource.org/licenses/mit-license.php # @author: Eric Gazoni """Manage Excel date weirdness.""" # Python stdlib imports from math import floor import calendar import datetime import time import re # constants W3CDTF_FORMAT = '%Y-%m-%dT%H:%M:%SZ' RE_W3CDTF = '(\d{4})-(\d{2})-(\d{2})T(\d{2}):(\d{2}):(\d{2})(.(\d{2}))?Z' EPOCH = datetime.datetime.utcfromtimestamp(0) def datetime_to_W3CDTF(dt): """Convert from a datetime to a timestamp string.""" return datetime.datetime.strftime(dt, W3CDTF_FORMAT) def W3CDTF_to_datetime(formatted_string): """Convert from a timestamp string to a datetime object.""" match = re.match(RE_W3CDTF,formatted_string) digits = list(map(int, match.groups()[:6])) return datetime.datetime(*digits) class SharedDate(object): """Date formatting utilities for Excel with shared state. Excel has two primary date tracking schemes: Windows - Day 1 == 1900-01-01 Mac - Day 1 == 1904-01-01 SharedDate stores which system we are using and converts dates between Python and Excel accordingly. """ CALENDAR_WINDOWS_1900 = 1900 CALENDAR_MAC_1904 = 1904 datetime_object_type = 'DateTime' def __init__(self): self.excel_base_date = self.CALENDAR_WINDOWS_1900 def datetime_to_julian(self, date): """Convert from python datetime to excel julian date representation.""" if isinstance(date, datetime.datetime): return self.to_julian(date.year, date.month, date.day, \ hours=date.hour, minutes=date.minute, seconds=date.second) elif isinstance(date, datetime.date): return self.to_julian(date.year, date.month, date.day) def to_julian(self, year, month, day, hours=0, minutes=0, seconds=0): """Convert from Python date to Excel JD.""" # explicitly disallow bad years # Excel 2000 treats JD=0 as 1/0/1900 (buggy, disallow) # Excel 2000 treats JD=2958466 as a bad date (Y10K bug!) if year < 1900 or year > 10000: msg = 'Year not supported by Excel: %s' % year raise ValueError(msg) if self.excel_base_date == self.CALENDAR_WINDOWS_1900: # Fudge factor for the erroneous fact that the year 1900 is # treated as a Leap Year in MS Excel.
This affects every date # following 28th February 1900 if year == 1900 and month <= 2: excel_1900_leap_year = False else: excel_1900_leap_year = True excel_base_date = 2415020 else: raise NotImplementedError('Mac dates are not yet supported.') #excel_base_date = 2416481 #excel_1900_leap_year = False # Julian base date adjustment if month > 2: month = month - 3 else: month = month + 9 year -= 1 # Calculate the Julian Date, then subtract the Excel base date # JD 2415020 = 31 - Dec - 1899 -> Excel Date of 0 century, decade = int(str(year)[:2]), int(str(year)[2:]) excel_date = floor(146097 * century / 4) + \ floor((1461 * decade) / 4) + floor((153 * month + 2) / 5) + \ day + 1721119 - excel_base_date if excel_1900_leap_year: excel_date += 1 # check to ensure that we exclude 2/29/1900 as a possible value if self.excel_base_date == self.CALENDAR_WINDOWS_1900 \ and excel_date == 60: msg = 'Error: Excel believes 1900 was a leap year' raise ValueError(msg) excel_time = ((hours * 3600) + (minutes * 60) + seconds) / 86400 return excel_date + excel_time def from_julian(self, value=0): """Convert from the Excel JD back to a date""" if self.excel_base_date == self.CALENDAR_WINDOWS_1900: excel_base_date = 25569 if value < 60: excel_base_date -= 1 elif value == 60: msg = 'Error: Excel believes 1900 was a leap year' raise ValueError(msg) else: raise NotImplementedError('Mac dates are not yet supported.') #excel_base_date = 24107 if value >= 1: utc_days = value - excel_base_date return EPOCH + datetime.timedelta(days=utc_days) elif value >= 0: hours = floor(value * 24) mins = floor(value * 24 * 60) - floor(hours * 60) secs = floor(value * 24 * 60 * 60) - floor(hours * 60 * 60) - \ floor(mins * 60) return datetime.time(int(hours), int(mins), int(secs)) else: msg = 'Negative dates (%s) are not supported' % value raise ValueError(msg)
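A usage sketch for SharedDate, assuming the import path below (derived from this file's location) resolves; the Windows 1900 date system is the default.

import datetime
from openpyxl3.shared.date_time import SharedDate   # assumed import path

sd = SharedDate()
serial = sd.datetime_to_julian(datetime.date(2010, 6, 15))
print(serial)                  # Excel serial number for 2010-06-15
print(sd.from_julian(serial))  # -> datetime for 2010-06-15
print(sd.from_julian(0.5))     # values in [0, 1) come back as datetime.time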
crcresearch/osf.io
refs/heads/develop
api/tokens/__init__.py
12133432
waustin/django-simple-faq
refs/heads/master
faq/__init__.py
12133432
tttthemanCorp/CardmeleonAppEngine
refs/heads/master
django/db/backends/postgresql/client.py
638
import os import sys from django.db.backends import BaseDatabaseClient class DatabaseClient(BaseDatabaseClient): executable_name = 'psql' def runshell(self): settings_dict = self.connection.settings_dict args = [self.executable_name] if settings_dict['USER']: args += ["-U", settings_dict['USER']] if settings_dict['HOST']: args.extend(["-h", settings_dict['HOST']]) if settings_dict['PORT']: args.extend(["-p", str(settings_dict['PORT'])]) args += [settings_dict['NAME']] if os.name == 'nt': sys.exit(os.system(" ".join(args))) else: os.execvp(self.executable_name, args)
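The psql argument list runshell() builds can be checked in isolation; the settings values below are illustrative, and the construction logic is copied from the method above.

# Stand-alone check of the argument construction in runshell(), using a
# hand-written settings dict instead of a live connection.
settings_dict = {'USER': 'app', 'HOST': 'db.example.com',
                 'PORT': 5432, 'NAME': 'appdb'}
args = ['psql']
if settings_dict['USER']:
    args += ["-U", settings_dict['USER']]
if settings_dict['HOST']:
    args.extend(["-h", settings_dict['HOST']])
if settings_dict['PORT']:
    args.extend(["-p", str(settings_dict['PORT'])])
args += [settings_dict['NAME']]
print(args)  # ['psql', '-U', 'app', '-h', 'db.example.com', '-p', '5432', 'appdb']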
bsmr-erlang/otp
refs/heads/maint
lib/asn1/test/asn1_SUITE_data/Seq.py
92
Seq DEFINITIONS IMPLICIT TAGS ::= BEGIN IMPORTS Set1 FROM SeqSetLib; Seq ::= SEQUENCE { bool BOOLEAN, boolCon [20] BOOLEAN, boolPri [PRIVATE 21] BOOLEAN, boolApp [APPLICATION 22] BOOLEAN, boolExpCon [30] EXPLICIT BOOLEAN, boolExpPri [PRIVATE 31] EXPLICIT BOOLEAN, boolExpApp [APPLICATION 32] EXPLICIT BOOLEAN } Seq1 ::= SEQUENCE { bool1 BOOLEAN, int1 INTEGER, seq1 SeqIn } Seq2 ::= SEQUENCE { seq2 SeqIn, bool2 BOOLEAN, int2 INTEGER } Seq3 ::= SEQUENCE { bool3 BOOLEAN, seq3 SeqIn, int3 INTEGER } Seq4 ::= SEQUENCE { seq41 SeqIn, seq42 SeqIn, seq43 SeqIn } SeqDef1 ::= SET { bool1 BOOLEAN DEFAULT TRUE, int1 INTEGER, seq1 SeqIn DEFAULT {} } SeqDef2 ::= SET { seq2 SeqIn DEFAULT {}, bool2 BOOLEAN, int2 INTEGER } SeqDef3 ::= SET { bool3 BOOLEAN DEFAULT TRUE, seq3 SeqIn DEFAULT {}, int3 INTEGER DEFAULT 17 } SeqOpt1 ::= SET { bool1 BOOLEAN OPTIONAL, int1 INTEGER, seq1 SeqIn OPTIONAL } SeqOpt2 ::= SET { seq2 SeqIn OPTIONAL, bool2 BOOLEAN, int2 INTEGER } SeqOpt3 ::= SET { bool3 BOOLEAN OPTIONAL, seq3 SeqIn OPTIONAL, int3 INTEGER OPTIONAL } SeqIn ::= SEQUENCE { boolIn BOOLEAN OPTIONAL, intIn INTEGER DEFAULT 12 } SeqS1 ::= SEQUENCE { boolS1 BOOLEAN, intS1 INTEGER, seqS1 SEQUENCE { boolIn BOOLEAN, intIn INTEGER } } SeqS2 ::= SEQUENCE { seqS2 SEQUENCE { boolIn BOOLEAN, intIn INTEGER }, boolS2 BOOLEAN, intS2 INTEGER } SeqS3 ::= SEQUENCE { boolS3 BOOLEAN, seqS3 SEQUENCE { boolIn BOOLEAN, intIn INTEGER }, intS3 INTEGER } SeqImp1 ::= SET { set Set1, bool BOOLEAN, int INTEGER } SeqImp2 ::= SET { bool BOOLEAN, set Set1, int INTEGER } SeqImp3 ::= SET { bool BOOLEAN, int INTEGER, set Set1 } SeqCompOf ::= SEQUENCE { ..., COMPONENTS OF SeqS3 } END
FinalAngel/django-cms
refs/heads/release/3.4.x
cms/test_utils/project/pluginapp/plugins/style/cms_plugins.py
7
# -*- coding: utf-8 -*- from cms.plugin_pool import plugin_pool from cms.plugin_base import CMSPluginBase from .models import Style class StylePlugin(CMSPluginBase): model = Style name = 'Style' render_template = 'pluginapp/style/style.html' allow_children = True fieldsets = ( (None, { 'fields': ( 'label', ('class_name', 'tag_type'), ) }), ('Advanced settings', { 'classes': ('collapse',), 'fields': ( 'additional_classes', ), }), ) plugin_pool.register_plugin(StylePlugin)
bruderstein/PythonScript
refs/heads/master
PythonLib/full/encodings/iso8859_7.py
272
""" Python Character Mapping Codec iso8859_7 generated from 'MAPPINGS/ISO8859/8859-7.TXT' with gencodec.py.

"""#"

import codecs

### Codec APIs

class Codec(codecs.Codec):

    def encode(self, input, errors='strict'):
        return codecs.charmap_encode(input, errors, encoding_table)

    def decode(self, input, errors='strict'):
        return codecs.charmap_decode(input, errors, decoding_table)

class IncrementalEncoder(codecs.IncrementalEncoder):
    def encode(self, input, final=False):
        return codecs.charmap_encode(input, self.errors, encoding_table)[0]

class IncrementalDecoder(codecs.IncrementalDecoder):
    def decode(self, input, final=False):
        return codecs.charmap_decode(input, self.errors, decoding_table)[0]

class StreamWriter(Codec, codecs.StreamWriter):
    pass

class StreamReader(Codec, codecs.StreamReader):
    pass

### encodings module API

def getregentry():
    return codecs.CodecInfo(
        name='iso8859-7',
        encode=Codec().encode,
        decode=Codec().decode,
        incrementalencoder=IncrementalEncoder,
        incrementaldecoder=IncrementalDecoder,
        streamreader=StreamReader,
        streamwriter=StreamWriter,
    )

### Decoding Table

decoding_table = (
    '\x00'  # 0x00 -> NULL
    '\x01'  # 0x01 -> START OF HEADING
    '\x02'  # 0x02 -> START OF TEXT
    '\x03'  # 0x03 -> END OF TEXT
    '\x04'  # 0x04 -> END OF TRANSMISSION
    '\x05'  # 0x05 -> ENQUIRY
    '\x06'  # 0x06 -> ACKNOWLEDGE
    '\x07'  # 0x07 -> BELL
    '\x08'  # 0x08 -> BACKSPACE
    '\t'    # 0x09 -> HORIZONTAL TABULATION
    '\n'    # 0x0A -> LINE FEED
    '\x0b'  # 0x0B -> VERTICAL TABULATION
    '\x0c'  # 0x0C -> FORM FEED
    '\r'    # 0x0D -> CARRIAGE RETURN
    '\x0e'  # 0x0E -> SHIFT OUT
    '\x0f'  # 0x0F -> SHIFT IN
    '\x10'  # 0x10 -> DATA LINK ESCAPE
    '\x11'  # 0x11 -> DEVICE CONTROL ONE
    '\x12'  # 0x12 -> DEVICE CONTROL TWO
    '\x13'  # 0x13 -> DEVICE CONTROL THREE
    '\x14'  # 0x14 -> DEVICE CONTROL FOUR
    '\x15'  # 0x15 -> NEGATIVE ACKNOWLEDGE
    '\x16'  # 0x16 -> SYNCHRONOUS IDLE
    '\x17'  # 0x17 -> END OF TRANSMISSION BLOCK
    '\x18'  # 0x18 -> CANCEL
    '\x19'  # 0x19 -> END OF MEDIUM
    '\x1a'  # 0x1A -> SUBSTITUTE
    '\x1b'  # 0x1B -> ESCAPE
    '\x1c'  # 0x1C -> FILE SEPARATOR
    '\x1d'  # 0x1D -> GROUP SEPARATOR
    '\x1e'  # 0x1E -> RECORD SEPARATOR
    '\x1f'  # 0x1F -> UNIT SEPARATOR
    ' '     # 0x20 -> SPACE
    '!'     # 0x21 -> EXCLAMATION MARK
    '"'     # 0x22 -> QUOTATION MARK
    '#'     # 0x23 -> NUMBER SIGN
    '$'     # 0x24 -> DOLLAR SIGN
    '%'     # 0x25 -> PERCENT SIGN
    '&'     # 0x26 -> AMPERSAND
    "'"     # 0x27 -> APOSTROPHE
    '('     # 0x28 -> LEFT PARENTHESIS
    ')'     # 0x29 -> RIGHT PARENTHESIS
    '*'     # 0x2A -> ASTERISK
    '+'     # 0x2B -> PLUS SIGN
    ','     # 0x2C -> COMMA
    '-'     # 0x2D -> HYPHEN-MINUS
    '.'     # 0x2E -> FULL STOP
    '/'     # 0x2F -> SOLIDUS
    '0'     # 0x30 -> DIGIT ZERO
    '1'     # 0x31 -> DIGIT ONE
    '2'     # 0x32 -> DIGIT TWO
    '3'     # 0x33 -> DIGIT THREE
    '4'     # 0x34 -> DIGIT FOUR
    '5'     # 0x35 -> DIGIT FIVE
    '6'     # 0x36 -> DIGIT SIX
    '7'     # 0x37 -> DIGIT SEVEN
    '8'     # 0x38 -> DIGIT EIGHT
    '9'     # 0x39 -> DIGIT NINE
    ':'     # 0x3A -> COLON
    ';'     # 0x3B -> SEMICOLON
    '<'     # 0x3C -> LESS-THAN SIGN
    '='     # 0x3D -> EQUALS SIGN
    '>'     # 0x3E -> GREATER-THAN SIGN
    '?'     # 0x3F -> QUESTION MARK
    '@'     # 0x40 -> COMMERCIAL AT
    'A'     # 0x41 -> LATIN CAPITAL LETTER A
    'B'     # 0x42 -> LATIN CAPITAL LETTER B
    'C'     # 0x43 -> LATIN CAPITAL LETTER C
    'D'     # 0x44 -> LATIN CAPITAL LETTER D
    'E'     # 0x45 -> LATIN CAPITAL LETTER E
    'F'     # 0x46 -> LATIN CAPITAL LETTER F
    'G'     # 0x47 -> LATIN CAPITAL LETTER G
    'H'     # 0x48 -> LATIN CAPITAL LETTER H
    'I'     # 0x49 -> LATIN CAPITAL LETTER I
    'J'     # 0x4A -> LATIN CAPITAL LETTER J
    'K'     # 0x4B -> LATIN CAPITAL LETTER K
    'L'     # 0x4C -> LATIN CAPITAL LETTER L
    'M'     # 0x4D -> LATIN CAPITAL LETTER M
    'N'     # 0x4E -> LATIN CAPITAL LETTER N
    'O'     # 0x4F -> LATIN CAPITAL LETTER O
    'P'     # 0x50 -> LATIN CAPITAL LETTER P
    'Q'     # 0x51 -> LATIN CAPITAL LETTER Q
    'R'     # 0x52 -> LATIN CAPITAL LETTER R
    'S'     # 0x53 -> LATIN CAPITAL LETTER S
    'T'     # 0x54 -> LATIN CAPITAL LETTER T
    'U'     # 0x55 -> LATIN CAPITAL LETTER U
    'V'     # 0x56 -> LATIN CAPITAL LETTER V
    'W'     # 0x57 -> LATIN CAPITAL LETTER W
    'X'     # 0x58 -> LATIN CAPITAL LETTER X
    'Y'     # 0x59 -> LATIN CAPITAL LETTER Y
    'Z'     # 0x5A -> LATIN CAPITAL LETTER Z
    '['     # 0x5B -> LEFT SQUARE BRACKET
    '\\'    # 0x5C -> REVERSE SOLIDUS
    ']'     # 0x5D -> RIGHT SQUARE BRACKET
    '^'     # 0x5E -> CIRCUMFLEX ACCENT
    '_'     # 0x5F -> LOW LINE
    '`'     # 0x60 -> GRAVE ACCENT
    'a'     # 0x61 -> LATIN SMALL LETTER A
    'b'     # 0x62 -> LATIN SMALL LETTER B
    'c'     # 0x63 -> LATIN SMALL LETTER C
    'd'     # 0x64 -> LATIN SMALL LETTER D
    'e'     # 0x65 -> LATIN SMALL LETTER E
    'f'     # 0x66 -> LATIN SMALL LETTER F
    'g'     # 0x67 -> LATIN SMALL LETTER G
    'h'     # 0x68 -> LATIN SMALL LETTER H
    'i'     # 0x69 -> LATIN SMALL LETTER I
    'j'     # 0x6A -> LATIN SMALL LETTER J
    'k'     # 0x6B -> LATIN SMALL LETTER K
    'l'     # 0x6C -> LATIN SMALL LETTER L
    'm'     # 0x6D -> LATIN SMALL LETTER M
    'n'     # 0x6E -> LATIN SMALL LETTER N
    'o'     # 0x6F -> LATIN SMALL LETTER O
    'p'     # 0x70 -> LATIN SMALL LETTER P
    'q'     # 0x71 -> LATIN SMALL LETTER Q
    'r'     # 0x72 -> LATIN SMALL LETTER R
    's'     # 0x73 -> LATIN SMALL LETTER S
    't'     # 0x74 -> LATIN SMALL LETTER T
    'u'     # 0x75 -> LATIN SMALL LETTER U
    'v'     # 0x76 -> LATIN SMALL LETTER V
    'w'     # 0x77 -> LATIN SMALL LETTER W
    'x'     # 0x78 -> LATIN SMALL LETTER X
    'y'     # 0x79 -> LATIN SMALL LETTER Y
    'z'     # 0x7A -> LATIN SMALL LETTER Z
    '{'     # 0x7B -> LEFT CURLY BRACKET
    '|'     # 0x7C -> VERTICAL LINE
    '}'     # 0x7D -> RIGHT CURLY BRACKET
    '~'     # 0x7E -> TILDE
    '\x7f'  # 0x7F -> DELETE
    '\x80'  # 0x80 -> <control>
    '\x81'  # 0x81 -> <control>
    '\x82'  # 0x82 -> <control>
    '\x83'  # 0x83 -> <control>
    '\x84'  # 0x84 -> <control>
    '\x85'  # 0x85 -> <control>
    '\x86'  # 0x86 -> <control>
    '\x87'  # 0x87 -> <control>
    '\x88'  # 0x88 -> <control>
    '\x89'  # 0x89 -> <control>
    '\x8a'  # 0x8A -> <control>
    '\x8b'  # 0x8B -> <control>
    '\x8c'  # 0x8C -> <control>
    '\x8d'  # 0x8D -> <control>
    '\x8e'  # 0x8E -> <control>
    '\x8f'  # 0x8F -> <control>
    '\x90'  # 0x90 -> <control>
    '\x91'  # 0x91 -> <control>
    '\x92'  # 0x92 -> <control>
    '\x93'  # 0x93 -> <control>
    '\x94'  # 0x94 -> <control>
    '\x95'  # 0x95 -> <control>
    '\x96'  # 0x96 -> <control>
    '\x97'  # 0x97 -> <control>
    '\x98'  # 0x98 -> <control>
    '\x99'  # 0x99 -> <control>
    '\x9a'  # 0x9A -> <control>
    '\x9b'  # 0x9B -> <control>
    '\x9c'  # 0x9C -> <control>
    '\x9d'  # 0x9D -> <control>
    '\x9e'  # 0x9E -> <control>
    '\x9f'  # 0x9F -> <control>
    '\xa0'  # 0xA0 -> NO-BREAK SPACE
    '\u2018'  # 0xA1 -> LEFT SINGLE QUOTATION MARK
    '\u2019'  # 0xA2 -> RIGHT SINGLE QUOTATION MARK
    '\xa3'  # 0xA3 -> POUND SIGN
    '\u20ac'  # 0xA4 -> EURO SIGN
    '\u20af'  # 0xA5 -> DRACHMA SIGN
    '\xa6'  # 0xA6 -> BROKEN BAR
    '\xa7'  # 0xA7 -> SECTION SIGN
    '\xa8'  # 0xA8 -> DIAERESIS
    '\xa9'  # 0xA9 -> COPYRIGHT SIGN
    '\u037a'  # 0xAA -> GREEK YPOGEGRAMMENI
    '\xab'  # 0xAB -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
    '\xac'  # 0xAC -> NOT SIGN
    '\xad'  # 0xAD -> SOFT HYPHEN
    '\ufffe'
    '\u2015'  # 0xAF -> HORIZONTAL BAR
    '\xb0'  # 0xB0 -> DEGREE SIGN
    '\xb1'  # 0xB1 -> PLUS-MINUS SIGN
    '\xb2'  # 0xB2 -> SUPERSCRIPT TWO
    '\xb3'  # 0xB3 -> SUPERSCRIPT THREE
    '\u0384'  # 0xB4 -> GREEK TONOS
    '\u0385'  # 0xB5 -> GREEK DIALYTIKA TONOS
    '\u0386'  # 0xB6 -> GREEK CAPITAL LETTER ALPHA WITH TONOS
    '\xb7'  # 0xB7 -> MIDDLE DOT
    '\u0388'  # 0xB8 -> GREEK CAPITAL LETTER EPSILON WITH TONOS
    '\u0389'  # 0xB9 -> GREEK CAPITAL LETTER ETA WITH TONOS
    '\u038a'  # 0xBA -> GREEK CAPITAL LETTER IOTA WITH TONOS
    '\xbb'  # 0xBB -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
    '\u038c'  # 0xBC -> GREEK CAPITAL LETTER OMICRON WITH TONOS
    '\xbd'  # 0xBD -> VULGAR FRACTION ONE HALF
    '\u038e'  # 0xBE -> GREEK CAPITAL LETTER UPSILON WITH TONOS
    '\u038f'  # 0xBF -> GREEK CAPITAL LETTER OMEGA WITH TONOS
    '\u0390'  # 0xC0 -> GREEK SMALL LETTER IOTA WITH DIALYTIKA AND TONOS
    '\u0391'  # 0xC1 -> GREEK CAPITAL LETTER ALPHA
    '\u0392'  # 0xC2 -> GREEK CAPITAL LETTER BETA
    '\u0393'  # 0xC3 -> GREEK CAPITAL LETTER GAMMA
    '\u0394'  # 0xC4 -> GREEK CAPITAL LETTER DELTA
    '\u0395'  # 0xC5 -> GREEK CAPITAL LETTER EPSILON
    '\u0396'  # 0xC6 -> GREEK CAPITAL LETTER ZETA
    '\u0397'  # 0xC7 -> GREEK CAPITAL LETTER ETA
    '\u0398'  # 0xC8 -> GREEK CAPITAL LETTER THETA
    '\u0399'  # 0xC9 -> GREEK CAPITAL LETTER IOTA
    '\u039a'  # 0xCA -> GREEK CAPITAL LETTER KAPPA
    '\u039b'  # 0xCB -> GREEK CAPITAL LETTER LAMDA
    '\u039c'  # 0xCC -> GREEK CAPITAL LETTER MU
    '\u039d'  # 0xCD -> GREEK CAPITAL LETTER NU
    '\u039e'  # 0xCE -> GREEK CAPITAL LETTER XI
    '\u039f'  # 0xCF -> GREEK CAPITAL LETTER OMICRON
    '\u03a0'  # 0xD0 -> GREEK CAPITAL LETTER PI
    '\u03a1'  # 0xD1 -> GREEK CAPITAL LETTER RHO
    '\ufffe'
    '\u03a3'  # 0xD3 -> GREEK CAPITAL LETTER SIGMA
    '\u03a4'  # 0xD4 -> GREEK CAPITAL LETTER TAU
    '\u03a5'  # 0xD5 -> GREEK CAPITAL LETTER UPSILON
    '\u03a6'  # 0xD6 -> GREEK CAPITAL LETTER PHI
    '\u03a7'  # 0xD7 -> GREEK CAPITAL LETTER CHI
    '\u03a8'  # 0xD8 -> GREEK CAPITAL LETTER PSI
    '\u03a9'  # 0xD9 -> GREEK CAPITAL LETTER OMEGA
    '\u03aa'  # 0xDA -> GREEK CAPITAL LETTER IOTA WITH DIALYTIKA
    '\u03ab'  # 0xDB -> GREEK CAPITAL LETTER UPSILON WITH DIALYTIKA
    '\u03ac'  # 0xDC -> GREEK SMALL LETTER ALPHA WITH TONOS
    '\u03ad'  # 0xDD -> GREEK SMALL LETTER EPSILON WITH TONOS
    '\u03ae'  # 0xDE -> GREEK SMALL LETTER ETA WITH TONOS
    '\u03af'  # 0xDF -> GREEK SMALL LETTER IOTA WITH TONOS
    '\u03b0'  # 0xE0 -> GREEK SMALL LETTER UPSILON WITH DIALYTIKA AND TONOS
    '\u03b1'  # 0xE1 -> GREEK SMALL LETTER ALPHA
    '\u03b2'  # 0xE2 -> GREEK SMALL LETTER BETA
    '\u03b3'  # 0xE3 -> GREEK SMALL LETTER GAMMA
    '\u03b4'  # 0xE4 -> GREEK SMALL LETTER DELTA
    '\u03b5'  # 0xE5 -> GREEK SMALL LETTER EPSILON
    '\u03b6'  # 0xE6 -> GREEK SMALL LETTER ZETA
    '\u03b7'  # 0xE7 -> GREEK SMALL LETTER ETA
    '\u03b8'  # 0xE8 -> GREEK SMALL LETTER THETA
    '\u03b9'  # 0xE9 -> GREEK SMALL LETTER IOTA
    '\u03ba'  # 0xEA -> GREEK SMALL LETTER KAPPA
    '\u03bb'  # 0xEB -> GREEK SMALL LETTER LAMDA
    '\u03bc'  # 0xEC -> GREEK SMALL LETTER MU
    '\u03bd'  # 0xED -> GREEK SMALL LETTER NU
    '\u03be'  # 0xEE -> GREEK SMALL LETTER XI
    '\u03bf'  # 0xEF -> GREEK SMALL LETTER OMICRON
    '\u03c0'  # 0xF0 -> GREEK SMALL LETTER PI
    '\u03c1'  # 0xF1 -> GREEK SMALL LETTER RHO
    '\u03c2'  # 0xF2 -> GREEK SMALL LETTER FINAL SIGMA
    '\u03c3'  # 0xF3 -> GREEK SMALL LETTER SIGMA
    '\u03c4'  # 0xF4 -> GREEK SMALL LETTER TAU
    '\u03c5'  # 0xF5 -> GREEK SMALL LETTER UPSILON
    '\u03c6'  # 0xF6 -> GREEK SMALL LETTER PHI
    '\u03c7'  # 0xF7 -> GREEK SMALL LETTER CHI
    '\u03c8'  # 0xF8 -> GREEK SMALL LETTER PSI
    '\u03c9'  # 0xF9 -> GREEK SMALL LETTER OMEGA
    '\u03ca'  # 0xFA -> GREEK SMALL LETTER IOTA WITH DIALYTIKA
    '\u03cb'  # 0xFB -> GREEK SMALL LETTER UPSILON WITH DIALYTIKA
    '\u03cc'  # 0xFC -> GREEK SMALL LETTER OMICRON WITH TONOS
    '\u03cd'  # 0xFD -> GREEK SMALL LETTER UPSILON WITH TONOS
    '\u03ce'  # 0xFE -> GREEK SMALL LETTER OMEGA WITH TONOS
    '\ufffe'
)

### Encoding table
encoding_table = codecs.charmap_build(decoding_table)
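Once the encodings package registers this module, the table is exercised through the normal codec machinery. A small usage check (the sample string is arbitrary; the expected bytes follow from the decoding table above):

# 'iso8859-7' resolves to this module via the stdlib codec registry.
data = 'Αθήνα'.encode('iso8859-7')
assert data == b'\xc1\xe8\xde\xed\xe1'
assert data.decode('iso8859-7') == 'Αθήνα'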
michael-dev2rights/ansible
refs/heads/ansible-d2r
lib/ansible/modules/cloud/centurylink/clc_server_snapshot.py
56
#!/usr/bin/python
#
# Copyright (c) 2015 CenturyLink
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)

from __future__ import absolute_import, division, print_function
__metaclass__ = type

ANSIBLE_METADATA = {'metadata_version': '1.1',
                    'status': ['preview'],
                    'supported_by': 'community'}

DOCUMENTATION = '''
module: clc_server_snapshot
short_description: Create, Delete and Restore server snapshots in CenturyLink Cloud.
description:
  - An Ansible module to Create, Delete and Restore server snapshots in CenturyLink Cloud.
version_added: "2.0"
options:
  server_ids:
    description:
      - The list of CLC server Ids.
    required: True
  expiration_days:
    description:
      - The number of days to keep the server snapshot before it expires.
    default: 7
    required: False
  state:
    description:
      - The state to insure that the provided resources are in.
    default: 'present'
    required: False
    choices: ['present', 'absent', 'restore']
  wait:
    description:
      - Whether to wait for the provisioning tasks to finish before returning.
    default: True
    required: False
    choices: [True, False]
requirements:
    - python = 2.7
    - requests >= 2.5.0
    - clc-sdk
author: "CLC Runner (@clc-runner)"
notes:
    - To use this module, it is required to set the below environment variables which enables access to the
      Centurylink Cloud
          - CLC_V2_API_USERNAME, the account login id for the centurylink cloud
          - CLC_V2_API_PASSWORD, the account password for the centurylink cloud
    - Alternatively, the module accepts the API token and account alias. The API token can be generated using the
      CLC account login and password via the HTTP api call @ https://api.ctl.io/v2/authentication/login
          - CLC_V2_API_TOKEN, the API token generated from https://api.ctl.io/v2/authentication/login
          - CLC_ACCT_ALIAS, the account alias associated with the centurylink cloud
    - Users can set CLC_V2_API_URL to specify an endpoint for pointing to a different CLC environment.
'''

EXAMPLES = '''
# Note - You must set the CLC_V2_API_USERNAME And CLC_V2_API_PASSWD Environment variables before running these examples

- name: Create server snapshot
  clc_server_snapshot:
    server_ids:
      - UC1TEST-SVR01
      - UC1TEST-SVR02
    expiration_days: 10
    wait: True
    state: present

- name: Restore server snapshot
  clc_server_snapshot:
    server_ids:
      - UC1TEST-SVR01
      - UC1TEST-SVR02
    wait: True
    state: restore

- name: Delete server snapshot
  clc_server_snapshot:
    server_ids:
      - UC1TEST-SVR01
      - UC1TEST-SVR02
    wait: True
    state: absent
'''

RETURN = '''
server_ids:
    description: The list of server ids that are changed
    returned: success
    type: list
    sample:
        [
            "UC1TEST-SVR01",
            "UC1TEST-SVR02"
        ]
'''

__version__ = '${version}'

import os
from distutils.version import LooseVersion

try:
    import requests
except ImportError:
    REQUESTS_FOUND = False
else:
    REQUESTS_FOUND = True

#
#  Requires the clc-python-sdk.
#  sudo pip install clc-sdk
#
try:
    import clc as clc_sdk
    from clc import CLCException
except ImportError:
    CLC_FOUND = False
    clc_sdk = None
else:
    CLC_FOUND = True

from ansible.module_utils.basic import AnsibleModule


class ClcSnapshot:

    clc = clc_sdk
    module = None

    def __init__(self, module):
        """
        Construct module
        """
        self.module = module
        if not CLC_FOUND:
            self.module.fail_json(
                msg='clc-python-sdk required for this module')
        if not REQUESTS_FOUND:
            self.module.fail_json(
                msg='requests library is required for this module')
        if requests.__version__ and LooseVersion(
                requests.__version__) < LooseVersion('2.5.0'):
            self.module.fail_json(
                msg='requests library version should be >= 2.5.0')

        self._set_user_agent(self.clc)

    def process_request(self):
        """
        Process the request - Main Code Path
        :return: Returns with either an exit_json or fail_json
        """
        p = self.module.params
        server_ids = p['server_ids']
        expiration_days = p['expiration_days']
        state = p['state']
        request_list = []
        changed = False
        changed_servers = []

        self._set_clc_credentials_from_env()
        if state == 'present':
            changed, request_list, changed_servers = self.ensure_server_snapshot_present(
                server_ids=server_ids,
                expiration_days=expiration_days)
        elif state == 'absent':
            changed, request_list, changed_servers = self.ensure_server_snapshot_absent(
                server_ids=server_ids)
        elif state == 'restore':
            changed, request_list, changed_servers = self.ensure_server_snapshot_restore(
                server_ids=server_ids)

        self._wait_for_requests_to_complete(request_list)
        return self.module.exit_json(
            changed=changed,
            server_ids=changed_servers)

    def ensure_server_snapshot_present(self, server_ids, expiration_days):
        """
        Ensures the given set of server_ids have the snapshots created
        :param server_ids: The list of server_ids to create the snapshot
        :param expiration_days: The number of days to keep the snapshot
        :return: (changed, request_list, changed_servers)
                 changed: A flag indicating whether any change was made
                 request_list: the list of clc request objects from CLC API call
                 changed_servers: The list of servers ids that are modified
        """
        request_list = []
        changed = False
        servers = self._get_servers_from_clc(
            server_ids,
            'Failed to obtain server list from the CLC API')
        servers_to_change = [
            server for server in servers if len(
                server.GetSnapshots()) == 0]
        for server in servers_to_change:
            changed = True
            if not self.module.check_mode:
                request = self._create_server_snapshot(server, expiration_days)
                request_list.append(request)
        changed_servers = [
            server.id for server in servers_to_change if server.id]
        return changed, request_list, changed_servers

    def _create_server_snapshot(self, server, expiration_days):
        """
        Create the snapshot for the CLC server
        :param server: the CLC server object
        :param expiration_days: The number of days to keep the snapshot
        :return: the create request object from CLC API Call
        """
        result = None
        try:
            result = server.CreateSnapshot(
                delete_existing=True,
                expiration_days=expiration_days)
        except CLCException as ex:
            self.module.fail_json(msg='Failed to create snapshot for server : {0}. {1}'.format(
                server.id, ex.response_text
            ))
        return result

    def ensure_server_snapshot_absent(self, server_ids):
        """
        Ensures the given set of server_ids have the snapshots removed
        :param server_ids: The list of server_ids to delete the snapshot
        :return: (changed, request_list, changed_servers)
                 changed: A flag indicating whether any change was made
                 request_list: the list of clc request objects from CLC API call
                 changed_servers: The list of servers ids that are modified
        """
        request_list = []
        changed = False
        servers = self._get_servers_from_clc(
            server_ids,
            'Failed to obtain server list from the CLC API')
        servers_to_change = [
            server for server in servers if len(
                server.GetSnapshots()) > 0]
        for server in servers_to_change:
            changed = True
            if not self.module.check_mode:
                request = self._delete_server_snapshot(server)
                request_list.append(request)
        changed_servers = [
            server.id for server in servers_to_change if server.id]
        return changed, request_list, changed_servers

    def _delete_server_snapshot(self, server):
        """
        Delete snapshot for the CLC server
        :param server: the CLC server object
        :return: the delete snapshot request object from CLC API
        """
        result = None
        try:
            result = server.DeleteSnapshot()
        except CLCException as ex:
            self.module.fail_json(msg='Failed to delete snapshot for server : {0}. {1}'.format(
                server.id, ex.response_text
            ))
        return result

    def ensure_server_snapshot_restore(self, server_ids):
        """
        Ensures the given set of server_ids have the snapshots restored
        :param server_ids: The list of server_ids to delete the snapshot
        :return: (changed, request_list, changed_servers)
                 changed: A flag indicating whether any change was made
                 request_list: the list of clc request objects from CLC API call
                 changed_servers: The list of servers ids that are modified
        """
        request_list = []
        changed = False
        servers = self._get_servers_from_clc(
            server_ids,
            'Failed to obtain server list from the CLC API')
        servers_to_change = [
            server for server in servers if len(
                server.GetSnapshots()) > 0]
        for server in servers_to_change:
            changed = True
            if not self.module.check_mode:
                request = self._restore_server_snapshot(server)
                request_list.append(request)
        changed_servers = [
            server.id for server in servers_to_change if server.id]
        return changed, request_list, changed_servers

    def _restore_server_snapshot(self, server):
        """
        Restore snapshot for the CLC server
        :param server: the CLC server object
        :return: the restore snapshot request object from CLC API
        """
        result = None
        try:
            result = server.RestoreSnapshot()
        except CLCException as ex:
            self.module.fail_json(msg='Failed to restore snapshot for server : {0}. {1}'.format(
                server.id, ex.response_text
            ))
        return result

    def _wait_for_requests_to_complete(self, requests_lst):
        """
        Waits until the CLC requests are complete if the wait argument is True
        :param requests_lst: The list of CLC request objects
        :return: none
        """
        if not self.module.params['wait']:
            return
        for request in requests_lst:
            request.WaitUntilComplete()
            for request_details in request.requests:
                if request_details.Status() != 'succeeded':
                    self.module.fail_json(
                        msg='Unable to process server snapshot request')

    @staticmethod
    def define_argument_spec():
        """
        This function defines the dictionary object required for
        package module
        :return: the package dictionary object
        """
        argument_spec = dict(
            server_ids=dict(type='list', required=True),
            expiration_days=dict(default=7),
            wait=dict(default=True),
            state=dict(
                default='present',
                choices=[
                    'present',
                    'absent',
                    'restore']),
        )
        return argument_spec

    def _get_servers_from_clc(self, server_list, message):
        """
        Internal function to fetch list of CLC server objects from a list of server ids
        :param server_list: The list of server ids
        :param message: The error message to throw in case of any error
        :return the list of CLC server objects
        """
        try:
            return self.clc.v2.Servers(server_list).servers
        except CLCException as ex:
            return self.module.fail_json(msg=message + ': %s' % ex)

    def _set_clc_credentials_from_env(self):
        """
        Set the CLC Credentials on the sdk by reading environment variables
        :return: none
        """
        env = os.environ
        v2_api_token = env.get('CLC_V2_API_TOKEN', False)
        v2_api_username = env.get('CLC_V2_API_USERNAME', False)
        v2_api_passwd = env.get('CLC_V2_API_PASSWD', False)
        clc_alias = env.get('CLC_ACCT_ALIAS', False)
        api_url = env.get('CLC_V2_API_URL', False)

        if api_url:
            self.clc.defaults.ENDPOINT_URL_V2 = api_url

        if v2_api_token and clc_alias:
            self.clc._LOGIN_TOKEN_V2 = v2_api_token
            self.clc._V2_ENABLED = True
            self.clc.ALIAS = clc_alias
        elif v2_api_username and v2_api_passwd:
            self.clc.v2.SetCredentials(
                api_username=v2_api_username,
                api_passwd=v2_api_passwd)
        else:
            return self.module.fail_json(
                msg="You must set the CLC_V2_API_USERNAME and CLC_V2_API_PASSWD "
                    "environment variables")

    @staticmethod
    def _set_user_agent(clc):
        if hasattr(clc, 'SetRequestsSession'):
            agent_string = "ClcAnsibleModule/" + __version__
            ses = requests.Session()
            ses.headers.update({"Api-Client": agent_string})
            ses.headers['User-Agent'] += " " + agent_string
            clc.SetRequestsSession(ses)


def main():
    """
    Main function
    :return: None
    """
    module = AnsibleModule(
        argument_spec=ClcSnapshot.define_argument_spec(),
        supports_check_mode=True
    )
    clc_snapshot = ClcSnapshot(module)
    clc_snapshot.process_request()


if __name__ == '__main__':
    main()
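The three ensure_* methods in the module above share one shape: filter the servers that need work, flip the changed flag, and collect request objects unless check mode is on. A toy sketch of that shape using plain dicts (nothing below comes from the CLC SDK; the names are illustrative):

check_mode = False
servers = [{'id': 'SVR01', 'snapshots': []}, {'id': 'SVR02', 'snapshots': ['s1']}]

# "present" semantics: only servers with no snapshot need a change.
to_change = [s for s in servers if not s['snapshots']]
changed = bool(to_change)
requests_made = [] if check_mode else ['create:%s' % s['id'] for s in to_change]
changed_ids = [s['id'] for s in to_change]

assert (changed, changed_ids) == (True, ['SVR01'])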
carltongibson/django-allauth
refs/heads/master
manage.py
74
#!/usr/bin/env python
import os

os.environ['DJANGO_SETTINGS_MODULE'] = 'test_settings'

from django.core import management

if __name__ == "__main__":
    management.execute_from_command_line()
sumerc/yappi
refs/heads/master
tests/save_stats.py
2
import time

import yappi
import _yappi

timings = {"a_1": 4, "b_1": 1}
_yappi._set_test_timings(timings)


def profile(func):
    def wrapped(*args, **kwargs):
        yappi.start()
        result = func(*args, **kwargs)
        yappi.stop()
        prof_file = "%s.%s" % (func.__name__, time.time())
        #prof_file = "callgrind.a.1"
        yappi.get_func_stats().save(prof_file, "ystat")
        return result
    return wrapped


def b():
    pass


@profile
def a():
    b()

a()
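For completeness, a saved 'ystat' file can be reloaded for inspection. A minimal sketch, assuming yappi's YFuncStats loader behaves as documented; the glob pattern is an assumption based on the decorator's "%s.%s" filename format:

import glob
import yappi

# Pick up the stats file written by the wrapped call to a() above.
path = sorted(glob.glob('a.*'))[-1]
stats = yappi.YFuncStats()
stats.add(path)   # load a previously saved 'ystat' file
stats.print_all()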