code: stringlengths 10 to 805k
def_use_chains: sequencelengths 0 to 667
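Each row below pairs a "code" string with its "def_use_chains". Here is a minimal sketch of how a consumer might read one row, assuming the chain entries are 0-based [start, end) character offsets into the "code" field and that the first span of each chain marks the definition while the remaining spans are uses; the helper name and the two-line row dict are hypothetical stand-ins, not taken from the dataset.

def print_def_use_chains(row):
    # row["code"] is the source text; row["def_use_chains"] is a list of
    # chains, each a list of [start, end) character spans into that text.
    code = row["code"]
    for chain in row["def_use_chains"]:
        (def_start, def_end), *uses = chain  # assumption: first span = definition
        name = code[def_start:def_end]
        use_names = [code[s:e] for s, e in uses]
        print(f"def {name!r} at {def_start}:{def_end}, uses: {use_names}")

# Hypothetical two-line row, small enough to check the offsets by hand.
row = {
    "code": "x = 1\ny = x + 1\n",
    "def_use_chains": [[[0, 1], [10, 11]]],  # 'x' defined at 0, used at 10
}
print_def_use_chains(row)  # prints: def 'x' at 0:1, uses: ['x']

Keeping the spans as plain [start, end] pairs means a consumer can recover the identifier names by slicing, without re-parsing the code.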
# Copyright (C) 2010 Google Inc. All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following disclaimer # in the documentation and/or other materials provided with the # distribution. # * Neither the name of Google Inc. nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. import logging from webkitpy.tool.steps.abstractstep import AbstractStep from webkitpy.tool.steps.options import Options _log = logging.getLogger(__name__) class Update(AbstractStep): @classmethod def options(cls): return AbstractStep.options() + [ Options.non_interactive, Options.update, Options.quiet, ] def run(self, state): if not self._options.update: return _log.info("Updating working directory") self._tool.executive.run_and_throw_if_fail(self._update_command(), quiet=self._options.quiet, cwd=self._tool.scm().checkout_root) def _update_command(self): update_command = self._tool.deprecated_port().update_webkit_command(self._options.non_interactive) return update_command
[ [ [ 1540, 1547 ], [ 1663, 1670 ] ], [ [ 1594, 1606 ], [ 1706, 1718 ], [ 1775, 1787 ] ], [ [ 1647, 1654 ], [ 1814, 1821 ], [ 1851, 1858 ], [ 1879, 1886 ] ], [ [ 1656, 1660 ], [ 1995, 1999 ] ], [ [ 1699, 1705 ] ] ]
# Daniel Mc Callion # This program prints the summer months months = ("January", "February", "March", "April", "May", "June", "July", "August", "September", "October", "November", "December") summer = months[4:7] for month in summer: print(month)
[ [ [ 61, 67 ], [ 204, 210 ] ], [ [ 195, 201 ], [ 230, 236 ] ], [ [ 221, 226 ], [ 248, 253 ] ] ]
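To give a feel for where such spans come from, the sketch below derives comparable def/use chains for a shortened variant of the snippet in the row above, using only the standard-library ast module. It is illustrative only and not necessarily how this dataset was produced; the shortened source text, the helper names, and the "first span is the definition" convention are assumptions.

import ast

# Shortened stand-in for the snippet in the row above (not its exact text).
source = (
    'months = ("January", "February", "March")\n'
    'summer = months[0:2]\n'
    'for month in summer:\n'
    '    print(month)\n'
)

# Absolute character offset at which each line starts.
line_starts = [0]
for line in source.splitlines(keepends=True):
    line_starts.append(line_starts[-1] + len(line))

def span(node):
    # Convert an ast.Name's (lineno, col_offset) into a [start, end) char span.
    start = line_starts[node.lineno - 1] + node.col_offset
    return [start, start + len(node.id)]

# Group every Name occurrence by identifier, remembering its context.
occurrences = {}
for node in ast.walk(ast.parse(source)):
    if isinstance(node, ast.Name):
        occurrences.setdefault(node.id, []).append((span(node), node.ctx))

# Keep identifiers that are assigned somewhere, listing the definition span
# followed by its use spans in source order.
chains = []
for name, occs in occurrences.items():
    if any(isinstance(ctx, ast.Store) for _, ctx in occs):
        chains.append([s for s, _ in sorted(occs)])

print(chains)
# [[[0, 6], [51, 57]], [[42, 48], [76, 82]], [[67, 72], [94, 99]]]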
class FieldC(): def __init__(self, name, column_type, primary_key, default): self.name = name self.column_type = column_type self.primary_key = primary_key self.default = default def __str__(self): return '<%s, %s:%s>' % (self.__class__.__name__, self.column_type, self.name) class StringFieldC(FieldC): def __init__(self, name=None, primary_key=False, default=None, ddl='varchar(255)'): super().__init__(name, ddl, primary_key, default) class TinyIntFieldC(FieldC): def __init__(self, name=None, default=0): super().__init__(name, 'tinyint', False, default) class IntFieldC(FieldC): def __init__(self, name=None, primary_key=False, default=0): super().__init__(name, 'int', primary_key, default) class BigIntFieldC(FieldC): def __init__(self, name=None, primary_key=False, default=0): super().__init__(name, 'bigint', primary_key, default) class DoubleFieldC(FieldC): def __init__(self, name=None, primary_key=False, default=0.0): super().__init__(name, 'double', primary_key, default) class TextFieldC(FieldC): def __init__(self, name=None, default=None): super().__init__(name, 'text', False, default)
[ [ [ 6, 12 ], [ 357, 363 ], [ 539, 545 ], [ 675, 681 ], [ 835, 841 ], [ 998, 1004 ], [ 1161, 1167 ] ], [ [ 344, 356 ] ], [ [ 525, 538 ] ], [ [ 665, 674 ] ], [ [ 822, 834 ] ], [ [ 985, 997 ] ], [ [ 1150, 1160 ] ] ]
from django.contrib.auth import get_user_model from django.test import TestCase class UsersManagersTests(TestCase): """ Test user creation manager """ def test_create_user(self): """ Creates a new user with email as primary identifier instead of username """ User = get_user_model() user = User.objects.create_user(email="[email protected]", password="foo") self.assertEqual(user.email, "[email protected]") self.assertTrue(user.is_active) self.assertFalse(user.is_staff) self.assertFalse(user.is_superuser) try: # username is None for the AbstractUser option # username does not exist for the AbstractBaseUser option self.assertIsNone(user.username) except AttributeError: pass with self.assertRaises(TypeError): User.objects.create_user() with self.assertRaises(TypeError): User.objects.create_user(email="") with self.assertRaises(ValueError): User.objects.create_user(email="", password="foo") def test_create_superuser(self): """ Creates a superuser with the custom user model """ User = get_user_model() admin_user = User.objects.create_superuser(email="[email protected]", password="foo") self.assertEqual(admin_user.email, "[email protected]") self.assertTrue(admin_user.is_active) self.assertTrue(admin_user.is_staff) self.assertTrue(admin_user.is_superuser) try: # username is None for the AbstractUser option # username does not exist for the AbstractBaseUser option self.assertIsNone(admin_user.username) except AttributeError: pass with self.assertRaises(ValueError): User.objects.create_superuser(email="[email protected]", password="foo", is_superuser=False)
[ [ [ 32, 46 ], [ 317, 331 ], [ 1241, 1255 ] ], [ [ 71, 79 ], [ 107, 115 ] ], [ [ 88, 106 ] ] ]
import pysam from optparse import OptionParser from x_gene_annotation import * class mRNA_Transfer(): def call_transfer_mut(self, sf_rna, sf_dna_up, sf_dna_bottom, sf_candidate): m_rna_vars = self.load_variants(sf_rna) m_DNA_RNA_ovlp_vars = self.get_overlap_variants(sf_dna_bottom, m_rna_vars) m_candidates = self.get_mut_exclusive_var(sf_dna_up, m_DNA_RNA_ovlp_vars) self.get_sub_var(sf_rna, m_candidates, sf_candidate) def load_variants(self, sf_vcfFile): vcf = pysam.VariantFile(sf_vcfFile) m_variants = {} for unique_id, var in enumerate(vcf.fetch()): chrm = var.chrom start = var.pos s_id = chrm + "~" + str(start) + "~" + str(var.ref) + "_" + str(var.alts[0]) m_variants[s_id] = 1 return m_variants def get_overlap_variants(self, sf_vcfFile, m_existing_vars): vcf = pysam.VariantFile(sf_vcfFile) m_variants = {} for unique_id, var in enumerate(vcf.fetch()): chrm = var.chrom start = var.pos s_id = chrm + "~" + str(start) + "~" + str(var.ref) + "_" + str(var.alts[0]) if s_id in m_existing_vars: m_variants[s_id] = 1 return m_variants def get_mut_exclusive_var(self, sf_vcfFile, m_existing_vars): vcf = pysam.VariantFile(sf_vcfFile) m_variants = {} for unique_id, var in enumerate(vcf.fetch()): chrm = var.chrom start = var.pos s_id = chrm + "~" + str(start) + "~" + str(var.ref) + "_" + str(var.alts[0]) # if s_id not in m_existing_vars: m_variants[s_id] = 1 m_mut_exc_var = {} for s_id in m_existing_vars: if s_id not in m_variants: m_mut_exc_var[s_id] = 1 return m_mut_exc_var def get_sub_var(self, sf_vcfFile, m_existing_vars, sf_sub_vcf): vcf = pysam.VariantFile(sf_vcfFile) vcf_out = pysam.VariantFile(sf_sub_vcf, 'w', header=vcf.header) for unique_id, var in enumerate(vcf.fetch()): chrm = var.chrom start = var.pos s_id = chrm + "~" + str(start) + "~" + str(var.ref) + "_" + str(var.alts[0]) if s_id in m_existing_vars: vcf_out.write(var) #### #### ##parse the options: def parse_option(): parser = OptionParser() parser.add_option("-p", "--path", dest="wfolder", type="string", help="Working folder") parser.add_option("--gene", dest="gene", default="", help="Gene Annotation file", metavar="FILE") parser.add_option("--rna", dest="rna", help="RNA mutation vcf file ", metavar="FILE") parser.add_option("--phase", dest="phase", help="Mutation phasing", metavar="FILE") parser.add_option("--dna_up", dest="dna_up", help="DNA mutation file of scion", metavar="FILE") parser.add_option("--dna_bottom", dest="dna_bottom", help="DNA mutation file of root ", metavar="FILE") parser.add_option("-c", dest="cutoff", type="int", default=0, help="cutoff of minimum supporting reads") parser.add_option("-o", "--output", dest="output", help="candidate mutation file", metavar="FILE") (options, args) = parser.parse_args() return (options, args) #### if __name__ == '__main__': (options, args) = parse_option() sf_rna_mut=options.rna sf_dna_up=options.dna_up sf_dna_bottom=options.dna_bottom sf_candidates=options.output rna_transfer=mRNA_Transfer() rna_transfer.call_transfer_mut(sf_rna_mut, sf_dna_up, sf_dna_bottom, sf_candidates) sf_gene_annotation = options.gene UP_DOWN_GENE=1500 if sf_gene_annotation !="": gff = GFF3(sf_gene_annotation) iextnd = UP_DOWN_GENE gff.load_gene_annotation_with_extnd(iextnd) gff.index_gene_annotation_interval_tree() gff.annotate_results(sf_candidates, sf_candidates+".with_gene_annotation") ####
[ [ [ 7, 12 ], [ 515, 520 ], [ 908, 913 ], [ 1346, 1351 ], [ 1935, 1940 ], [ 1983, 1988 ] ], [ [ 34, 46 ], [ 2376, 2388 ] ], [ [ 77, 78 ], [ 3854, 3858 ] ], [ [ 86, 99 ], [ 3643, 3656 ] ], [ [ 2347, 2359 ], [ 3484, 3496 ] ], [ [ 3467, 3474 ], [ 3515, 3522 ], [ 3541, 3548 ], [ 3574, 3581 ], [ 3611, 3618 ], [ 3773, 3780 ] ], [ [ 3476, 3480 ] ], [ [ 3504, 3514 ], [ 3694, 3704 ] ], [ [ 3531, 3540 ], [ 3706, 3715 ] ], [ [ 3560, 3573 ], [ 3717, 3730 ] ], [ [ 3597, 3610 ], [ 3732, 3745 ], [ 4040, 4053 ], [ 4055, 4068 ] ], [ [ 3630, 3642 ], [ 3663, 3675 ] ], [ [ 3752, 3770 ], [ 3815, 3833 ], [ 3859, 3877 ] ], [ [ 3790, 3802 ], [ 3896, 3908 ] ], [ [ 3848, 3851 ], [ 3917, 3920 ], [ 3969, 3972 ], [ 4019, 4022 ] ], [ [ 3887, 3893 ], [ 3953, 3959 ] ] ]
# -*- coding: utf-8 -*- # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: import pytest from ....testing import example_data from ...niftyreg import get_custom_path from ..asl import FitAsl from ...niftyreg.tests.test_regutils import no_nifty_tool @pytest.mark.skipif( no_nifty_tool(cmd='fit_asl'), reason="niftyfit is not installed") def test_fit_asl(): """ Testing FitAsl interface.""" # Create the test node fit_asl = FitAsl() # Check if the command is properly defined cmd = get_custom_path('fit_asl', env_dir='NIFTYFIT_DIR') assert fit_asl.cmd == cmd # test raising error with mandatory args absent with pytest.raises(ValueError): fit_asl.run() # Tests on the interface: # Runs cbf fitting assuming all tissue is GM! in_file = example_data('asl.nii.gz') fit_asl.inputs.source_file = in_file cmd_tmp = '{cmd} -source {in_file} -cbf {cbf} -error {error} -syn {syn}' expected_cmd = cmd_tmp.format( cmd=cmd, in_file=in_file, cbf='asl_cbf.nii.gz', error='asl_error.nii.gz', syn='asl_syn.nii.gz', ) assert fit_asl.cmdline == expected_cmd # Runs cbf fitting using IR/SR T1 data to estimate the local T1 and uses # the segmentation data to fit tissue specific blood flow parameters # (lambda,transit times,T1) fit_asl2 = FitAsl(sig=True) in_file = example_data('asl.nii.gz') t1map = example_data('T1map.nii.gz') seg = example_data('segmentation0.nii.gz') fit_asl2.inputs.source_file = in_file fit_asl2.inputs.t1map = t1map fit_asl2.inputs.seg = seg cmd_tmp = '{cmd} -source {in_file} -cbf {cbf} -error {error} \ -seg {seg} -sig -syn {syn} -t1map {t1map}' expected_cmd = cmd_tmp.format( cmd=cmd, in_file=in_file, t1map=t1map, seg=seg, cbf='asl_cbf.nii.gz', error='asl_error.nii.gz', syn='asl_syn.nii.gz', ) assert fit_asl2.cmdline == expected_cmd
[ [ [ 146, 152 ], [ 318, 324 ], [ 716, 722 ] ], [ [ 178, 190 ], [ 860, 872 ], [ 1456, 1468 ], [ 1495, 1507 ], [ 1534, 1546 ] ], [ [ 215, 230 ], [ 573, 588 ] ], [ [ 250, 256 ], [ 506, 512 ], [ 1425, 1431 ] ], [ [ 301, 314 ], [ 342, 355 ] ], [ [ 412, 424 ] ] ]
# automatically generated by the FlatBuffers compiler, do not modify # namespace: tflite import flatbuffers from flatbuffers.compat import import_numpy np = import_numpy() class DimensionMetadata(object): __slots__ = ['_tab'] @classmethod def GetRootAsDimensionMetadata(cls, buf, offset): n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) x = DimensionMetadata() x.Init(buf, n + offset) return x @classmethod def DimensionMetadataBufferHasIdentifier(cls, buf, offset, size_prefixed=False): return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed) # DimensionMetadata def Init(self, buf, pos): self._tab = flatbuffers.table.Table(buf, pos) # DimensionMetadata def Format(self): o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) if o != 0: return self._tab.Get(flatbuffers.number_types.Int8Flags, o + self._tab.Pos) return 0 # DimensionMetadata def DenseSize(self): o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6)) if o != 0: return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos) return 0 # DimensionMetadata def ArraySegmentsType(self): o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8)) if o != 0: return self._tab.Get(flatbuffers.number_types.Uint8Flags, o + self._tab.Pos) return 0 # DimensionMetadata def ArraySegments(self): o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(10)) if o != 0: from flatbuffers.table import Table obj = Table(bytearray(), 0) self._tab.Union(obj, o) return obj return None # DimensionMetadata def ArrayIndicesType(self): o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(12)) if o != 0: return self._tab.Get(flatbuffers.number_types.Uint8Flags, o + self._tab.Pos) return 0 # DimensionMetadata def ArrayIndices(self): o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(14)) if o != 0: from flatbuffers.table import Table obj = Table(bytearray(), 0) self._tab.Union(obj, o) return obj return None def DimensionMetadataStart(builder): builder.StartObject(6) def DimensionMetadataAddFormat(builder, format): builder.PrependInt8Slot(0, format, 0) def DimensionMetadataAddDenseSize(builder, denseSize): builder.PrependInt32Slot(1, denseSize, 0) def DimensionMetadataAddArraySegmentsType(builder, arraySegmentsType): builder.PrependUint8Slot(2, arraySegmentsType, 0) def DimensionMetadataAddArraySegments(builder, arraySegments): builder.PrependUOffsetTRelativeSlot(3, flatbuffers.number_types.UOffsetTFlags.py_type(arraySegments), 0) def DimensionMetadataAddArrayIndicesType(builder, arrayIndicesType): builder.PrependUint8Slot(4, arrayIndicesType, 0) def DimensionMetadataAddArrayIndices(builder, arrayIndices): builder.PrependUOffsetTRelativeSlot(5, flatbuffers.number_types.UOffsetTFlags.py_type(arrayIndices), 0) def DimensionMetadataEnd(builder): return builder.EndObject()
[ [ [ 98, 109 ], [ 317, 328 ], [ 340, 351 ], [ 580, 591 ], [ 755, 766 ], [ 848, 859 ], [ 968, 979 ], [ 1102, 1113 ], [ 1222, 1233 ], [ 1365, 1376 ], [ 1485, 1496 ], [ 1624, 1635 ], [ 1948, 1959 ], [ 2069, 2080 ], [ 2207, 2218 ], [ 2930, 2941 ], [ 3214, 3225 ] ], [ [ 141, 153 ], [ 159, 171 ] ], [ [ 154, 156 ] ], [ [ 181, 198 ], [ 393, 410 ] ], [ [ 2467, 2489 ] ], [ [ 2527, 2553 ] ], [ [ 2614, 2643 ] ], [ [ 2711, 2748 ] ], [ [ 2832, 2865 ] ], [ [ 3000, 3036 ] ], [ [ 3118, 3150 ] ], [ [ 3283, 3303 ] ] ]
# -*- coding: utf-8 -*- from django.utils.translation import ugettext_lazy from openslides.utils.personal_info import PersonalInfo from .models import Item class AgendaPersonalInfo(PersonalInfo): """ Class for personal info block for the agenda app. """ headline = ugettext_lazy('I am on the list of speakers of the following items') default_weight = 10 def get_queryset(self): return Item.objects.filter( speaker__person=self.request.user, speaker__begin_time=None)
[ [ [ 62, 75 ], [ 286, 299 ] ], [ [ 120, 132 ], [ 186, 198 ] ], [ [ 154, 158 ], [ 423, 427 ] ], [ [ 167, 185 ] ] ]
def test(): from tensorflow.keras import datasets assert model.get_layer("class_prediction").get_config()["units"]==43, "Check the number of output classes" assert model.get_layer("class_prediction").get_config()["activation"]=="softmax", "Check your activation function" assert model.output[0].name== 'class_prediction/Identity:0', "How does the output look like?" assert model.output[2].name== 'y1_prediction/Identity:0', "How does the output look like?" assert model.output[3].name== 'x2_prediction/Identity:0', "How does the output look like?" assert model.output[4].name== 'y2_prediction/Identity:0', "How does the output look like?" assert model.get_layer("y1_prediction").get_config()["units"]==1, "Check the number of outputs" assert model.get_layer("x2_prediction").get_config()["units"]==1, "Check the number of outputs" assert model.get_layer("y2_prediction").get_config()["units"]==1, "Check the number of outputs" assert model.get_layer("x1_prediction").get_config()["units"]==1, "Check the number of outputs" __msg__.good("WELL DONE!")
[ [ [ 4, 8 ] ] ]
from rest_framework import serializers from django.contrib.auth import get_user_model from .models import CustomUser User = get_user_model() class TokenSerializer(serializers.Serializer): """ This serializer serializes the token data """ access = serializers.CharField(max_length=255) refresh = serializers.CharField(max_length=255) class UserSerializer(serializers.ModelSerializer): """ Serializes User class """ class Meta: model = User fields = "__all__" def create(self, validated_data): user = User(email=validated_data["email"]) user.set_password(validated_data['password']) user.save() return user
[ [ [ 27, 38 ], [ 166, 177 ], [ 266, 277 ], [ 318, 329 ], [ 379, 390 ] ], [ [ 71, 85 ], [ 125, 139 ] ], [ [ 106, 116 ] ], [ [ 118, 122 ], [ 483, 487 ], [ 569, 573 ] ], [ [ 150, 165 ] ], [ [ 364, 378 ] ] ]
#!/usr/bin/env python # -*- coding: utf-8 -*- import re from setuptools import setup, find_packages version = None with open('jaeger_client/__init__.py', 'r') as f: for line in f: m = re.match(r'^__version__\s*=\s*(["\'])([^"\']+)\1', line) if m: version = m.group(2) break assert version is not None, \ 'Could not determine version number from jaeger_client/__init__.py' setup( name='jaeger-client', version=version, url='https://github.com/jaegertracing/jaeger-client-python', description='Jaeger Python OpenTracing Tracer implementation', author='Yuri Shkuro', author_email='[email protected]', packages=find_packages(exclude=['crossdock', 'tests', 'example', 'tests.*']), include_package_data=True, license='Apache License 2.0', zip_safe=False, keywords='jaeger, tracing, opentracing', classifiers=[ 'Development Status :: 5 - Production/Stable', 'Intended Audience :: Developers', 'License :: OSI Approved :: Apache Software License', 'Natural Language :: English', 'Programming Language :: Python :: 2.7', 'Programming Language :: Python :: 3.5', 'Programming Language :: Python :: 3.6', ], install_requires=[ 'threadloop>=1,<2', 'thrift', 'tornado>=4.3', 'opentracing>=2.1,<3.0', ], # Uncomment below if need to test with unreleased version of opentracing # dependency_links=[ # 'git+ssh://[email protected]/opentracing/opentracing-python.git@BRANCHNAME#egg=opentracing', # ], test_suite='tests', extras_require={ ':python_version<"3"': [ 'futures', ], 'tests': [ 'mock==1.0.1', 'pycurl>=7.43,<8', # pinned to avoid RemovedInPytest4Warning 'pytest>=3.7.0,<3.8.0', 'pytest-cov==2.5.1', 'coverage<4.4', # can remove after https://bitbucket.org/ned/coveragepy/issues/581/44b1-44-breaking-in-ci 'pytest-timeout==1.3.1', 'pytest-tornado', # pin <3.2 as otherwise it requires pytest>=3.8 'pytest-benchmark[histogram]>=3.0.0rc1,<3.2', 'pytest-localserver', 'flake8', 'flake8-quotes', 'codecov', 'tchannel>=0.27;python_version=="2.7"', # This is only used in python 2 'opentracing_instrumentation>=3,<4', 'prometheus_client==0.3.1', ] }, )
[ [ [ 53, 55 ], [ 198, 200 ] ], [ [ 80, 85 ], [ 424, 429 ] ], [ [ 87, 100 ], [ 681, 694 ] ], [ [ 102, 109 ], [ 328, 335 ], [ 469, 476 ] ], [ [ 164, 165 ], [ 183, 184 ] ], [ [ 175, 179 ], [ 249, 253 ] ], [ [ 194, 195 ], [ 266, 267 ], [ 291, 292 ] ], [ [ 281, 288 ], [ 328, 335 ], [ 469, 476 ] ] ]
# Copyright (C) 2012-2013 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Configuration for libvirt objects. Classes to represent the configuration of various libvirt objects and support conversion to/from XML. These classes are solely concerned by providing direct Object <-> XML document conversions. No policy or operational decisions should be made by code in these classes. Such policy belongs in the 'designer.py' module which provides simplified helpers for populating up config object instances. """ import time from collections import OrderedDict from lxml import etree from oslo_utils import strutils from oslo_utils import units from nova import exception from nova.i18n import _ from nova.pci import utils as pci_utils from nova.virt import hardware # Namespace to use for Nova specific metadata items in XML NOVA_NS = "http://openstack.org/xmlns/libvirt/nova/1.0" class LibvirtConfigObject(object): def __init__(self, **kwargs): super(LibvirtConfigObject, self).__init__() self.root_name = kwargs.get("root_name") self.ns_prefix = kwargs.get('ns_prefix') self.ns_uri = kwargs.get('ns_uri') def _new_node(self, node_name, **kwargs): if self.ns_uri is None: return etree.Element(node_name, **kwargs) else: return etree.Element("{" + self.ns_uri + "}" + node_name, nsmap={self.ns_prefix: self.ns_uri}, **kwargs) def _text_node(self, node_name, value, **kwargs): child = self._new_node(node_name, **kwargs) child.text = str(value) return child def format_dom(self): return self._new_node(self.root_name) def parse_str(self, xmlstr): self.parse_dom(etree.fromstring(xmlstr)) def parse_dom(self, xmldoc): if self.root_name != xmldoc.tag: msg = (_("Root element name should be '%(name)s' not '%(tag)s'") % {'name': self.root_name, 'tag': xmldoc.tag}) raise exception.InvalidInput(msg) def to_xml(self, pretty_print=True): root = self.format_dom() xml_str = etree.tostring(root, encoding='unicode', pretty_print=pretty_print) return xml_str class LibvirtConfigCaps(LibvirtConfigObject): def __init__(self, **kwargs): super(LibvirtConfigCaps, self).__init__(root_name="capabilities", **kwargs) self.host = None self.guests = [] def parse_dom(self, xmldoc): super(LibvirtConfigCaps, self).parse_dom(xmldoc) for c in xmldoc: if c.tag == "host": host = LibvirtConfigCapsHost() host.parse_dom(c) self.host = host elif c.tag == "guest": guest = LibvirtConfigCapsGuest() guest.parse_dom(c) self.guests.append(guest) def format_dom(self): caps = super(LibvirtConfigCaps, self).format_dom() if self.host: caps.append(self.host.format_dom()) for g in self.guests: caps.append(g.format_dom()) return caps class LibvirtConfigDomainCaps(LibvirtConfigObject): def __init__(self, **kwargs): super(LibvirtConfigDomainCaps, self).__init__( root_name="domainCapabilities", **kwargs) self._features = None self._machine = None self._alias = None self._devices = None def parse_dom(self, xmldoc): super(LibvirtConfigDomainCaps, self).parse_dom(xmldoc) for c in xmldoc: if c.tag == "features": features = LibvirtConfigDomainCapsFeatures() 
features.parse_dom(c) self._features = features elif c.tag == "machine": self._machine = c.text elif c.tag == "devices": devices = LibvirtConfigDomainCapsDevices() devices.parse_dom(c) self._devices = devices @property def features(self): if self._features is None: return [] return self._features.features @property def machine_type(self): if self._machine is None: return "" return self._machine @property def machine_type_alias(self): if self._alias is None: return self._machine return self._alias @machine_type_alias.setter def machine_type_alias(self, alias): self._alias = alias @property def devices(self): if self._devices is None: return [] return self._devices class LibvirtConfigDomainCapsVideoModels(LibvirtConfigObject): def __init__(self, **kwargs): super().__init__(root_name='video', **kwargs) self.supported = False self.models = set() def parse_dom(self, xmldoc): super().parse_dom(xmldoc) if xmldoc.get('supported') == 'yes': self.supported = True self.models = {str(node) for node in xmldoc.xpath("//enum[@name='modelType']/value/text()")} class LibvirtConfigDomainCapsDiskBuses(LibvirtConfigObject): def __init__(self, **kwargs): super().__init__(root_name='disk', **kwargs) self.supported = False self.buses = set() def parse_dom(self, xmldoc): super(LibvirtConfigDomainCapsDiskBuses, self).parse_dom(xmldoc) if xmldoc.get('supported') == 'yes': self.supported = True self.buses = {str(node) for node in xmldoc.xpath("//enum[@name='bus']/value/text()")} class LibvirtConfigDomainCapsDevices(LibvirtConfigObject): DEVICE_PARSERS = { 'video': LibvirtConfigDomainCapsVideoModels, 'disk': LibvirtConfigDomainCapsDiskBuses, } def __init__(self, **kwargs): super().__init__(root_name='devices', **kwargs) self.devices = set() def parse_dom(self, xmldoc): super().parse_dom(xmldoc) for c in list(xmldoc): device = self.DEVICE_PARSERS.get(c.tag) if device: device = device() device.parse_dom(c) self.devices.add(device) def _get_device(self, device_type): for device in self.devices: if type(device) == self.DEVICE_PARSERS.get(device_type): return device return None @property def disk(self): return self._get_device('disk') @property def video(self): return self._get_device('video') class LibvirtConfigDomainCapsFeatures(LibvirtConfigObject): def __init__(self, **kwargs): super(LibvirtConfigDomainCapsFeatures, self).__init__( root_name="features", **kwargs) self.features = [] def parse_dom(self, xmldoc): super(LibvirtConfigDomainCapsFeatures, self).parse_dom(xmldoc) for c in xmldoc: feature = None if c.tag == "sev": feature = LibvirtConfigDomainCapsFeatureSev() if feature: feature.parse_dom(c) self.features.append(feature) # There are many other features and domain capabilities, # but we don't need to regenerate the XML (it's read-only # data provided by libvirtd), so there's no point parsing # them until we actually need their values. # For the same reason, we do not need a format_dom() method, but # it's a bug if this ever gets called and we inherited one from # the base class, so override that to watch out for accidental # calls. 
def format_dom(self): raise RuntimeError(_('BUG: tried to generate domainCapabilities XML')) class LibvirtConfigDomainCapsFeatureSev(LibvirtConfigObject): def __init__(self, **kwargs): super(LibvirtConfigDomainCapsFeatureSev, self).__init__( root_name='sev', **kwargs) self.supported = False self.cbitpos = None self.reduced_phys_bits = None def parse_dom(self, xmldoc): super(LibvirtConfigDomainCapsFeatureSev, self).parse_dom(xmldoc) if xmldoc.get('supported') == 'yes': self.supported = True for c in list(xmldoc): if c.tag == 'reducedPhysBits': self.reduced_phys_bits = int(c.text) elif c.tag == 'cbitpos': self.cbitpos = int(c.text) class LibvirtConfigCapsNUMATopology(LibvirtConfigObject): def __init__(self, **kwargs): super(LibvirtConfigCapsNUMATopology, self).__init__( root_name="topology", **kwargs) self.cells = [] def parse_dom(self, xmldoc): super(LibvirtConfigCapsNUMATopology, self).parse_dom(xmldoc) xmlcells = xmldoc[0] for xmlcell in xmlcells: cell = LibvirtConfigCapsNUMACell() cell.parse_dom(xmlcell) self.cells.append(cell) def format_dom(self): topo = super(LibvirtConfigCapsNUMATopology, self).format_dom() cells = etree.Element("cells") cells.set("num", str(len(self.cells))) topo.append(cells) for cell in self.cells: cells.append(cell.format_dom()) return topo class LibvirtConfigCapsNUMACell(LibvirtConfigObject): def __init__(self, **kwargs): super(LibvirtConfigCapsNUMACell, self).__init__(root_name="cell", **kwargs) self.id = None self.memory = 0 self.mempages = [] self.cpus = [] def parse_dom(self, xmldoc): super(LibvirtConfigCapsNUMACell, self).parse_dom(xmldoc) self.id = int(xmldoc.get("id")) for c in xmldoc: if c.tag == "memory": self.memory = int(c.text) elif c.tag == "pages": pages = LibvirtConfigCapsNUMAPages() pages.parse_dom(c) self.mempages.append(pages) elif c.tag == "cpus": for c2 in c: cpu = LibvirtConfigCapsNUMACPU() cpu.parse_dom(c2) self.cpus.append(cpu) def format_dom(self): cell = super(LibvirtConfigCapsNUMACell, self).format_dom() cell.set("id", str(self.id)) mem = etree.Element("memory") mem.set("unit", "KiB") mem.text = str(self.memory) cell.append(mem) for pages in self.mempages: cell.append(pages.format_dom()) cpus = etree.Element("cpus") cpus.set("num", str(len(self.cpus))) for cpu in self.cpus: cpus.append(cpu.format_dom()) cell.append(cpus) return cell class LibvirtConfigCapsNUMACPU(LibvirtConfigObject): def __init__(self, **kwargs): super(LibvirtConfigCapsNUMACPU, self).__init__(root_name="cpu", **kwargs) self.id = None self.socket_id = None self.core_id = None self.siblings = None def parse_dom(self, xmldoc): super(LibvirtConfigCapsNUMACPU, self).parse_dom(xmldoc) self.id = int(xmldoc.get("id")) if xmldoc.get("socket_id") is not None: self.socket_id = int(xmldoc.get("socket_id")) if xmldoc.get("core_id") is not None: self.core_id = int(xmldoc.get("core_id")) if xmldoc.get("siblings") is not None: self.siblings = hardware.parse_cpu_spec( xmldoc.get("siblings")) def format_dom(self): cpu = super(LibvirtConfigCapsNUMACPU, self).format_dom() cpu.set("id", str(self.id)) if self.socket_id is not None: cpu.set("socket_id", str(self.socket_id)) if self.core_id is not None: cpu.set("core_id", str(self.core_id)) if self.siblings is not None: cpu.set("siblings", hardware.format_cpu_spec(self.siblings)) return cpu class LibvirtConfigCapsNUMAPages(LibvirtConfigObject): def __init__(self, **kwargs): super(LibvirtConfigCapsNUMAPages, self).__init__( root_name="pages", **kwargs) self.size = None self.total = None def parse_dom(self, xmldoc): 
super(LibvirtConfigCapsNUMAPages, self).parse_dom(xmldoc) self.size = int(xmldoc.get("size")) self.total = int(xmldoc.text) def format_dom(self): pages = super(LibvirtConfigCapsNUMAPages, self).format_dom() pages.text = str(self.total) pages.set("size", str(self.size)) pages.set("unit", "KiB") return pages class LibvirtConfigCapsHost(LibvirtConfigObject): def __init__(self, **kwargs): super(LibvirtConfigCapsHost, self).__init__(root_name="host", **kwargs) self.cpu = None self.uuid = None self.topology = None def parse_dom(self, xmldoc): super(LibvirtConfigCapsHost, self).parse_dom(xmldoc) for c in xmldoc: if c.tag == "cpu": cpu = LibvirtConfigCPU() cpu.parse_dom(c) self.cpu = cpu elif c.tag == "uuid": self.uuid = c.text elif c.tag == "topology": self.topology = LibvirtConfigCapsNUMATopology() self.topology.parse_dom(c) def format_dom(self): caps = super(LibvirtConfigCapsHost, self).format_dom() if self.uuid: caps.append(self._text_node("uuid", self.uuid)) if self.cpu: caps.append(self.cpu.format_dom()) if self.topology: caps.append(self.topology.format_dom()) return caps class LibvirtConfigCapsGuest(LibvirtConfigObject): def __init__(self, **kwargs): super(LibvirtConfigCapsGuest, self).__init__(root_name="guest", **kwargs) self.arch = None self.ostype = None # Map domain types such as 'qemu' and 'kvm' to # LibvirtConfigCapsGuestDomain instances. self.domains = OrderedDict() self.default_domain = None def parse_dom(self, xmldoc): super(LibvirtConfigCapsGuest, self).parse_dom(xmldoc) for child in xmldoc: if child.tag == "os_type": self.ostype = child.text elif child.tag == "arch": self.parse_arch(child) def parse_arch(self, xmldoc): self.arch = xmldoc.get("name") # NOTE(aspiers): The data relating to each <domain> element # under <arch> (such as <emulator> and many <machine> # elements) is structured in a slightly odd way. There is one # "default" domain such as # # <domain type='qemu'/> # # which has no child elements, and all its data is provided in # sibling elements. Then others such as # # <domain type='kvm'> # # will have their <emulator> and <machine> elements as # children. So we need to handle the two cases separately. self.default_domain = LibvirtConfigCapsGuestDomain() for child in xmldoc: if child.tag == "domain": if list(child): # This domain has children, so create a new instance, # parse it, and register it in the dict of domains. 
domain = LibvirtConfigCapsGuestDomain() domain.parse_dom(child) self.domains[domain.domtype] = domain else: # This is the childless <domain/> element for the # default domain self.default_domain.parse_domain(child) self.domains[self.default_domain.domtype] = \ self.default_domain else: # Sibling element of the default domain self.default_domain.parse_child(child) def format_dom(self): caps = super(LibvirtConfigCapsGuest, self).format_dom() if self.ostype is not None: caps.append(self._text_node("os_type", self.ostype)) if self.arch: arch = self.format_arch() caps.append(arch) return caps def format_arch(self): arch = etree.Element("arch", name=self.arch) for c in self.default_domain.format_dom(): arch.append(c) arch.append(self._new_node("domain", type=self.default_domain.domtype)) for domtype, domain in self.domains.items(): if domtype == self.default_domain.domtype: # We've already added this domain at the top level continue arch.append(domain.format_dom()) return arch class LibvirtConfigCapsGuestDomain(LibvirtConfigObject): def __init__(self, **kwargs): super(LibvirtConfigCapsGuestDomain, self).__init__( root_name="domain", **kwargs) self.domtype = None # Track <emulator> values, which we need in order to be able # to call virConnectGetDomainCapabilities() - typically # something like '/usr/bin/qemu-system-i386'. self.emulator = None self.machines = {} self.aliases = {} def parse_dom(self, xmldoc): super(LibvirtConfigCapsGuestDomain, self).parse_dom(xmldoc) self.parse_domain(xmldoc) for c in xmldoc: self.parse_child(c) def parse_child(self, xmldoc): if xmldoc.tag == "emulator": self.emulator = xmldoc.text elif xmldoc.tag == "machine": self.parse_machine(xmldoc) def parse_domain(self, xmldoc): self.domtype = xmldoc.get("type") if self.domtype is None: raise exception.InvalidInput( "Didn't find domain type in %s", xmldoc) def parse_machine(self, xmldoc): if 'canonical' in xmldoc.attrib: self.aliases[xmldoc.text] = xmldoc.attrib else: self.machines[xmldoc.text] = xmldoc.attrib def format_dom(self): domain = super(LibvirtConfigCapsGuestDomain, self).format_dom() if self.domtype is not None: domain.set("type", self.domtype) if self.emulator is not None: domain.append(self._text_node("emulator", self.emulator)) for mach_type, machine in self.machines.items(): domain.append(self._text_node("machine", mach_type, **machine)) for alias, machine in self.aliases.items(): domain.append(self._text_node("machine", alias, **machine)) return domain class LibvirtConfigGuestTimer(LibvirtConfigObject): def __init__(self, **kwargs): super(LibvirtConfigGuestTimer, self).__init__(root_name="timer", **kwargs) self.name = "platform" self.track = None self.tickpolicy = None self.present = None def format_dom(self): tm = super(LibvirtConfigGuestTimer, self).format_dom() tm.set("name", self.name) if self.track is not None: tm.set("track", self.track) if self.tickpolicy is not None: tm.set("tickpolicy", self.tickpolicy) if self.present is not None: if self.present: tm.set("present", "yes") else: tm.set("present", "no") return tm class LibvirtConfigGuestClock(LibvirtConfigObject): def __init__(self, **kwargs): super(LibvirtConfigGuestClock, self).__init__(root_name="clock", **kwargs) self.offset = "utc" self.adjustment = None self.timezone = None self.timers = [] def format_dom(self): clk = super(LibvirtConfigGuestClock, self).format_dom() clk.set("offset", self.offset) if self.adjustment: clk.set("adjustment", self.adjustment) elif self.timezone: clk.set("timezone", self.timezone) for tm in self.timers: 
clk.append(tm.format_dom()) return clk def add_timer(self, tm): self.timers.append(tm) class LibvirtConfigCPUFeature(LibvirtConfigObject): def __init__(self, name=None, **kwargs): super(LibvirtConfigCPUFeature, self).__init__(root_name='feature', **kwargs) self.name = name self.policy = "require" def parse_dom(self, xmldoc): super(LibvirtConfigCPUFeature, self).parse_dom(xmldoc) self.name = xmldoc.get("name") self.policy = xmldoc.get("policy", "require") def format_dom(self): ft = super(LibvirtConfigCPUFeature, self).format_dom() ft.set("name", self.name) return ft def __eq__(self, obj): return obj.name == self.name def __ne__(self, obj): return obj.name != self.name def __hash__(self): return hash(self.name) class LibvirtConfigCPU(LibvirtConfigObject): def __init__(self, **kwargs): super(LibvirtConfigCPU, self).__init__(root_name='cpu', **kwargs) self.arch = None self.vendor = None self.model = None self.sockets = None self.cores = None self.threads = None self.features = set() def parse_dom(self, xmldoc): super(LibvirtConfigCPU, self).parse_dom(xmldoc) for c in xmldoc: if c.tag == "arch": self.arch = c.text elif c.tag == "model": self.model = c.text elif c.tag == "vendor": self.vendor = c.text elif c.tag == "topology": self.sockets = int(c.get("sockets")) self.cores = int(c.get("cores")) self.threads = int(c.get("threads")) elif c.tag == "feature": f = LibvirtConfigCPUFeature() f.parse_dom(c) if f.policy != "disable": self.add_feature(f) def format_dom(self): cpu = super(LibvirtConfigCPU, self).format_dom() if self.arch is not None: cpu.append(self._text_node("arch", self.arch)) if self.model is not None: cpu.append(self._text_node("model", self.model)) if self.vendor is not None: cpu.append(self._text_node("vendor", self.vendor)) if (self.sockets is not None and self.cores is not None and self.threads is not None): top = etree.Element("topology") top.set("sockets", str(self.sockets)) top.set("cores", str(self.cores)) top.set("threads", str(self.threads)) cpu.append(top) # sorting the features to allow more predictable tests for f in sorted(self.features, key=lambda x: x.name): if f.policy != "disable": cpu.append(f.format_dom()) return cpu def add_feature(self, feat): self.features.add(feat) class LibvirtConfigGuestCPUFeature(LibvirtConfigCPUFeature): def __init__(self, name=None, **kwargs): super(LibvirtConfigGuestCPUFeature, self).__init__(name, **kwargs) self.policy = "require" def format_dom(self): ft = super(LibvirtConfigGuestCPUFeature, self).format_dom() ft.set("policy", self.policy) return ft class LibvirtConfigGuestCPUNUMACell(LibvirtConfigObject): def __init__(self, **kwargs): super(LibvirtConfigGuestCPUNUMACell, self).__init__(root_name="cell", **kwargs) self.id = None self.cpus = None self.memory = None self.memAccess = None def parse_dom(self, xmldoc): if xmldoc.get("id") is not None: self.id = int(xmldoc.get("id")) if xmldoc.get("memory") is not None: self.memory = int(xmldoc.get("memory")) if xmldoc.get("cpus") is not None: self.cpus = hardware.parse_cpu_spec(xmldoc.get("cpus")) self.memAccess = xmldoc.get("memAccess") def format_dom(self): cell = super(LibvirtConfigGuestCPUNUMACell, self).format_dom() if self.id is not None: cell.set("id", str(self.id)) if self.cpus is not None: cell.set("cpus", hardware.format_cpu_spec(self.cpus)) if self.memory is not None: cell.set("memory", str(self.memory)) if self.memAccess is not None: cell.set("memAccess", self.memAccess) return cell class LibvirtConfigGuestCPUNUMA(LibvirtConfigObject): def __init__(self, **kwargs): 
super(LibvirtConfigGuestCPUNUMA, self).__init__(root_name="numa", **kwargs) self.cells = [] def parse_dom(self, xmldoc): super(LibvirtConfigGuestCPUNUMA, self).parse_dom(xmldoc) for child in xmldoc: if child.tag == "cell": cell = LibvirtConfigGuestCPUNUMACell() cell.parse_dom(child) self.cells.append(cell) def format_dom(self): numa = super(LibvirtConfigGuestCPUNUMA, self).format_dom() for cell in self.cells: numa.append(cell.format_dom()) return numa class LibvirtConfigGuestCPU(LibvirtConfigCPU): def __init__(self, **kwargs): super(LibvirtConfigGuestCPU, self).__init__(**kwargs) self.mode = None self.match = "exact" self.numa = None def parse_dom(self, xmldoc): super(LibvirtConfigGuestCPU, self).parse_dom(xmldoc) self.mode = xmldoc.get('mode') self.match = xmldoc.get('match') for child in xmldoc: if child.tag == "numa": numa = LibvirtConfigGuestCPUNUMA() numa.parse_dom(child) self.numa = numa def format_dom(self): cpu = super(LibvirtConfigGuestCPU, self).format_dom() if self.mode: cpu.set("mode", self.mode) cpu.set("match", self.match) if self.numa is not None: cpu.append(self.numa.format_dom()) return cpu class LibvirtConfigGuestSMBIOS(LibvirtConfigObject): def __init__(self, **kwargs): super(LibvirtConfigGuestSMBIOS, self).__init__(root_name="smbios", **kwargs) self.mode = "sysinfo" def format_dom(self): smbios = super(LibvirtConfigGuestSMBIOS, self).format_dom() smbios.set("mode", self.mode) return smbios class LibvirtConfigGuestSysinfo(LibvirtConfigObject): def __init__(self, **kwargs): super(LibvirtConfigGuestSysinfo, self).__init__(root_name="sysinfo", **kwargs) self.type = "smbios" self.bios_vendor = None self.bios_version = None self.system_manufacturer = None self.system_product = None self.system_version = None self.system_serial = None self.system_uuid = None self.system_family = None def format_dom(self): sysinfo = super(LibvirtConfigGuestSysinfo, self).format_dom() sysinfo.set("type", self.type) bios = etree.Element("bios") system = etree.Element("system") if self.bios_vendor is not None: bios.append(self._text_node("entry", self.bios_vendor, name="vendor")) if self.bios_version is not None: bios.append(self._text_node("entry", self.bios_version, name="version")) if self.system_manufacturer is not None: system.append(self._text_node("entry", self.system_manufacturer, name="manufacturer")) if self.system_product is not None: system.append(self._text_node("entry", self.system_product, name="product")) if self.system_version is not None: system.append(self._text_node("entry", self.system_version, name="version")) if self.system_serial is not None: system.append(self._text_node("entry", self.system_serial, name="serial")) if self.system_uuid is not None: system.append(self._text_node("entry", self.system_uuid, name="uuid")) if self.system_family is not None: system.append(self._text_node("entry", self.system_family, name="family")) if len(list(bios)) > 0: sysinfo.append(bios) if len(list(system)) > 0: sysinfo.append(system) return sysinfo class LibvirtConfigGuestDevice(LibvirtConfigObject): def __init__(self, **kwargs): super(LibvirtConfigGuestDevice, self).__init__(**kwargs) @property def uses_virtio(self): return False class LibvirtConfigGuestVTPM(LibvirtConfigGuestDevice): def __init__(self, vtpm_config, vtpm_secret_uuid, **kwargs): super(LibvirtConfigGuestVTPM, self).__init__(root_name="tpm", **kwargs) self.version = vtpm_config.version self.model = vtpm_config.model self.secret_uuid = vtpm_secret_uuid def format_dom(self): # <tpm model='$model'> dev = 
super(LibvirtConfigGuestVTPM, self).format_dom() dev.set("model", self.model) # <backend type='emulator' version='$version'> back = etree.Element("backend") back.set("type", "emulator") back.set("version", self.version) # <encryption secret='$secret_uuid'/> enc = etree.Element("encryption") enc.set("secret", self.secret_uuid) back.append(enc) dev.append(back) return dev class LibvirtConfigGuestDisk(LibvirtConfigGuestDevice): def __init__(self, **kwargs): super(LibvirtConfigGuestDisk, self).__init__(root_name="disk", **kwargs) self.source_type = "file" self.source_device = "disk" self.driver_name = None self.driver_format = None self.driver_cache = None self.driver_discard = None self.driver_io = None self.driver_iommu = False self.source_path = None self.source_protocol = None self.source_name = None self.source_hosts = [] self.source_ports = [] self.target_dev = None self.target_path = None self.target_bus = None self.auth_username = None self.auth_secret_type = None self.auth_secret_uuid = None self.serial = None self.disk_read_bytes_sec = None self.disk_read_iops_sec = None self.disk_write_bytes_sec = None self.disk_write_iops_sec = None self.disk_total_bytes_sec = None self.disk_total_iops_sec = None self.disk_read_bytes_sec_max = None self.disk_write_bytes_sec_max = None self.disk_total_bytes_sec_max = None self.disk_read_iops_sec_max = None self.disk_write_iops_sec_max = None self.disk_total_iops_sec_max = None self.disk_size_iops_sec = None self.logical_block_size = None self.physical_block_size = None self.readonly = False self.shareable = False self.snapshot = None self.backing_store = None self.device_addr = None self.boot_order = None self.mirror = None self.encryption = None def _format_iotune(self, dev): iotune = etree.Element("iotune") if self.disk_read_bytes_sec is not None: iotune.append(self._text_node("read_bytes_sec", self.disk_read_bytes_sec)) if self.disk_read_iops_sec is not None: iotune.append(self._text_node("read_iops_sec", self.disk_read_iops_sec)) if self.disk_write_bytes_sec is not None: iotune.append(self._text_node("write_bytes_sec", self.disk_write_bytes_sec)) if self.disk_write_iops_sec is not None: iotune.append(self._text_node("write_iops_sec", self.disk_write_iops_sec)) if self.disk_total_bytes_sec is not None: iotune.append(self._text_node("total_bytes_sec", self.disk_total_bytes_sec)) if self.disk_total_iops_sec is not None: iotune.append(self._text_node("total_iops_sec", self.disk_total_iops_sec)) if self.disk_read_bytes_sec_max is not None: iotune.append(self._text_node("read_bytes_sec_max", self.disk_read_bytes_sec_max)) if self.disk_write_bytes_sec_max is not None: iotune.append(self._text_node("write_bytes_sec_max", self.disk_write_bytes_sec_max)) if self.disk_total_bytes_sec_max is not None: iotune.append(self._text_node("total_bytes_sec_max", self.disk_total_bytes_sec_max)) if self.disk_read_iops_sec_max is not None: iotune.append(self._text_node("read_iops_sec_max", self.disk_read_iops_sec_max)) if self.disk_write_iops_sec_max is not None: iotune.append(self._text_node("write_iops_sec_max", self.disk_write_iops_sec_max)) if self.disk_total_iops_sec_max is not None: iotune.append(self._text_node("total_iops_sec_max", self.disk_total_iops_sec_max)) if self.disk_size_iops_sec is not None: iotune.append(self._text_node("size_iops_sec", self.disk_size_iops_sec)) if len(iotune) > 0: dev.append(iotune) @property def uses_virtio(self): return 'virtio' == self.target_bus def format_dom(self): dev = super(LibvirtConfigGuestDisk, self).format_dom() 
dev.set("type", self.source_type) dev.set("device", self.source_device) if any((self.driver_name, self.driver_format, self.driver_cache, self.driver_discard, self.driver_iommu)): drv = etree.Element("driver") if self.driver_name is not None: drv.set("name", self.driver_name) if self.driver_format is not None: drv.set("type", self.driver_format) if self.driver_cache is not None: drv.set("cache", self.driver_cache) if self.driver_discard is not None: drv.set("discard", self.driver_discard) if self.driver_io is not None: drv.set("io", self.driver_io) if self.driver_iommu: drv.set("iommu", "on") dev.append(drv) if self.source_type == "file": dev.append(etree.Element("source", file=self.source_path)) elif self.source_type == "block": dev.append(etree.Element("source", dev=self.source_path)) elif self.source_type == "mount": dev.append(etree.Element("source", dir=self.source_path)) elif self.source_type == "network" and self.source_protocol: source = etree.Element("source", protocol=self.source_protocol) if self.source_name is not None: source.set('name', self.source_name) hosts_info = zip(self.source_hosts, self.source_ports) for name, port in hosts_info: host = etree.Element('host', name=name) if port is not None: host.set('port', port) source.append(host) dev.append(source) if self.auth_secret_type is not None: auth = etree.Element("auth") auth.set("username", self.auth_username) auth.append(etree.Element("secret", type=self.auth_secret_type, uuid=self.auth_secret_uuid)) dev.append(auth) if self.source_type == "mount": dev.append(etree.Element("target", dir=self.target_path)) else: dev.append(etree.Element("target", dev=self.target_dev, bus=self.target_bus)) if self.serial is not None: dev.append(self._text_node("serial", self.serial)) self._format_iotune(dev) # Block size tuning if (self.logical_block_size is not None or self.physical_block_size is not None): blockio = etree.Element("blockio") if self.logical_block_size is not None: blockio.set('logical_block_size', self.logical_block_size) if self.physical_block_size is not None: blockio.set('physical_block_size', self.physical_block_size) dev.append(blockio) if self.readonly: dev.append(etree.Element("readonly")) if self.shareable: dev.append(etree.Element("shareable")) if self.boot_order: dev.append(etree.Element("boot", order=self.boot_order)) if self.device_addr: dev.append(self.device_addr.format_dom()) if self.encryption: dev.append(self.encryption.format_dom()) return dev def parse_dom(self, xmldoc): super(LibvirtConfigGuestDisk, self).parse_dom(xmldoc) self.source_type = xmldoc.get('type') self.snapshot = xmldoc.get('snapshot') for c in xmldoc: if c.tag == 'driver': self.driver_name = c.get('name') self.driver_format = c.get('type') self.driver_cache = c.get('cache') self.driver_discard = c.get('discard') self.driver_io = c.get('io') self.driver_iommu = c.get('iommu', '') == "on" elif c.tag == 'source': if self.source_type == 'file': self.source_path = c.get('file') elif self.source_type == 'block': self.source_path = c.get('dev') elif self.source_type == 'mount': self.source_path = c.get('dir') elif self.source_type == 'network': self.source_protocol = c.get('protocol') self.source_name = c.get('name') for sub in c: if sub.tag == 'host': self.source_hosts.append(sub.get('name')) self.source_ports.append(sub.get('port')) elif c.tag == 'serial': self.serial = c.text elif c.tag == 'target': if self.source_type == 'mount': self.target_path = c.get('dir') else: self.target_dev = c.get('dev') self.target_bus = c.get('bus', None) elif 
c.tag == 'backingStore': b = LibvirtConfigGuestDiskBackingStore() b.parse_dom(c) self.backing_store = b elif c.tag == 'readonly': self.readonly = True elif c.tag == 'shareable': self.shareable = True elif c.tag == 'address': obj = LibvirtConfigGuestDeviceAddress.parse_dom(c) self.device_addr = obj elif c.tag == 'boot': self.boot_order = c.get('order') elif c.tag == 'mirror': m = LibvirtConfigGuestDiskMirror() m.parse_dom(c) self.mirror = m elif c.tag == 'encryption': e = LibvirtConfigGuestDiskEncryption() e.parse_dom(c) self.encryption = e class LibvirtConfigGuestDiskBackingStore(LibvirtConfigObject): def __init__(self, **kwargs): super(LibvirtConfigGuestDiskBackingStore, self).__init__( root_name="backingStore", **kwargs) self.index = None self.source_type = None self.source_file = None self.source_protocol = None self.source_name = None self.source_hosts = [] self.source_ports = [] self.driver_name = None self.driver_format = None self.backing_store = None def parse_dom(self, xmldoc): super(LibvirtConfigGuestDiskBackingStore, self).parse_dom(xmldoc) self.source_type = xmldoc.get('type') self.index = xmldoc.get('index') for c in xmldoc: if c.tag == 'driver': self.driver_name = c.get('name') self.driver_format = c.get('type') elif c.tag == 'source': self.source_file = c.get('file') self.source_protocol = c.get('protocol') self.source_name = c.get('name') for d in c: if d.tag == 'host': self.source_hosts.append(d.get('name')) self.source_ports.append(d.get('port')) elif c.tag == 'backingStore': if len(c): self.backing_store = LibvirtConfigGuestDiskBackingStore() self.backing_store.parse_dom(c) class LibvirtConfigGuestSnapshotDisk(LibvirtConfigObject): """Disk class for handling disk information in snapshots. Similar to LibvirtConfigGuestDisk, but used to represent disk entities in <domainsnapshot> structures rather than real devices. These typically have fewer members, and different expectations for which fields are required. 
""" def __init__(self, **kwargs): super(LibvirtConfigGuestSnapshotDisk, self).__init__(root_name="disk", **kwargs) self.source_type = None self.source_device = None self.name = None self.snapshot = None self.driver_name = None self.driver_format = None self.driver_cache = None self.source_path = None self.source_protocol = None self.source_name = None self.source_hosts = [] self.source_ports = [] self.target_dev = None self.target_path = None self.target_bus = None self.auth_username = None self.auth_secret_type = None self.auth_secret_uuid = None self.serial = None def format_dom(self): dev = super(LibvirtConfigGuestSnapshotDisk, self).format_dom() if self.name: dev.attrib['name'] = self.name if self.snapshot: dev.attrib['snapshot'] = self.snapshot if self.source_type: dev.set("type", self.source_type) if self.source_device: dev.set("device", self.source_device) if (self.driver_name is not None or self.driver_format is not None or self.driver_cache is not None): drv = etree.Element("driver") if self.driver_name is not None: drv.set("name", self.driver_name) if self.driver_format is not None: drv.set("type", self.driver_format) if self.driver_cache is not None: drv.set("cache", self.driver_cache) dev.append(drv) if self.source_type == "file": dev.append(etree.Element("source", file=self.source_path)) elif self.source_type == "block": dev.append(etree.Element("source", dev=self.source_path)) elif self.source_type == "mount": dev.append(etree.Element("source", dir=self.source_path)) elif self.source_type == "network": source = etree.Element("source", protocol=self.source_protocol) if self.source_name is not None: source.set('name', self.source_name) hosts_info = zip(self.source_hosts, self.source_ports) for name, port in hosts_info: host = etree.Element('host', name=name) if port is not None: host.set('port', port) source.append(host) dev.append(source) if self.auth_secret_type is not None: auth = etree.Element("auth") auth.set("username", self.auth_username) auth.append(etree.Element("secret", type=self.auth_secret_type, uuid=self.auth_secret_uuid)) dev.append(auth) if self.source_type == "mount": dev.append(etree.Element("target", dir=self.target_path)) else: if self.target_bus and self.target_dev: dev.append(etree.Element("target", dev=self.target_dev, bus=self.target_bus)) return dev def parse_dom(self, xmldoc): super(LibvirtConfigGuestSnapshotDisk, self).parse_dom(xmldoc) self.source_type = xmldoc.get('type') self.snapshot = xmldoc.get('snapshot') for c in xmldoc: if c.tag == 'driver': self.driver_name = c.get('name') self.driver_format = c.get('type') self.driver_cache = c.get('cache') elif c.tag == 'source': if self.source_type == 'file': self.source_path = c.get('file') elif self.source_type == 'block': self.source_path = c.get('dev') elif self.source_type == 'mount': self.source_path = c.get('dir') elif self.source_type == 'network': self.source_protocol = c.get('protocol') self.source_name = c.get('name') for sub in c: if sub.tag == 'host': self.source_hosts.append(sub.get('name')) self.source_ports.append(sub.get('port')) elif c.tag == 'serial': self.serial = c.text for c in xmldoc: if c.tag == 'target': if self.source_type == 'mount': self.target_path = c.get('dir') else: self.target_dev = c.get('dev') self.target_bus = c.get('bus', None) class LibvirtConfigGuestFilesys(LibvirtConfigGuestDevice): def __init__(self, **kwargs): super(LibvirtConfigGuestFilesys, self).__init__(root_name="filesystem", **kwargs) self.source_type = "mount" self.source_dir = None self.source_file = None 
self.source_dev = None self.target_dir = "/" self.driver_type = "loop" self.driver_format = "raw" def format_dom(self): dev = super(LibvirtConfigGuestFilesys, self).format_dom() dev.set("type", self.source_type) if self.source_type == "file": dev.append(etree.Element("driver", type = self.driver_type, format = self.driver_format)) dev.append(etree.Element("source", file=self.source_file)) elif self.source_type == "block": dev.append(etree.Element("source", dev=self.source_dev)) else: dev.append(etree.Element("source", dir=self.source_dir)) dev.append(etree.Element("target", dir=self.target_dir)) return dev def parse_dom(self, xmldoc): super(LibvirtConfigGuestFilesys, self).parse_dom(xmldoc) self.source_type = xmldoc.get('type') for c in xmldoc: if c.tag == 'driver': if self.source_type == 'file': self.driver_type = c.get('type') self.driver_format = c.get('format') elif c.tag == 'source': if self.source_type == 'file': self.source_file = c.get('file') elif self.source_type == 'block': self.source_dev = c.get('dev') else: self.source_dir = c.get('dir') elif c.tag == 'target': self.target_dir = c.get('dir') class LibvirtConfigGuestDiskEncryptionSecret(LibvirtConfigObject): def __init__(self, **kwargs): super(LibvirtConfigGuestDiskEncryptionSecret, self).__init__(**kwargs) self.type = None self.uuid = None def parse_dom(self, xmldoc): self.type = xmldoc.get('type') self.uuid = xmldoc.get('uuid') def format_dom(self): obj = etree.Element("secret") obj.set("type", self.type) obj.set("uuid", self.uuid) return obj class LibvirtConfigGuestDiskEncryption(LibvirtConfigObject): """https://libvirt.org/formatstorageencryption.html """ def __init__(self, **kwargs): super(LibvirtConfigGuestDiskEncryption, self).__init__(**kwargs) self.format = None self.secret = None def parse_dom(self, xmldoc): self.format = xmldoc.get('format') for c in xmldoc: if c.tag == 'secret': m = LibvirtConfigGuestDiskEncryptionSecret() m.parse_dom(c) self.secret = m def format_dom(self): obj = etree.Element("encryption") obj.set("format", self.format) obj.append(self.secret.format_dom()) return obj class LibvirtConfigGuestDiskMirror(LibvirtConfigObject): def __init__(self, **kwargs): super(LibvirtConfigGuestDiskMirror, self).__init__(**kwargs) self.ready = None def parse_dom(self, xmldoc): self.ready = xmldoc.get('ready') class LibvirtConfigGuestIDMap(LibvirtConfigObject): def __init__(self, **kwargs): super(LibvirtConfigGuestIDMap, self).__init__(**kwargs) self.start = 0 self.target = 0 self.count = 10000 def parse_dom(self, xmldoc): self.start = int(xmldoc.get('start')) self.target = int(xmldoc.get('target')) self.count = int(xmldoc.get('count')) def format_dom(self): obj = super(LibvirtConfigGuestIDMap, self).format_dom() obj.set("start", str(self.start)) obj.set("target", str(self.target)) obj.set("count", str(self.count)) return obj class LibvirtConfigGuestUIDMap(LibvirtConfigGuestIDMap): def __init__(self, **kwargs): super(LibvirtConfigGuestUIDMap, self).__init__(root_name="uid", **kwargs) class LibvirtConfigGuestGIDMap(LibvirtConfigGuestIDMap): def __init__(self, **kwargs): super(LibvirtConfigGuestGIDMap, self).__init__(root_name="gid", **kwargs) class LibvirtConfigGuestDeviceAddress(LibvirtConfigObject): def __init__(self, type=None, **kwargs): super(LibvirtConfigGuestDeviceAddress, self).__init__( root_name='address', **kwargs) self.type = type def format_dom(self): xml = super(LibvirtConfigGuestDeviceAddress, self).format_dom() xml.set("type", self.type) return xml @staticmethod def parse_dom(xmldoc): addr_type = 
xmldoc.get('type') if addr_type == 'pci': obj = LibvirtConfigGuestDeviceAddressPCI() elif addr_type == 'drive': obj = LibvirtConfigGuestDeviceAddressDrive() else: return None obj.parse_dom(xmldoc) return obj class LibvirtConfigGuestDeviceAddressDrive(LibvirtConfigGuestDeviceAddress): def __init__(self, **kwargs): super(LibvirtConfigGuestDeviceAddressDrive, self).\ __init__(type='drive', **kwargs) self.controller = None self.bus = None self.target = None self.unit = None def format_dom(self): xml = super(LibvirtConfigGuestDeviceAddressDrive, self).format_dom() if self.controller is not None: xml.set("controller", str(self.controller)) if self.bus is not None: xml.set("bus", str(self.bus)) if self.target is not None: xml.set("target", str(self.target)) if self.unit is not None: xml.set("unit", str(self.unit)) return xml def parse_dom(self, xmldoc): self.controller = xmldoc.get('controller') self.bus = xmldoc.get('bus') self.target = xmldoc.get('target') self.unit = xmldoc.get('unit') def format_address(self): return None class LibvirtConfigGuestDeviceAddressPCI(LibvirtConfigGuestDeviceAddress): def __init__(self, **kwargs): super(LibvirtConfigGuestDeviceAddressPCI, self).\ __init__(type='pci', **kwargs) self.domain = None self.bus = None self.slot = None self.function = None def format_dom(self): xml = super(LibvirtConfigGuestDeviceAddressPCI, self).format_dom() if self.domain is not None: xml.set("domain", str(self.domain)) if self.bus is not None: xml.set("bus", str(self.bus)) if self.slot is not None: xml.set("slot", str(self.slot)) if self.function is not None: xml.set("function", str(self.function)) return xml def parse_dom(self, xmldoc): self.domain = xmldoc.get('domain') self.bus = xmldoc.get('bus') self.slot = xmldoc.get('slot') self.function = xmldoc.get('function') def format_address(self): if self.domain is not None: return pci_utils.get_pci_address(self.domain[2:], self.bus[2:], self.slot[2:], self.function[2:]) class LibvirtConfigGuestInterface(LibvirtConfigGuestDevice): def __init__(self, **kwargs): super(LibvirtConfigGuestInterface, self).__init__( root_name="interface", **kwargs) self.net_type = None self.target_dev = None self.model = None self.mac_addr = None self.script = None self.source_dev = None self.source_mode = "private" self.vporttype = None self.vportparams = [] self.filtername = None self.filterparams = [] self.driver_name = None self.driver_iommu = False self.vhostuser_mode = None self.vhostuser_path = None self.vhostuser_type = None self.vhost_queues = None self.vhost_rx_queue_size = None self.vhost_tx_queue_size = None self.vif_inbound_peak = None self.vif_inbound_burst = None self.vif_inbound_average = None self.vif_outbound_peak = None self.vif_outbound_burst = None self.vif_outbound_average = None self.vlan = None self.device_addr = None self.mtu = None def __eq__(self, other): if not isinstance(other, LibvirtConfigGuestInterface): return False # NOTE(arches) Skip checking target_dev for vhostuser # vif type; target_dev is not a valid value for vhostuser. # NOTE(gibi): For macvtap cases the domain has a target_dev # generated by libvirt. It is not set by the vif driver code # so it is not in config returned by the vif driver so we # should not match on that. 
return ( self.mac_addr == other.mac_addr and self.net_type == other.net_type and self.source_dev == other.source_dev and (self.net_type == 'vhostuser' or not self.target_dev or self.target_dev == other.target_dev) and self.vhostuser_path == other.vhostuser_path) @property def uses_virtio(self): return 'virtio' == self.model def format_dom(self): dev = super(LibvirtConfigGuestInterface, self).format_dom() dev.set("type", self.net_type) if self.net_type == "hostdev": dev.set("managed", "yes") dev.append(etree.Element("mac", address=self.mac_addr)) if self.model: dev.append(etree.Element("model", type=self.model)) drv_elem = None if (self.driver_name or self.driver_iommu or self.net_type == "vhostuser"): drv_elem = etree.Element("driver") if self.driver_name and self.net_type != "vhostuser": # For vhostuser interface we should not set the driver name. drv_elem.set("name", self.driver_name) if self.driver_iommu: drv_elem.set("iommu", "on") if drv_elem is not None: if self.vhost_queues is not None: drv_elem.set('queues', str(self.vhost_queues)) if self.vhost_rx_queue_size is not None: drv_elem.set('rx_queue_size', str(self.vhost_rx_queue_size)) if self.vhost_tx_queue_size is not None: drv_elem.set('tx_queue_size', str(self.vhost_tx_queue_size)) if (drv_elem.get('name') or drv_elem.get('queues') or drv_elem.get('rx_queue_size') or drv_elem.get('tx_queue_size') or drv_elem.get('iommu')): # Append the driver element into the dom only if name # or queues or tx/rx or iommu attributes are set. dev.append(drv_elem) if self.net_type == "ethernet": if self.script is not None: dev.append(etree.Element("script", path=self.script)) if self.mtu is not None: dev.append(etree.Element("mtu", size=str(self.mtu))) elif self.net_type == "direct": dev.append(etree.Element("source", dev=self.source_dev, mode=self.source_mode)) elif self.net_type == "hostdev": source_elem = etree.Element("source") domain, bus, slot, func = \ pci_utils.get_pci_address_fields(self.source_dev) addr_elem = etree.Element("address", type='pci') addr_elem.set("domain", "0x%s" % (domain)) addr_elem.set("bus", "0x%s" % (bus)) addr_elem.set("slot", "0x%s" % (slot)) addr_elem.set("function", "0x%s" % (func)) source_elem.append(addr_elem) dev.append(source_elem) elif self.net_type == "vhostuser": dev.append(etree.Element("source", type=self.vhostuser_type, mode=self.vhostuser_mode, path=self.vhostuser_path)) elif self.net_type == "bridge": dev.append(etree.Element("source", bridge=self.source_dev)) if self.script is not None: dev.append(etree.Element("script", path=self.script)) if self.mtu is not None: dev.append(etree.Element("mtu", size=str(self.mtu))) else: dev.append(etree.Element("source", bridge=self.source_dev)) if self.vlan and self.net_type in ("direct", "hostdev"): vlan_elem = etree.Element("vlan") tag_elem = etree.Element("tag", id=str(self.vlan)) vlan_elem.append(tag_elem) dev.append(vlan_elem) if self.target_dev is not None: dev.append(etree.Element("target", dev=self.target_dev)) if self.vporttype is not None: vport = etree.Element("virtualport", type=self.vporttype) for p in self.vportparams: param = etree.Element("parameters") param.set(p['key'], p['value']) vport.append(param) dev.append(vport) if self.filtername is not None: filter = etree.Element("filterref", filter=self.filtername) for p in self.filterparams: filter.append(etree.Element("parameter", name=p['key'], value=p['value'])) dev.append(filter) if self.vif_inbound_average or self.vif_outbound_average: bandwidth = etree.Element("bandwidth") if self.vif_inbound_average 
is not None: vif_inbound = etree.Element("inbound", average=str(self.vif_inbound_average)) if self.vif_inbound_peak is not None: vif_inbound.set("peak", str(self.vif_inbound_peak)) if self.vif_inbound_burst is not None: vif_inbound.set("burst", str(self.vif_inbound_burst)) bandwidth.append(vif_inbound) if self.vif_outbound_average is not None: vif_outbound = etree.Element("outbound", average=str(self.vif_outbound_average)) if self.vif_outbound_peak is not None: vif_outbound.set("peak", str(self.vif_outbound_peak)) if self.vif_outbound_burst is not None: vif_outbound.set("burst", str(self.vif_outbound_burst)) bandwidth.append(vif_outbound) dev.append(bandwidth) return dev def parse_dom(self, xmldoc): super(LibvirtConfigGuestInterface, self).parse_dom(xmldoc) self.net_type = xmldoc.get('type') for c in xmldoc: if c.tag == 'mac': self.mac_addr = c.get('address') elif c.tag == 'model': self.model = c.get('type') elif c.tag == 'driver': self.driver_name = c.get('name') self.driver_iommu = (c.get('iommu', '') == 'on') self.vhost_queues = c.get('queues') self.vhost_rx_queue_size = c.get('rx_queue_size') self.vhost_tx_queue_size = c.get('tx_queue_size') elif c.tag == 'source': if self.net_type == 'direct': self.source_dev = c.get('dev') self.source_mode = c.get('mode', 'private') elif self.net_type == 'vhostuser': self.vhostuser_type = c.get('type') self.vhostuser_mode = c.get('mode') self.vhostuser_path = c.get('path') elif self.net_type == 'hostdev': for sub in c: if sub.tag == 'address' and sub.get('type') == 'pci': # strip the 0x prefix on each attribute since # format_dom puts them back on - note that # LibvirtConfigGuestHostdevPCI does not do this... self.source_dev = ( pci_utils.get_pci_address( sub.get('domain')[2:], sub.get('bus')[2:], sub.get('slot')[2:], sub.get('function')[2:] ) ) else: self.source_dev = c.get('bridge') elif c.tag == 'target': self.target_dev = c.get('dev') elif c.tag == 'script': self.script = c.get('path') elif c.tag == 'vlan': # NOTE(mriedem): The vlan element can have multiple tag # sub-elements but we're currently only storing a single tag # id in the vlan attribute. for sub in c: if sub.tag == 'tag' and sub.get('id'): self.vlan = int(sub.get('id')) break elif c.tag == 'virtualport': self.vporttype = c.get('type') for sub in c: if sub.tag == 'parameters': for k, v in dict(sub.attrib).items(): self.add_vport_param(k, v) elif c.tag == 'filterref': self.filtername = c.get('filter') for sub in c: if sub.tag == 'parameter': self.add_filter_param(sub.get('name'), sub.get('value')) elif c.tag == 'bandwidth': for sub in c: # Note that only average is mandatory, burst and peak are # optional (and all are ints). 
if sub.tag == 'inbound': self.vif_inbound_average = int(sub.get('average')) if sub.get('burst'): self.vif_inbound_burst = int(sub.get('burst')) if sub.get('peak'): self.vif_inbound_peak = int(sub.get('peak')) elif sub.tag == 'outbound': self.vif_outbound_average = int(sub.get('average')) if sub.get('burst'): self.vif_outbound_burst = int(sub.get('burst')) if sub.get('peak'): self.vif_outbound_peak = int(sub.get('peak')) elif c.tag == 'address': obj = LibvirtConfigGuestDeviceAddress.parse_dom(c) self.device_addr = obj elif c.tag == 'mtu': self.mtu = int(c.get('size')) def add_filter_param(self, key, value): self.filterparams.append({'key': key, 'value': value}) def add_vport_param(self, key, value): self.vportparams.append({'key': key, 'value': value}) class LibvirtConfigGuestInput(LibvirtConfigGuestDevice): def __init__(self, **kwargs): super(LibvirtConfigGuestInput, self).__init__(root_name="input", **kwargs) self.type = "tablet" self.bus = "usb" self.driver_iommu = False def format_dom(self): dev = super(LibvirtConfigGuestInput, self).format_dom() dev.set("type", self.type) dev.set("bus", self.bus) if self.driver_iommu: dev.append(etree.Element('driver', iommu="on")) return dev class LibvirtConfigGuestGraphics(LibvirtConfigGuestDevice): def __init__(self, **kwargs): super(LibvirtConfigGuestGraphics, self).__init__(root_name="graphics", **kwargs) self.type = "vnc" self.autoport = True self.keymap = None self.listen = None def format_dom(self): dev = super(LibvirtConfigGuestGraphics, self).format_dom() dev.set("type", self.type) if self.autoport: dev.set("autoport", "yes") else: dev.set("autoport", "no") if self.keymap: dev.set("keymap", self.keymap) if self.listen: dev.set("listen", self.listen) return dev class LibvirtConfigSeclabel(LibvirtConfigObject): def __init__(self, **kwargs): super(LibvirtConfigSeclabel, self).__init__(root_name="seclabel", **kwargs) self.type = 'dynamic' self.baselabel = None def format_dom(self): seclabel = super(LibvirtConfigSeclabel, self).format_dom() seclabel.set('type', self.type) if self.baselabel: seclabel.append(self._text_node("baselabel", self.baselabel)) return seclabel class LibvirtConfigGuestVideo(LibvirtConfigGuestDevice): def __init__(self, **kwargs): super(LibvirtConfigGuestVideo, self).__init__(root_name="video", **kwargs) self.type = 'cirrus' self.vram = None self.heads = None self.driver_iommu = False @property def uses_virtio(self): return 'virtio' == self.type def format_dom(self): dev = super(LibvirtConfigGuestVideo, self).format_dom() model = etree.Element("model") model.set("type", self.type) if self.vram: model.set("vram", str(self.vram)) if self.heads: model.set("heads", str(self.heads)) dev.append(model) if self.driver_iommu: dev.append(etree.Element("driver", iommu="on")) return dev class LibvirtConfigMemoryBalloon(LibvirtConfigGuestDevice): def __init__(self, **kwargs): super(LibvirtConfigMemoryBalloon, self).__init__( root_name='memballoon', **kwargs) self.model = None self.period = None self.driver_iommu = False @property def uses_virtio(self): return 'virtio' == self.model def format_dom(self): dev = super(LibvirtConfigMemoryBalloon, self).format_dom() dev.set('model', str(self.model)) if self.period is not None: dev.append(etree.Element('stats', period=str(self.period))) if self.driver_iommu: dev.append(etree.Element('driver', iommu='on')) return dev class LibvirtConfigGuestController(LibvirtConfigGuestDevice): def __init__(self, **kwargs): super(LibvirtConfigGuestController, self).__init__(root_name="controller", **kwargs) 
self.type = None self.index = None self.model = None self.driver_iommu = False @property def uses_virtio(self): model_is_virtio = 'virtio-scsi' == self.model type_is_virtio = 'virtio-serial' == self.type return model_is_virtio or type_is_virtio def format_dom(self): controller = super(LibvirtConfigGuestController, self).format_dom() controller.set("type", self.type) if self.index is not None: controller.set("index", str(self.index)) if self.model: controller.set("model", str(self.model)) if self.driver_iommu: controller.append(etree.Element("driver", iommu="on")) return controller class LibvirtConfigGuestUSBHostController(LibvirtConfigGuestController): def __init__(self, **kwargs): super(LibvirtConfigGuestUSBHostController, self).__init__(**kwargs) self.type = 'usb' class LibvirtConfigGuestPCIeRootController(LibvirtConfigGuestController): def __init__(self, **kwargs): super(LibvirtConfigGuestPCIeRootController, self).\ __init__(**kwargs) self.type = 'pci' self.model = 'pcie-root' class LibvirtConfigGuestPCIeRootPortController(LibvirtConfigGuestController): def __init__(self, **kwargs): super(LibvirtConfigGuestPCIeRootPortController, self).\ __init__(**kwargs) self.type = 'pci' self.model = 'pcie-root-port' class LibvirtConfigGuestHostdev(LibvirtConfigGuestDevice): def __init__(self, **kwargs): super(LibvirtConfigGuestHostdev, self).\ __init__(root_name="hostdev", **kwargs) self.mode = kwargs.get('mode') self.type = kwargs.get('type') # managed attribute is only used by PCI devices but mediated devices # need to say managed=no self.managed = kwargs.get('managed', 'yes') def format_dom(self): dev = super(LibvirtConfigGuestHostdev, self).format_dom() dev.set("mode", self.mode) dev.set("type", self.type) dev.set("managed", self.managed) return dev def parse_dom(self, xmldoc): super(LibvirtConfigGuestHostdev, self).parse_dom(xmldoc) self.mode = xmldoc.get('mode') self.type = xmldoc.get('type') self.managed = xmldoc.get('managed') return list(xmldoc) class LibvirtConfigGuestHostdevPCI(LibvirtConfigGuestHostdev): def __init__(self, **kwargs): super(LibvirtConfigGuestHostdevPCI, self).\ __init__(mode='subsystem', type='pci', **kwargs) # These are returned from libvirt as hexadecimal strings with 0x prefix # even if they have a different meaningful range: domain 16 bit, # bus 8 bit, slot 5 bit, and function 3 bit # On the other hand nova generates these values without the 0x prefix self.domain = None self.bus = None self.slot = None self.function = None def __eq__(self, other): if not isinstance(other, LibvirtConfigGuestHostdevPCI): return False # NOTE(gibi): nova generates hexa string without 0x prefix but # libvirt uses that prefix when returns the config so we need to # normalize the strings before comparison return ( int(self.domain, 16) == int(other.domain, 16) and int(self.bus, 16) == int(other.bus, 16) and int(self.slot, 16) == int(other.slot, 16) and int(self.function, 16) == int(other.function, 16)) def format_dom(self): dev = super(LibvirtConfigGuestHostdevPCI, self).format_dom() address = etree.Element( "address", domain=self.domain if self.domain.startswith('0x') else '0x' + self.domain, bus=self.bus if self.bus.startswith('0x') else '0x' + self.bus, slot=self.slot if self.slot.startswith('0x') else '0x' + self.slot, function=self.function if self.function.startswith('0x') else '0x' + self.function) source = etree.Element("source") source.append(address) dev.append(source) return dev def parse_dom(self, xmldoc): childs = super(LibvirtConfigGuestHostdevPCI, self).parse_dom(xmldoc) for c 
in childs: if c.tag == "source": for sub in c: if sub.tag == 'address': self.domain = sub.get('domain') self.bus = sub.get('bus') self.slot = sub.get('slot') self.function = sub.get('function') class LibvirtConfigGuestHostdevMDEV(LibvirtConfigGuestHostdev): def __init__(self, **kwargs): super(LibvirtConfigGuestHostdevMDEV, self).__init__( mode='subsystem', type='mdev', managed='no', **kwargs) # model attribute is only supported by mediated devices self.model = kwargs.get('model', 'vfio-pci') self.uuid = None def format_dom(self): dev = super(LibvirtConfigGuestHostdevMDEV, self).format_dom() if self.model: dev.set("model", self.model) address = etree.Element("address", uuid=self.uuid) source = etree.Element("source") source.append(address) dev.append(source) return dev def parse_dom(self, xmldoc): children = super(LibvirtConfigGuestHostdevMDEV, self).parse_dom(xmldoc) if xmldoc.get('model'): self.model = xmldoc.get('model') for c in children: if c.tag == "source": for sub in c: if sub.tag == 'address': self.uuid = sub.get('uuid') return class LibvirtConfigGuestCharBase(LibvirtConfigGuestDevice): def __init__(self, **kwargs): super(LibvirtConfigGuestCharBase, self).__init__(**kwargs) self.type = "pty" self.source_path = None self.listen_port = None self.listen_host = None self.log = None def format_dom(self): dev = super(LibvirtConfigGuestCharBase, self).format_dom() dev.set("type", self.type) if self.type == "file": dev.append(etree.Element("source", path=self.source_path)) elif self.type == "unix": dev.append(etree.Element("source", mode="bind", path=self.source_path)) elif self.type == "tcp": dev.append(etree.Element("source", mode="bind", host=self.listen_host, service=str(self.listen_port))) if self.log: dev.append(self.log.format_dom()) return dev class LibvirtConfigGuestChar(LibvirtConfigGuestCharBase): def __init__(self, **kwargs): super(LibvirtConfigGuestChar, self).__init__(**kwargs) self.target_port = None self.target_type = None def format_dom(self): dev = super(LibvirtConfigGuestChar, self).format_dom() if self.target_port is not None or self.target_type is not None: target = etree.Element("target") if self.target_port is not None: target.set("port", str(self.target_port)) if self.target_type is not None: target.set("type", self.target_type) dev.append(target) return dev class LibvirtConfigGuestCharDeviceLog(LibvirtConfigObject): """Represents a sub-element to a character device.""" def __init__(self, **kwargs): super(LibvirtConfigGuestCharDeviceLog, self).__init__(root_name="log", **kwargs) self.file = None self.append = "off" def parse_dom(self, xmldoc): super(LibvirtConfigGuestCharDeviceLog, self).parse_dom(xmldoc) self.file = xmldoc.get("file") self.append = xmldoc.get("append") def format_dom(self): log = super(LibvirtConfigGuestCharDeviceLog, self).format_dom() log.set("file", self.file) log.set("append", self.append) return log class LibvirtConfigGuestSerial(LibvirtConfigGuestChar): def __init__(self, **kwargs): super(LibvirtConfigGuestSerial, self).__init__(root_name="serial", **kwargs) class LibvirtConfigGuestConsole(LibvirtConfigGuestChar): def __init__(self, **kwargs): super(LibvirtConfigGuestConsole, self).__init__(root_name="console", **kwargs) class LibvirtConfigGuestChannel(LibvirtConfigGuestCharBase): def __init__(self, **kwargs): super(LibvirtConfigGuestChannel, self).__init__(root_name="channel", **kwargs) self.target_type = "virtio" self.target_name = None def format_dom(self): dev = super(LibvirtConfigGuestChannel, self).format_dom() target = 
etree.Element("target", type=self.target_type) if self.target_name is not None: target.set("name", self.target_name) dev.append(target) return dev class LibvirtConfigGuestWatchdog(LibvirtConfigGuestDevice): def __init__(self, **kwargs): super(LibvirtConfigGuestWatchdog, self).__init__(root_name="watchdog", **kwargs) self.model = 'i6300esb' self.action = 'reset' def format_dom(self): dev = super(LibvirtConfigGuestWatchdog, self).format_dom() dev.set('model', self.model) dev.set('action', self.action) return dev class LibvirtConfigGuestCPUTuneVCPUPin(LibvirtConfigObject): def __init__(self, **kwargs): super(LibvirtConfigGuestCPUTuneVCPUPin, self).__init__( root_name="vcpupin", **kwargs) self.id = None self.cpuset = None def format_dom(self): root = super(LibvirtConfigGuestCPUTuneVCPUPin, self).format_dom() root.set("vcpu", str(self.id)) if self.cpuset is not None: root.set("cpuset", hardware.format_cpu_spec(self.cpuset)) return root class LibvirtConfigGuestCPUTuneEmulatorPin(LibvirtConfigObject): def __init__(self, **kwargs): super(LibvirtConfigGuestCPUTuneEmulatorPin, self).__init__( root_name="emulatorpin", **kwargs) self.cpuset = None def format_dom(self): root = super(LibvirtConfigGuestCPUTuneEmulatorPin, self).format_dom() if self.cpuset is not None: root.set("cpuset", hardware.format_cpu_spec(self.cpuset)) return root class LibvirtConfigGuestCPUTuneVCPUSched(LibvirtConfigObject): def __init__(self, **kwargs): super(LibvirtConfigGuestCPUTuneVCPUSched, self).__init__( root_name="vcpusched", **kwargs) self.vcpus = None self.scheduler = None self.priority = None def format_dom(self): root = super(LibvirtConfigGuestCPUTuneVCPUSched, self).format_dom() if self.vcpus is not None: root.set("vcpus", hardware.format_cpu_spec(self.vcpus)) if self.scheduler is not None: root.set("scheduler", self.scheduler) if self.priority is not None: root.set("priority", str(self.priority)) return root class LibvirtConfigGuestCPUTune(LibvirtConfigObject): def __init__(self, **kwargs): super(LibvirtConfigGuestCPUTune, self).__init__(root_name="cputune", **kwargs) self.shares = None self.quota = None self.period = None self.vcpupin = [] self.emulatorpin = None self.vcpusched = [] def format_dom(self): root = super(LibvirtConfigGuestCPUTune, self).format_dom() if self.shares is not None: root.append(self._text_node("shares", str(self.shares))) if self.quota is not None: root.append(self._text_node("quota", str(self.quota))) if self.period is not None: root.append(self._text_node("period", str(self.period))) if self.emulatorpin is not None: root.append(self.emulatorpin.format_dom()) for vcpu in self.vcpupin: root.append(vcpu.format_dom()) for sched in self.vcpusched: root.append(sched.format_dom()) return root class LibvirtConfigGuestMemoryBacking(LibvirtConfigObject): def __init__(self, **kwargs): super(LibvirtConfigGuestMemoryBacking, self).__init__( root_name="memoryBacking", **kwargs) self.hugepages = [] self.sharedpages = True self.locked = False self.filesource = False self.sharedaccess = False self.allocateimmediate = False self.discard = False def format_dom(self): root = super(LibvirtConfigGuestMemoryBacking, self).format_dom() if self.hugepages: hugepages = etree.Element("hugepages") for item in self.hugepages: hugepages.append(item.format_dom()) root.append(hugepages) if not self.sharedpages: root.append(etree.Element("nosharepages")) if self.locked: root.append(etree.Element("locked")) if self.filesource: root.append(etree.Element("source", type="file")) if self.sharedaccess: 
root.append(etree.Element("access", mode="shared")) if self.allocateimmediate: root.append(etree.Element("allocation", mode="immediate")) if self.discard: root.append(etree.Element("discard")) return root class LibvirtConfigGuestMemoryBackingPage(LibvirtConfigObject): def __init__(self, **kwargs): super(LibvirtConfigGuestMemoryBackingPage, self).__init__( root_name="page", **kwargs) self.size_kb = None self.nodeset = None def format_dom(self): page = super(LibvirtConfigGuestMemoryBackingPage, self).format_dom() page.set("size", str(self.size_kb)) page.set("nodeset", hardware.format_cpu_spec(self.nodeset)) page.set("unit", "KiB") return page class LibvirtConfigGuestMemoryTune(LibvirtConfigObject): def __init__(self, **kwargs): super(LibvirtConfigGuestMemoryTune, self).__init__( root_name="memtune", **kwargs) self.hard_limit = None self.soft_limit = None self.swap_hard_limit = None self.min_guarantee = None def format_dom(self): root = super(LibvirtConfigGuestMemoryTune, self).format_dom() if self.hard_limit is not None: root.append(self._text_node("hard_limit", str(self.hard_limit), unit="KiB")) if self.soft_limit is not None: root.append(self._text_node("soft_limit", str(self.soft_limit), unit="KiB")) if self.swap_hard_limit is not None: root.append(self._text_node("swap_hard_limit", str(self.swap_hard_limit), unit="KiB")) if self.min_guarantee is not None: root.append(self._text_node("min_guarantee", str(self.min_guarantee), unit="KiB")) return root class LibvirtConfigGuestNUMATuneMemory(LibvirtConfigObject): def __init__(self, **kwargs): super(LibvirtConfigGuestNUMATuneMemory, self).__init__( root_name="memory", **kwargs) self.mode = "strict" self.nodeset = [] def format_dom(self): root = super(LibvirtConfigGuestNUMATuneMemory, self).format_dom() root.set("mode", self.mode) root.set("nodeset", hardware.format_cpu_spec(self.nodeset)) return root class LibvirtConfigGuestNUMATuneMemNode(LibvirtConfigObject): def __init__(self, **kwargs): super(LibvirtConfigGuestNUMATuneMemNode, self).__init__( root_name="memnode", **kwargs) self.cellid = 0 self.mode = "strict" self.nodeset = [] def format_dom(self): root = super(LibvirtConfigGuestNUMATuneMemNode, self).format_dom() root.set("cellid", str(self.cellid)) root.set("mode", self.mode) root.set("nodeset", hardware.format_cpu_spec(self.nodeset)) return root class LibvirtConfigGuestNUMATune(LibvirtConfigObject): def __init__(self, **kwargs): super(LibvirtConfigGuestNUMATune, self).__init__( root_name="numatune", **kwargs) self.memory = None self.memnodes = [] def format_dom(self): root = super(LibvirtConfigGuestNUMATune, self).format_dom() if self.memory is not None: root.append(self.memory.format_dom()) for node in self.memnodes: root.append(node.format_dom()) return root class LibvirtConfigGuestFeature(LibvirtConfigObject): def __init__(self, name, **kwargs): super(LibvirtConfigGuestFeature, self).__init__(root_name=name, **kwargs) class LibvirtConfigGuestFeatureACPI(LibvirtConfigGuestFeature): def __init__(self, **kwargs): super(LibvirtConfigGuestFeatureACPI, self).__init__("acpi", **kwargs) class LibvirtConfigGuestFeatureAPIC(LibvirtConfigGuestFeature): def __init__(self, **kwargs): super(LibvirtConfigGuestFeatureAPIC, self).__init__("apic", **kwargs) class LibvirtConfigGuestFeaturePAE(LibvirtConfigGuestFeature): def __init__(self, **kwargs): super(LibvirtConfigGuestFeaturePAE, self).__init__("pae", **kwargs) class LibvirtConfigGuestFeatureKvmHidden(LibvirtConfigGuestFeature): def __init__(self, **kwargs): 
super(LibvirtConfigGuestFeatureKvmHidden, self).__init__("kvm", **kwargs) def format_dom(self): root = super(LibvirtConfigGuestFeatureKvmHidden, self).format_dom() root.append(etree.Element("hidden", state="on")) return root class LibvirtConfigGuestFeaturePMU(LibvirtConfigGuestFeature): def __init__(self, state, **kwargs): super(LibvirtConfigGuestFeaturePMU, self).__init__("pmu", **kwargs) # NOTE(sean-k-mooney): bool_from_string is needed to handle the raw # flavor exta_sepc value. bool_from_string internally checks if the # value is already a bool and returns it. As such it's safe to use # with the image metadata property too, so we call it unconditionally. self.state = strutils.bool_from_string(state) def format_dom(self): root = super(LibvirtConfigGuestFeaturePMU, self).format_dom() root.attrib['state'] = "on" if self.state else "off" return root class LibvirtConfigGuestFeatureHyperV(LibvirtConfigGuestFeature): # QEMU requires at least this value to be set MIN_SPINLOCK_RETRIES = 4095 # The spoofed vendor_id can be any alphanumeric string SPOOFED_VENDOR_ID = "1234567890ab" def __init__(self, **kwargs): super(LibvirtConfigGuestFeatureHyperV, self).__init__("hyperv", **kwargs) self.relaxed = False self.vapic = False self.spinlocks = False self.spinlock_retries = self.MIN_SPINLOCK_RETRIES self.vendorid_spoof = False self.vendorid = self.SPOOFED_VENDOR_ID def format_dom(self): root = super(LibvirtConfigGuestFeatureHyperV, self).format_dom() if self.relaxed: root.append(etree.Element("relaxed", state="on")) if self.vapic: root.append(etree.Element("vapic", state="on")) if self.spinlocks: root.append(etree.Element("spinlocks", state="on", retries=str(self.spinlock_retries))) if self.vendorid_spoof: root.append(etree.Element("vendor_id", state="on", value=self.vendorid)) return root class LibvirtConfigGuestSEVLaunchSecurity(LibvirtConfigObject): def __init__(self, **kwargs): super(LibvirtConfigGuestSEVLaunchSecurity, self).__init__( root_name='launchSecurity', **kwargs) self.cbitpos = None self.reduced_phys_bits = None def format_dom(self): root = super(LibvirtConfigGuestSEVLaunchSecurity, self).format_dom() root.set('type', 'sev') policy = etree.Element('policy') policy.text = '0x0033' # hardcoded default according to the spec root.append(policy) cbitpos = etree.Element('cbitpos') cbitpos.text = str(self.cbitpos) root.append(cbitpos) reducedPhysBits = etree.Element('reducedPhysBits') reducedPhysBits.text = str(self.reduced_phys_bits) root.append(reducedPhysBits) return root class LibvirtConfigGuest(LibvirtConfigObject): def __init__(self, **kwargs): super(LibvirtConfigGuest, self).__init__(root_name="domain", **kwargs) self.virt_type = None self.uuid = None self.name = None self.memory = 500 * units.Mi self.max_memory_size = None self.max_memory_slots = 0 self.membacking = None self.memtune = None self.numatune = None self.vcpus = 1 self.cpuset = None self.cpu = None self.cputune = None self.features = [] self.clock = None self.sysinfo = None self.os_type = None self.os_loader = None self.os_loader_type = None self.os_kernel = None self.os_initrd = None self.os_cmdline = None self.os_init_env = {} self.os_root = None self.os_init_path = None self.os_boot_dev = [] self.os_smbios = None self.os_mach_type = None self.os_bootmenu = False self.devices = [] self.metadata = [] self.idmaps = [] self.perf_events = [] self.launch_security = None def _format_basic_props(self, root): root.append(self._text_node("uuid", self.uuid)) root.append(self._text_node("name", self.name)) 
root.append(self._text_node("memory", self.memory)) if self.max_memory_size is not None: max_memory = self._text_node("maxMemory", self.max_memory_size) max_memory.set("slots", str(self.max_memory_slots)) root.append(max_memory) if self.membacking is not None: root.append(self.membacking.format_dom()) if self.memtune is not None: root.append(self.memtune.format_dom()) if self.numatune is not None: root.append(self.numatune.format_dom()) if self.cpuset is not None: vcpu = self._text_node("vcpu", self.vcpus) vcpu.set("cpuset", hardware.format_cpu_spec(self.cpuset)) root.append(vcpu) else: root.append(self._text_node("vcpu", self.vcpus)) if len(self.metadata) > 0: metadata = etree.Element("metadata") for m in self.metadata: metadata.append(m.format_dom()) root.append(metadata) def _format_os(self, root): os = etree.Element("os") type_node = self._text_node("type", self.os_type) if self.os_mach_type is not None: type_node.set("machine", self.os_mach_type) os.append(type_node) if self.os_kernel is not None: os.append(self._text_node("kernel", self.os_kernel)) if self.os_loader is not None: # Generate XML nodes for UEFI boot. if self.os_loader_type == "pflash": loader = self._text_node("loader", self.os_loader) loader.set("type", "pflash") loader.set("readonly", "yes") os.append(loader) else: os.append(self._text_node("loader", self.os_loader)) if self.os_initrd is not None: os.append(self._text_node("initrd", self.os_initrd)) if self.os_cmdline is not None: os.append(self._text_node("cmdline", self.os_cmdline)) if self.os_root is not None: os.append(self._text_node("root", self.os_root)) if self.os_init_path is not None: os.append(self._text_node("init", self.os_init_path)) for name, value in self.os_init_env.items(): initenv = self._text_node("initenv", value) initenv.set("name", name) os.append(initenv) for boot_dev in self.os_boot_dev: os.append(etree.Element("boot", dev=boot_dev)) if self.os_smbios is not None: os.append(self.os_smbios.format_dom()) if self.os_bootmenu: os.append(etree.Element("bootmenu", enable="yes")) root.append(os) def _format_features(self, root): if len(self.features) > 0: features = etree.Element("features") for feat in self.features: features.append(feat.format_dom()) root.append(features) def _format_devices(self, root): if len(self.devices) == 0: return devices = etree.Element("devices") for dev in self.devices: devices.append(dev.format_dom()) root.append(devices) def _format_idmaps(self, root): if len(self.idmaps) == 0: return idmaps = etree.Element("idmap") for idmap in self.idmaps: idmaps.append(idmap.format_dom()) root.append(idmaps) def _format_perf_events(self, root): if len(self.perf_events) == 0: return perfs = etree.Element("perf") for pe in self.perf_events: event = etree.Element("event", name=pe, enabled="yes") perfs.append(event) root.append(perfs) def _format_sev(self, root): if self.launch_security is not None: root.append(self.launch_security.format_dom()) def format_dom(self): root = super(LibvirtConfigGuest, self).format_dom() root.set("type", self.virt_type) self._format_basic_props(root) if self.sysinfo is not None: root.append(self.sysinfo.format_dom()) self._format_os(root) self._format_features(root) if self.cputune is not None: root.append(self.cputune.format_dom()) if self.clock is not None: root.append(self.clock.format_dom()) if self.cpu is not None: root.append(self.cpu.format_dom()) self._format_devices(root) self._format_idmaps(root) self._format_perf_events(root) self._format_sev(root) return root def _parse_basic_props(self, 
xmldoc): # memmbacking, memtune, numatune, metadata are skipped just because # corresponding config types do not implement parse_dom method if xmldoc.tag == 'uuid': self.uuid = xmldoc.text elif xmldoc.tag == 'name': self.name = xmldoc.text elif xmldoc.tag == 'memory': self.memory = int(xmldoc.text) elif xmldoc.tag == 'vcpu': self.vcpus = int(xmldoc.text) if xmldoc.get('cpuset') is not None: self.cpuset = hardware.parse_cpu_spec(xmldoc.get('cpuset')) def _parse_os(self, xmldoc): # smbios is skipped just because LibvirtConfigGuestSMBIOS # does not implement parse_dom method for c in xmldoc: if c.tag == 'type': self.os_type = c.text self.os_mach_type = c.get('machine') elif c.tag == 'kernel': self.os_kernel = c.text elif c.tag == 'loader': self.os_loader = c.text if c.get('type') == 'pflash': self.os_loader_type = 'pflash' elif c.tag == 'initrd': self.os_initrd = c.text elif c.tag == 'cmdline': self.os_cmdline = c.text elif c.tag == 'root': self.os_root = c.text elif c.tag == 'init': self.os_init_path = c.text elif c.tag == 'boot': self.os_boot_dev.append(c.get('dev')) elif c.tag == 'bootmenu': if c.get('enable') == 'yes': self.os_bootmenu = True elif c.tag == 'initenv': self.os_init_env[c.get('name')] = c.text def parse_dom(self, xmldoc): self.virt_type = xmldoc.get('type') # Note: This cover only for: LibvirtConfigGuestDisks # LibvirtConfigGuestFilesys # LibvirtConfigGuestHostdevPCI # LibvirtConfigGuestHostdevMDEV # LibvirtConfigGuestInterface # LibvirtConfigGuestUidMap # LibvirtConfigGuestGidMap # LibvirtConfigGuestCPU # LibvirtConfigGuestVPMEM for c in xmldoc: if c.tag == 'devices': for d in c: if d.tag == 'disk': obj = LibvirtConfigGuestDisk() obj.parse_dom(d) self.devices.append(obj) elif d.tag == 'filesystem': obj = LibvirtConfigGuestFilesys() obj.parse_dom(d) self.devices.append(obj) elif d.tag == 'hostdev' and d.get('type') == 'pci': obj = LibvirtConfigGuestHostdevPCI() obj.parse_dom(d) self.devices.append(obj) elif d.tag == 'hostdev' and d.get('type') == 'mdev': obj = LibvirtConfigGuestHostdevMDEV() obj.parse_dom(d) self.devices.append(obj) elif d.tag == 'interface': obj = LibvirtConfigGuestInterface() obj.parse_dom(d) self.devices.append(obj) elif d.tag == 'memory' and d.get('model') == 'nvdimm': obj = LibvirtConfigGuestVPMEM() obj.parse_dom(d) self.devices.append(obj) if c.tag == 'idmap': for idmap in c: obj = None if idmap.tag == 'uid': obj = LibvirtConfigGuestUIDMap() elif idmap.tag == 'gid': obj = LibvirtConfigGuestGIDMap() if obj: obj.parse_dom(idmap) self.idmaps.append(obj) elif c.tag == 'cpu': obj = LibvirtConfigGuestCPU() obj.parse_dom(c) self.cpu = obj elif c.tag == 'perf': for p in c: if p.get('enabled') and p.get('enabled') == 'yes': self.add_perf_event(p.get('name')) elif c.tag == 'os': self._parse_os(c) else: self._parse_basic_props(c) def add_device(self, dev): self.devices.append(dev) def add_perf_event(self, event): self.perf_events.append(event) def set_clock(self, clk): self.clock = clk class LibvirtConfigGuestSnapshot(LibvirtConfigObject): def __init__(self, **kwargs): super(LibvirtConfigGuestSnapshot, self).__init__( root_name="domainsnapshot", **kwargs) self.name = None self.disks = [] def format_dom(self): ss = super(LibvirtConfigGuestSnapshot, self).format_dom() if self.name: ss.append(self._text_node("name", self.name)) disks = etree.Element('disks') for disk in self.disks: disks.append(disk.format_dom()) ss.append(disks) return ss def add_disk(self, disk): self.disks.append(disk) class LibvirtConfigNodeDevice(LibvirtConfigObject): """Libvirt Node Devices 
parser.""" def __init__(self, **kwargs): super(LibvirtConfigNodeDevice, self).__init__(root_name="device", **kwargs) self.name = None self.parent = None self.pci_capability = None self.mdev_information = None def parse_dom(self, xmldoc): super(LibvirtConfigNodeDevice, self).parse_dom(xmldoc) for c in xmldoc: if c.tag == "name": self.name = c.text elif c.tag == "parent": self.parent = c.text elif c.tag == "capability" and c.get("type") in ['pci', 'net']: pcicap = LibvirtConfigNodeDevicePciCap() pcicap.parse_dom(c) self.pci_capability = pcicap elif c.tag == "capability" and c.get("type") in ['mdev']: mdev_info = LibvirtConfigNodeDeviceMdevInformation() mdev_info.parse_dom(c) self.mdev_information = mdev_info class LibvirtConfigNodeDevicePciCap(LibvirtConfigObject): """Libvirt Node Devices pci capability parser.""" def __init__(self, **kwargs): super(LibvirtConfigNodeDevicePciCap, self).__init__( root_name="capability", **kwargs) self.domain = None self.bus = None self.slot = None self.function = None self.product = None self.product_id = None self.vendor = None self.vendor_id = None self.numa_node = None self.fun_capability = [] self.mdev_capability = [] self.interface = None self.address = None self.link_state = None self.features = [] def parse_dom(self, xmldoc): super(LibvirtConfigNodeDevicePciCap, self).parse_dom(xmldoc) for c in xmldoc: if c.tag == "domain": self.domain = int(c.text) elif c.tag == "slot": self.slot = int(c.text) elif c.tag == "bus": self.bus = int(c.text) elif c.tag == "function": self.function = int(c.text) elif c.tag == "product": self.product = c.text self.product_id = int(c.get('id'), 16) elif c.tag == "vendor": self.vendor = c.text self.vendor_id = int(c.get('id'), 16) elif c.tag == "numa": self.numa_node = int(c.get('node')) elif c.tag == "interface": self.interface = c.text elif c.tag == "address": self.address = c.text elif c.tag == "link": self.link_state = c.get('state') elif c.tag == "feature": self.features.append(c.get('name')) elif c.tag == "capability" and c.get('type') in \ ('virt_functions', 'phys_function'): funcap = LibvirtConfigNodeDevicePciSubFunctionCap() funcap.parse_dom(c) self.fun_capability.append(funcap) elif c.tag == "capability" and c.get('type') in ('mdev_types',): mdevcap = LibvirtConfigNodeDeviceMdevCapableSubFunctionCap() mdevcap.parse_dom(c) self.mdev_capability.append(mdevcap) class LibvirtConfigNodeDevicePciSubFunctionCap(LibvirtConfigObject): def __init__(self, **kwargs): super(LibvirtConfigNodeDevicePciSubFunctionCap, self).__init__( root_name="capability", **kwargs) self.type = None self.device_addrs = list() # list of tuple (domain,bus,slot,function) def parse_dom(self, xmldoc): super(LibvirtConfigNodeDevicePciSubFunctionCap, self).parse_dom(xmldoc) self.type = xmldoc.get("type") for c in xmldoc: if c.tag == "address": self.device_addrs.append((int(c.get('domain'), 16), int(c.get('bus'), 16), int(c.get('slot'), 16), int(c.get('function'), 16))) class LibvirtConfigNodeDeviceMdevCapableSubFunctionCap(LibvirtConfigObject): def __init__(self, **kwargs): super(LibvirtConfigNodeDeviceMdevCapableSubFunctionCap, self).__init__( root_name="capability", **kwargs) # mdev_types is a list of dictionaries where each item looks like: # {'type': 'nvidia-11', 'name': 'GRID M60-0B', 'deviceAPI': 'vfio-pci', # 'availableInstances': 16} self.mdev_types = list() def parse_dom(self, xmldoc): super(LibvirtConfigNodeDeviceMdevCapableSubFunctionCap, self).parse_dom(xmldoc) for c in xmldoc: if c.tag == "type": mdev_type = {'type': c.get('id')} for e in c: 
mdev_type[e.tag] = (int(e.text) if e.tag == 'availableInstances' else e.text) self.mdev_types.append(mdev_type) class LibvirtConfigNodeDeviceMdevInformation(LibvirtConfigObject): def __init__(self, **kwargs): super(LibvirtConfigNodeDeviceMdevInformation, self).__init__( root_name="capability", **kwargs) self.type = None self.iommu_group = None def parse_dom(self, xmldoc): super(LibvirtConfigNodeDeviceMdevInformation, self).parse_dom(xmldoc) for c in xmldoc: if c.tag == "type": self.type = c.get('id') if c.tag == "iommuGroup": self.iommu_group = int(c.get('number')) class LibvirtConfigGuestRng(LibvirtConfigGuestDevice): def __init__(self, **kwargs): super(LibvirtConfigGuestRng, self).__init__(root_name="rng", **kwargs) self.device_model = 'virtio' self.model = 'random' self.backend = None self.rate_period = None self.rate_bytes = None self.driver_iommu = False @property def uses_virtio(self): return 'virtio' == self.device_model def format_dom(self): dev = super(LibvirtConfigGuestRng, self).format_dom() dev.set('model', self.device_model) backend = etree.Element("backend") backend.set("model", self.model) backend.text = self.backend if self.rate_period and self.rate_bytes: rate = etree.Element("rate") rate.set("period", str(self.rate_period)) rate.set("bytes", str(self.rate_bytes)) dev.append(rate) dev.append(backend) if self.driver_iommu: dev.append(etree.Element('driver', iommu="on")) return dev class LibvirtConfigGuestMetaNovaInstance(LibvirtConfigObject): def __init__(self): super(LibvirtConfigGuestMetaNovaInstance, self).__init__(root_name="instance", ns_prefix="nova", ns_uri=NOVA_NS) self.package = None self.flavor = None self.name = None self.creationTime = None self.owner = None self.roottype = None self.rootid = None def format_dom(self): meta = super(LibvirtConfigGuestMetaNovaInstance, self).format_dom() pkg = self._new_node("package") pkg.set("version", self.package) meta.append(pkg) if self.name is not None: meta.append(self._text_node("name", self.name)) if self.creationTime is not None: timestr = time.strftime("%Y-%m-%d %H:%M:%S", time.gmtime(self.creationTime)) meta.append(self._text_node("creationTime", timestr)) if self.flavor is not None: meta.append(self.flavor.format_dom()) if self.owner is not None: meta.append(self.owner.format_dom()) if self.roottype is not None and self.rootid is not None: root = self._new_node("root") root.set("type", self.roottype) root.set("uuid", str(self.rootid)) meta.append(root) return meta class LibvirtConfigGuestMetaNovaFlavor(LibvirtConfigObject): def __init__(self): super(LibvirtConfigGuestMetaNovaFlavor, self).__init__(root_name="flavor", ns_prefix="nova", ns_uri=NOVA_NS) self.name = None self.memory = None self.disk = None self.swap = None self.ephemeral = None self.vcpus = None def format_dom(self): meta = super(LibvirtConfigGuestMetaNovaFlavor, self).format_dom() meta.set("name", self.name) if self.memory is not None: meta.append(self._text_node("memory", str(self.memory))) if self.disk is not None: meta.append(self._text_node("disk", str(self.disk))) if self.swap is not None: meta.append(self._text_node("swap", str(self.swap))) if self.ephemeral is not None: meta.append(self._text_node("ephemeral", str(self.ephemeral))) if self.vcpus is not None: meta.append(self._text_node("vcpus", str(self.vcpus))) return meta class LibvirtConfigGuestMetaNovaOwner(LibvirtConfigObject): def __init__(self): super(LibvirtConfigGuestMetaNovaOwner, self).__init__(root_name="owner", ns_prefix="nova", ns_uri=NOVA_NS) self.userid = None self.username = None 
self.projectid = None self.projectname = None def format_dom(self): meta = super(LibvirtConfigGuestMetaNovaOwner, self).format_dom() if self.userid is not None and self.username is not None: user = self._text_node("user", self.username) user.set("uuid", self.userid) meta.append(user) if self.projectid is not None and self.projectname is not None: project = self._text_node("project", self.projectname) project.set("uuid", self.projectid) meta.append(project) return meta class LibvirtConfigSecret(LibvirtConfigObject): def __init__(self): super(LibvirtConfigSecret, self).__init__(root_name="secret") self.ephemeral = False self.private = False self.description = None self.uuid = None self.usage_type = None self.usage_id = None def get_yes_no_str(self, value): if value: return 'yes' return 'no' def format_dom(self): root = super(LibvirtConfigSecret, self).format_dom() root.set("ephemeral", self.get_yes_no_str(self.ephemeral)) root.set("private", self.get_yes_no_str(self.private)) if self.description is not None: root.append(self._text_node("description", str(self.description))) if self.uuid is not None: root.append(self._text_node("uuid", str(self.uuid))) usage = self._new_node("usage") usage.set("type", self.usage_type) if self.usage_type in ('ceph', 'vtpm'): usage.append(self._text_node('name', str(self.usage_id))) elif self.usage_type == 'iscsi': usage.append(self._text_node('target', str(self.usage_id))) elif self.usage_type == 'volume': usage.append(self._text_node('volume', str(self.usage_id))) root.append(usage) return root class LibvirtConfigGuestVPMEM(LibvirtConfigGuestDevice): def __init__(self, **kwargs): super(LibvirtConfigGuestVPMEM, self).__init__( root_name="memory", **kwargs) self.model = "nvdimm" self.access = "shared" self.source_path = kwargs.get("devpath", "") self.align_size = kwargs.get("align_kb", 0) self.pmem = True self.target_size = kwargs.get("size_kb", 0) self.target_node = 0 self.label_size = 2 * units.Ki def format_dom(self): memory = super(LibvirtConfigGuestVPMEM, self).format_dom() memory.set("model", self.model) memory.set("access", self.access) source = etree.Element("source") source.append(self._text_node("path", self.source_path)) source.append(self._text_node("alignsize", self.align_size)) if self.pmem is True: source.append(etree.Element("pmem")) target = etree.Element("target") target.append(self._text_node("size", self.target_size)) target.append(self._text_node("node", self.target_node)) label = etree.Element("label") label.append(self._text_node("size", self.label_size)) target.append(label) memory.append(source) memory.append(target) return memory def parse_dom(self, xmldoc): super(LibvirtConfigGuestVPMEM, self).parse_dom(xmldoc) self.model = xmldoc.get("model") self.access = xmldoc.get("access") for c in list(xmldoc): if c.tag == "source": for sub in list(c): if sub.tag == "path": self.source_path = sub.text if sub.tag == "alignsize": self.align_size = sub.text elif c.tag == "target": for sub in list(c): if sub.tag == "size": self.target_size = sub.text class LibvirtConfigGuestSound(LibvirtConfigGuestDevice): def __init__(self, **kwargs): super(LibvirtConfigGuestSound, self).__init__(root_name="sound", **kwargs) self.model = "ich6" self.codec_type = "micro" #self.address_type = "pci" #self.address_domain = "0x0000" #self.address_bus = "0x00" #self.address_slot = "0x04" #self.address_function = "0x0" def format_dom(self): dev = super(LibvirtConfigGuestSound, self).format_dom() dev.set("model", self.model) drv_codec = etree.Element("codec") 
drv_codec.set("type", self.codec_type) #drv_address = etree.Element("address") #drv_address.set("type", self.address_type) #drv_address.set("domain", self.address_domain) #drv_address.set("bus", self.address_bus) #drv_address.set("slot", self.address_slot) #drv_address.set("function", self.address_function) dev.append(drv_codec) return dev
[ [ [ 1062, 1066 ], [ 111758, 111762 ], [ 111829, 111833 ] ], [ [ 1092, 1103 ], [ 14829, 14840 ] ], [ [ 1121, 1126 ], [ 1792, 1797 ], [ 1860, 1865 ], [ 2314, 2319 ], [ 2697, 2702 ], [ 9701, 9706 ], [ 10976, 10981 ], [ 11189, 11194 ], [ 17071, 17076 ], [ 23438, 23443 ], [ 28244, 28249 ], [ 28283, 28288 ], [ 30637, 30642 ], [ 30809, 30814 ], [ 32761, 32766 ], [ 35452, 35457 ], [ 36125, 36130 ], [ 36238, 36243 ], [ 36350, 36355 ], [ 36487, 36492 ], [ 36772, 36777 ], [ 37018, 37023 ], [ 37117, 37122 ], [ 37329, 37334 ], [ 37413, 37418 ], [ 37809, 37814 ], [ 38175, 38180 ], [ 38252, 38257 ], [ 38332, 38337 ], [ 44264, 44269 ], [ 44699, 44704 ], [ 44812, 44817 ], [ 44924, 44929 ], [ 45036, 45041 ], [ 45321, 45326 ], [ 45567, 45572 ], [ 45666, 45671 ], [ 45878, 45883 ], [ 46018, 46023 ], [ 48312, 48317 ], [ 48451, 48456 ], [ 48564, 48569 ], [ 48647, 48652 ], [ 48712, 48717 ], [ 49919, 49924 ], [ 50621, 50626 ], [ 57476, 57481 ], [ 57567, 57572 ], [ 57773, 57778 ], [ 58962, 58967 ], [ 59069, 59074 ], [ 59174, 59179 ], [ 59347, 59352 ], [ 59501, 59506 ], [ 59892, 59897 ], [ 60132, 60137 ], [ 60248, 60253 ], [ 60355, 60360 ], [ 60434, 60439 ], [ 60573, 60578 ], [ 60618, 60623 ], [ 60795, 60800 ], [ 60901, 60906 ], [ 61014, 61019 ], [ 61218, 61223 ], [ 61339, 61344 ], [ 61610, 61615 ], [ 61720, 61725 ], [ 62187, 62192 ], [ 67601, 67606 ], [ 69491, 69496 ], [ 69773, 69778 ], [ 70404, 70409 ], [ 70506, 70511 ], [ 71451, 71456 ], [ 74497, 74502 ], [ 74958, 74963 ], [ 76072, 76077 ], [ 76130, 76135 ], [ 77138, 77143 ], [ 77243, 77248 ], [ 77396, 77401 ], [ 78058, 78063 ], [ 79963, 79968 ], [ 84116, 84121 ], [ 84327, 84332 ], [ 84406, 84411 ], [ 84483, 84488 ], [ 84575, 84580 ], [ 84674, 84679 ], [ 84770, 84775 ], [ 89563, 89568 ], [ 91149, 91154 ], [ 91234, 91239 ], [ 91321, 91326 ], [ 91491, 91496 ], [ 92050, 92055 ], [ 92195, 92200 ], [ 92317, 92322 ], [ 94715, 94720 ], [ 94905, 94910 ], [ 96288, 96293 ], [ 96468, 96473 ], [ 96630, 96635 ], [ 96890, 96895 ], [ 97129, 97134 ], [ 97376, 97381 ], [ 97454, 97459 ], [ 103721, 103726 ], [ 110443, 110448 ], [ 110614, 110619 ], [ 110854, 110859 ], [ 116436, 116441 ], [ 116650, 116655 ], [ 116691, 116696 ], [ 116861, 116866 ], [ 118326, 118331 ] ], [ [ 1150, 1158 ], [ 90131, 90139 ] ], [ [ 1182, 1187 ], [ 92788, 92793 ], [ 116232, 116237 ] ], [ [ 1206, 1215 ], [ 2576, 2585 ], [ 18545, 18554 ] ], [ [ 1238, 1239 ], [ 2434, 2435 ], [ 8317, 8318 ] ], [ [ 1261, 1279 ], [ 54960, 54969 ], [ 59427, 59436 ], [ 64175, 64184 ] ], [ [ 1302, 1310 ], [ 12134, 12142 ], [ 12598, 12606 ], [ 24932, 24940 ], [ 25281, 25289 ], [ 81140, 81148 ], [ 81651, 81659 ], [ 82210, 82218 ], [ 85259, 85267 ], [ 87087, 87095 ], [ 87647, 87655 ], [ 94512, 94520 ], [ 99008, 99016 ] ], [ [ 1372, 1379 ], [ 111185, 111192 ], [ 112616, 112623 ], [ 113725, 113732 ] ], [ [ 1436, 1455 ], [ 2847, 2866 ], [ 3790, 3809 ], [ 5291, 5310 ], [ 5774, 5793 ], [ 6283, 6302 ], [ 7231, 7250 ], [ 8411, 8430 ], [ 9102, 9121 ], [ 9930, 9949 ], [ 11408, 11427 ], [ 12694, 12713 ], [ 13346, 13365 ], [ 14456, 14475 ], [ 17572, 17591 ], [ 19430, 19449 ], [ 20247, 20266 ], [ 21025, 21044 ], [ 21836, 21855 ], [ 24339, 24358 ], [ 25547, 25566 ], [ 27168, 27187 ], [ 27586, 27605 ], [ 29889, 29908 ], [ 41093, 41112 ], [ 42565, 42584 ], [ 49581, 49600 ], [ 50073, 50092 ], [ 50790, 50809 ], [ 51049, 51068 ], [ 52160, 52179 ], [ 68442, 68461 ], [ 78374, 78393 ], [ 80684, 80703 ], [ 81245, 81264 ], [ 81754, 81773 ], [ 82483, 82502 ], [ 83572, 83591 ], [ 84861, 84880 ], [ 85389, 85408 ], [ 86702, 86721 ], [ 87190, 87209 ], [ 87743, 
87762 ], [ 88272, 88291 ], [ 91655, 91674 ], [ 92494, 92513 ], [ 103303, 103322 ], [ 103961, 103980 ], [ 105056, 105075 ], [ 107385, 107404 ], [ 108231, 108250 ], [ 109212, 109231 ], [ 110954, 110973 ], [ 112389, 112408 ], [ 113500, 113519 ], [ 114417, 114436 ], [ 1514, 1533 ] ], [ [ 2829, 2846 ], [ 2918, 2935 ], [ 3134, 3151 ], [ 3558, 3575 ] ], [ [ 3766, 3789 ], [ 3861, 3884 ], [ 4119, 4142 ] ], [ [ 5256, 5290 ], [ 6345, 6379 ] ], [ [ 5741, 5773 ], [ 6397, 6429 ], [ 5990, 6022 ] ], [ [ 6252, 6282 ], [ 4510, 4540 ] ], [ [ 7199, 7230 ], [ 4257, 4288 ], [ 7302, 7333 ], [ 7470, 7501 ] ], [ [ 8377, 8410 ], [ 7637, 7670 ], [ 8482, 8515 ], [ 8717, 8750 ] ], [ [ 9072, 9101 ], [ 9173, 9202 ], [ 9349, 9378 ], [ 9634, 9663 ], [ 14010, 14039 ] ], [ [ 9904, 9929 ], [ 9486, 9511 ], [ 10001, 10026 ], [ 10273, 10298 ], [ 10877, 10902 ] ], [ [ 11383, 11407 ], [ 10722, 10746 ], [ 11479, 11503 ], [ 11761, 11785 ], [ 12246, 12270 ] ], [ [ 12667, 12693 ], [ 10525, 10551 ], [ 12765, 12791 ], [ 12950, 12976 ], [ 13134, 13160 ] ], [ [ 13324, 13345 ], [ 3258, 3279 ], [ 13417, 13438 ], [ 13662, 13683 ], [ 14133, 14154 ] ], [ [ 14433, 14455 ], [ 3408, 3430 ], [ 14527, 14549 ], [ 14926, 14948 ], [ 16772, 16794 ] ], [ [ 17543, 17571 ], [ 15858, 15886 ], [ 16163, 16191 ], [ 17642, 17670 ], [ 18078, 18106 ], [ 18878, 18906 ] ], [ [ 19406, 19429 ], [ 19501, 19524 ], [ 19787, 19810 ] ], [ [ 20223, 20246 ], [ 20318, 20341 ], [ 20602, 20625 ] ], [ [ 21001, 21024 ], [ 23968, 23991 ], [ 21107, 21130 ], [ 21338, 21361 ], [ 21527, 21550 ], [ 22784, 22807 ] ], [ [ 21819, 21835 ], [ 26288, 26304 ], [ 13788, 13804 ], [ 21907, 21923 ], [ 22255, 22271 ], [ 22970, 22986 ] ], [ [ 23939, 23967 ], [ 24054, 24082 ], [ 24194, 24222 ] ], [ [ 24309, 24338 ], [ 24410, 24439 ], [ 25073, 25102 ], [ 25957, 25986 ] ], [ [ 25521, 25546 ], [ 25618, 25643 ], [ 25817, 25842 ], [ 26115, 26140 ], [ 26747, 26772 ] ], [ [ 26266, 26287 ], [ 26356, 26377 ], [ 26532, 26553 ], [ 26893, 26914 ], [ 102663, 102684 ] ], [ [ 27143, 27167 ], [ 27239, 27263 ], [ 27446, 27470 ] ], [ [ 27560, 27585 ], [ 27657, 27682 ], [ 28142, 28167 ] ], [ [ 29864, 29888 ], [ 30105, 30129 ], [ 30983, 31007 ], [ 47676, 47700 ], [ 55222, 55246 ], [ 67100, 67124 ], [ 67693, 67717 ], [ 68990, 69014 ], [ 69865, 69889 ], [ 70599, 70623 ], [ 72304, 72328 ], [ 76676, 76700 ], [ 80182, 80206 ], [ 109816, 109840 ], [ 115770, 115794 ], [ 117732, 117756 ], [ 29960, 29984 ] ], [ [ 30082, 30104 ], [ 30212, 30234 ], [ 30483, 30505 ] ], [ [ 30960, 30982 ], [ 31059, 31081 ], [ 35171, 35193 ], [ 38612, 38634 ], [ 101011, 101033 ] ], [ [ 41058, 41092 ], [ 40253, 40287 ], [ 41163, 41197 ], [ 41632, 41666 ], [ 42437, 42471 ] ], [ [ 42534, 42564 ], [ 42946, 42976 ], [ 43740, 43770 ], [ 46194, 46224 ] ], [ [ 47650, 47675 ], [ 47752, 47777 ], [ 48160, 48185 ], [ 48826, 48851 ], [ 101204, 101229 ] ], [ [ 49542, 49580 ], [ 49651, 49689 ], [ 50476, 50514 ] ], [ [ 50040, 50072 ], [ 40948, 40980 ], [ 50207, 50239 ] ], [ [ 50761, 50789 ], [ 40794, 40822 ], [ 50861, 50889 ] ], [ [ 51025, 51048 ], [ 51691, 51714 ], [ 51922, 51945 ], [ 51120, 51143 ], [ 51465, 51488 ] ], [ [ 51666, 51690 ], [ 51766, 51790 ], [ 102357, 102381 ] ], [ [ 51897, 51921 ], [ 51997, 52021 ], [ 102459, 102483 ] ], [ [ 52128, 52159 ], [ 52906, 52937 ], [ 53946, 53977 ], [ 40571, 40602 ], [ 52241, 52272 ], [ 52405, 52436 ], [ 66691, 66722 ] ], [ [ 52869, 52905 ], [ 52735, 52771 ], [ 52988, 53024 ], [ 53237, 53273 ] ], [ [ 53911, 53945 ], [ 52645, 52679 ], [ 54028, 54062 ], [ 54271, 54305 ] ], [ [ 55194, 55221 ], [ 55298, 55325 ], [ 56394, 
56421 ], [ 57292, 57319 ], [ 62679, 62706 ], [ 101847, 101874 ] ], [ [ 67076, 67099 ], [ 67176, 67199 ], [ 67435, 67458 ] ], [ [ 67666, 67692 ], [ 67769, 67795 ], [ 68058, 68084 ] ], [ [ 68420, 68441 ], [ 68513, 68534 ], [ 68749, 68770 ] ], [ [ 68966, 68989 ], [ 69066, 69089 ], [ 69430, 69453 ] ], [ [ 69838, 69864 ], [ 69940, 69966 ], [ 70256, 70282 ] ], [ [ 70570, 70598 ], [ 71559, 71587 ], [ 71772, 71800 ], [ 72041, 72069 ], [ 70675, 70703 ], [ 71133, 71161 ] ], [ [ 71523, 71558 ], [ 71639, 71674 ] ], [ [ 71735, 71771 ], [ 71852, 71888 ] ], [ [ 72000, 72040 ], [ 72121, 72161 ] ], [ [ 72278, 72303 ], [ 73220, 73245 ], [ 75560, 75585 ], [ 72379, 72404 ], [ 72757, 72782 ], [ 72981, 73006 ] ], [ [ 73191, 73219 ], [ 73296, 73324 ], [ 73875, 73903 ], [ 74429, 74457 ], [ 75116, 75144 ], [ 101424, 101452 ] ], [ [ 75530, 75559 ], [ 75636, 75665 ], [ 75939, 75968 ], [ 76290, 76319 ], [ 101648, 101677 ] ], [ [ 76649, 76675 ], [ 77681, 77707 ], [ 79576, 79602 ], [ 76752, 76778 ], [ 76999, 77025 ] ], [ [ 77658, 77680 ], [ 79105, 79127 ], [ 79339, 79361 ], [ 77759, 77781 ], [ 77920, 77942 ] ], [ [ 78342, 78373 ], [ 78503, 78534 ], [ 78741, 78772 ], [ 78927, 78958 ] ], [ [ 79080, 79104 ], [ 79179, 79203 ] ], [ [ 79313, 79338 ], [ 79413, 79438 ] ], [ [ 79550, 79575 ], [ 79654, 79679 ], [ 79899, 79924 ] ], [ [ 80155, 80181 ], [ 80257, 80283 ], [ 80499, 80525 ] ], [ [ 80651, 80683 ], [ 80755, 80787 ], [ 80959, 80991 ] ], [ [ 81208, 81244 ], [ 81316, 81352 ], [ 81505, 81541 ] ], [ [ 81719, 81753 ], [ 81825, 81859 ], [ 82068, 82102 ] ], [ [ 82457, 82482 ], [ 82554, 82579 ], [ 82897, 82922 ] ], [ [ 83540, 83571 ], [ 83643, 83674 ], [ 84012, 84043 ] ], [ [ 84825, 84860 ], [ 84932, 84967 ], [ 85130, 85165 ] ], [ [ 85360, 85388 ], [ 85460, 85488 ], [ 85730, 85758 ] ], [ [ 86669, 86701 ], [ 86773, 86805 ], [ 86969, 87001 ] ], [ [ 87156, 87189 ], [ 87261, 87294 ], [ 87483, 87516 ] ], [ [ 87716, 87742 ], [ 87814, 87840 ], [ 88005, 88031 ] ], [ [ 88246, 88271 ], [ 88511, 88536 ], [ 88750, 88775 ], [ 88988, 89013 ], [ 89229, 89254 ], [ 89658, 89683 ], [ 90382, 90407 ], [ 88349, 88374 ] ], [ [ 88481, 88510 ], [ 88588, 88617 ] ], [ [ 88720, 88749 ], [ 88827, 88856 ] ], [ [ 88959, 88987 ], [ 89065, 89093 ] ], [ [ 89194, 89228 ], [ 89306, 89340 ], [ 89487, 89521 ] ], [ [ 89629, 89657 ], [ 89742, 89770 ], [ 90212, 90240 ] ], [ [ 90350, 90381 ], [ 90640, 90671 ], [ 91047, 91078 ] ], [ [ 91619, 91654 ], [ 91726, 91761 ], [ 91944, 91979 ] ], [ [ 92475, 92493 ], [ 92565, 92583 ], [ 97746, 97764 ] ], [ [ 103276, 103302 ], [ 103374, 103400 ], [ 103576, 103602 ] ], [ [ 103937, 103960 ], [ 104071, 104094 ], [ 104361, 104384 ] ], [ [ 105026, 105055 ], [ 104677, 104706 ], [ 105181, 105210 ], [ 105756, 105785 ] ], [ [ 107344, 107384 ], [ 106962, 107002 ], [ 107455, 107495 ], [ 107739, 107779 ] ], [ [ 108182, 108230 ], [ 107195, 107243 ], [ 108301, 108349 ], [ 108714, 108762 ] ], [ [ 109173, 109211 ], [ 104888, 104926 ], [ 109282, 109320 ], [ 109517, 109555 ] ], [ [ 109794, 109815 ], [ 109892, 109913 ], [ 110338, 110359 ] ], [ [ 110919, 110953 ], [ 111015, 111049 ], [ 111438, 111472 ] ], [ [ 112356, 112388 ], [ 112450, 112482 ], [ 112832, 112864 ] ], [ [ 113468, 113499 ], [ 113561, 113592 ], [ 113901, 113932 ] ], [ [ 114397, 114416 ], [ 114478, 114497 ], [ 114874, 114893 ] ], [ [ 115746, 115769 ], [ 102072, 102095 ], [ 115845, 115868 ], [ 116291, 116314 ], [ 117108, 117131 ] ], [ [ 117708, 117731 ], [ 117808, 117831 ], [ 118225, 118248 ] ] ]
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
#  king_phisher/server/plugins.py
#
#  Redistribution and use in source and binary forms, with or without
#  modification, are permitted provided that the following conditions are
#  met:
#
#  * Redistributions of source code must retain the above copyright
#    notice, this list of conditions and the following disclaimer.
#  * Redistributions in binary form must reproduce the above
#    copyright notice, this list of conditions and the following disclaimer
#    in the documentation and/or other materials provided with the
#    distribution.
#  * Neither the name of the project nor the names of its
#    contributors may be used to endorse or promote products derived from
#    this software without specific prior written permission.
#
#  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
#  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
#  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
#  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
#  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
#  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
#  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
#  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
#  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
#  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
#  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#

import os

from king_phisher import errors
from king_phisher import find
from king_phisher import plugins

class ServerPlugin(plugins.PluginBase):
	"""
	The base object to be inherited by plugins that are loaded into the King
	Phisher server. This provides a convenient interface for interacting with
	the runtime.
	"""
	_logging_prefix = 'KingPhisher.Plugins.Server.'
	def __init__(self, root_config):
		self.root_config = root_config
		"""A reference to the main server instance :py:attr:`~king_phisher.server.server.KingPhisherServer.config`."""
		self.server = None
		"""A reference to the :py:class:`~king_phisher.server.server.KingPhisherServer` instance. Only available if the instance has been created."""
		super(ServerPlugin, self).__init__()
		for option in self.options:
			if self.config[option.name] is None:
				raise errors.KingPhisherPluginError(self.name, 'missing required option: ' + option.name)

	@property
	def config(self):
		"""
		A dictionary that can be used by this plugin to access its
		configuration. Any changes to this configuration will be lost when the
		server restarts.
		"""
		config = self.root_config.get('server.plugins').get(self.name)
		if config is None:
			config = {}
			self.root_config.get('server.plugins')[self.name] = config
		return config

class ServerPluginManager(plugins.PluginManagerBase):
	"""
	The manager for plugins loaded into the King Phisher server application.
	"""
	_plugin_klass = ServerPlugin
	def __init__(self, config):
		self.config = config
		path = self._get_path()
		self._server = None
		super(ServerPluginManager, self).__init__(path, (config,))
		for plugin in config.get_if_exists('server.plugins', {}).keys():
			# load the plugin
			try:
				self.load(plugin)
			except Exception:
				self.logger.critical('failed to load plugin: ' + plugin, exc_info=True)
				raise errors.KingPhisherPluginError(plugin, 'failed to load')
			# check compatibility
			klass = self[plugin]
			for req_type, req_value, req_met in klass.compatibility:
				req_type = req_type.lower()
				if req_met:
					self.logger.debug("plugin {0} requirement {1} ({2}) met".format(plugin, req_type, req_value))
					continue
				self.logger.warning("plugin {0} unmet requirement {1} ({2})".format(plugin, req_type, req_value))
				raise errors.KingPhisherPluginError(plugin, 'failed to meet requirement: ' + req_type)
			# enable the plugin
			try:
				self.enable(plugin)
			except errors.KingPhisherPluginError as error:
				raise error
			except Exception:
				self.logger.critical('failed to enable plugin: ' + plugin, exc_info=True)
				raise errors.KingPhisherPluginError(plugin, 'failed to enable')

	def _get_path(self):
		path = [find.find_data_directory('plugins')]
		extra_dirs = self.config.get_if_exists('server.plugin_directories', [])
		if isinstance(extra_dirs, str):
			extra_dirs = [extra_dirs]
		elif not isinstance(extra_dirs, list):
			raise errors.KingPhisherInputValidationError('configuration setting server.plugin_directories must be a list')
		for directory in extra_dirs:
			if not os.path.isdir(directory):
				continue
			path.append(directory)
		return path

	@property
	def server(self):
		return self._server

	@server.setter
	def server(self, value):
		self._server = value
		for _, plugin in self:
			plugin.server = value
[ [ [ 1590, 1592 ], [ 4641, 4643 ] ], [ [ 1619, 1625 ], [ 2416, 2422 ], [ 3431, 3437 ], [ 3869, 3875 ], [ 4015, 4021 ], [ 4180, 4186 ], [ 4495, 4501 ] ], [ [ 1651, 1655 ], [ 4271, 4275 ] ], [ [ 1681, 1688 ], [ 1709, 1716 ], [ 2903, 2910 ] ], [ [ 1696, 1708 ], [ 3032, 3044 ], [ 2305, 2317 ] ], [ [ 2883, 2902 ], [ 3153, 3172 ] ] ]
import click import sys from web3 import Web3 from plasma.client.client import Client from plasma.utils import utils @click.command() @click.option('--token_address', help="The ethereum address of the pdex token smart contract", required=True) @click.option('--root_chain_address', help="The ethereum address of the root chain smart contract", required=True) def main(token_address, root_chain_address): client = Client(root_chain_address) maker_address = '0x0af467F2f6c20e3543B8a2a453e70DF034714aEB' make_order_hex = client.get_makeorder_txn(maker_address, token_address, Web3.toWei(10, 'ether'), Web3.toWei(1, 'ether')) if make_order_hex == None: print("No valid utxos to create make order txn") sys.exit(0) make_order_hash = utils.hashPersonalMessage(make_order_hex) signature = utils.sign(make_order_hash, bytes(bytearray.fromhex('46155f862a2249f0ee6d69122ead4ec56cf12a71049a3105a90b9708d7103f77'))) client.submit_signed_makeorder_txn(maker_address, token_address, Web3.toWei(10, 'ether'), Web3.toWei(1, 'ether'), make_order_hex, signature.hex()) if __name__ == '__main__': main()
[ [ [ 7, 12 ], [ 120, 125 ], [ 137, 142 ], [ 247, 252 ] ], [ [ 20, 23 ], [ 737, 740 ] ], [ [ 41, 45 ], [ 592, 596 ], [ 617, 621 ], [ 1029, 1033 ], [ 1054, 1058 ] ], [ [ 79, 85 ], [ 419, 425 ] ], [ [ 111, 116 ], [ 780, 785 ], [ 838, 843 ] ], [ [ 365, 369 ], [ 1148, 1152 ] ] ]
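A quick way to exercise the command above without a shell is click's test runner. The sketch below assumes the script is saved as make_order.py (hypothetical module name), that a Plasma child-chain node is reachable from the Client, and uses placeholder zero addresses where real contract addresses would go.

from click.testing import CliRunner
from make_order import main  # hypothetical module name for the script above

runner = CliRunner()
result = runner.invoke(main, [
    "--token_address", "0x" + "00" * 20,       # placeholder pdex token contract address
    "--root_chain_address", "0x" + "00" * 20,  # placeholder root chain contract address
])
print(result.exit_code, result.output)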
from django.db import models from django.utils.translation import ugettext_lazy as _ from .feedback import Feedback class SearchResultFeedback(Feedback): """ Database model representing feedback about search results (e.g. empty results). """ search_query = models.CharField(max_length=1000, verbose_name=_("search term")) @property def object_name(self): """ This property returns the name of the object this feedback comments on. :return: The name of the object this feedback refers to :rtype: str """ return _("Search results for {}").format(self.search_query) @property def object_url(self): """ This property returns the url to the object this feedback comments on. :return: The url to the referred object :rtype: str """ return "" @property def related_feedback(self): """ This property returns all feedback entries which relate to the same object and have the same is_technical value. :return: The queryset of related feedback :rtype: ~django.db.models.query.QuerySet [ ~integreat_cms.cms.models.feedback.search_result_feedback.SearchResultFeedback ] """ return SearchResultFeedback.objects.filter( region=self.region, language=self.language, search_query=self.search_query, is_technical=self.is_technical, ) class Meta: #: The verbose name of the model verbose_name = _("search result feedback") #: The plural verbose name of the model verbose_name_plural = _("search result feedback") #: The default permissions for this model default_permissions = ()
[ [ [ 22, 28 ], [ 277, 283 ] ], [ [ 66, 84 ], [ 324, 325 ], [ 1546, 1547 ], [ 1652, 1653 ], [ 588, 589 ] ], [ [ 108, 116 ], [ 146, 154 ] ], [ [ 125, 145 ], [ 1262, 1282 ] ] ]
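For orientation, a brief sketch of how the model above is typically queried; it assumes the region, language and is_technical fields inherited from the Feedback base class (not shown here) and at least one stored entry.

feedback = SearchResultFeedback.objects.filter(search_query="castle").first()
if feedback:
    print(feedback.object_name)  # "Search results for castle"
    # Entries for the same query, region, language and technical flag
    for related in feedback.related_feedback:
        print(related.search_query, related.is_technical)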
#!/usr/bin/env python
# coding: utf-8

import codecs
import sys
import sklearn as sk
import pandas as pd
import numpy as np
import math

from sklearn import preprocessing
from sklearn.decomposition import PCA

from src.pca.algoritmo_QR import eigenvectores_eigenvalores_QR_vf
from src.pca.metodo_potencia_deflation import power_iteration
from src.pca.metodo_potencia_deflation import power_deflation


def PCA_from_sklearn(X):
    """
    componentes_principales(X): function that returns the principal components.

    Parameters
    ----------
    n_components: number of components.
    svd_solver: str {'auto', 'full', 'arpack', 'randomized'}
        'full' is chosen, which means the full SVD is computed by calling the
        standard LAPACK solver through scipy.linalg.svd and the components are
        selected by postprocessing.

    Attributes
    ----------
    varianza_explicada: percentage of variance explained by each component.
    valores_singulares: singular values corresponding to each component.
    pca.components_: principal axes representing the directions of maximum
        variance in the data.
    eigenvalues: the eigenvalues obtained from the covariance matrix.

    Method
    ------
    fit_transform: fits the model to the data and applies the dimensionality
        reduction to the data.
    """
    X = pd.DataFrame(X)
    n_components = len(X.columns)
    pca_1 = PCA(n_components, svd_solver='full')
    componentesprincipales_1 = pca_1.fit_transform(X)
    pca_1.components_
    var_exp = pca_1.explained_variance_ratio_

    # The number of components is chosen from the cumulative explained
    # variance of the components, which must reach 80%.
    var_acumulada = var_exp.cumsum()
    conteo = (var_acumulada) < 0.8
    n_componentes = conteo.sum() + 1

    pca = PCA(n_componentes, svd_solver='full')
    componentesprincipales = pca.fit_transform(X)
    pca.components_
    varianza_explicada = pca.explained_variance_ratio_
    eigenvalues = pca.explained_variance_
    val_sing = pca.singular_values_

    return pca, varianza_explicada, componentesprincipales, val_sing, pca.components_, eigenvalues


def PCA_from_SVD(A):
    """
    Function for PCA based on numpy's SVD.

    params:
        A                   data matrix
        num_componentes     desired number of components

    return:
        valores_singulares  singular values of the SVD decomposition
        componentes         coefficients used to compute the principal components
        Z                   transformed data (principal components)
        varianza_explicada  variance explained by each principal component
    """
    # Center the data
    A = np.array(A)  # convert the data to a numpy array in case it comes from a DataFrame
    A_centered = A - A.mean(axis=0)

    # Compute the SVD
    U, S, Vt = np.linalg.svd(A_centered, full_matrices=False)

    # Singular values
    valores_singulares = S

    # Components (coefficients)
    componentes = ((Vt))

    # Transformed data (principal components)
    Z = A_centered@np.transpose(Vt)

    # Explained variance
    varianza_explicada = S**2/np.sum(S**2)

    # Automatically choose the number of components from the explained variance
    # Threshold of 80%
    n = A.shape[1]  # number of columns
    varianza_acumulada = varianza_explicada.cumsum()
    conteo = (varianza_acumulada) < 0.8
    num_componentes = conteo.sum() + 1

    # return 4 objects
    return valores_singulares[:num_componentes], componentes[:num_componentes], Z[:,:num_componentes], varianza_explicada[:num_componentes]


def PCA_from_SVD_jacobi(A):
    """
    Function for PCA based on the SVD.

    params:
        A                   data matrix
        num_componentes     desired number of components

    return:
        valores_singulares  singular values of the SVD decomposition
        componentes         coefficients used to compute the principal components
        Z                   transformed data (principal components)
        varianza_explicada  variance explained by each principal component
    """
    # Center the data
    A = np.array(A)  # convert the data to a numpy array in case it comes from a DataFrame
    A_centered = A - A.mean(axis=0)

    # Modify this line of code to call the function written by the team
    # Compute the SVD
    U, S, Vt = svd_jacobi_aprox(A_centered, 1e-12, 500)

    # Singular values
    valores_singulares = S

    # Components (coefficients)
    componentes = ((Vt))

    # Transformed data (principal components)
    Z = A_centered@np.transpose(Vt)

    # Explained variance
    varianza_explicada = S**2/np.sum(S**2)

    # Automatically choose the number of components from the explained variance
    # Threshold of 80%
    n = A.shape[1]  # number of columns
    varianza_acumulada = varianza_explicada.cumsum()
    conteo = (varianza_acumulada) < 0.8
    num_componentes = conteo.sum() + 1

    # return 4 objects
    return valores_singulares[:(num_componentes)], componentes[:(num_componentes)], Z[:,:(num_componentes)], varianza_explicada[:(num_componentes)]


def PCA_from_QR_vf(data, niter=450):
    """
    Function for PCA based on the eigenvectors.

    params:
        data:   data matrix
        niter:  maximum number of iterations

    return:
        componentes         coefficients used to compute the principal components
                            (eigenvectors of the covariance matrix)
        Z                   transformed data (principal components)
        varianza_explicada  variance explained by each principal component

    Depends on the function: eigenvectores_QR
    """
    # convert to an array
    A = np.array(data)

    # Center the data
    mean_vec = np.mean(A, axis=0)
    datos_centrados = (A - mean_vec)

    # Covariance matrix
    #C = (datos_centrados.T@datos_centrados)/(datos_centrados.shape[0]-1)
    C = (A - mean_vec).T.dot((A - mean_vec)) / (A.shape[0]-1)

    # Run the QR algorithm
    E, Q = eigenvectores_eigenvalores_QR_vf(C, niter)

    # Components (coefficients)
    componentes = Q.T

    # Transformed data (principal components)
    # Filtering used to fail here because Z was not recognized as a numpy array
    Z = datos_centrados@Q

    # Explained variance
    varianza_explicada = E/np.sum(E)

    # Automatically choose the number of components from the explained variance
    # Threshold of 80%
    n = data.shape[1]  # number of columns
    varianza_acumulada = varianza_explicada.cumsum()
    conteo = (varianza_acumulada) < 0.8
    num_componentes = conteo.sum() + 1

    # return 4 objects
    return E[:num_componentes], componentes[:num_componentes], Z[:,:num_componentes], varianza_explicada[:num_componentes]  #, varianza_acumulada, num_componentes


def PCA_from_potencia(X):
    """
    Function that computes PCA using the power method and Hotelling deflation.

    params:
        A: data matrix

    return:
        eigenvalues     numpy array with the eigenvalues of A
        eigenvectors    numpy array with the corresponding eigenvectors of A
    """
    prop = 0  # proportion of explained variance
    comp = 1
    cur_var = 0
    comp_vecs = np.zeros([X.shape[1], X.shape[1]])

    # convert to an array
    A = np.array(X)

    # Center the data
    mean_vec = np.mean(A, axis=0)
    datos_centrados = (A - mean_vec)

    # Compute the covariance matrix
    cov = np.dot(X.T, X)/X.shape[0]

    # Apply the power method
    evalues_pow, evectors_pow = power_deflation(cov, 2000)

    # Explained variance
    varianza_explicada = evalues_pow/np.sum(evalues_pow)

    # Transformed data (principal components)
    Z = datos_centrados@evectors_pow

    # Automatically choose the number of components from the explained variance
    # Threshold of 80%
    n = X.shape[1]  # number of columns
    varianza_acumulada = varianza_explicada.cumsum()
    conteo = (varianza_acumulada) < 0.8
    num_componentes = conteo.sum() + 1

    return evalues_pow[:num_componentes], evectors_pow.T[:num_componentes], Z[:,:num_componentes], varianza_explicada[:num_componentes]
[ [ [ 46, 52 ] ], [ [ 60, 63 ] ], [ [ 72, 85 ] ], [ [ 93, 105 ], [ 1392, 1394 ] ], [ [ 113, 124 ], [ 2724, 2726 ], [ 2880, 2882 ], [ 3135, 3137 ], [ 3215, 3217 ], [ 4197, 4199 ], [ 4687, 4689 ], [ 4767, 4769 ], [ 5842, 5844 ], [ 5901, 5903 ], [ 6504, 6506 ], [ 7435, 7437 ], [ 7507, 7509 ], [ 7563, 7565 ], [ 7675, 7677 ], [ 7874, 7876 ] ], [ [ 133, 137 ] ], [ [ 159, 172 ] ], [ [ 207, 210 ], [ 1454, 1457 ], [ 1869, 1872 ] ], [ [ 245, 277 ], [ 6166, 6198 ] ], [ [ 324, 339 ] ], [ [ 386, 401 ], [ 7778, 7793 ] ], [ [ 408, 424 ] ], [ [ 2220, 2232 ] ], [ [ 3696, 3715 ] ], [ [ 5260, 5274 ] ], [ [ 7006, 7023 ] ] ]
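A minimal usage sketch of the SVD-based variant above on synthetic data; the import path is hypothetical and depends on where the module lives in the repository.

import numpy as np
# from src.pca.funciones_pca import PCA_from_SVD  # hypothetical import path

X = np.random.default_rng(0).normal(size=(100, 5))
valores_singulares, componentes, Z, varianza_explicada = PCA_from_SVD(X)
print(Z.shape)            # (100, k), with k chosen from the 80% variance threshold
print(varianza_explicada)  # variance explained by each retained component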
from tkinter import* import random import time root = Tk() root.geometry("1600x700+0+0") root.title("Restaurant Management System") Tops = Frame(root,bg="white",width = 1600,height=50,relief=SUNKEN) Tops.pack(side=TOP) f1 = Frame(root,width = 900,height=700,relief=SUNKEN) f1.pack(side=LEFT) f2 = Frame(root ,width = 400,height=700,relief=SUNKEN) f2.pack(side=RIGHT) #------------------TIME-------------- localtime=time.asctime(time.localtime(time.time())) #-----------------INFO TOP------------ lblinfo = Label(Tops, font=( 'aria' ,30, 'bold' ),text="Restaurant Management System",fg="steel blue",bd=10,anchor='w') lblinfo.grid(row=0,column=0) lblinfo = Label(Tops, font=( 'aria' ,20, ),text=localtime,fg="steel blue",anchor=W) lblinfo.grid(row=1,column=0) #---------------Calculator------------------ text_Input=StringVar() operator ="" txtdisplay = Entry(f2,font=('ariel' ,20,'bold'), textvariable=text_Input , bd=5 ,insertwidth=7 ,bg="white",justify='right') txtdisplay.grid(columnspan=4) def btnclick(numbers): global operator operator=operator + str(numbers) text_Input.set(operator) def clrdisplay(): global operator operator="" text_Input.set("") def eqals(): global operator sumup=str(eval(operator)) text_Input.set(sumup) operator = "" def Ref(): x=random.randint(12980, 50876) randomRef = str(x) rand.set(randomRef) cof =float(Fries.get()) colfries= float(Largefries.get()) cob= float(Burger.get()) cofi= float(Filet.get()) cochee= float(Cheese_burger.get()) codr= float(Drinks.get()) costoffries = cof*25 costoflargefries = colfries*40 costofburger = cob*35 costoffilet = cofi*50 costofcheeseburger = cochee*50 costofdrinks = codr*35 costofmeal = "Rp.",str('%.2f'% (costoffries + costoflargefries + costofburger + costoffilet + costofcheeseburger + costofdrinks)) PayTax=((costoffries + costoflargefries + costofburger + costoffilet + costofcheeseburger + costofdrinks)*0.33) Totalcost=(costoffries + costoflargefries + costofburger + costoffilet + costofcheeseburger + costofdrinks) Ser_Charge=((costoffries + costoflargefries + costofburger + costoffilet + costofcheeseburger + costofdrinks)/99) Service="Rp.",str('%.2f'% Ser_Charge) OverAllCost="Rp.",str( PayTax + Totalcost + Ser_Charge) PaidTax="Rp.",str('%.2f'% PayTax) Service_Charge.set(Service) cost.set(costofmeal) Tax.set(PaidTax) Subtotal.set(costofmeal) Total.set(OverAllCost) def qexit(): root.destroy() def reset(): rand.set("") Fries.set("") Largefries.set("") Burger.set("") Filet.set("") Subtotal.set("") Total.set("") Service_Charge.set("") Drinks.set("") Tax.set("") cost.set("") Cheese_burger.set("") btn7=Button(f2,padx=16,pady=16,bd=4, fg="black", font=('ariel', 20 ,'bold'),text="7",bg="powder blue", command=lambda: btnclick(7) ) btn7.grid(row=2,column=0) btn8=Button(f2,padx=16,pady=16,bd=4, fg="black", font=('ariel', 20 ,'bold'),text="8",bg="powder blue", command=lambda: btnclick(8) ) btn8.grid(row=2,column=1) btn9=Button(f2,padx=16,pady=16,bd=4, fg="black", font=('ariel', 20 ,'bold'),text="9",bg="powder blue", command=lambda: btnclick(9) ) btn9.grid(row=2,column=2) Addition=Button(f2,padx=16,pady=16,bd=4, fg="black", font=('ariel', 20 ,'bold'),text="+",bg="powder blue", command=lambda: btnclick("+") ) Addition.grid(row=2,column=3) #--------------------------------------------------------------------------------------------- btn4=Button(f2,padx=16,pady=16,bd=4, fg="black", font=('ariel', 20 ,'bold'),text="4",bg="powder blue", command=lambda: btnclick(4) ) btn4.grid(row=3,column=0) btn5=Button(f2,padx=16,pady=16,bd=4, fg="black", font=('ariel', 20 
,'bold'),text="5",bg="powder blue", command=lambda: btnclick(5) ) btn5.grid(row=3,column=1) btn6=Button(f2,padx=16,pady=16,bd=4, fg="black", font=('ariel', 20 ,'bold'),text="6",bg="powder blue", command=lambda: btnclick(6) ) btn6.grid(row=3,column=2) Substraction=Button(f2,padx=16,pady=16,bd=4, fg="black", font=('ariel', 20 ,'bold'),text="-",bg="powder blue", command=lambda: btnclick("-") ) Substraction.grid(row=3,column=3) #----------------------------------------------------------------------------------------------- btn1=Button(f2,padx=16,pady=16,bd=4, fg="black", font=('ariel', 20 ,'bold'),text="1",bg="powder blue", command=lambda: btnclick(1) ) btn1.grid(row=4,column=0) btn2=Button(f2,padx=16,pady=16,bd=4, fg="black", font=('ariel', 20 ,'bold'),text="2",bg="powder blue", command=lambda: btnclick(2) ) btn2.grid(row=4,column=1) btn3=Button(f2,padx=16,pady=16,bd=4, fg="black", font=('ariel', 20 ,'bold'),text="3",bg="powder blue", command=lambda: btnclick(3) ) btn3.grid(row=4,column=2) multiply=Button(f2,padx=16,pady=16,bd=4, fg="black", font=('ariel', 20 ,'bold'),text="*",bg="powder blue", command=lambda: btnclick("*") ) multiply.grid(row=4,column=3) #------------------------------------------------------------------------------------------------ btn0=Button(f2,padx=16,pady=16,bd=4, fg="black", font=('ariel', 20 ,'bold'),text="0",bg="powder blue", command=lambda: btnclick(0) ) btn0.grid(row=5,column=0) btnc=Button(f2,padx=16,pady=16,bd=4, fg="black", font=('ariel', 20 ,'bold'),text="c",bg="powder blue", command=clrdisplay) btnc.grid(row=5,column=1) btnequal=Button(f2,padx=16,pady=16,bd=4,width = 16, fg="black", font=('ariel', 20 ,'bold'),text="=",bg="powder blue",command=eqals) btnequal.grid(columnspan=4) Decimal=Button(f2,padx=16,pady=16,bd=4, fg="black", font=('ariel', 20 ,'bold'),text=".",bg="powder blue", command=lambda: btnclick(".") ) Decimal.grid(row=5,column=2) Division=Button(f2,padx=16,pady=16,bd=4, fg="black", font=('ariel', 20 ,'bold'),text="/",bg="powder blue", command=lambda: btnclick("/") ) Division.grid(row=5,column=3) status = Label(f2,font=('aria', 15, 'bold'),width = 16, text="clifter resturant",bd=2,relief=SUNKEN) status.grid(row=7,columnspan=3) #--------------------------------------------------------------------------------------- rand = StringVar() Fries = StringVar() Largefries = StringVar() Burger = StringVar() Filet = StringVar() Subtotal = StringVar() Total = StringVar() Service_Charge = StringVar() Drinks = StringVar() Tax = StringVar() cost = StringVar() Cheese_burger = StringVar() lblreference = Label(f1, font=( 'aria' ,16, 'bold' ),text="Order No.",fg="steel blue",bd=10,anchor='w') lblreference.grid(row=0,column=0) txtreference = Entry(f1,font=('ariel' ,16,'bold'), textvariable=rand , bd=6,insertwidth=4,bg="powder blue" ,justify='right') txtreference.grid(row=0,column=1) lblfries = Label(f1, font=( 'aria' ,16, 'bold' ),text="Fries Meal",fg="steel blue",bd=10,anchor='w') lblfries.grid(row=1,column=0) txtfries = Entry(f1,font=('ariel' ,16,'bold'), textvariable=Fries , bd=6,insertwidth=4,bg="powder blue" ,justify='right') txtfries.grid(row=1,column=1) lblLargefries = Label(f1, font=( 'aria' ,16, 'bold' ),text="Lunch Meal",fg="steel blue",bd=10,anchor='w') lblLargefries.grid(row=2,column=0) txtLargefries = Entry(f1,font=('ariel' ,16,'bold'), textvariable=Largefries , bd=6,insertwidth=4,bg="powder blue" ,justify='right') txtLargefries.grid(row=2,column=1) lblburger = Label(f1, font=( 'aria' ,16, 'bold' ),text="Burger Meal",fg="steel blue",bd=10,anchor='w') 
lblburger.grid(row=3,column=0) txtburger = Entry(f1,font=('ariel' ,16,'bold'), textvariable=Burger , bd=6,insertwidth=4,bg="powder blue" ,justify='right') txtburger.grid(row=3,column=1) lblFilet = Label(f1, font=( 'aria' ,16, 'bold' ),text="Pizza Meal",fg="steel blue",bd=10,anchor='w') lblFilet.grid(row=4,column=0) txtFilet = Entry(f1,font=('ariel' ,16,'bold'), textvariable=Filet , bd=6,insertwidth=4,bg="powder blue" ,justify='right') txtFilet.grid(row=4,column=1) lblCheese_burger = Label(f1, font=( 'aria' ,16, 'bold' ),text="Cheese burger",fg="steel blue",bd=10,anchor='w') lblCheese_burger.grid(row=5,column=0) txtCheese_burger = Entry(f1,font=('ariel' ,16,'bold'), textvariable=Cheese_burger , bd=6,insertwidth=4,bg="powder blue" ,justify='right') txtCheese_burger.grid(row=5,column=1) #-------------------------------------------------------------------------------------- lblDrinks = Label(f1, font=( 'aria' ,16, 'bold' ),text="Drinks",fg="steel blue",bd=10,anchor='w') lblDrinks.grid(row=0,column=2) txtDrinks = Entry(f1,font=('ariel' ,16,'bold'), textvariable=Drinks , bd=6,insertwidth=4,bg="powder blue" ,justify='right') txtDrinks.grid(row=0,column=3) lblcost = Label(f1, font=( 'aria' ,16, 'bold' ),text="cost",fg="steel blue",bd=10,anchor='w') lblcost.grid(row=1,column=2) txtcost = Entry(f1,font=('ariel' ,16,'bold'), textvariable=cost , bd=6,insertwidth=4,bg="powder blue" ,justify='right') txtcost.grid(row=1,column=3) lblService_Charge = Label(f1, font=( 'aria' ,16, 'bold' ),text="Service Charge",fg="steel blue",bd=10,anchor='w') lblService_Charge.grid(row=2,column=2) txtService_Charge = Entry(f1,font=('ariel' ,16,'bold'), textvariable=Service_Charge , bd=6,insertwidth=4,bg="powder blue" ,justify='right') txtService_Charge.grid(row=2,column=3) lblTax = Label(f1, font=( 'aria' ,16, 'bold' ),text="Tax",fg="steel blue",bd=10,anchor='w') lblTax.grid(row=3,column=2) txtTax = Entry(f1,font=('ariel' ,16,'bold'), textvariable=Tax , bd=6,insertwidth=4,bg="powder blue" ,justify='right') txtTax.grid(row=3,column=3) lblSubtotal = Label(f1, font=( 'aria' ,16, 'bold' ),text="Subtotal",fg="steel blue",bd=10,anchor='w') lblSubtotal.grid(row=4,column=2) txtSubtotal = Entry(f1,font=('ariel' ,16,'bold'), textvariable=Subtotal , bd=6,insertwidth=4,bg="powder blue" ,justify='right') txtSubtotal.grid(row=4,column=3) lblTotal = Label(f1, font=( 'aria' ,16, 'bold' ),text="Total",fg="steel blue",bd=10,anchor='w') lblTotal.grid(row=5,column=2) txtTotal = Entry(f1,font=('ariel' ,16,'bold'), textvariable=Total , bd=6,insertwidth=4,bg="powder blue" ,justify='right') txtTotal.grid(row=5,column=3) #-----------------------------------------buttons------------------------------------------ lblTotal = Label(f1,text="---------------------",fg="white") lblTotal.grid(row=6,columnspan=3) btnTotal=Button(f1,padx=16,pady=8, bd=10 ,fg="black",font=('ariel' ,16,'bold'),width=10, text="TOTAL", bg="powder blue",command=Ref) btnTotal.grid(row=7, column=1) btnreset=Button(f1,padx=16,pady=8, bd=10 ,fg="black",font=('ariel' ,16,'bold'),width=10, text="RESET", bg="powder blue",command=reset) btnreset.grid(row=7, column=2) btnexit=Button(f1,padx=16,pady=8, bd=10 ,fg="black",font=('ariel' ,16,'bold'),width=10, text="EXIT", bg="powder blue",command=qexit) btnexit.grid(row=7, column=3) def price(): roo = Tk() roo.geometry("600x220+0+0") roo.title("Price List") lblinfo = Label(roo, font=('aria', 15, 'bold'), text="ITEM", fg="black", bd=5) lblinfo.grid(row=0, column=0) lblinfo = Label(roo, font=('aria', 15,'bold'), text="_____________", 
fg="white", anchor=W) lblinfo.grid(row=0, column=2) lblinfo = Label(roo, font=('aria', 15, 'bold'), text="PRICE", fg="black", anchor=W) lblinfo.grid(row=0, column=3) lblinfo = Label(roo, font=('aria', 15, 'bold'), text="Fries Meal", fg="steel blue", anchor=W) lblinfo.grid(row=1, column=0) lblinfo = Label(roo, font=('aria', 15, 'bold'), text="25", fg="steel blue", anchor=W) lblinfo.grid(row=1, column=3) lblinfo = Label(roo, font=('aria', 15, 'bold'), text="Lunch Meal", fg="steel blue", anchor=W) lblinfo.grid(row=2, column=0) lblinfo = Label(roo, font=('aria', 15, 'bold'), text="40", fg="steel blue", anchor=W) lblinfo.grid(row=2, column=3) lblinfo = Label(roo, font=('aria', 15, 'bold'), text="Burger Meal", fg="steel blue", anchor=W) lblinfo.grid(row=3, column=0) lblinfo = Label(roo, font=('aria', 15, 'bold'), text="35", fg="steel blue", anchor=W) lblinfo.grid(row=3, column=3) lblinfo = Label(roo, font=('aria', 15, 'bold'), text="Pizza Meal", fg="steel blue", anchor=W) lblinfo.grid(row=4, column=0) lblinfo = Label(roo, font=('aria', 15, 'bold'), text="50", fg="steel blue", anchor=W) lblinfo.grid(row=4, column=3) lblinfo = Label(roo, font=('aria', 15, 'bold'), text="Cheese Burger", fg="steel blue", anchor=W) lblinfo.grid(row=5, column=0) lblinfo = Label(roo, font=('aria', 15, 'bold'), text="30", fg="steel blue", anchor=W) lblinfo.grid(row=5, column=3) lblinfo = Label(roo, font=('aria', 15, 'bold'), text="Drinks", fg="steel blue", anchor=W) lblinfo.grid(row=6, column=0) lblinfo = Label(roo, font=('aria', 15, 'bold'), text="35", fg="steel blue", anchor=W) lblinfo.grid(row=6, column=3) roo.mainloop() btnprice=Button(f1,padx=16,pady=8, bd=10 ,fg="black",font=('ariel' ,16,'bold'),width=10, text="PRICE", bg="powder blue",command=price) btnprice.grid(row=7, column=0) root.mainloop()
[ [ [ 19, 20 ], [ 55, 57 ], [ 141, 146 ], [ 193, 199 ], [ 216, 219 ], [ 227, 232 ], [ 268, 274 ], [ 289, 293 ], [ 301, 306 ], [ 343, 349 ], [ 364, 369 ], [ 510, 515 ], [ 659, 664 ], [ 730, 731 ], [ 819, 828 ], [ 858, 863 ], [ 2816, 2822 ], [ 2976, 2982 ], [ 3136, 3142 ], [ 3300, 3306 ], [ 3560, 3566 ], [ 3720, 3726 ], [ 3880, 3886 ], [ 4048, 4054 ], [ 4314, 4320 ], [ 4474, 4480 ], [ 4634, 4640 ], [ 4798, 4804 ], [ 5061, 5067 ], [ 5221, 5227 ], [ 5375, 5381 ], [ 5535, 5541 ], [ 5704, 5710 ], [ 5873, 5878 ], [ 5957, 5963 ], [ 6094, 6103 ], [ 6114, 6123 ], [ 6139, 6148 ], [ 6160, 6169 ], [ 6180, 6189 ], [ 6203, 6212 ], [ 6223, 6232 ], [ 6252, 6261 ], [ 6273, 6282 ], [ 6291, 6300 ], [ 6310, 6319 ], [ 6338, 6347 ], [ 6367, 6372 ], [ 6505, 6510 ], [ 6661, 6666 ], [ 6792, 6797 ], [ 6950, 6955 ], [ 7091, 7096 ], [ 7256, 7261 ], [ 7390, 7395 ], [ 7545, 7550 ], [ 7676, 7681 ], [ 7837, 7842 ], [ 7987, 7992 ], [ 8245, 8250 ], [ 8374, 8379 ], [ 8528, 8533 ], [ 8651, 8656 ], [ 8811, 8816 ], [ 8964, 8969 ], [ 9133, 9138 ], [ 9253, 9258 ], [ 9405, 9410 ], [ 9540, 9545 ], [ 9699, 9704 ], [ 9825, 9830 ], [ 10070, 10075 ], [ 10164, 10170 ], [ 10329, 10335 ], [ 10495, 10501 ], [ 12673, 12679 ], [ 10674, 10676 ], [ 10753, 10758 ], [ 10870, 10875 ], [ 10948, 10949 ], [ 10999, 11004 ], [ 11070, 11071 ], [ 11121, 11126 ], [ 11202, 11203 ], [ 11253, 11258 ], [ 11326, 11327 ], [ 11377, 11382 ], [ 11458, 11459 ], [ 11509, 11514 ], [ 11582, 11583 ], [ 11633, 11638 ], [ 11715, 11716 ], [ 11766, 11771 ], [ 11839, 11840 ], [ 11890, 11895 ], [ 11971, 11972 ], [ 12022, 12027 ], [ 12095, 12096 ], [ 12146, 12151 ], [ 12230, 12231 ], [ 12281, 12286 ], [ 12354, 12355 ], [ 12405, 12410 ], [ 12482, 12483 ], [ 12533, 12538 ], [ 12606, 12607 ] ], [ [ 28, 34 ], [ 1315, 1321 ] ], [ [ 42, 46 ], [ 419, 423 ], [ 432, 436 ], [ 447, 451 ] ], [ [ 48, 52 ], [ 60, 64 ], [ 90, 94 ], [ 147, 151 ], [ 233, 237 ], [ 307, 311 ], [ 12831, 12835 ], [ 2541, 2545 ] ], [ [ 134, 138 ], [ 201, 205 ], [ 516, 520 ], [ 665, 669 ] ], [ [ 222, 224 ], [ 276, 278 ], [ 6373, 6375 ], [ 6511, 6513 ], [ 6667, 6669 ], [ 6798, 6800 ], [ 6956, 6958 ], [ 7097, 7099 ], [ 7262, 7264 ], [ 7396, 7398 ], [ 7551, 7553 ], [ 7682, 7684 ], [ 7843, 7845 ], [ 7993, 7995 ], [ 8251, 8253 ], [ 8380, 8382 ], [ 8534, 8536 ], [ 8657, 8659 ], [ 8817, 8819 ], [ 8970, 8972 ], [ 9139, 9141 ], [ 9259, 9261 ], [ 9411, 9413 ], [ 9546, 9548 ], [ 9705, 9707 ], [ 9831, 9833 ], [ 10076, 10078 ], [ 10171, 10173 ], [ 10336, 10338 ], [ 10502, 10504 ], [ 12680, 12682 ] ], [ [ 296, 298 ], [ 351, 353 ], [ 864, 866 ], [ 2823, 2825 ], [ 2983, 2985 ], [ 3143, 3145 ], [ 3307, 3309 ], [ 3567, 3569 ], [ 3727, 3729 ], [ 3887, 3889 ], [ 4055, 4057 ], [ 4321, 4323 ], [ 4481, 4483 ], [ 4641, 4643 ], [ 4805, 4807 ], [ 5068, 5070 ], [ 5228, 5230 ], [ 5382, 5384 ], [ 5542, 5544 ], [ 5711, 5713 ], [ 5879, 5881 ] ], [ [ 409, 418 ], [ 697, 706 ] ], [ [ 500, 507 ], [ 620, 627 ] ], [ [ 649, 656 ], [ 733, 740 ] ], [ [ 808, 818 ], [ 907, 917 ], [ 1085, 1095 ], [ 1169, 1179 ], [ 1257, 1267 ] ], [ [ 831, 839 ], [ 1057, 1065 ], [ 1241, 1249 ] ], [ [ 845, 855 ], [ 969, 979 ] ], [ [ 1005, 1013 ], [ 2930, 2938 ], [ 3090, 3098 ], [ 3250, 3258 ], [ 3414, 3422 ], [ 3674, 3682 ], [ 3834, 3842 ], [ 3994, 4002 ], [ 4162, 4170 ], [ 4428, 4436 ], [ 4588, 4596 ], [ 4748, 4756 ], [ 4912, 4920 ], [ 5175, 5183 ], [ 5649, 5657 ], [ 5818, 5826 ] ], [ [ 1115, 1125 ], [ 5327, 5337 ] ], [ [ 1193, 1198 ], [ 5491, 5496 ] ], [ [ 1302, 1305 ], [ 10283, 10286 ] ], [ [ 2528, 2533 ], [ 10613, 10618 ] ], [ [ 2561, 2566 ], [ 10448, 10453 ] ], [ [ 
2811, 2815 ], [ 2944, 2948 ] ], [ [ 2971, 2975 ], [ 3104, 3108 ] ], [ [ 3131, 3135 ], [ 3264, 3268 ] ], [ [ 3291, 3299 ], [ 3430, 3438 ] ], [ [ 3555, 3559 ], [ 3688, 3692 ] ], [ [ 3715, 3719 ], [ 3848, 3852 ] ], [ [ 3875, 3879 ], [ 4008, 4012 ] ], [ [ 4035, 4047 ], [ 4178, 4190 ] ], [ [ 4309, 4313 ], [ 4442, 4446 ] ], [ [ 4469, 4473 ], [ 4602, 4606 ] ], [ [ 4629, 4633 ], [ 4762, 4766 ] ], [ [ 4789, 4797 ], [ 4928, 4936 ] ], [ [ 5056, 5060 ], [ 5189, 5193 ] ], [ [ 5216, 5220 ], [ 5339, 5343 ] ], [ [ 5366, 5374 ], [ 5498, 5506 ] ], [ [ 5527, 5534 ], [ 5665, 5672 ] ], [ [ 5695, 5703 ], [ 5834, 5842 ] ], [ [ 5864, 5870 ], [ 5965, 5971 ] ], [ [ 6087, 6091 ], [ 6554, 6558 ], [ 1371, 1375 ], [ 2574, 2578 ] ], [ [ 6106, 6111 ], [ 6841, 6846 ], [ 1407, 1412 ], [ 2591, 2596 ] ], [ [ 6126, 6136 ], [ 7140, 7150 ], [ 1440, 1450 ], [ 2609, 2619 ] ], [ [ 6151, 6157 ], [ 7439, 7445 ], [ 1473, 1479 ], [ 2632, 2638 ] ], [ [ 6172, 6177 ], [ 7725, 7730 ], [ 1503, 1508 ], [ 2651, 2656 ] ], [ [ 6192, 6200 ], [ 9589, 9597 ], [ 2470, 2478 ], [ 2669, 2677 ] ], [ [ 6215, 6220 ], [ 9874, 9879 ], [ 2499, 2504 ], [ 2690, 2695 ] ], [ [ 6235, 6249 ], [ 9013, 9027 ], [ 2392, 2406 ], [ 2708, 2722 ] ], [ [ 6264, 6270 ], [ 8423, 8429 ], [ 1571, 1577 ], [ 2735, 2741 ] ], [ [ 6285, 6288 ], [ 9302, 9305 ], [ 2449, 2452 ], [ 2754, 2757 ] ], [ [ 6303, 6307 ], [ 8700, 8704 ], [ 2424, 2428 ], [ 2770, 2774 ] ], [ [ 6322, 6335 ], [ 8036, 8049 ], [ 1534, 1547 ], [ 2787, 2800 ] ], [ [ 6352, 6364 ], [ 6456, 6468 ] ], [ [ 6490, 6502 ], [ 6615, 6627 ] ], [ [ 6650, 6658 ], [ 6751, 6759 ] ], [ [ 6781, 6789 ], [ 6903, 6911 ] ], [ [ 6934, 6947 ], [ 7040, 7053 ] ], [ [ 7075, 7088 ], [ 7207, 7220 ] ], [ [ 7244, 7253 ], [ 7347, 7356 ] ], [ [ 7378, 7387 ], [ 7502, 7511 ] ], [ [ 7534, 7542 ], [ 7635, 7643 ] ], [ [ 7665, 7673 ], [ 7787, 7795 ] ], [ [ 7818, 7834 ], [ 7930, 7946 ] ], [ [ 7968, 7984 ], [ 8106, 8122 ] ], [ [ 8233, 8242 ], [ 8331, 8340 ] ], [ [ 8362, 8371 ], [ 8486, 8495 ] ], [ [ 8518, 8525 ], [ 8612, 8619 ] ], [ [ 8641, 8648 ], [ 8761, 8768 ] ], [ [ 8791, 8808 ], [ 8905, 8922 ] ], [ [ 8944, 8961 ], [ 9084, 9101 ] ], [ [ 9124, 9130 ], [ 9216, 9222 ] ], [ [ 9244, 9250 ], [ 9362, 9368 ] ], [ [ 9391, 9402 ], [ 9493, 9504 ] ], [ [ 9526, 9537 ], [ 9654, 9665 ] ], [ [ 9688, 9696 ], [ 9784, 9792 ] ], [ [ 9814, 9822 ], [ 9936, 9944 ] ], [ [ 10059, 10067 ], [ 10120, 10128 ] ], [ [ 10155, 10163 ], [ 10288, 10296 ] ], [ [ 10320, 10328 ], [ 10455, 10463 ] ], [ [ 10487, 10494 ], [ 10620, 10627 ] ], [ [ 10655, 10660 ], [ 12792, 12797 ] ], [ [ 12664, 12672 ], [ 12799, 12807 ] ], [ [ 1048, 1056 ], [ 1100, 1108 ] ], [ [ 1153, 1161 ] ], [ [ 1283, 1291 ] ] ]
from plotly.basedatatypes import BaseLayoutHierarchyType as _BaseLayoutHierarchyType import copy as _copy class Font(_BaseLayoutHierarchyType): # class properties # -------------------- _parent_path_str = "layout.scene.xaxis.title" _path_str = "layout.scene.xaxis.title.font" _valid_props = {"color", "family", "size"} # color # ----- @property def color(self): """ The 'color' property is a color and may be specified as: - A hex string (e.g. '#ff0000') - An rgb/rgba string (e.g. 'rgb(255,0,0)') - An hsl/hsla string (e.g. 'hsl(0,100%,50%)') - An hsv/hsva string (e.g. 'hsv(0,100%,100%)') - A named CSS color: aliceblue, antiquewhite, aqua, aquamarine, azure, beige, bisque, black, blanchedalmond, blue, blueviolet, brown, burlywood, cadetblue, chartreuse, chocolate, coral, cornflowerblue, cornsilk, crimson, cyan, darkblue, darkcyan, darkgoldenrod, darkgray, darkgrey, darkgreen, darkkhaki, darkmagenta, darkolivegreen, darkorange, darkorchid, darkred, darksalmon, darkseagreen, darkslateblue, darkslategray, darkslategrey, darkturquoise, darkviolet, deeppink, deepskyblue, dimgray, dimgrey, dodgerblue, firebrick, floralwhite, forestgreen, fuchsia, gainsboro, ghostwhite, gold, goldenrod, gray, grey, green, greenyellow, honeydew, hotpink, indianred, indigo, ivory, khaki, lavender, lavenderblush, lawngreen, lemonchiffon, lightblue, lightcoral, lightcyan, lightgoldenrodyellow, lightgray, lightgrey, lightgreen, lightpink, lightsalmon, lightseagreen, lightskyblue, lightslategray, lightslategrey, lightsteelblue, lightyellow, lime, limegreen, linen, magenta, maroon, mediumaquamarine, mediumblue, mediumorchid, mediumpurple, mediumseagreen, mediumslateblue, mediumspringgreen, mediumturquoise, mediumvioletred, midnightblue, mintcream, mistyrose, moccasin, navajowhite, navy, oldlace, olive, olivedrab, orange, orangered, orchid, palegoldenrod, palegreen, paleturquoise, palevioletred, papayawhip, peachpuff, peru, pink, plum, powderblue, purple, red, rosybrown, royalblue, rebeccapurple, saddlebrown, salmon, sandybrown, seagreen, seashell, sienna, silver, skyblue, slateblue, slategray, slategrey, snow, springgreen, steelblue, tan, teal, thistle, tomato, turquoise, violet, wheat, white, whitesmoke, yellow, yellowgreen Returns ------- str """ return self["color"] @color.setter def color(self, val): self["color"] = val # family # ------ @property def family(self): """ HTML font family - the typeface that will be applied by the web browser. The web browser will only be able to apply a font if it is available on the system which it operates. Provide multiple font families, separated by commas, to indicate the preference in which to apply fonts if they aren't available on the system. The Chart Studio Cloud (at https://chart- studio.new_plotly.com or on-premise) generates images on a server, where only a select number of fonts are installed and supported. These include "Arial", "Balto", "Courier New", "Droid Sans",, "Droid Serif", "Droid Sans Mono", "Gravitas One", "Old Standard TT", "Open Sans", "Overpass", "PT Sans Narrow", "Raleway", "Times New Roman". 
The 'family' property is a string and must be specified as: - A non-empty string Returns ------- str """ return self["family"] @family.setter def family(self, val): self["family"] = val # size # ---- @property def size(self): """ The 'size' property is a number and may be specified as: - An int or float in the interval [1, inf] Returns ------- int|float """ return self["size"] @size.setter def size(self, val): self["size"] = val # Self properties description # --------------------------- @property def _prop_descriptions(self): return """\ color family HTML font family - the typeface that will be applied by the web browser. The web browser will only be able to apply a font if it is available on the system which it operates. Provide multiple font families, separated by commas, to indicate the preference in which to apply fonts if they aren't available on the system. The Chart Studio Cloud (at https://chart-studio.plotly.com or on- premise) generates images on a server, where only a select number of fonts are installed and supported. These include "Arial", "Balto", "Courier New", "Droid Sans",, "Droid Serif", "Droid Sans Mono", "Gravitas One", "Old Standard TT", "Open Sans", "Overpass", "PT Sans Narrow", "Raleway", "Times New Roman". size """ def __init__(self, arg=None, color=None, family=None, size=None, **kwargs): """ Construct a new Font object Sets this axis' title font. Note that the title's font used to be customized by the now deprecated `titlefont` attribute. Parameters ---------- arg dict of properties compatible with this constructor or an instance of :class:`new_plotly.graph_objs.layout.scene.x axis.title.Font` color family HTML font family - the typeface that will be applied by the web browser. The web browser will only be able to apply a font if it is available on the system which it operates. Provide multiple font families, separated by commas, to indicate the preference in which to apply fonts if they aren't available on the system. The Chart Studio Cloud (at https://chart-studio.plotly.com or on- premise) generates images on a server, where only a select number of fonts are installed and supported. These include "Arial", "Balto", "Courier New", "Droid Sans",, "Droid Serif", "Droid Sans Mono", "Gravitas One", "Old Standard TT", "Open Sans", "Overpass", "PT Sans Narrow", "Raleway", "Times New Roman". 
size Returns ------- Font """ super(Font, self).__init__("font") if "_parent" in kwargs: self._parent = kwargs["_parent"] return # Validate arg # ------------ if arg is None: arg = {} elif isinstance(arg, self.__class__): arg = arg.to_plotly_json() elif isinstance(arg, dict): arg = _copy.copy(arg) else: raise ValueError( """\ The first argument to the new_plotly.graph_objs.layout.scene.xaxis.title.Font constructor must be a dict or an instance of :class:`new_plotly.graph_objs.layout.scene.xaxis.title.Font`""" ) # Handle skip_invalid # ------------------- self._skip_invalid = kwargs.pop("skip_invalid", False) self._validate = kwargs.pop("_validate", True) # Populate data dict with properties # ---------------------------------- _v = arg.pop("color", None) _v = color if color is not None else _v if _v is not None: self["color"] = _v _v = arg.pop("family", None) _v = family if family is not None else _v if _v is not None: self["family"] = _v _v = arg.pop("size", None) _v = size if size is not None else _v if _v is not None: self["size"] = _v # Process unknown kwargs # ---------------------- self._process_kwargs(**dict(arg, **kwargs)) # Reset skip_invalid # ------------------ self._skip_invalid = False
[ [ [ 33, 84 ], [ 119, 143 ] ], [ [ 92, 105 ], [ 7396, 7401 ] ], [ [ 114, 118 ], [ 7039, 7043 ] ] ]
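For context, this is how the same font settings are reached through the public layout API. The sketch uses the upstream plotly package; the class above lives in a renamed new_plotly namespace, but the attribute path layout.scene.xaxis.title.font is the same.

import plotly.graph_objects as go

fig = go.Figure(go.Scatter3d(x=[0, 1], y=[0, 1], z=[0, 1], mode="markers"))
fig.update_layout(
    scene=dict(
        xaxis=dict(
            title=dict(
                text="depth",
                # color, family and size are the three properties defined above
                font=dict(family="Courier New", size=14, color="crimson"),
            )
        )
    )
)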
#!/usr/bin/python
# -- Content-Encoding: UTF-8 --
"""
Utility methods, for compatibility between Python versions

:author: Thomas Calmant
:copyright: Copyright 2015, isandlaTech
:license: Apache License 2.0
:version: 0.2.6

..

    Copyright 2015 isandlaTech

    Licensed under the Apache License, Version 2.0 (the "License");
    you may not use this file except in compliance with the License.
    You may obtain a copy of the License at

        http://www.apache.org/licenses/LICENSE-2.0

    Unless required by applicable law or agreed to in writing, software
    distributed under the License is distributed on an "AS IS" BASIS,
    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    See the License for the specific language governing permissions and
    limitations under the License.
"""

# Module version
__version_info__ = (0, 2, 6)
__version__ = ".".join(str(x) for x in __version_info__)

# Documentation strings format
__docformat__ = "restructuredtext en"

# ------------------------------------------------------------------------------

import sys

# ------------------------------------------------------------------------------

if sys.version_info[0] < 3:
    # Python 2
    # pylint: disable=E1101
    import types

    try:
        STRING_TYPES = (
            types.StringType,
            types.UnicodeType
        )
    except NameError:
        # Python built without unicode support
        STRING_TYPES = (types.StringType,)

    NUMERIC_TYPES = (
        types.IntType,
        types.LongType,
        types.FloatType
    )

    def to_bytes(string):
        """
        Converts the given string into bytes
        """
        # pylint: disable=E0602
        if type(string) is unicode:
            return str(string)
        return string

    def from_bytes(data):
        """
        Converts the given bytes into a string
        """
        if type(data) is str:
            return data
        return str(data)

else:
    # Python 3
    # pylint: disable=E1101
    STRING_TYPES = (
        bytes,
        str
    )

    NUMERIC_TYPES = (
        int,
        float
    )

    def to_bytes(string):
        """
        Converts the given string into bytes
        """
        if type(string) is bytes:
            return string
        return bytes(string, "UTF-8")

    def from_bytes(data):
        """
        Converts the given bytes into a string
        """
        if type(data) is str:
            return data
        return str(data, "UTF-8")

# ------------------------------------------------------------------------------
# Common

DictType = dict
ListType = list
TupleType = tuple

ITERABLE_TYPES = (
    list,
    set, frozenset,
    tuple
)

VALUE_TYPES = (
    bool,
    type(None)
)

PRIMITIVE_TYPES = STRING_TYPES + NUMERIC_TYPES + VALUE_TYPES
[ [ [ 841, 857 ], [ 909, 925 ] ], [ [ 870, 881 ] ], [ [ 959, 972 ] ], [ [ 1087, 1090 ], [ 1177, 1180 ] ], [ [ 1256, 1261 ], [ 1308, 1313 ], [ 1338, 1343 ], [ 1459, 1464 ], [ 1509, 1514 ], [ 1532, 1537 ], [ 1556, 1561 ] ], [ [ 1279, 1291 ], [ 2780, 2792 ] ], [ [ 1443, 1455 ], [ 2780, 2792 ] ], [ [ 1483, 1496 ], [ 2795, 2808 ] ], [ [ 1587, 1595 ] ], [ [ 1804, 1814 ] ], [ [ 2026, 2038 ], [ 2780, 2792 ] ], [ [ 2081, 2094 ], [ 2795, 2808 ] ], [ [ 2141, 2149 ] ], [ [ 2335, 2345 ] ], [ [ 2604, 2612 ] ], [ [ 2621, 2629 ] ], [ [ 2637, 2646 ] ], [ [ 2656, 2670 ] ], [ [ 2718, 2729 ], [ 2811, 2822 ] ], [ [ 2762, 2777 ] ] ]
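A short sketch of how the helpers above behave when the module is imported under Python 3; the names are used directly here, as if they had been imported from that module.

print(to_bytes("hello"))                # b'hello'
print(from_bytes(b"hello"))             # 'hello'
print(isinstance("x", STRING_TYPES))    # True
print(isinstance(3.14, NUMERIC_TYPES))  # True
print(type(None) in VALUE_TYPES)        # True
print(PRIMITIVE_TYPES)                  # (bytes, str, int, float, bool, NoneType)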
# Copyright 2014, Rackspace, US, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import mock from django.conf import settings from openstack_dashboard.api.rest import nova from openstack_dashboard.test import helpers as test class NovaRestTestCase(test.TestCase): # # Keypairs # @mock.patch.object(nova.api, 'nova') def test_keypair_get(self, nc): request = self.mock_rest_request() nc.keypair_list.return_value = [ mock.Mock(**{'to_dict.return_value': {'id': 'one'}}), mock.Mock(**{'to_dict.return_value': {'id': 'two'}}), ] response = nova.Keypairs().get(request) self.assertStatusCode(response, 200) self.assertEqual(response.content, '{"items": [{"id": "one"}, {"id": "two"}]}') nc.keypair_list.assert_called_once_with(request) @mock.patch.object(nova.api, 'nova') def test_keypair_create(self, nc): request = self.mock_rest_request(body='''{"name": "Ni!"}''') new = nc.keypair_create.return_value new.to_dict.return_value = {'name': 'Ni!', 'public_key': 'sekrit'} new.name = 'Ni!' with mock.patch.object(settings, 'DEBUG', True): response = nova.Keypairs().post(request) self.assertStatusCode(response, 201) self.assertEqual(response.content, '{"name": "Ni!", "public_key": "sekrit"}') self.assertEqual(response['location'], '/api/nova/keypairs/Ni%21') nc.keypair_create.assert_called_once_with(request, 'Ni!') @mock.patch.object(nova.api, 'nova') def test_keypair_import(self, nc): request = self.mock_rest_request(body=''' {"name": "Ni!", "public_key": "hi"} ''') new = nc.keypair_import.return_value new.to_dict.return_value = {'name': 'Ni!', 'public_key': 'hi'} new.name = 'Ni!' 
with mock.patch.object(settings, 'DEBUG', True): response = nova.Keypairs().post(request) self.assertStatusCode(response, 201) self.assertEqual(response.content, '{"name": "Ni!", "public_key": "hi"}') self.assertEqual(response['location'], '/api/nova/keypairs/Ni%21') nc.keypair_import.assert_called_once_with(request, 'Ni!', 'hi') # # Availability Zones # def test_availzone_get_brief(self): self._test_availzone_get(False) def test_availzone_get_detailed(self): self._test_availzone_get(True) @mock.patch.object(nova.api, 'nova') def _test_availzone_get(self, detail, nc): if detail: request = self.mock_rest_request(GET={'detailed': 'true'}) else: request = self.mock_rest_request(GET={}) nc.availability_zone_list.return_value = [ mock.Mock(**{'to_dict.return_value': {'id': 'one'}}), mock.Mock(**{'to_dict.return_value': {'id': 'two'}}), ] response = nova.AvailabilityZones().get(request) self.assertStatusCode(response, 200) self.assertEqual(response.content, '{"items": [{"id": "one"}, {"id": "two"}]}') nc.availability_zone_list.assert_called_once_with(request, detail) # # Limits # def test_limits_get_not_reserved(self): self._test_limits_get(False) def test_limits_get_reserved(self): self._test_limits_get(True) @mock.patch.object(nova.api, 'nova') def _test_limits_get(self, reserved, nc): if reserved: request = self.mock_rest_request(GET={'reserved': 'true'}) else: request = self.mock_rest_request(GET={}) nc.tenant_absolute_limits.return_value = {'id': 'one'} response = nova.Limits().get(request) self.assertStatusCode(response, 200) nc.tenant_absolute_limits.assert_called_once_with(request, reserved) self.assertEqual(response.content, '{"id": "one"}') # # Servers # @mock.patch.object(nova.api, 'nova') def test_server_create_missing(self, nc): request = self.mock_rest_request(body='''{"name": "hi"}''') response = nova.Servers().post(request) self.assertStatusCode(response, 400) self.assertEqual(response.content, '"missing required parameter \'source_id\'"') nc.server_create.assert_not_called() @mock.patch.object(nova.api, 'nova') def test_server_create_basic(self, nc): request = self.mock_rest_request(body='''{"name": "Ni!", "source_id": "image123", "flavor_id": "flavor123", "key_name": "sekrit", "user_data": "base64 yes", "security_groups": [{"name": "root"}]} ''') new = nc.server_create.return_value new.to_dict.return_value = {'id': 'server123'} new.id = 'server123' response = nova.Servers().post(request) self.assertStatusCode(response, 201) self.assertEqual(response.content, '{"id": "server123"}') self.assertEqual(response['location'], '/api/nova/servers/server123') nc.server_create.assert_called_once_with( request, 'Ni!', 'image123', 'flavor123', 'sekrit', 'base64 yes', [{'name': 'root'}] ) @mock.patch.object(nova.api, 'nova') def test_server_get_single(self, nc): request = self.mock_rest_request() nc.server_get.return_value.to_dict.return_value = {'name': '1'} response = nova.Server().get(request, "1") self.assertStatusCode(response, 200) nc.server_get.assert_called_once_with(request, "1") # # Extensions # @mock.patch.object(nova.api, 'nova') def _test_extension_list(self, nc): request = self.mock_rest_request() nc.list_extensions.return_value = [ mock.Mock(**{'to_dict.return_value': {'name': 'foo'}}), mock.Mock(**{'to_dict.return_value': {'name': 'bar'}}), ] response = nova.Extensions().get(request) self.assertStatusCode(response, 200) self.assertEqual(response.content, '{"items": [{"name": "foo"}, {"name": "bar"}]}') nc.list_extensions.assert_called_once_with(request) # # Flavors # def 
test_get_extras_no(self): self._test_flavor_get_single(get_extras=False) def test_get_extras_yes(self): self._test_flavor_get_single(get_extras=True) def test_get_extras_default(self): self._test_flavor_get_single(get_extras=None) @mock.patch.object(nova.api, 'nova') def _test_flavor_get_single(self, nc, get_extras): if get_extras: request = self.mock_rest_request(GET={'get_extras': 'tRuE'}) elif get_extras is None: request = self.mock_rest_request() get_extras = False else: request = self.mock_rest_request(GET={'get_extras': 'fAlsE'}) nc.flavor_get.return_value.to_dict.return_value = {'name': '1'} response = nova.Flavor().get(request, "1") self.assertStatusCode(response, 200) if get_extras: self.assertEqual(response.content, '{"extras": {}, "name": "1"}') else: self.assertEqual(response.content, '{"name": "1"}') nc.flavor_get.assert_called_once_with(request, "1", get_extras=get_extras) @mock.patch.object(nova.api, 'nova') def _test_flavor_list_public(self, nc, is_public=None): if is_public: request = self.mock_rest_request(GET={'is_public': 'tRuE'}) elif is_public is None: request = self.mock_rest_request(GET={}) else: request = self.mock_rest_request(GET={'is_public': 'fAlsE'}) nc.flavor_list.return_value = [ mock.Mock(**{'to_dict.return_value': {'id': '1'}}), mock.Mock(**{'to_dict.return_value': {'id': '2'}}), ] response = nova.Flavors().get(request) self.assertStatusCode(response, 200) self.assertEqual(response.content, '{"items": [{"id": "1"}, {"id": "2"}]}') nc.flavor_list.assert_called_once_with(request, is_public=is_public, get_extras=False) def test_flavor_list_private(self): self._test_flavor_list_public(is_public=False) def test_flavor_list_public(self): self._test_flavor_list_public(is_public=True) def test_flavor_list_public_none(self): self._test_flavor_list_public(is_public=None) @mock.patch.object(nova.api, 'nova') def _test_flavor_list_extras(self, nc, get_extras=None): if get_extras: request = self.mock_rest_request(GET={'get_extras': 'tRuE'}) elif get_extras is None: request = self.mock_rest_request(GET={}) get_extras = False else: request = self.mock_rest_request(GET={'get_extras': 'fAlsE'}) nc.flavor_list.return_value = [ mock.Mock(**{'extras': {}, 'to_dict.return_value': {'id': '1'}}), mock.Mock(**{'extras': {}, 'to_dict.return_value': {'id': '2'}}), ] response = nova.Flavors().get(request) self.assertStatusCode(response, 200) if get_extras: self.assertEqual(response.content, '{"items": [{"extras": {}, "id": "1"}, ' '{"extras": {}, "id": "2"}]}') else: self.assertEqual(response.content, '{"items": [{"id": "1"}, {"id": "2"}]}') nc.flavor_list.assert_called_once_with(request, is_public=None, get_extras=get_extras) def test_flavor_list_extras_no(self): self._test_flavor_list_extras(get_extras=False) def test_flavor_list_extras_yes(self): self._test_flavor_list_extras(get_extras=True) def test_flavor_list_extras_absent(self): self._test_flavor_list_extras(get_extras=None) @mock.patch.object(nova.api, 'nova') def test_flavor_extra_specs(self, nc): request = self.mock_rest_request() nc.flavor_get_extras.return_value.to_dict.return_value = {'foo': '1'} response = nova.FlavorExtraSpecs().get(request, "1") self.assertStatusCode(response, 200) nc.flavor_get_extras.assert_called_once_with(request, "1", raw=True)
[ [ [ 590, 594 ], [ 802, 806 ], [ 1369, 1373 ], [ 2071, 2075 ], [ 3014, 3018 ], [ 3927, 3931 ], [ 4491, 4495 ], [ 4899, 4903 ], [ 5771, 5775 ], [ 6156, 6160 ], [ 7043, 7047 ], [ 7912, 7916 ], [ 9090, 9094 ], [ 10565, 10569 ], [ 970, 974 ], [ 1036, 1040 ], [ 1671, 1675 ], [ 2411, 2415 ], [ 3317, 3321 ], [ 3383, 3387 ], [ 6331, 6335 ], [ 6399, 6403 ], [ 8326, 8330 ], [ 8390, 8394 ], [ 9540, 9544 ], [ 9618, 9622 ] ], [ [ 620, 628 ], [ 1689, 1697 ], [ 2429, 2437 ] ], [ [ 671, 675 ], [ 820, 824 ], [ 1387, 1391 ], [ 2089, 2093 ], [ 3032, 3036 ], [ 3945, 3949 ], [ 4509, 4513 ], [ 4917, 4921 ], [ 5789, 5793 ], [ 6174, 6178 ], [ 7061, 7065 ], [ 7930, 7934 ], [ 9108, 9112 ], [ 10583, 10587 ], [ 1119, 1123 ], [ 1738, 1742 ], [ 2478, 2482 ], [ 3466, 3470 ], [ 4250, 4254 ], [ 4660, 4664 ], [ 5379, 5383 ], [ 5984, 5988 ], [ 6484, 6488 ], [ 7521, 7525 ], [ 8471, 8475 ], [ 9713, 9717 ], [ 10785, 10789 ] ], [ [ 713, 728 ], [ 754, 758 ] ], [ [ 737, 753 ] ] ]
import IoTSensor import LORAGateway class GatewayPlacement: def __init__(self, sensor_list): self._sensor_list = sensor_list self._gateway_list = [] def add_gateway(self, gateway): self._gateway_list.append(gateway) def remove_gateway(self, gateway): self._gateway_list.remove(gateway) def sensors_covered(self): curr_placement_coverage = [] for g in self._gateway_list: curr_gateway_coverage = g.get_coverage(self._sensor_list) for s in curr_gateway_coverage: if not s.get_id() in curr_placement_coverage: curr_placement_coverage.append(s.get_id()) covers = True for s in self._sensor_list: if not s.get_id() in curr_placement_coverage: covers = False break return covers def energy_consumption(self, time): energy = 0.0 for s in self._sensor_list: energy = energy + s.get_total_consumption(time, s.get_closest_gateway(self._gateway_list)) for g in self._gateway_list: energy = energy + g.get_energy_consumption(time) return energy def get_gateways_number(self): return len(self._gateway_list)
[ [ [ 7, 16 ] ], [ [ 24, 35 ] ], [ [ 44, 60 ] ] ]
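Because IoTSensor and LORAGateway are not shown here, this usage sketch substitutes minimal stand-in classes that only implement the methods GatewayPlacement actually calls; the consumption figures are placeholders, not a real energy model.

class StubSensor:
    def __init__(self, sid):
        self._id = sid
    def get_id(self):
        return self._id
    def get_closest_gateway(self, gateways):
        return gateways[0] if gateways else None
    def get_total_consumption(self, time, gateway):
        return 0.5 * time  # placeholder consumption model

class StubGateway:
    def get_coverage(self, sensors):
        return sensors  # pretend every sensor is in range
    def get_energy_consumption(self, time):
        return 2.0 * time  # placeholder consumption model

sensors = [StubSensor(i) for i in range(3)]
placement = GatewayPlacement(sensors)
placement.add_gateway(StubGateway())
print(placement.sensors_covered())         # True with the stub coverage above
print(placement.energy_consumption(10.0))  # 3 * 5.0 + 20.0 = 35.0
print(placement.get_gateways_number())     # 1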
#! /usr/bin/python3
#
# Copyright (c) 2017 Intel Corporation
#
# SPDX-License-Identifier: Apache-2.0
#
"""
Create and remove network tunnels to the target via the server
--------------------------------------------------------------

"""

from . import tc
from . import ttb_client

class tunnel(tc.target_extension_c):
    """
    Extension to :py:class:`tcfl.tc.target_c` to create IP tunnels to
    targets with IP connectivity.

    Use by indicating a default IP address to use for interconnect
    *ic* or explicitly indicating it in the :meth:`add` function:

    >>> target.tunnel.ip_addr = target.addr_get(ic, "ipv4")
    >>> target.tunnel.add(PORT)
    >>> target.tunnel.remove(PORT)
    >>> target.tunnel.list()

    Note that for tunnels to work, the target has to be acquired and
    IP has to be up on it, which might require it to be connected to
    some IP network (it can be a TCF interconnect or any other
    network).
    """

    def __init__(self, target):
        self.target = target
        # Tunnels can always be added, even if the target is not in an
        # interconnect
        self.ip_addr = None

    def _ip_addr_get(self, ip_addr):
        # FIXME: this shall validate the IP address using python-ipaddress
        if ip_addr:
            return ip_addr
        if self.ip_addr:
            return self.ip_addr
        ip_addr = self.target.rt.get(
            'ipv4_addr', self.target.rt.get('ipv6_addr', None))
        if ip_addr:
            return ip_addr
        raise RuntimeError(
            "Cannot identify any IPv4 or IPv6 address to use; "
            "please set it in "
            "`TARGET.tunnel.ip_addr = TARGET.addr_get(ic, \"ipv4\")` "
            "or pass it explicitly")

    def add(self, port, ip_addr = None, proto = None):
        """
        Setup a TCP/UDP/SCTP v4 or v6 tunnel to the target

        A local port of the given protocol in the server is forwarded
        to the target's port. Teardown with :meth:`remove`.

        If the tunnel already exists, it is not recreated, but the
        port it uses is returned.

        Redirects the target's TCP4 port 3000 to server_port in the
        server that provides ``target`` (target.kws['server']).

        >>> server_name = target.rtb.parsed_url.hostname
        >>> server_port = target.tunnel.add(3000)

        Now connecting to ``server_name:server_port`` takes you to the
        target's port 3000.

        :param int port: port to redirect to
        :param str ip_addr: (optional) target's IP address to use (it
          must be listed on the target's tags *ipv4_address* or
          *ipv6_address*).
        :param str proto: (optional) Protocol to tunnel:
          {udp,sctp,tcp}[{4,6}] (defaults to v4 and to TCP)
        :returns int local_port: port in the server where to connect
          to in order to access the target.
        """
        if proto == None:
            proto = 'tcp'
        else:
            assert isinstance(proto, str)
        assert isinstance(port, int)
        target = self.target
        ip_addr = self._ip_addr_get(ip_addr)

        r = target.rtb.rest_tb_target_ip_tunnel_add(
            target.rt, ip_addr, port, proto, ticket = target.ticket)
        self.target.report_info("%s tunnel added from %s:%d to %s:%d"
                                % (proto, target.rtb.parsed_url.hostname, r,
                                   ip_addr, port))
        return r

    def remove(self, port, ip_addr = None, proto = None):
        """
        Teardown a TCP/UDP/SCTP v4 or v6 tunnel to the target
        previously created with :meth:`add`.

        :param int port: port to redirect to
        :param str ip_addr: (optional) target's IP address to use (it
          must be listed on the target's tags *ipv4_address* or
          *ipv6_address*).
        :param str proto: (optional) Protocol to tunnel:
          {udp,sctp,tcp}[{4,6}] (defaults to v4 and to TCP)
        """
        if proto == None:
            proto = 'tcp'
        else:
            assert isinstance(proto, str)
        assert isinstance(port, int)
        ip_addr = self._ip_addr_get(ip_addr)
        target = self.target
        target.rtb.rest_tb_target_ip_tunnel_remove(
            target.rt, ip_addr, port, proto, ticket = target.ticket)

    def list(self):
        """
        List existing IP tunnels

        :returns: list of tuples (protocol, target-ip-address, port,
          port-in-server)
        """
        target = self.target
        return target.rtb.rest_tb_target_ip_tunnel_list(target.rt,
                                                        ticket = target.ticket)


# FIXME: work out tcf creating target_c instances, so it is easier to
# automate creating cmdline wrappers

def cmdline_tunnel_add(args):
    rtb, rt = ttb_client._rest_target_find_by_id(args.target)
    port = rtb.rest_tb_target_ip_tunnel_add(rt, args.ip_addr, args.port,
                                            args.protocol,
                                            ticket = args.ticket)
    print("%s:%d" % (rtb.parsed_url.hostname, port))

def cmdline_tunnel_remove(args):
    rtb, rt = ttb_client._rest_target_find_by_id(args.target)
    rtb.rest_tb_target_ip_tunnel_remove(rt, args.ip_addr, args.port,
                                        args.protocol,
                                        ticket = args.ticket)

def cmdline_tunnel_list(args):
    rtb, rt = ttb_client._rest_target_find_by_id(args.target)
    tunnels = rtb.rest_tb_target_ip_tunnel_list(rt, ticket = args.ticket)
    for tunnel in tunnels:
        print("%s %s:%s %s:%s" % (tunnel[0], rtb.parsed_url.hostname,
                                  tunnel[3], tunnel[1], tunnel[2]))


def cmdline_setup(argsp):
    ap = argsp.add_parser("tunnel-add", help = "create an IP tunnel")
    ap.add_argument("target", metavar = "TARGET", action = "store", type = str,
                    default = None, help = "Target's name or URL")
    ap.add_argument("port", metavar = "PORT", action = "store", type = int,
                    help = "Port to tunnel to")
    ap.add_argument("protocol", metavar = "PROTOCOL", action = "store",
                    nargs = "?", default = None, type = str,
                    help = "Protocol to tunnel {tcp,udp,sctp}[{4,6}] "
                    "(defaults to tcp and to IPv4)")
    ap.add_argument("ip_addr", metavar = "IP-ADDR", action = "store",
                    nargs = "?", default = None, type = str,
                    help = "target's IP address to tunnel to "
                    "(default is the first IP address the target declares)")
    ap.set_defaults(func = cmdline_tunnel_add)

    ap = argsp.add_parser("tunnel-remove",
                          help = "remove an existing IP tunnel")
    ap.add_argument("target", metavar = "TARGET", action = "store", type = str,
                    default = None, help = "Target's name or URL")
    ap.add_argument("port", metavar = "PORT", action = "store",
                    help = "Port to tunnel to")
    ap.add_argument("protocol", metavar = "PROTOCOL", action = "store",
                    nargs = "?", default = None,
                    help = "Protocol to tunnel {tcp,udp,sctp}[{4,6}] "
                    "(defaults to tcp and to IPv4)")
    ap.add_argument("ip_addr", metavar = "IP-ADDR", action = "store",
                    nargs = "?", default = None,
                    help = "target's IP address to tunnel to "
                    "(default is the first IP address the target declares)")
    ap.set_defaults(func = cmdline_tunnel_remove)

    ap = argsp.add_parser("tunnel-list", help = "List existing IP tunnels")
    ap.add_argument("target", metavar = "TARGET", action = "store", type = str,
                    default = None, help = "Target's name or URL")
    ap.set_defaults(func = cmdline_tunnel_list)
[ [ [ 253, 255 ], [ 295, 297 ] ], [ [ 270, 280 ], [ 4788, 4798 ], [ 5135, 5145 ], [ 5415, 5425 ] ], [ [ 288, 294 ] ], [ [ 4748, 4766 ], [ 6659, 6677 ] ], [ [ 5092, 5113 ], [ 7578, 7599 ] ], [ [ 5374, 5393 ], [ 7852, 7871 ] ], [ [ 5741, 5754 ] ] ]
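As a usage sketch, the function below shows how a test script might combine the tunnel extension's add()/remove() calls from the file above with a plain TCP probe. It is only a sketch under the assumptions stated in its docstring, not part of the tcfl API; the tcfl-specific calls (target.tunnel.add, target.tunnel.remove, target.addr_get, target.rtb.parsed_url.hostname) are taken directly from the docstrings above.

import socket


def check_target_port(target, ic, target_port=22):
    """Probe a TCP port on the target through a server-side tunnel.

    `target` and `ic` are tcfl target/interconnect objects supplied by the
    test runner; everything tcfl-specific here follows the docstrings above
    and is an assumption about the surrounding framework.
    """
    target.tunnel.ip_addr = target.addr_get(ic, "ipv4")
    server_port = target.tunnel.add(target_port)   # port opened on the server
    server_host = target.rtb.parsed_url.hostname
    try:
        # plain stdlib check: can we reach the target's port through the tunnel?
        with socket.create_connection((server_host, server_port), timeout=5):
            print("port %d on the target reachable via %s:%d"
                  % (target_port, server_host, server_port))
    finally:
        target.tunnel.remove(target_port)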
# np_baseball is available

# Import numpy
import numpy as np

# Create np_height_in from np_baseball
np_height_in = np_baseball[:,0]

# Print out the mean of np_height_in
print(np.mean(np_height_in))

# Print out the median of np_height_in
print(np.median(np_height_in))


# np_baseball is available

# Import numpy
import numpy as np

# Print mean height (first column)
avg = np.mean(np_baseball[:,0])
print("Average: " + str(avg))

# Print median height. Replace 'None'
med = np.median(np_baseball[:,0])
print("Median: " + str(med))

# Print out the standard deviation on height. Replace 'None'
stddev = np.std(np_baseball[:,0])
print("Standard Deviation: " + str(stddev))

# Print out correlation between first and second column. Replace 'None'
corr = np.corrcoef(np_baseball[:,0], np_baseball[:,1])
print("Correlation: " + str(corr))


# heights and positions are available as lists

# Import numpy
import numpy as np

# Convert positions and heights to numpy arrays: np_positions, np_heights
np_positions = np.array(positions)
np_heights = np.array(heights)

# Heights of the goalkeepers: gk_heights
gk_heights = np_heights[np_positions == 'GK']

# Heights of the other players: other_heights
other_heights = np_heights[np_positions != 'GK']

# Print out the median height of goalkeepers. Replace 'None'
print("Median height of goalkeepers: " + str(np.median(gk_heights)))

# Print out the median height of other players. Replace 'None'
print("Median height of other players: " + str(np.median(other_heights)))
[ [ [ 53, 64 ], [ 187, 189 ], [ 259, 261 ] ], [ [ 108, 120 ], [ 195, 207 ], [ 269, 281 ] ], [ [ 342, 353 ], [ 399, 401 ], [ 504, 506 ], [ 636, 638 ], [ 789, 791 ] ], [ [ 393, 396 ], [ 450, 453 ] ], [ [ 498, 501 ], [ 556, 559 ] ], [ [ 627, 633 ], [ 697, 703 ] ], [ [ 782, 786 ], [ 866, 870 ] ], [ [ 951, 962 ], [ 1056, 1058 ], [ 1090, 1092 ], [ 1408, 1410 ], [ 1546, 1548 ] ], [ [ 1041, 1053 ], [ 1177, 1189 ], [ 1276, 1288 ] ], [ [ 1077, 1087 ], [ 1166, 1176 ], [ 1265, 1275 ] ], [ [ 1153, 1163 ], [ 1418, 1428 ] ], [ [ 1249, 1262 ], [ 1556, 1569 ] ] ]
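The snippets above rely on variables preloaded by the exercise environment (np_baseball, positions, heights). The sketch below generates synthetic stand-ins so the same calls can be run end to end; the shapes and distributions are invented for illustration only.

import numpy as np

rng = np.random.default_rng(0)

# synthetic stand-ins for the exercise data
np_baseball = np.column_stack([rng.normal(73, 2, 500),     # height (in)
                             rng.normal(200, 20, 500)])  # weight (lb)
positions = list(rng.choice(['GK', 'A', 'D', 'M'], size=500))
heights = list(rng.normal(180, 8, 500))

print("mean height:", np.mean(np_baseball[:, 0]))
print("median height:", np.median(np_baseball[:, 0]))
print("height/weight corr:", np.corrcoef(np_baseball[:, 0], np_baseball[:, 1])[0, 1])

np_positions = np.array(positions)
np_heights = np.array(heights)
print("median GK height:", np.median(np_heights[np_positions == 'GK']))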
""" Top-level URL lookup for InvenTree application. Passes URL lookup downstream to each app as required. """ from django.conf.urls import url, include from django.urls import path from django.contrib import admin from company.urls import company_urls from company.urls import manufacturer_part_urls from company.urls import supplier_part_urls from common.urls import common_urls from part.urls import part_urls from stock.urls import stock_urls from build.urls import build_urls from order.urls import order_urls from plugin.urls import get_plugin_urls from barcodes.api import barcode_api_urls from common.api import common_api_urls from part.api import part_api_urls, bom_api_urls from company.api import company_api_urls from stock.api import stock_api_urls from build.api import build_api_urls from order.api import order_api_urls from label.api import label_api_urls from report.api import report_api_urls from plugin.api import plugin_api_urls from django.conf import settings from django.conf.urls.static import static from django.views.generic.base import RedirectView from rest_framework.documentation import include_docs_urls from .views import auth_request from .views import IndexView, SearchView, DatabaseStatsView from .views import SettingsView, EditUserView, SetPasswordView, CustomEmailView, CustomConnectionsView, CustomPasswordResetFromKeyView from .views import CustomSessionDeleteView, CustomSessionDeleteOtherView from .views import CurrencyRefreshView from .views import AppearanceSelectView, SettingCategorySelectView from .views import DynamicJsView from .api import InfoView, NotFoundView from .api import ActionPluginView from users.api import user_urls admin.site.site_header = "InvenTree Admin" apipatterns = [ url(r'^barcode/', include(barcode_api_urls)), url(r'^settings/', include(common_api_urls)), url(r'^part/', include(part_api_urls)), url(r'^bom/', include(bom_api_urls)), url(r'^company/', include(company_api_urls)), url(r'^stock/', include(stock_api_urls)), url(r'^build/', include(build_api_urls)), url(r'^order/', include(order_api_urls)), url(r'^label/', include(label_api_urls)), url(r'^report/', include(report_api_urls)), url(r'^plugin/', include(plugin_api_urls)), # User URLs url(r'^user/', include(user_urls)), # Plugin endpoints url(r'^action/', ActionPluginView.as_view(), name='api-action-plugin'), # InvenTree information endpoint url(r'^$', InfoView.as_view(), name='api-inventree-info'), # Unknown endpoint url(r'^.*$', NotFoundView.as_view(), name='api-404'), ] settings_urls = [ url(r'^i18n/?', include('django.conf.urls.i18n')), url(r'^appearance/?', AppearanceSelectView.as_view(), name='settings-appearance'), url(r'^currencies-refresh/', CurrencyRefreshView.as_view(), name='settings-currencies-refresh'), url(r'^category/', SettingCategorySelectView.as_view(), name='settings-category'), # Catch any other urls url(r'^.*$', SettingsView.as_view(template_name='InvenTree/settings/settings.html'), name='settings'), ] # These javascript files are served "dynamically" - i.e. 
rendered on demand dynamic_javascript_urls = [ url(r'^calendar.js', DynamicJsView.as_view(template_name='js/dynamic/calendar.js'), name='calendar.js'), url(r'^nav.js', DynamicJsView.as_view(template_name='js/dynamic/nav.js'), name='nav.js'), url(r'^settings.js', DynamicJsView.as_view(template_name='js/dynamic/settings.js'), name='settings.js'), ] # These javascript files are pased through the Django translation layer translated_javascript_urls = [ url(r'^api.js', DynamicJsView.as_view(template_name='js/translated/api.js'), name='api.js'), url(r'^attachment.js', DynamicJsView.as_view(template_name='js/translated/attachment.js'), name='attachment.js'), url(r'^barcode.js', DynamicJsView.as_view(template_name='js/translated/barcode.js'), name='barcode.js'), url(r'^bom.js', DynamicJsView.as_view(template_name='js/translated/bom.js'), name='bom.js'), url(r'^build.js', DynamicJsView.as_view(template_name='js/translated/build.js'), name='build.js'), url(r'^company.js', DynamicJsView.as_view(template_name='js/translated/company.js'), name='company.js'), url(r'^filters.js', DynamicJsView.as_view(template_name='js/translated/filters.js'), name='filters.js'), url(r'^forms.js', DynamicJsView.as_view(template_name='js/translated/forms.js'), name='forms.js'), url(r'^helpers.js', DynamicJsView.as_view(template_name='js/translated/helpers.js'), name='helpers.js'), url(r'^label.js', DynamicJsView.as_view(template_name='js/translated/label.js'), name='label.js'), url(r'^model_renderers.js', DynamicJsView.as_view(template_name='js/translated/model_renderers.js'), name='model_renderers.js'), url(r'^modals.js', DynamicJsView.as_view(template_name='js/translated/modals.js'), name='modals.js'), url(r'^order.js', DynamicJsView.as_view(template_name='js/translated/order.js'), name='order.js'), url(r'^part.js', DynamicJsView.as_view(template_name='js/translated/part.js'), name='part.js'), url(r'^report.js', DynamicJsView.as_view(template_name='js/translated/report.js'), name='report.js'), url(r'^stock.js', DynamicJsView.as_view(template_name='js/translated/stock.js'), name='stock.js'), url(r'^plugin.js', DynamicJsView.as_view(template_name='js/translated/plugin.js'), name='plugin.js'), url(r'^tables.js', DynamicJsView.as_view(template_name='js/translated/tables.js'), name='tables.js'), url(r'^table_filters.js', DynamicJsView.as_view(template_name='js/translated/table_filters.js'), name='table_filters.js'), ] backendpatterns = [ # "Dynamic" javascript files which are rendered using InvenTree templating. 
url(r'^js/dynamic/', include(dynamic_javascript_urls)), url(r'^js/i18n/', include(translated_javascript_urls)), url(r'^auth/', include('rest_framework.urls', namespace='rest_framework')), url(r'^auth/?', auth_request), url(r'^api/', include(apipatterns)), url(r'^api-doc/', include_docs_urls(title='InvenTree API')), # 3rd party endpoints url(r'^markdownx/', include('markdownx.urls')), ] frontendpatterns = [ url(r'^part/', include(part_urls)), url(r'^manufacturer-part/', include(manufacturer_part_urls)), url(r'^supplier-part/', include(supplier_part_urls)), url(r'^common/', include(common_urls)), url(r'^stock/', include(stock_urls)), url(r'^company/', include(company_urls)), url(r'^order/', include(order_urls)), url(r'^build/', include(build_urls)), url(r'^settings/', include(settings_urls)), url(r'^edit-user/', EditUserView.as_view(), name='edit-user'), url(r'^set-password/', SetPasswordView.as_view(), name='set-password'), url(r'^index/', IndexView.as_view(), name='index'), url(r'^search/', SearchView.as_view(), name='search'), url(r'^stats/', DatabaseStatsView.as_view(), name='stats'), # plugin urls get_plugin_urls(), # appends currently loaded plugin urls = None # admin sites url(r'^admin/error_log/', include('error_report.urls')), url(r'^admin/shell/', include('django_admin_shell.urls')), url(r'^admin/', admin.site.urls, name='inventree-admin'), # DB user sessions url(r'^accounts/sessions/other/delete/$', view=CustomSessionDeleteOtherView.as_view(), name='session_delete_other', ), url(r'^accounts/sessions/(?P<pk>\w+)/delete/$', view=CustomSessionDeleteView.as_view(), name='session_delete', ), # Single Sign On / allauth # overrides of urlpatterns url(r'^accounts/email/', CustomEmailView.as_view(), name='account_email'), url(r'^accounts/social/connections/', CustomConnectionsView.as_view(), name='socialaccount_connections'), url(r"^accounts/password/reset/key/(?P<uidb36>[0-9A-Za-z]+)-(?P<key>.+)/$", CustomPasswordResetFromKeyView.as_view(), name="account_reset_password_from_key"), url(r'^accounts/', include('allauth_2fa.urls')), # MFA support url(r'^accounts/', include('allauth.urls')), # included urlpatterns ] urlpatterns = [ url('', include(frontendpatterns)), url('', include(backendpatterns)), ] # Server running in "DEBUG" mode? if settings.DEBUG: # Static file access urlpatterns += static(settings.STATIC_URL, document_root=settings.STATIC_ROOT) # Media file access urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT) # Debug toolbar access (only allowed in DEBUG mode) if 'debug_toolbar' in settings.INSTALLED_APPS: import debug_toolbar urlpatterns = [ path('__debug/', include(debug_toolbar.urls)), ] + urlpatterns # Send any unknown URLs to the parts page urlpatterns += [url(r'^.*$', RedirectView.as_view(url='/index/', permanent=False), name='index')]
[ [ [ 142, 145 ], [ 1758, 1761 ], [ 1808, 1811 ], [ 1858, 1861 ], [ 1902, 1905 ], [ 1944, 1947 ], [ 1994, 1997 ], [ 2040, 2043 ], [ 2086, 2089 ], [ 2132, 2135 ], [ 2178, 2181 ], [ 2226, 2229 ], [ 2291, 2294 ], [ 2355, 2358 ], [ 2469, 2472 ], [ 2556, 2559 ], [ 2636, 2639 ], [ 2692, 2695 ], [ 2779, 2782 ], [ 2881, 2884 ], [ 2996, 2999 ], [ 3210, 3213 ], [ 3319, 3322 ], [ 3413, 3416 ], [ 3628, 3631 ], [ 3725, 3728 ], [ 3843, 3846 ], [ 3952, 3955 ], [ 4049, 4052 ], [ 4152, 4155 ], [ 4261, 4264 ], [ 4370, 4373 ], [ 4473, 4476 ], [ 4582, 4585 ], [ 4685, 4688 ], [ 4818, 4821 ], [ 4924, 4927 ], [ 5027, 5030 ], [ 5127, 5130 ], [ 5233, 5236 ], [ 5336, 5339 ], [ 5442, 5445 ], [ 5548, 5551 ], [ 5778, 5781 ], [ 5838, 5841 ], [ 5899, 5902 ], [ 5979, 5982 ], [ 6015, 6018 ], [ 6056, 6059 ], [ 6148, 6151 ], [ 6224, 6227 ], [ 6264, 6267 ], [ 6330, 6333 ], [ 6389, 6392 ], [ 6434, 6437 ], [ 6477, 6480 ], [ 6523, 6526 ], [ 6566, 6569 ], [ 6609, 6612 ], [ 6658, 6661 ], [ 6725, 6728 ], [ 6802, 6805 ], [ 6858, 6861 ], [ 6917, 6920 ], [ 7089, 7092 ], [ 7150, 7153 ], [ 7213, 7216 ], [ 7299, 7302 ], [ 7422, 7425 ], [ 7603, 7606 ], [ 7682, 7685 ], [ 7792, 7795 ], [ 7955, 7958 ], [ 8025, 8028 ], [ 8123, 8126 ], [ 8163, 8166 ], [ 8771, 8774 ] ], [ [ 147, 154 ], [ 1776, 1783 ], [ 1827, 1834 ], [ 1873, 1880 ], [ 1916, 1923 ], [ 1962, 1969 ], [ 2010, 2017 ], [ 2056, 2063 ], [ 2102, 2109 ], [ 2148, 2155 ], [ 2195, 2202 ], [ 2243, 2250 ], [ 2306, 2313 ], [ 2652, 2659 ], [ 5799, 5806 ], [ 5856, 5863 ], [ 5914, 5921 ], [ 6029, 6036 ], [ 6168, 6175 ], [ 6239, 6246 ], [ 6292, 6299 ], [ 6354, 6361 ], [ 6406, 6413 ], [ 6450, 6457 ], [ 6495, 6502 ], [ 6539, 6546 ], [ 6582, 6589 ], [ 6628, 6635 ], [ 7115, 7122 ], [ 7172, 7179 ], [ 7974, 7981 ], [ 8044, 8051 ], [ 8131, 8138 ], [ 8171, 8178 ], [ 8658, 8665 ] ], [ [ 179, 183 ], [ 8641, 8645 ] ], [ [ 211, 216 ], [ 1694, 1699 ], [ 7229, 7234 ] ], [ [ 243, 255 ], [ 6503, 6515 ] ], [ [ 281, 303 ], [ 6300, 6322 ] ], [ [ 329, 347 ], [ 6362, 6380 ] ], [ [ 373, 384 ], [ 6414, 6425 ] ], [ [ 407, 416 ], [ 6247, 6256 ] ], [ [ 440, 450 ], [ 6458, 6468 ] ], [ [ 474, 484 ], [ 6590, 6600 ] ], [ [ 508, 518 ], [ 6547, 6557 ] ], [ [ 543, 558 ], [ 7000, 7015 ] ], [ [ 585, 601 ], [ 1784, 1800 ] ], [ [ 625, 640 ], [ 1835, 1850 ] ], [ [ 662, 675 ], [ 1881, 1894 ] ], [ [ 677, 689 ], [ 1924, 1936 ] ], [ [ 714, 730 ], [ 1970, 1986 ] ], [ [ 753, 767 ], [ 2018, 2032 ] ], [ [ 790, 804 ], [ 2064, 2078 ] ], [ [ 827, 841 ], [ 2110, 2124 ] ], [ [ 864, 878 ], [ 2156, 2170 ] ], [ [ 902, 917 ], [ 2203, 2218 ] ], [ [ 941, 956 ], [ 2251, 2266 ] ], [ [ 982, 990 ], [ 8238, 8246 ], [ 8305, 8313 ], [ 8340, 8348 ], [ 8413, 8421 ], [ 8447, 8455 ], [ 8551, 8559 ] ], [ [ 1027, 1033 ], [ 8298, 8304 ], [ 8406, 8412 ] ], [ [ 1073, 1085 ], [ 8784, 8796 ] ], [ [ 1127, 1144 ], [ 6074, 6091 ] ], [ [ 1165, 1177 ], [ 5995, 6007 ] ], [ [ 1197, 1206 ], [ 6818, 6827 ] ], [ [ 1208, 1218 ], [ 6875, 6885 ] ], [ [ 1220, 1237 ], [ 6933, 6950 ] ], [ [ 1257, 1269 ], [ 3009, 3021 ] ], [ [ 1271, 1283 ], [ 6678, 6690 ] ], [ [ 1285, 1300 ], [ 6748, 6763 ] ], [ [ 1302, 1317 ], [ 7628, 7643 ] ], [ [ 1319, 1340 ], [ 7720, 7741 ] ], [ [ 1342, 1372 ], [ 7868, 7898 ] ], [ [ 1392, 1415 ], [ 7475, 7498 ] ], [ [ 1417, 1445 ], [ 7346, 7374 ] ], [ [ 1465, 1484 ], [ 2808, 2827 ] ], [ [ 1504, 1524 ], [ 2714, 2734 ] ], [ [ 1526, 1551 ], [ 2900, 2925 ] ], [ [ 1571, 1584 ], [ 3231, 3244 ], [ 3335, 3348 ], [ 3434, 3447 ], [ 3644, 3657 ], [ 3748, 3761 ], [ 3863, 3876 ], [ 3968, 3981 ], [ 4067, 4080 ], [ 4172, 4185 ], [ 4281, 4294 ], [ 4388, 4401 ], [ 4493, 4506 ], [ 
4600, 4613 ], [ 4713, 4726 ], [ 4837, 4850 ], [ 4942, 4955 ], [ 5044, 5057 ], [ 5146, 5159 ], [ 5251, 5264 ], [ 5355, 5368 ], [ 5461, 5474 ], [ 5574, 5587 ] ], [ [ 1603, 1611 ], [ 2480, 2488 ] ], [ [ 1613, 1625 ], [ 2569, 2581 ] ], [ [ 1643, 1659 ], [ 2372, 2388 ] ], [ [ 1683, 1692 ], [ 2314, 2323 ] ], [ [ 1738, 1749 ], [ 6037, 6048 ] ], [ [ 2613, 2626 ], [ 6636, 6649 ] ], [ [ 3178, 3201 ], [ 5807, 5830 ] ], [ [ 3593, 3619 ], [ 5864, 5890 ] ], [ [ 5674, 5689 ], [ 8179, 8194 ] ], [ [ 6199, 6215 ], [ 8139, 8155 ] ], [ [ 8103, 8114 ], [ 8283, 8294 ], [ 8755, 8766 ] ], [ [ 8591, 8604 ], [ 8666, 8679 ] ], [ [ 8613, 8624 ], [ 8755, 8766 ] ] ]
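The urlconf above is composed from small pattern lists that are merged into urlpatterns and extended conditionally. Below is a minimal, generic sketch of the same composition pattern, using django.urls.re_path (the current replacement for the deprecated url() helper used above); myapp.views.IndexView is a hypothetical view and not part of InvenTree.

from django.conf import settings
from django.conf.urls.static import static
from django.urls import include, re_path
from django.views.generic.base import RedirectView

from myapp.views import IndexView  # hypothetical view, not defined in the file above

api_patterns = [
    re_path(r"^$", IndexView.as_view(), name="api-index"),
]

frontend_patterns = [
    re_path(r"^index/", IndexView.as_view(), name="index"),
]

urlpatterns = [
    re_path(r"^api/", include(api_patterns)),
    re_path(r"", include(frontend_patterns)),
]

if settings.DEBUG:
    # serve media files directly only while developing
    urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)

# catch-all redirect, mirroring the tail of the urlconf above
urlpatterns += [
    re_path(r"^.*$", RedirectView.as_view(url="/index/", permanent=False), name="catchall"),
]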
import os import logging import pickle import numpy as np import pandas as pd import matplotlib.pyplot as plt import seaborn as sns import al from al.dataset import mnist from al.model.model_zoo.simple_cnn import ConvModel from al.model.mnist import MnistLearner from al.dataset.mnist import MnistDataset from al.train.active_train import ActiveTrain from al.helpers.experiment import set_up_experiment, load_config from al.experiments import set_up_learner DATASET = 'mnist' FOLDER_PATH = os.path.dirname(__file__) OUTPUT_DIR, FIGURE_DIR, logger, logger_name = set_up_experiment( __file__, FOLDER_PATH, logging_lvl=20) logger.info('-------------------------') logger.info('--LAUNCHING EXPERIMENTS--') logger.info('-------------------------') config = load_config(FOLDER_PATH, DATASET) setupper = set_up_learner(DATASET) config['active_learning']['output_dir'] = OUTPUT_DIR config['experiment']['logger_name'] = logger_name model_name = 'simple_cnn' strategies = ['random_sampling', 'margin_sampling'] repeats = 1 score_data = {} config['active_learning']['assets_per_query'] = 20 config['active_learning']['n_iter'] = 5 config['active_learning']['init_size'] = 100 config['train_parameters']['batch_size'] = 16 config['train_parameters']['iterations'] = 100 config['experiment']['n_classes'] = 2 raw_dataset, _ = setupper(config, OUTPUT_DIR, logger, index_train=np.arange(60000)) full_train_dataset = raw_dataset.dataset first_class = 1 second_class = 2 first_classes = [] second_classes = [] p = 0.1 for i in range(len(full_train_dataset)): if full_train_dataset[i][1].numpy() == first_class: first_classes.append(i) elif full_train_dataset[i][1].numpy() == second_class and np.random.rand() < p: second_classes.append(i) train_indices = np.array(first_classes + second_classes) np.random.permutation(train_indices) for i in range(repeats): logger.info('---------------------------') logger.info(f'--------ROUND OF TRAININGS NUMBER #{i+1}--------') logger.info('---------------------------') for strategy in strategies: dataset, learner = setupper( config, OUTPUT_DIR, logger, index_train=train_indices) logger.info('---------------------------') logger.info(f'----STRATEGY : {strategy}----') logger.info('---------------------------') trainer = ActiveTrain(learner, dataset, strategy, logger_name) scores = trainer.train( config['train_parameters'], **config['active_learning']) score_data[(strategy, i)] = scores logger.info(f'----DONE----\n') logger.info('---------------------------') logger.info(f'--------DONE--------') logger.info('---------------------------\n\n\n') # data = [] # for (strategy, experiment_number), scores_experiment in score_data.items(): # for step_result in scores_experiment: # val_step_result = step_result['val'] # step = step_result['step'] # data.append( # {'strategy': strategy, # 'experiment': experiment_number, # 'step': step, # **val_step_result}) # df = pd.DataFrame(data) # plot_dir = os.path.join(os.path.dirname(__file__), 'figures') # plt.figure(num=0, figsize=(12, 5)) # sns.lineplot(x='step', y='accuracy', hue='strategy', data=df) # plt.ylabel('Accuracy') # plt.show() # plt.savefig(os.path.join(plot_dir, 'accuracy_imbalance.png'))
[ [ [ 7, 9 ], [ 495, 497 ] ], [ [ 17, 24 ] ], [ [ 32, 38 ] ], [ [ 47, 58 ], [ 1405, 1407 ], [ 1737, 1739 ], [ 1809, 1811 ], [ 1850, 1852 ] ], [ [ 66, 78 ] ], [ [ 86, 110 ] ], [ [ 118, 132 ] ], [ [ 141, 143 ] ], [ [ 167, 172 ] ], [ [ 215, 224 ] ], [ [ 252, 264 ] ], [ [ 294, 306 ] ], [ [ 341, 352 ], [ 2386, 2397 ] ], [ [ 387, 404 ], [ 567, 584 ] ], [ [ 406, 417 ], [ 763, 774 ] ], [ [ 445, 459 ], [ 808, 822 ] ], [ [ 462, 469 ], [ 788, 795 ], [ 823, 830 ] ], [ [ 481, 492 ], [ 600, 611 ], [ 775, 786 ] ], [ [ 521, 531 ], [ 875, 885 ], [ 1347, 1357 ], [ 2165, 2175 ] ], [ [ 533, 543 ] ], [ [ 545, 551 ], [ 630, 636 ], [ 671, 677 ], [ 712, 718 ], [ 1359, 1365 ], [ 1917, 1923 ], [ 1964, 1970 ], [ 2033, 2039 ], [ 2177, 2183 ], [ 2220, 2226 ], [ 2271, 2277 ], [ 2325, 2331 ], [ 2591, 2597 ], [ 2626, 2632 ], [ 2673, 2679 ], [ 2714, 2720 ] ], [ [ 553, 564 ], [ 924, 935 ], [ 2426, 2437 ] ], [ [ 754, 760 ], [ 833, 839 ], [ 886, 892 ], [ 1043, 1049 ], [ 1094, 1100 ], [ 1134, 1140 ], [ 1180, 1186 ], [ 1226, 1232 ], [ 1274, 1280 ], [ 1339, 1345 ], [ 2157, 2163 ], [ 2483, 2489 ], [ 2513, 2519 ] ], [ [ 797, 805 ], [ 1330, 1338 ], [ 2135, 2143 ] ], [ [ 936, 946 ] ], [ [ 963, 973 ], [ 2096, 2106 ] ], [ [ 1015, 1022 ], [ 1903, 1910 ] ], [ [ 1027, 1037 ], [ 2548, 2558 ] ], [ [ 1313, 1324 ], [ 1444, 1455 ] ], [ [ 1326, 1327 ] ], [ [ 1423, 1441 ], [ 1565, 1583 ], [ 1594, 1612 ], [ 1684, 1702 ] ], [ [ 1465, 1476 ], [ 1630, 1641 ] ], [ [ 1481, 1493 ], [ 1720, 1732 ] ], [ [ 1498, 1511 ], [ 1651, 1664 ], [ 1818, 1831 ] ], [ [ 1517, 1531 ], [ 1767, 1781 ], [ 1834, 1848 ] ], [ [ 1537, 1538 ], [ 1756, 1757 ] ], [ [ 1550, 1551 ], [ 1613, 1614 ], [ 1672, 1673 ], [ 1703, 1704 ], [ 1789, 1790 ] ], [ [ 1793, 1806 ], [ 1872, 1885 ], [ 2197, 2210 ] ], [ [ 1892, 1893 ], [ 2014, 2015 ], [ 2570, 2571 ] ], [ [ 2084, 2092 ], [ 2301, 2309 ], [ 2416, 2424 ], [ 2560, 2568 ] ], [ [ 2116, 2123 ], [ 2407, 2414 ] ], [ [ 2125, 2132 ], [ 2398, 2405 ] ], [ [ 2376, 2383 ], [ 2456, 2463 ] ], [ [ 2447, 2453 ], [ 2576, 2582 ] ] ]
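The index construction in the script above walks the whole dataset in a Python loop and, note, discards the result of np.random.permutation, which returns a shuffled copy rather than shuffling in place. Below is a minimal, framework-free sketch of the same subsampling step using vectorised numpy; the label array is a synthetic stand-in for the MNIST training targets.

import numpy as np

rng = np.random.default_rng(0)
labels = rng.integers(0, 10, size=60000)   # stand-in for the MNIST training labels

first_class, second_class, p = 1, 2, 0.1
first_idx = np.flatnonzero(labels == first_class)
second_idx = np.flatnonzero(labels == second_class)
second_idx = second_idx[rng.random(second_idx.size) < p]   # keep ~10% of the second class

# permutation() returns a shuffled copy, so assign the result
train_indices = rng.permutation(np.concatenate([first_idx, second_idx]))
print(len(first_idx), len(second_idx), len(train_indices))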
from __future__ import annotations import asyncio import bisect import builtins import concurrent.futures import errno import heapq import logging import os import random import sys import threading import warnings import weakref from collections import defaultdict, deque, namedtuple from collections.abc import Hashable, Iterable, MutableMapping from contextlib import suppress from datetime import timedelta from inspect import isawaitable from pickle import PicklingError from typing import TYPE_CHECKING if TYPE_CHECKING: from .client import Client from tlz import first, keymap, merge, pluck # noqa: F401 from tornado.ioloop import IOLoop, PeriodicCallback import dask from dask.core import istask from dask.system import CPU_COUNT from dask.utils import ( apply, format_bytes, funcname, parse_bytes, parse_timedelta, stringify, typename, ) from . import comm, preloading, profile, system, utils from .batched import BatchedSend from .comm import connect, get_address_host from .comm.addressing import address_from_user_args, parse_address from .comm.utils import OFFLOAD_THRESHOLD from .core import ( CommClosedError, Status, coerce_to_address, error_message, pingpong, send_recv, ) from .diagnostics import nvml from .diagnostics.plugin import _get_plugin_name from .diskutils import WorkSpace from .http import get_handlers from .metrics import time from .node import ServerNode from .proctitle import setproctitle from .protocol import pickle, to_serialize from .pubsub import PubSubWorkerExtension from .security import Security from .sizeof import safe_sizeof as sizeof from .threadpoolexecutor import ThreadPoolExecutor from .threadpoolexecutor import secede as tpe_secede from .utils import ( LRU, TimeoutError, _maybe_complex, get_ip, has_arg, import_file, iscoroutinefunction, json_load_robust, key_split, log_errors, offload, parse_ports, silence_logging, thread_state, warn_on_duration, ) from .utils_comm import gather_from_workers, pack_data, retry_operation from .utils_perf import ThrottledGC, disable_gc_diagnosis, enable_gc_diagnosis from .versions import get_versions logger = logging.getLogger(__name__) LOG_PDB = dask.config.get("distributed.admin.pdb-on-err") no_value = "--no-value-sentinel--" IN_PLAY = ("waiting", "ready", "executing", "long-running") PENDING = ("waiting", "ready", "constrained") PROCESSING = ("waiting", "ready", "constrained", "executing", "long-running") READY = ("ready", "constrained") DEFAULT_EXTENSIONS = [PubSubWorkerExtension] DEFAULT_METRICS = {} DEFAULT_STARTUP_INFORMATION = {} DEFAULT_DATA_SIZE = parse_bytes( dask.config.get("distributed.scheduler.default-data-size") ) SerializedTask = namedtuple("SerializedTask", ["function", "args", "kwargs", "task"]) class TaskState: """Holds volatile state relating to an individual Dask task * **dependencies**: ``set(TaskState instances)`` The data needed by this key to run * **dependents**: ``set(TaskState instances)`` The keys that use this dependency. * **duration**: ``float`` Expected duration the a task * **priority**: ``tuple`` The priority this task given by the scheduler. Determines run order. * **state**: ``str`` The current state of the task. One of ["waiting", "ready", "executing", "fetch", "memory", "flight", "long-running", "rescheduled", "error"] * **who_has**: ``set(worker)`` Workers that we believe have this data * **coming_from**: ``str`` The worker that current task data is coming from if task is in flight * **waiting_for_data**: ``set(keys of dependencies)`` A dynamic version of dependencies. 
All dependencies that we still don't have for a particular key. * **resource_restrictions**: ``{str: number}`` Abstract resources required to run a task * **exception**: ``str`` The exception caused by running a task if it erred * **traceback**: ``str`` The exception caused by running a task if it erred * **type**: ``type`` The type of a particular piece of data * **suspicious_count**: ``int`` The number of times a dependency has not been where we expected it * **startstops**: ``[{startstop}]`` Log of transfer, load, and compute times for a task * **start_time**: ``float`` Time at which task begins running * **stop_time**: ``float`` Time at which task finishes running * **metadata**: ``dict`` Metadata related to task. Stored metadata should be msgpack serializable (e.g. int, string, list, dict). * **nbytes**: ``int`` The size of a particular piece of data * **annotations**: ``dict`` Task annotations Parameters ---------- key: str runspec: SerializedTask A named tuple containing the ``function``, ``args``, ``kwargs`` and ``task`` associated with this `TaskState` instance. This defaults to ``None`` and can remain empty if it is a dependency that this worker will receive from another worker. """ def __init__(self, key, runspec=None): assert key is not None self.key = key self.runspec = runspec self.dependencies = set() self.dependents = set() self.duration = None self.priority = None self.state = "new" self.who_has = set() self.coming_from = None self.waiting_for_data = set() self.resource_restrictions = None self.exception = None self.exception_text = "" self.traceback = None self.traceback_text = "" self.type = None self.suspicious_count = 0 self.startstops = list() self.start_time = None self.stop_time = None self.metadata = {} self.nbytes = None self.annotations = None self.scheduler_holds_ref = False def __repr__(self): return f"<Task {self.key!r} {self.state}>" def get_nbytes(self) -> int: nbytes = self.nbytes return nbytes if nbytes is not None else DEFAULT_DATA_SIZE class Worker(ServerNode): """Worker node in a Dask distributed cluster Workers perform two functions: 1. **Serve data** from a local dictionary 2. **Perform computation** on that data and on data from peers Workers keep the scheduler informed of their data and use that scheduler to gather data from other workers when necessary to perform a computation. You can start a worker with the ``dask-worker`` command line application:: $ dask-worker scheduler-ip:port Use the ``--help`` flag to see more options:: $ dask-worker --help The rest of this docstring is about the internal state the the worker uses to manage and track internal computations. **State** **Informational State** These attributes don't change significantly during execution. * **nthreads:** ``int``: Number of nthreads used by this worker process * **executors:** ``Dict[str, concurrent.futures.Executor]``: Executors used to perform computation. Always contains the default executor. * **local_directory:** ``path``: Path on local machine to store temporary files * **scheduler:** ``rpc``: Location of scheduler. See ``.ip/.port`` attributes. 
* **name:** ``string``: Alias * **services:** ``{str: Server}``: Auxiliary web servers running on this worker * **service_ports:** ``{str: port}``: * **total_out_connections**: ``int`` The maximum number of concurrent outgoing requests for data * **total_in_connections**: ``int`` The maximum number of concurrent incoming requests for data * **comm_threshold_bytes**: ``int`` As long as the total number of bytes in flight is below this threshold we will not limit the number of outgoing connections for a single tasks dependency fetch. * **batched_stream**: ``BatchedSend`` A batched stream along which we communicate to the scheduler * **log**: ``[(message)]`` A structured and queryable log. See ``Worker.story`` **Volatile State** These attributes track the progress of tasks that this worker is trying to complete. In the descriptions below a ``key`` is the name of a task that we want to compute and ``dep`` is the name of a piece of dependent data that we want to collect from others. * **tasks**: ``{key: TaskState}`` The tasks currently executing on this worker (and any dependencies of those tasks) * **data:** ``{key: object}``: Prefer using the **host** attribute instead of this, unless memory_limit and at least one of memory_target_fraction or memory_spill_fraction values are defined, in that case, this attribute is a zict.Buffer, from which information on LRU cache can be queried. * **data.memory:** ``{key: object}``: Dictionary mapping keys to actual values stored in memory. Only available if condition for **data** being a zict.Buffer is met. * **data.disk:** ``{key: object}``: Dictionary mapping keys to actual values stored on disk. Only available if condition for **data** being a zict.Buffer is met. * **data_needed**: deque(keys) The keys which still require data in order to execute, arranged in a deque * **ready**: [keys] Keys that are ready to run. Stored in a LIFO stack * **constrained**: [keys] Keys for which we have the data to run, but are waiting on abstract resources like GPUs. Stored in a FIFO deque * **executing_count**: ``int`` A count of tasks currently executing on this worker * **executed_count**: int A number of tasks that this worker has run in its lifetime * **long_running**: {keys} A set of keys of tasks that are running and have started their own long-running clients. * **has_what**: ``{worker: {deps}}`` The data that we care about that we think a worker has * **pending_data_per_worker**: ``{worker: [dep]}`` The data on each worker that we still want, prioritized as a deque * **in_flight_tasks**: ``int`` A count of the number of tasks that are coming to us in current peer-to-peer connections * **in_flight_workers**: ``{worker: {task}}`` The workers from which we are currently gathering data and the dependencies we expect from those connections * **comm_bytes**: ``int`` The total number of bytes in flight * **threads**: ``{key: int}`` The ID of the thread on which the task ran * **active_threads**: ``{int: key}`` The keys currently running on active threads * **waiting_for_data_count**: ``int`` A count of how many tasks are currently waiting for data Parameters ---------- scheduler_ip: str scheduler_port: int ip: str, optional data: MutableMapping, type, None The object to use for storage, builds a disk-backed LRU dict by default nthreads: int, optional loop: tornado.ioloop.IOLoop local_directory: str, optional Directory where we place local resources name: str, optional memory_limit: int, float, string Number of bytes of memory that this worker should use. 
Set to zero for no limit. Set to 'auto' to calculate as system.MEMORY_LIMIT * min(1, nthreads / total_cores) Use strings or numbers like 5GB or 5e9 memory_target_fraction: float Fraction of memory to try to stay beneath memory_spill_fraction: float Fraction of memory at which we start spilling to disk memory_pause_fraction: float Fraction of memory at which we stop running new tasks executor: concurrent.futures.Executor, dict[str, concurrent.futures.Executor], str The executor(s) to use. Depending on the type, it has the following meanings: - Executor instance: The default executor. - Dict[str, Executor]: mapping names to Executor instances. If the "default" key isn't in the dict, a "default" executor will be created using ``ThreadPoolExecutor(nthreads)``. - Str: The string "offload", which refer to the same thread pool used for offloading communications. This results in the same thread being used for deserialization and computation. resources: dict Resources that this worker has like ``{'GPU': 2}`` nanny: str Address on which to contact nanny, if it exists lifetime: str Amount of time like "1 hour" after which we gracefully shut down the worker. This defaults to None, meaning no explicit shutdown time. lifetime_stagger: str Amount of time like "5 minutes" to stagger the lifetime value The actual lifetime will be selected uniformly at random between lifetime +/- lifetime_stagger lifetime_restart: bool Whether or not to restart a worker after it has reached its lifetime Default False Examples -------- Use the command line to start a worker:: $ dask-scheduler Start scheduler at 127.0.0.1:8786 $ dask-worker 127.0.0.1:8786 Start worker at: 127.0.0.1:1234 Registered with scheduler at: 127.0.0.1:8786 See Also -------- distributed.scheduler.Scheduler distributed.nanny.Nanny """ _instances = weakref.WeakSet() _initialized_clients = weakref.WeakSet() def __init__( self, scheduler_ip=None, scheduler_port=None, scheduler_file=None, ncores=None, nthreads=None, loop=None, local_dir=None, local_directory=None, services=None, service_ports=None, service_kwargs=None, name=None, reconnect=True, memory_limit="auto", executor=None, resources=None, silence_logs=None, death_timeout=None, preload=None, preload_argv=None, security=None, contact_address=None, memory_monitor_interval="200ms", extensions=None, metrics=DEFAULT_METRICS, startup_information=DEFAULT_STARTUP_INFORMATION, data=None, interface=None, host=None, port=None, protocol=None, dashboard_address=None, dashboard=False, http_prefix="/", nanny=None, plugins=(), low_level_profiler=dask.config.get("distributed.worker.profile.low-level"), validate=None, profile_cycle_interval=None, lifetime=None, lifetime_stagger=None, lifetime_restart=None, **kwargs, ): self.tasks = dict() self.waiting_for_data_count = 0 self.has_what = defaultdict(set) self.pending_data_per_worker = defaultdict(deque) self.nanny = nanny self._lock = threading.Lock() self.data_needed = deque() # TODO: replace with heap? 
self.in_flight_tasks = 0 self.in_flight_workers = dict() self.total_out_connections = dask.config.get( "distributed.worker.connections.outgoing" ) self.total_in_connections = dask.config.get( "distributed.worker.connections.incoming" ) self.comm_threshold_bytes = 10e6 self.comm_nbytes = 0 self._missing_dep_flight = set() self.threads = dict() self.active_threads_lock = threading.Lock() self.active_threads = dict() self.active_keys = set() self.profile_keys = defaultdict(profile.create) self.profile_keys_history = deque(maxlen=3600) self.profile_recent = profile.create() self.profile_history = deque(maxlen=3600) self.generation = 0 self.ready = list() self.constrained = deque() self.executing_count = 0 self.executed_count = 0 self.long_running = set() self.recent_messages_log = deque( maxlen=dask.config.get("distributed.comm.recent-messages-log-length") ) self.target_message_size = 50e6 # 50 MB self.log = deque(maxlen=100000) if validate is None: validate = dask.config.get("distributed.scheduler.validate") self.validate = validate self._transitions = { # Basic state transitions ("new", "waiting"): self.transition_new_waiting, ("new", "fetch"): self.transition_new_fetch, ("waiting", "ready"): self.transition_waiting_ready, ("fetch", "flight"): self.transition_fetch_flight, ("ready", "executing"): self.transition_ready_executing, ("executing", "memory"): self.transition_executing_done, ("flight", "memory"): self.transition_flight_memory, ("flight", "fetch"): self.transition_flight_fetch, # Shouldn't be a valid transition but happens nonetheless ("ready", "memory"): self.transition_ready_memory, # Scheduler intercession (re-assignment) ("fetch", "waiting"): self.transition_fetch_waiting, ("flight", "waiting"): self.transition_flight_waiting, # Errors, long-running, constrained ("waiting", "error"): self.transition_waiting_done, ("constrained", "executing"): self.transition_constrained_executing, ("executing", "error"): self.transition_executing_done, ("executing", "rescheduled"): self.transition_executing_done, ("executing", "long-running"): self.transition_executing_long_running, ("long-running", "error"): self.transition_executing_done, ("long-running", "memory"): self.transition_executing_done, ("long-running", "rescheduled"): self.transition_executing_done, } self.incoming_transfer_log = deque(maxlen=100000) self.incoming_count = 0 self.outgoing_transfer_log = deque(maxlen=100000) self.outgoing_count = 0 self.outgoing_current_count = 0 self.repetitively_busy = 0 self.bandwidth = parse_bytes(dask.config.get("distributed.scheduler.bandwidth")) self.bandwidth_workers = defaultdict( lambda: (0, 0) ) # bw/count recent transfers self.bandwidth_types = defaultdict(lambda: (0, 0)) # bw/count recent transfers self.latency = 0.001 self._client = None if profile_cycle_interval is None: profile_cycle_interval = dask.config.get("distributed.worker.profile.cycle") profile_cycle_interval = parse_timedelta(profile_cycle_interval, default="ms") self._setup_logging(logger) if local_dir is not None: warnings.warn("The local_dir keyword has moved to local_directory") local_directory = local_dir if not local_directory: local_directory = dask.config.get("temporary-directory") or os.getcwd() os.makedirs(local_directory, exist_ok=True) local_directory = os.path.join(local_directory, "dask-worker-space") with warn_on_duration( "1s", "Creating scratch directories is taking a surprisingly long time. " "This is often due to running workers on a network file system. 
" "Consider specifying a local-directory to point workers to write " "scratch data to a local disk.", ): self._workspace = WorkSpace(os.path.abspath(local_directory)) self._workdir = self._workspace.new_work_dir(prefix="worker-") self.local_directory = self._workdir.dir_path if preload is None: preload = dask.config.get("distributed.worker.preload") if preload_argv is None: preload_argv = dask.config.get("distributed.worker.preload-argv") self.preloads = preloading.process_preloads( self, preload, preload_argv, file_dir=self.local_directory ) if scheduler_file: cfg = json_load_robust(scheduler_file) scheduler_addr = cfg["address"] elif scheduler_ip is None and dask.config.get("scheduler-address", None): scheduler_addr = dask.config.get("scheduler-address") elif scheduler_port is None: scheduler_addr = coerce_to_address(scheduler_ip) else: scheduler_addr = coerce_to_address((scheduler_ip, scheduler_port)) self.contact_address = contact_address if protocol is None: protocol_address = scheduler_addr.split("://") if len(protocol_address) == 2: protocol = protocol_address[0] self._start_port = port self._start_host = host if host: # Helpful error message if IPv6 specified incorrectly _, host_address = parse_address(host) if host_address.count(":") > 1 and not host_address.startswith("["): raise ValueError( "Host address with IPv6 must be bracketed like '[::1]'; " f"got {host_address}" ) self._interface = interface self._protocol = protocol if ncores is not None: warnings.warn("the ncores= parameter has moved to nthreads=") nthreads = ncores self.nthreads = nthreads or CPU_COUNT if resources is None: resources = dask.config.get("distributed.worker.resources", None) self.total_resources = resources or {} self.available_resources = (resources or {}).copy() self.death_timeout = parse_timedelta(death_timeout) self.extensions = dict() if silence_logs: silence_logging(level=silence_logs) if isinstance(security, dict): security = Security(**security) self.security = security or Security() assert isinstance(self.security, Security) self.connection_args = self.security.get_connection_args("worker") self.memory_limit = parse_memory_limit(memory_limit, self.nthreads) self.paused = False if "memory_target_fraction" in kwargs: self.memory_target_fraction = kwargs.pop("memory_target_fraction") else: self.memory_target_fraction = dask.config.get( "distributed.worker.memory.target" ) if "memory_spill_fraction" in kwargs: self.memory_spill_fraction = kwargs.pop("memory_spill_fraction") else: self.memory_spill_fraction = dask.config.get( "distributed.worker.memory.spill" ) if "memory_pause_fraction" in kwargs: self.memory_pause_fraction = kwargs.pop("memory_pause_fraction") else: self.memory_pause_fraction = dask.config.get( "distributed.worker.memory.pause" ) if isinstance(data, MutableMapping): self.data = data elif callable(data): self.data = data() elif isinstance(data, tuple): self.data = data[0](**data[1]) elif self.memory_limit and ( self.memory_target_fraction or self.memory_spill_fraction ): from .spill import SpillBuffer self.data = SpillBuffer( os.path.join(self.local_directory, "storage"), target=int( self.memory_limit * (self.memory_target_fraction or self.memory_spill_fraction) ) or sys.maxsize, ) else: self.data = dict() self.actors = {} self.loop = loop or IOLoop.current() self.reconnect = reconnect # Common executors always available self.executors: dict[str, concurrent.futures.Executor] = { "offload": utils._offload_executor, "actor": ThreadPoolExecutor(1, 
thread_name_prefix="Dask-Actor-Threads"), } if nvml.device_get_count() > 0: self.executors["gpu"] = ThreadPoolExecutor( 1, thread_name_prefix="Dask-GPU-Threads" ) # Find the default executor if executor == "offload": self.executors["default"] = self.executors["offload"] elif isinstance(executor, dict): self.executors.update(executor) elif executor is not None: self.executors["default"] = executor if "default" not in self.executors: self.executors["default"] = ThreadPoolExecutor( self.nthreads, thread_name_prefix="Dask-Default-Threads" ) self.batched_stream = BatchedSend(interval="2ms", loop=self.loop) self.name = name self.scheduler_delay = 0 self.stream_comms = dict() self.heartbeat_active = False self._ipython_kernel = None if self.local_directory not in sys.path: sys.path.insert(0, self.local_directory) self.services = {} self.service_specs = services or {} self._dashboard_address = dashboard_address self._dashboard = dashboard self._http_prefix = http_prefix self.metrics = dict(metrics) if metrics else {} self.startup_information = ( dict(startup_information) if startup_information else {} ) self.low_level_profiler = low_level_profiler handlers = { "gather": self.gather, "run": self.run, "run_coroutine": self.run_coroutine, "get_data": self.get_data, "update_data": self.update_data, "free_keys": self.handle_free_keys, "terminate": self.close, "ping": pingpong, "upload_file": self.upload_file, "start_ipython": self.start_ipython, "call_stack": self.get_call_stack, "profile": self.get_profile, "profile_metadata": self.get_profile_metadata, "get_logs": self.get_logs, "keys": self.keys, "versions": self.versions, "actor_execute": self.actor_execute, "actor_attribute": self.actor_attribute, "plugin-add": self.plugin_add, "plugin-remove": self.plugin_remove, "get_monitor_info": self.get_monitor_info, } stream_handlers = { "close": self.close, "compute-task": self.add_task, "cancel-compute": self.cancel_compute, "free-keys": self.handle_free_keys, "superfluous-data": self.handle_superfluous_data, "steal-request": self.steal_request, } super().__init__( handlers=handlers, stream_handlers=stream_handlers, io_loop=self.loop, connection_args=self.connection_args, **kwargs, ) self.scheduler = self.rpc(scheduler_addr) self.execution_state = { "scheduler": self.scheduler.address, "ioloop": self.loop, "worker": self, } pc = PeriodicCallback(self.heartbeat, 1000) self.periodic_callbacks["heartbeat"] = pc pc = PeriodicCallback( lambda: self.batched_stream.send({"op": "keep-alive"}), 60000 ) self.periodic_callbacks["keep-alive"] = pc self._suspicious_count_limit = 10 self._address = contact_address self.memory_monitor_interval = parse_timedelta( memory_monitor_interval, default="ms" ) if self.memory_limit: self._memory_monitoring = False pc = PeriodicCallback( self.memory_monitor, self.memory_monitor_interval * 1000 ) self.periodic_callbacks["memory"] = pc if extensions is None: extensions = DEFAULT_EXTENSIONS for ext in extensions: ext(self) self._throttled_gc = ThrottledGC(logger=logger) setproctitle("dask-worker [not started]") profile_trigger_interval = parse_timedelta( dask.config.get("distributed.worker.profile.interval"), default="ms" ) pc = PeriodicCallback(self.trigger_profile, profile_trigger_interval * 1000) self.periodic_callbacks["profile"] = pc pc = PeriodicCallback(self.cycle_profile, profile_cycle_interval * 1000) self.periodic_callbacks["profile-cycle"] = pc self.plugins = {} self._pending_plugins = plugins self.lifetime = lifetime or dask.config.get( 
"distributed.worker.lifetime.duration" ) lifetime_stagger = lifetime_stagger or dask.config.get( "distributed.worker.lifetime.stagger" ) self.lifetime_restart = lifetime_restart or dask.config.get( "distributed.worker.lifetime.restart" ) if isinstance(self.lifetime, str): self.lifetime = parse_timedelta(self.lifetime) if isinstance(lifetime_stagger, str): lifetime_stagger = parse_timedelta(lifetime_stagger) if self.lifetime: self.lifetime += (random.random() * 2 - 1) * lifetime_stagger self.io_loop.call_later(self.lifetime, self.close_gracefully) Worker._instances.add(self) ################## # Administrative # ################## def __repr__(self): return "<%s: %r, %s, %s, stored: %d, running: %d/%d, ready: %d, comm: %d, waiting: %d>" % ( self.__class__.__name__, self.address, self.name, self.status, len(self.data), self.executing_count, self.nthreads, len(self.ready), self.in_flight_tasks, self.waiting_for_data_count, ) @property def logs(self): return self._deque_handler.deque def log_event(self, topic, msg): self.batched_stream.send( { "op": "log-event", "topic": topic, "msg": msg, } ) @property def worker_address(self): """For API compatibility with Nanny""" return self.address @property def local_dir(self): """For API compatibility with Nanny""" warnings.warn( "The local_dir attribute has moved to local_directory", stacklevel=2 ) return self.local_directory @property def executor(self): return self.executors["default"] async def get_metrics(self): out = dict( executing=self.executing_count, in_memory=len(self.data), ready=len(self.ready), in_flight=self.in_flight_tasks, bandwidth={ "total": self.bandwidth, "workers": dict(self.bandwidth_workers), "types": keymap(typename, self.bandwidth_types), }, spilled_nbytes=getattr(self.data, "spilled_total", 0), ) out.update(self.monitor.recent()) for k, metric in self.metrics.items(): try: result = metric(self) if isawaitable(result): result = await result # In case of collision, prefer core metrics out.setdefault(k, result) except Exception: # TODO: log error once pass return out async def get_startup_information(self): result = {} for k, f in self.startup_information.items(): try: v = f(self) if isawaitable(v): v = await v result[k] = v except Exception: # TODO: log error once pass return result def identity(self, comm=None): return { "type": type(self).__name__, "id": self.id, "scheduler": self.scheduler.address, "nthreads": self.nthreads, "ncores": self.nthreads, # backwards compatibility "memory_limit": self.memory_limit, } ##################### # External Services # ##################### async def _register_with_scheduler(self): self.periodic_callbacks["keep-alive"].stop() self.periodic_callbacks["heartbeat"].stop() start = time() if self.contact_address is None: self.contact_address = self.address logger.info("-" * 49) while True: try: _start = time() comm = await connect(self.scheduler.address, **self.connection_args) comm.name = "Worker->Scheduler" comm._server = weakref.ref(self) await comm.write( dict( op="register-worker", reply=False, address=self.contact_address, keys=list(self.data), nthreads=self.nthreads, name=self.name, nbytes={ ts.key: ts.get_nbytes() for ts in self.tasks.values() # Only if the task is in memory this is a sensible # result since otherwise it simply submits the # default value if ts.state == "memory" }, types={k: typename(v) for k, v in self.data.items()}, now=time(), resources=self.total_resources, memory_limit=self.memory_limit, local_directory=self.local_directory, services=self.service_ports, 
nanny=self.nanny, pid=os.getpid(), versions=get_versions(), metrics=await self.get_metrics(), extra=await self.get_startup_information(), ), serializers=["msgpack"], ) future = comm.read(deserializers=["msgpack"]) response = await future if response.get("warning"): logger.warning(response["warning"]) _end = time() middle = (_start + _end) / 2 self._update_latency(_end - start) self.scheduler_delay = response["time"] - middle self.status = Status.running break except OSError: logger.info("Waiting to connect to: %26s", self.scheduler.address) await asyncio.sleep(0.1) except TimeoutError: logger.info("Timed out when connecting to scheduler") if response["status"] != "OK": raise ValueError(f"Unexpected response from register: {response!r}") else: await asyncio.gather( *( self.plugin_add(name=name, plugin=plugin) for name, plugin in response["worker-plugins"].items() ) ) logger.info(" Registered to: %26s", self.scheduler.address) logger.info("-" * 49) self.batched_stream.start(comm) self.periodic_callbacks["keep-alive"].start() self.periodic_callbacks["heartbeat"].start() self.loop.add_callback(self.handle_scheduler, comm) def _update_latency(self, latency): self.latency = latency * 0.05 + self.latency * 0.95 if self.digests is not None: self.digests["latency"].add(latency) async def heartbeat(self): if self.heartbeat_active: logger.debug("Heartbeat skipped: channel busy") return self.heartbeat_active = True logger.debug("Heartbeat: %s", self.address) try: start = time() response = await retry_operation( self.scheduler.heartbeat_worker, address=self.contact_address, now=start, metrics=await self.get_metrics(), executing={ key: start - self.tasks[key].start_time for key in self.active_keys if key in self.tasks }, ) end = time() middle = (start + end) / 2 self._update_latency(end - start) if response["status"] == "missing": for i in range(10): if self.status != Status.running: break else: await asyncio.sleep(0.05) else: await self._register_with_scheduler() return self.scheduler_delay = response["time"] - middle self.periodic_callbacks["heartbeat"].callback_time = ( response["heartbeat-interval"] * 1000 ) self.bandwidth_workers.clear() self.bandwidth_types.clear() except CommClosedError: logger.warning("Heartbeat to scheduler failed", exc_info=True) if not self.reconnect: await self.close(report=False) except OSError as e: # Scheduler is gone. Respect distributed.comm.timeouts.connect if "Timed out trying to connect" in str(e): await self.close(report=False) else: raise e finally: self.heartbeat_active = False async def handle_scheduler(self, comm): try: await self.handle_stream( comm, every_cycle=[self.ensure_communicating, self.ensure_computing] ) except Exception as e: logger.exception(e) raise finally: if self.reconnect and self.status == Status.running: logger.info("Connection to scheduler broken. Reconnecting...") self.loop.add_callback(self.heartbeat) else: await self.close(report=False) def start_ipython(self, comm): """Start an IPython kernel Returns Jupyter connection info dictionary. 
""" from ._ipython_utils import start_ipython if self._ipython_kernel is None: self._ipython_kernel = start_ipython( ip=self.ip, ns={"worker": self}, log=logger ) return self._ipython_kernel.get_connection_info() async def upload_file(self, comm, filename=None, data=None, load=True): out_filename = os.path.join(self.local_directory, filename) def func(data): if isinstance(data, str): data = data.encode() with open(out_filename, "wb") as f: f.write(data) f.flush() return data if len(data) < 10000: data = func(data) else: data = await offload(func, data) if load: try: import_file(out_filename) cache_loads.data.clear() except Exception as e: logger.exception(e) raise e return {"status": "OK", "nbytes": len(data)} def keys(self, comm=None): return list(self.data) async def gather(self, comm=None, who_has=None): who_has = { k: [coerce_to_address(addr) for addr in v] for k, v in who_has.items() if k not in self.data } result, missing_keys, missing_workers = await gather_from_workers( who_has, rpc=self.rpc, who=self.address ) self.update_data(data=result, report=False) if missing_keys: logger.warning( "Could not find data: %s on workers: %s (who_has: %s)", missing_keys, missing_workers, who_has, ) return {"status": "partial-fail", "keys": missing_keys} else: return {"status": "OK"} def get_monitor_info(self, comm=None, recent=False, start=0): result = dict( range_query=( self.monitor.recent() if recent else self.monitor.range_query(start=start) ), count=self.monitor.count, last_time=self.monitor.last_time, ) if nvml.device_get_count() > 0: result["gpu_name"] = self.monitor.gpu_name result["gpu_memory_total"] = self.monitor.gpu_memory_total return result ############# # Lifecycle # ############# async def start(self): if self.status and self.status in ( Status.closed, Status.closing, Status.closing_gracefully, ): return assert self.status is Status.undefined, self.status await super().start() enable_gc_diagnosis() thread_state.on_event_loop_thread = True ports = parse_ports(self._start_port) for port in ports: start_address = address_from_user_args( host=self._start_host, port=port, interface=self._interface, protocol=self._protocol, security=self.security, ) kwargs = self.security.get_listen_args("worker") if self._protocol in ("tcp", "tls"): kwargs = kwargs.copy() kwargs["default_host"] = get_ip( get_address_host(self.scheduler.address) ) try: await self.listen(start_address, **kwargs) except OSError as e: if len(ports) > 1 and e.errno == errno.EADDRINUSE: continue else: raise else: self._start_address = start_address break else: raise ValueError( f"Could not start Worker on host {self._start_host}" f"with port {self._start_port}" ) # Start HTTP server associated with this Worker node routes = get_handlers( server=self, modules=dask.config.get("distributed.worker.http.routes"), prefix=self._http_prefix, ) self.start_http_server(routes, self._dashboard_address) if self._dashboard: try: import distributed.dashboard.worker except ImportError: logger.debug("To start diagnostics web server please install Bokeh") else: distributed.dashboard.worker.connect( self.http_application, self.http_server, self, prefix=self._http_prefix, ) self.ip = get_address_host(self.address) if self.name is None: self.name = self.address for preload in self.preloads: await preload.start() # Services listen on all addresses # Note Nanny is not a "real" service, just some metadata # passed in service_ports... 
self.start_services(self.ip) try: listening_address = "%s%s:%d" % (self.listener.prefix, self.ip, self.port) except Exception: listening_address = f"{self.listener.prefix}{self.ip}" logger.info(" Start worker at: %26s", self.address) logger.info(" Listening to: %26s", listening_address) for k, v in self.service_ports.items(): logger.info(" {:>16} at: {:>26}".format(k, self.ip + ":" + str(v))) logger.info("Waiting to connect to: %26s", self.scheduler.address) logger.info("-" * 49) logger.info(" Threads: %26d", self.nthreads) if self.memory_limit: logger.info(" Memory: %26s", format_bytes(self.memory_limit)) logger.info(" Local Directory: %26s", self.local_directory) setproctitle("dask-worker [%s]" % self.address) await asyncio.gather( *(self.plugin_add(plugin=plugin) for plugin in self._pending_plugins) ) self._pending_plugins = () await self._register_with_scheduler() self.start_periodic_callbacks() return self def _close(self, *args, **kwargs): warnings.warn("Worker._close has moved to Worker.close", stacklevel=2) return self.close(*args, **kwargs) async def close( self, report=True, timeout=30, nanny=True, executor_wait=True, safe=False ): with log_errors(): if self.status in (Status.closed, Status.closing): await self.finished() return self.reconnect = False disable_gc_diagnosis() try: logger.info("Stopping worker at %s", self.address) except ValueError: # address not available if already closed logger.info("Stopping worker") if self.status not in (Status.running, Status.closing_gracefully): logger.info("Closed worker has not yet started: %s", self.status) self.status = Status.closing for preload in self.preloads: await preload.teardown() if nanny and self.nanny: with self.rpc(self.nanny) as r: await r.close_gracefully() setproctitle("dask-worker [closing]") teardowns = [ plugin.teardown(self) for plugin in self.plugins.values() if hasattr(plugin, "teardown") ] await asyncio.gather(*(td for td in teardowns if isawaitable(td))) for pc in self.periodic_callbacks.values(): pc.stop() if self._client: # If this worker is the last one alive, clean up the worker # initialized clients if not any( w for w in Worker._instances if w != self and w.status == Status.running ): for c in Worker._initialized_clients: # Regardless of what the client was initialized with # we'll require the result as a future. 
This is # necessary since the heursitics of asynchronous are not # reliable and we might deadlock here c._asynchronous = True if c.asynchronous: await c.close() else: # There is still the chance that even with us # telling the client to be async, itself will decide # otherwise c.close() with suppress(EnvironmentError, TimeoutError): if report and self.contact_address is not None: await asyncio.wait_for( self.scheduler.unregister( address=self.contact_address, safe=safe ), timeout, ) await self.scheduler.close_rpc() self._workdir.release() self.stop_services() if ( self.batched_stream and self.batched_stream.comm and not self.batched_stream.comm.closed() ): self.batched_stream.send({"op": "close-stream"}) if self.batched_stream: with suppress(TimeoutError): await self.batched_stream.close(timedelta(seconds=timeout)) for executor in self.executors.values(): if executor is utils._offload_executor: continue # Never shutdown the offload executor if isinstance(executor, ThreadPoolExecutor): executor._work_queue.queue.clear() executor.shutdown(wait=executor_wait, timeout=timeout) else: executor.shutdown(wait=executor_wait) self.stop() await self.rpc.close() self.status = Status.closed await super().close() setproctitle("dask-worker [closed]") return "OK" async def close_gracefully(self, restart=None): """Gracefully shut down a worker This first informs the scheduler that we're shutting down, and asks it to move our data elsewhere. Afterwards, we close as normal """ if self.status in (Status.closing, Status.closing_gracefully): await self.finished() if self.status == Status.closed: return if restart is None: restart = self.lifetime_restart logger.info("Closing worker gracefully: %s", self.address) self.status = Status.closing_gracefully await self.scheduler.retire_workers(workers=[self.address], remove=False) await self.close(safe=True, nanny=not restart) async def terminate(self, comm=None, report=True, **kwargs): await self.close(report=report, **kwargs) return "OK" async def wait_until_closed(self): warnings.warn("wait_until_closed has moved to finished()") await self.finished() assert self.status == Status.closed ################ # Worker Peers # ################ def send_to_worker(self, address, msg): if address not in self.stream_comms: bcomm = BatchedSend(interval="1ms", loop=self.loop) self.stream_comms[address] = bcomm async def batched_send_connect(): comm = await connect( address, **self.connection_args # TODO, serialization ) comm.name = "Worker->Worker" await comm.write({"op": "connection_stream"}) bcomm.start(comm) self.loop.add_callback(batched_send_connect) self.stream_comms[address].send(msg) async def get_data( self, comm, keys=None, who=None, serializers=None, max_connections=None ): start = time() if max_connections is None: max_connections = self.total_in_connections # Allow same-host connections more liberally if ( max_connections and comm and get_address_host(comm.peer_address) == get_address_host(self.address) ): max_connections = max_connections * 2 if self.paused: max_connections = 1 throttle_msg = " Throttling outgoing connections because worker is paused." 
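            # A paused worker still serves its data, but over at most one
            # outgoing connection at a time; throttle_msg is appended to the
            # debug message logged when a request is turned away as "busy".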
else: throttle_msg = "" if ( max_connections is not False and self.outgoing_current_count >= max_connections ): logger.debug( "Worker %s has too many open connections to respond to data request " "from %s (%d/%d).%s", self.address, who, self.outgoing_current_count, max_connections, throttle_msg, ) return {"status": "busy"} self.outgoing_current_count += 1 data = {k: self.data[k] for k in keys if k in self.data} if len(data) < len(keys): for k in set(keys) - set(data): if k in self.actors: from .actor import Actor data[k] = Actor(type(self.actors[k]), self.address, k, worker=self) msg = {"status": "OK", "data": {k: to_serialize(v) for k, v in data.items()}} nbytes = {k: self.tasks[k].nbytes for k in data if k in self.tasks} stop = time() if self.digests is not None: self.digests["get-data-load-duration"].add(stop - start) start = time() try: compressed = await comm.write(msg, serializers=serializers) response = await comm.read(deserializers=serializers) assert response == "OK", response except OSError: logger.exception( "failed during get data with %s -> %s", self.address, who, exc_info=True ) comm.abort() raise finally: self.outgoing_current_count -= 1 stop = time() if self.digests is not None: self.digests["get-data-send-duration"].add(stop - start) total_bytes = sum(filter(None, nbytes.values())) self.outgoing_count += 1 duration = (stop - start) or 0.5 # windows self.outgoing_transfer_log.append( { "start": start + self.scheduler_delay, "stop": stop + self.scheduler_delay, "middle": (start + stop) / 2, "duration": duration, "who": who, "keys": nbytes, "total": total_bytes, "compressed": compressed, "bandwidth": total_bytes / duration, } ) return Status.dont_reply ################### # Local Execution # ################### def update_data(self, comm=None, data=None, report=True, serializers=None): for key, value in data.items(): ts = self.tasks.get(key) if getattr(ts, "state", None) is not None: self.transition(ts, "memory", value=value) else: self.tasks[key] = ts = TaskState(key) self.put_key_in_memory(ts, value) ts.priority = None ts.duration = None ts.scheduler_holds_ref = True self.log.append((key, "receive-from-scatter")) if report: self.log.append( ("Notifying scheduler about in-memory in update-data", list(data)) ) self.batched_stream.send({"op": "add-keys", "keys": list(data)}) info = {"nbytes": {k: sizeof(v) for k, v in data.items()}, "status": "OK"} return info def handle_free_keys(self, comm=None, keys=None, reason=None): """ Handler to be called by the scheduler. The given keys are no longer referred to and required by the scheduler. The worker is now allowed to release the key, if applicable. This does not guarantee that the memory is released since the worker may still decide to hold on to the data and task since it is required by an upstream dependency. """ self.log.append(("free-keys", keys, reason)) for key in keys: ts = self.tasks.get(key) if ts is not None: ts.scheduler_holds_ref = False self.release_key(key, report=False, reason=reason) def handle_superfluous_data(self, keys=(), reason=None): """Stream handler notifying the worker that it might be holding unreferenced, superfluous data. This should not actually happen during ordinary operations and is only intended to correct any erroneous state. An example where this is necessary is if a worker fetches data for a downstream task but that task is released before the data arrives. 
In this case, the scheduler will notify the worker that it may be holding this unnecessary data, if the worker hasn't released the data itself, already. This handler does not guarantee the task nor the data to be actually released but only asks the worker to release the data on a best effort guarantee. This protects from race conditions where the given keys may already have been rescheduled for compute in which case the compute would win and this handler is ignored. For stronger guarantees, see handler free_keys """ self.log.append(("Handle superfluous data", keys, reason)) for key in list(keys): ts = self.tasks.get(key) if ts and not ts.scheduler_holds_ref: self.release_key(key, reason=f"delete data: {reason}", report=False) logger.debug("Worker %s -- Deleted %d keys", self.name, len(keys)) return "OK" async def set_resources(self, **resources): for r, quantity in resources.items(): if r in self.total_resources: self.available_resources[r] += quantity - self.total_resources[r] else: self.available_resources[r] = quantity self.total_resources[r] = quantity await retry_operation( self.scheduler.set_resources, resources=self.total_resources, worker=self.contact_address, ) ################### # Task Management # ################### def cancel_compute(self, key, reason): """ Cancel a task on a best effort basis. This is only possible while a task is in state `waiting` or `ready`. Nothing will happen otherwise. """ ts = self.tasks.get(key) if ts and ts.state in ("waiting", "ready"): self.log.append((key, "cancel-compute", reason)) ts.scheduler_holds_ref = False # All possible dependents of TS should not be in state Processing on # scheduler side and therefore should not be assigned to a worker, # yet. assert not ts.dependents self.release_key(key, reason=reason, report=False) def add_task( self, key, function=None, args=None, kwargs=None, task=no_value, who_has=None, nbytes=None, priority=None, duration=None, resource_restrictions=None, actor=False, annotations=None, **kwargs2, ): try: runspec = SerializedTask(function, args, kwargs, task) if key in self.tasks: ts = self.tasks[key] ts.scheduler_holds_ref = True if ts.state == "memory": assert key in self.data or key in self.actors logger.debug( "Asked to compute pre-existing result: %s: %s", key, ts.state ) self.send_task_state_to_scheduler(ts) return if ts.state in IN_PLAY: return if ts.state == "error": ts.exception = None ts.exception_text = "" ts.traceback = None ts.traceback_text = "" else: # This is a scheduler re-assignment # Either `fetch` -> `waiting` or `flight` -> `waiting` self.log.append((ts.key, "re-adding key, new TaskState")) self.transition(ts, "waiting", runspec=runspec) else: self.log.append((key, "new")) self.tasks[key] = ts = TaskState( key=key, runspec=SerializedTask(function, args, kwargs, task) ) self.transition(ts, "waiting") # TODO: move transition of `ts` to end of `add_task` # This will require a chained recommendation transition system like # the scheduler if priority is not None: priority = tuple(priority) + (self.generation,) self.generation -= 1 if actor: self.actors[ts.key] = None ts.scheduler_holds_ref = True ts.runspec = runspec ts.priority = priority ts.duration = duration if resource_restrictions: ts.resource_restrictions = resource_restrictions ts.annotations = annotations who_has = who_has or {} for dependency, workers in who_has.items(): assert workers if dependency not in self.tasks: # initial state is "new" # this dependency does not already exist on worker self.tasks[dependency] = dep_ts = 
TaskState(key=dependency) # link up to child / parents ts.dependencies.add(dep_ts) dep_ts.dependents.add(ts) # check to ensure task wasn't already executed and partially released # # TODO: make this less bad state = "fetch" if dependency not in self.data else "memory" # transition from new -> fetch handles adding dependency # to waiting_for_data discarded_self = False if self.address in workers and state == "fetch": discarded_self = True workers = set(workers) workers.discard(self.address) who_has[dependency] = tuple(workers) self.transition(dep_ts, state, who_has=workers) self.log.append( ( dependency, "new-dep", dep_ts.state, f"requested by {ts.key}", discarded_self, ) ) else: # task was already present on worker dep_ts = self.tasks[dependency] # link up to child / parents ts.dependencies.add(dep_ts) dep_ts.dependents.add(ts) if dep_ts.state not in ("memory",): ts.waiting_for_data.add(dep_ts.key) self.update_who_has(who_has=who_has) if nbytes is not None: for key, value in nbytes.items(): self.tasks[key].nbytes = value if ts.waiting_for_data: self.data_needed.append(ts.key) else: self.transition(ts, "ready") if self.validate: for worker, keys in self.has_what.items(): for k in keys: assert worker in self.tasks[k].who_has if who_has: assert all(self.tasks[dep] in ts.dependencies for dep in who_has) assert all(self.tasks[dep.key] for dep in ts.dependencies) for dependency in ts.dependencies: self.validate_task(dependency) self.validate_task(ts) except Exception as e: logger.exception(e) if LOG_PDB: import pdb pdb.set_trace() raise def transition(self, ts, finish, **kwargs): if ts is None: return start = ts.state if start == finish: return func = self._transitions[start, finish] self.log.append((ts.key, start, finish)) state = func(ts, **kwargs) if state and finish != state: self.log.append((ts.key, start, finish, state)) ts.state = state or finish if self.validate: self.validate_task(ts) self._notify_plugins("transition", ts.key, start, state or finish, **kwargs) def transition_new_waiting(self, ts): try: if self.validate: assert ts.state == "new" assert ts.runspec is not None assert not ts.who_has except Exception as e: logger.exception(e) if LOG_PDB: import pdb pdb.set_trace() raise def transition_new_fetch(self, ts, who_has): try: if self.validate: assert ts.state == "new" assert ts.runspec is None assert who_has for dependent in ts.dependents: dependent.waiting_for_data.add(ts.key) ts.who_has.update(who_has) for w in who_has: self.has_what[w].add(ts.key) self.pending_data_per_worker[w].append(ts.key) except Exception as e: logger.exception(e) if LOG_PDB: import pdb pdb.set_trace() raise def transition_fetch_waiting(self, ts, runspec): """This is a rescheduling transition that occurs after a worker failure. A task was available from another worker but that worker died and the scheduler reassigned the task for computation here. """ try: if self.validate: assert ts.state == "fetch" assert ts.runspec is None assert runspec is not None ts.runspec = runspec # remove any stale entries in `has_what` for worker in self.has_what.keys(): self.has_what[worker].discard(ts.key) # clear `who_has` of stale info ts.who_has.clear() except Exception as e: logger.exception(e) if LOG_PDB: import pdb pdb.set_trace() raise def transition_flight_waiting(self, ts, runspec): """This is a rescheduling transition that occurs after a worker failure. A task was in flight from another worker to this worker when that worker died and the scheduler reassigned the task for computation here. 
""" try: if self.validate: assert ts.state == "flight" assert ts.runspec is None assert runspec is not None ts.runspec = runspec # remove any stale entries in `has_what` for worker in self.has_what.keys(): self.has_what[worker].discard(ts.key) # clear `who_has` of stale info ts.who_has.clear() except Exception as e: logger.exception(e) if LOG_PDB: import pdb pdb.set_trace() raise def transition_fetch_flight(self, ts, worker=None): try: if self.validate: assert ts.state == "fetch" assert ts.dependents ts.coming_from = worker self.in_flight_tasks += 1 except Exception as e: logger.exception(e) if LOG_PDB: import pdb pdb.set_trace() raise def transition_flight_fetch(self, ts): try: if self.validate: assert ts.state == "flight" self.in_flight_tasks -= 1 ts.coming_from = None ts.runspec = None if not ts.who_has: if ts.key not in self._missing_dep_flight: self._missing_dep_flight.add(ts.key) logger.info("Task %s does not know who has", ts) self.loop.add_callback(self.handle_missing_dep, ts) for w in ts.who_has: self.pending_data_per_worker[w].append(ts.key) for dependent in ts.dependents: dependent.waiting_for_data.add(ts.key) if dependent.state == "waiting": self.data_needed.append(dependent.key) except Exception as e: logger.exception(e) if LOG_PDB: import pdb pdb.set_trace() raise def transition_flight_memory(self, ts, value=None): try: if self.validate: assert ts.state == "flight" self.in_flight_tasks -= 1 ts.coming_from = None self.put_key_in_memory(ts, value) for dependent in ts.dependents: try: dependent.waiting_for_data.remove(ts.key) self.waiting_for_data_count -= 1 except KeyError: pass self.log.append(("Notifying scheduler about in-memory", ts.key)) self.batched_stream.send({"op": "add-keys", "keys": [ts.key]}) except Exception as e: logger.exception(e) if LOG_PDB: import pdb pdb.set_trace() raise def transition_waiting_ready(self, ts): try: if self.validate: assert ts.state == "waiting" assert not ts.waiting_for_data assert all( dep.key in self.data or dep.key in self.actors for dep in ts.dependencies ) assert all(dep.state == "memory" for dep in ts.dependencies) assert ts.key not in self.ready self.has_what[self.address].discard(ts.key) if ts.resource_restrictions is not None: self.constrained.append(ts.key) return "constrained" else: heapq.heappush(self.ready, (ts.priority, ts.key)) except Exception as e: logger.exception(e) if LOG_PDB: import pdb pdb.set_trace() raise def transition_waiting_done(self, ts, value=None): try: if self.validate: assert ts.state == "waiting" assert ts.key not in self.ready self.waiting_for_data_count -= len(ts.waiting_for_data) ts.waiting_for_data.clear() if value is not None: self.put_key_in_memory(ts, value) self.send_task_state_to_scheduler(ts) except Exception as e: logger.exception(e) if LOG_PDB: import pdb pdb.set_trace() raise def transition_ready_executing(self, ts): try: if self.validate: assert not ts.waiting_for_data assert ts.key not in self.data assert ts.state in READY assert ts.key not in self.ready assert all( dep.key in self.data or dep.key in self.actors for dep in ts.dependencies ) self.executing_count += 1 self.loop.add_callback(self.execute, ts.key) except Exception as e: logger.exception(e) if LOG_PDB: import pdb pdb.set_trace() raise def transition_ready_error(self, ts): if self.validate: assert ts.exception is not None assert ts.traceback is not None assert ts.exception_text assert ts.traceback_text self.send_task_state_to_scheduler(ts) def transition_ready_memory(self, ts, value=no_value): if value is not no_value: 
self.put_key_in_memory(ts, value=value) self.send_task_state_to_scheduler(ts) def transition_constrained_executing(self, ts): self.transition_ready_executing(ts) for resource, quantity in ts.resource_restrictions.items(): self.available_resources[resource] -= quantity if self.validate: assert all(v >= 0 for v in self.available_resources.values()) def transition_executing_done(self, ts, value=no_value, report=True): try: if self.validate: assert ts.state == "executing" or ts.key in self.long_running assert not ts.waiting_for_data assert ts.key not in self.ready out = None if ts.resource_restrictions is not None: for resource, quantity in ts.resource_restrictions.items(): self.available_resources[resource] += quantity if ts.state == "executing": self.executing_count -= 1 self.executed_count += 1 elif ts.state == "long-running": self.long_running.remove(ts.key) if value is not no_value: try: self.put_key_in_memory(ts, value, transition=False) except Exception as e: logger.info("Failed to put key in memory", exc_info=True) msg = error_message(e) ts.exception = msg["exception"] ts.exception_text = msg["exception_text"] ts.traceback = msg["traceback"] ts.traceback_text = msg["traceback_text"] ts.state = "error" out = "error" for d in ts.dependents: d.waiting_for_data.add(ts.key) if report and self.batched_stream and self.status == Status.running: self.send_task_state_to_scheduler(ts) else: raise CommClosedError return out except OSError: logger.info("Comm closed") except Exception as e: logger.exception(e) if LOG_PDB: import pdb pdb.set_trace() raise def transition_executing_long_running(self, ts, compute_duration=None): try: if self.validate: assert ts.state == "executing" self.executing_count -= 1 self.long_running.add(ts.key) self.batched_stream.send( { "op": "long-running", "key": ts.key, "compute_duration": compute_duration, } ) self.ensure_computing() except Exception as e: logger.exception(e) if LOG_PDB: import pdb pdb.set_trace() raise def maybe_transition_long_running(self, ts, compute_duration=None): if ts.state == "executing": self.transition(ts, "long-running", compute_duration=compute_duration) def stateof(self, key): ts = self.tasks[key] return { "executing": ts.state == "executing", "waiting_for_data": bool(ts.waiting_for_data), "heap": key in pluck(1, self.ready), "data": key in self.data, } def story(self, *keys): keys = [key.key if isinstance(key, TaskState) else key for key in keys] return [ msg for msg in self.log if any(key in msg for key in keys) or any( key in c for key in keys for c in msg if isinstance(c, (tuple, list, set)) ) ] def ensure_communicating(self): changed = True try: while ( changed and self.data_needed and len(self.in_flight_workers) < self.total_out_connections ): changed = False logger.debug( "Ensure communicating. Pending: %d. 
Connections: %d/%d", len(self.data_needed), len(self.in_flight_workers), self.total_out_connections, ) key = self.data_needed[0] if key not in self.tasks: self.data_needed.popleft() changed = True continue ts = self.tasks[key] if ts.state != "waiting": self.log.append((key, "communication pass")) self.data_needed.popleft() changed = True continue dependencies = ts.dependencies if self.validate: assert all(dep.key in self.tasks for dep in dependencies) dependencies_fetch = set() dependencies_missing = set() for dependency_ts in dependencies: if dependency_ts.state == "fetch": if not dependency_ts.who_has: dependencies_missing.add(dependency_ts) else: dependencies_fetch.add(dependency_ts) del dependencies, dependency_ts if dependencies_missing: missing_deps2 = { dep for dep in dependencies_missing if dep.key not in self._missing_dep_flight } for dep in missing_deps2: self._missing_dep_flight.add(dep.key) if missing_deps2: logger.info( "Can't find dependencies %s for key %s", missing_deps2.copy(), key, ) self.loop.add_callback(self.handle_missing_dep, *missing_deps2) dependencies_fetch -= dependencies_missing self.log.append( ("gather-dependencies", key, {d.key for d in dependencies_fetch}) ) in_flight = False while dependencies_fetch and ( len(self.in_flight_workers) < self.total_out_connections or self.comm_nbytes < self.comm_threshold_bytes ): to_gather_ts = dependencies_fetch.pop() workers = [ w for w in to_gather_ts.who_has if w not in self.in_flight_workers ] if not workers: in_flight = True continue host = get_address_host(self.address) local = [w for w in workers if get_address_host(w) == host] if local: worker = random.choice(local) else: worker = random.choice(list(workers)) to_gather, total_nbytes = self.select_keys_for_gather( worker, to_gather_ts.key ) self.comm_nbytes += total_nbytes self.in_flight_workers[worker] = to_gather for d in to_gather: dependencies_fetch.discard(self.tasks.get(d)) self.transition(self.tasks[d], "flight", worker=worker) assert not worker == self.address self.loop.add_callback( self.gather_dep, worker=worker, to_gather=to_gather, total_nbytes=total_nbytes, cause=ts, ) changed = True if not dependencies_fetch and not in_flight: self.data_needed.popleft() except Exception as e: logger.exception(e) if LOG_PDB: import pdb pdb.set_trace() raise def send_task_state_to_scheduler(self, ts): if ts.key in self.data or self.actors.get(ts.key): typ = ts.type if ts.nbytes is None or typ is None: try: value = self.data[ts.key] except KeyError: value = self.actors[ts.key] ts.nbytes = sizeof(value) typ = ts.type = type(value) del value try: typ_serialized = dumps_function(typ) except PicklingError: # Some types fail pickling (example: _thread.lock objects), # send their name as a best effort. 
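                    # Best-effort fallback: pickle only the class name (a plain
                    # string) instead of the class object itself.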
typ_serialized = pickle.dumps(typ.__name__, protocol=4) d = { "op": "task-finished", "status": "OK", "key": ts.key, "nbytes": ts.nbytes, "thread": self.threads.get(ts.key), "type": typ_serialized, "typename": typename(typ), "metadata": ts.metadata, } elif ts.exception is not None: d = { "op": "task-erred", "status": "error", "key": ts.key, "thread": self.threads.get(ts.key), "exception": ts.exception, "traceback": ts.traceback, "exception_text": ts.exception_text, "traceback_text": ts.traceback_text, } else: logger.error("Key not ready to send to worker, %s: %s", ts.key, ts.state) return if ts.startstops: d["startstops"] = ts.startstops self.batched_stream.send(d) def put_key_in_memory(self, ts, value, transition=True): if ts.key in self.data: ts.state = "memory" return if ts.key in self.actors: self.actors[ts.key] = value else: start = time() self.data[ts.key] = value ts.state = "memory" stop = time() if stop - start > 0.020: ts.startstops.append( {"action": "disk-write", "start": start, "stop": stop} ) if ts.nbytes is None: ts.nbytes = sizeof(value) ts.type = type(value) for dep in ts.dependents: try: dep.waiting_for_data.remove(ts.key) self.waiting_for_data_count -= 1 except KeyError: pass if not dep.waiting_for_data: self.transition(dep, "ready") self.log.append((ts.key, "put-in-memory")) def select_keys_for_gather(self, worker, dep): assert isinstance(dep, str) deps = {dep} total_bytes = self.tasks[dep].get_nbytes() L = self.pending_data_per_worker[worker] while L: d = L.popleft() ts = self.tasks.get(d) if ts is None or ts.state != "fetch": continue if total_bytes + ts.get_nbytes() > self.target_message_size: break deps.add(d) total_bytes += ts.get_nbytes() return deps, total_bytes @property def total_comm_bytes(self): warnings.warn( "The attribute `Worker.total_comm_bytes` has been renamed to `comm_threshold_bytes`. " "Future versions will only support the new name.", DeprecationWarning, ) return self.comm_threshold_bytes async def gather_dep( self, worker: str, to_gather: Iterable[str], total_nbytes: int, cause: TaskState, ): """Gather dependencies for a task from a worker who has them Parameters ---------- worker : str Address of worker to gather dependencies from to_gather : list Keys of dependencies to gather from worker -- this is not necessarily equivalent to the full list of dependencies of ``dep`` as some dependencies may already be present on this worker. 
total_nbytes : int Total number of bytes for all the dependencies in to_gather combined cause : TaskState Task we want to gather dependencies for """ if self.validate: self.validate_state() if self.status != Status.running: return with log_errors(): response = {} to_gather_keys = set() try: if self.validate: self.validate_state() for dependency_key in to_gather: dependency_ts = self.tasks.get(dependency_key) if dependency_ts and dependency_ts.state == "flight": to_gather_keys.add(dependency_key) # Keep namespace clean since this func is long and has many # dep*, *ts* variables del to_gather, dependency_key, dependency_ts self.log.append(("request-dep", cause.key, worker, to_gather_keys)) logger.debug( "Request %d keys for task %s from %s", len(to_gather_keys), cause, worker, ) start = time() response = await get_data_from_worker( self.rpc, to_gather_keys, worker, who=self.address ) stop = time() if response["status"] == "busy": self.log.append(("busy-gather", worker, to_gather_keys)) for key in to_gather_keys: ts = self.tasks.get(key) if ts and ts.state == "flight": self.transition(ts, "fetch") return cause.startstops.append( { "action": "transfer", "start": start + self.scheduler_delay, "stop": stop + self.scheduler_delay, "source": worker, } ) total_bytes = sum( self.tasks[key].get_nbytes() for key in response["data"] if key in self.tasks ) duration = (stop - start) or 0.010 bandwidth = total_bytes / duration self.incoming_transfer_log.append( { "start": start + self.scheduler_delay, "stop": stop + self.scheduler_delay, "middle": (start + stop) / 2.0 + self.scheduler_delay, "duration": duration, "keys": { key: self.tasks[key].nbytes for key in response["data"] if key in self.tasks }, "total": total_bytes, "bandwidth": bandwidth, "who": worker, } ) if total_bytes > 1000000: self.bandwidth = self.bandwidth * 0.95 + bandwidth * 0.05 bw, cnt = self.bandwidth_workers[worker] self.bandwidth_workers[worker] = (bw + bandwidth, cnt + 1) types = set(map(type, response["data"].values())) if len(types) == 1: [typ] = types bw, cnt = self.bandwidth_types[typ] self.bandwidth_types[typ] = (bw + bandwidth, cnt + 1) if self.digests is not None: self.digests["transfer-bandwidth"].add(total_bytes / duration) self.digests["transfer-duration"].add(duration) self.counters["transfer-count"].add(len(response["data"])) self.incoming_count += 1 self.log.append(("receive-dep", worker, list(response["data"]))) except OSError: logger.exception("Worker stream died during communication: %s", worker) has_what = self.has_what.pop(worker) self.pending_data_per_worker.pop(worker) self.log.append(("receive-dep-failed", worker, has_what)) for d in has_what: ts = self.tasks[d] ts.who_has.remove(worker) except Exception as e: logger.exception(e) if self.batched_stream and LOG_PDB: import pdb pdb.set_trace() raise finally: self.comm_nbytes -= total_nbytes busy = response.get("status", "") == "busy" data = response.get("data", {}) # FIXME: We should not handle keys which were skipped by this coro. 
to_gather_keys is only a subset assert set(to_gather_keys).issubset( set(self.in_flight_workers.get(worker)) ) for d in self.in_flight_workers.pop(worker): ts = self.tasks.get(d) try: if not busy and d in data: self.transition(ts, "memory", value=data[d]) elif ts is None or ts.state == "executing": self.log.append(("already-executing", d)) self.release_key(d, reason="already executing at gather") elif ts.state == "flight" and not ts.dependents: self.log.append(("flight no-dependents", d)) self.release_key( d, reason="In-flight task no longer has dependents." ) elif ( not busy and d not in data and ts.dependents and ts.state != "memory" ): ts.who_has.discard(worker) self.has_what[worker].discard(ts.key) self.log.append(("missing-dep", d)) self.batched_stream.send( { "op": "missing-data", "errant_worker": worker, "key": d, } ) self.transition(ts, "fetch") elif ts.state not in ("ready", "memory"): self.transition(ts, "fetch") else: logger.debug( "Unexpected task state encountered for %r after gather_dep", ts, ) except Exception as exc: emsg = error_message(exc) assert ts is not None, ts self.log.append( (ts.key, "except-gather-dep-result", emsg, time()) ) # FIXME: We currently cannot release this task and its # dependent safely logger.debug( "Exception occured while handling `gather_dep` response for %r", ts, exc_info=True, ) if self.validate: self.validate_state() self.ensure_computing() if not busy: self.repetitively_busy = 0 self.ensure_communicating() else: # Exponential backoff to avoid hammering scheduler/worker self.repetitively_busy += 1 await asyncio.sleep(0.100 * 1.5 ** self.repetitively_busy) await self.query_who_has(*to_gather_keys) self.ensure_communicating() def bad_dep(self, dep): exc = ValueError( "Could not find dependent %s. Check worker logs" % str(dep.key) ) for ts in list(dep.dependents): msg = error_message(exc) ts.exception = msg["exception"] ts.traceback = msg["traceback"] ts.exception_text = msg["exception_text"] ts.traceback_text = msg["traceback_text"] self.transition(ts, "error") self.release_key(dep.key, reason="bad dep") async def handle_missing_dep(self, *deps, **kwargs): self.log.append(("handle-missing", deps)) try: deps = {dep for dep in deps if dep.dependents} if not deps: return for dep in list(deps): if ( self._suspicious_count_limit and dep.suspicious_count > self._suspicious_count_limit ): deps.remove(dep) self.bad_dep(dep) if not deps: return for dep in deps: logger.info( "Dependent not found: %s %s . 
Asking scheduler", dep.key, dep.suspicious_count, ) who_has = await retry_operation( self.scheduler.who_has, keys=list(dep.key for dep in deps) ) who_has = {k: v for k, v in who_has.items() if v} self.update_who_has(who_has) still_missing = set() for dep in deps: dep.suspicious_count += 1 if not who_has.get(dep.key): logger.info( "No workers found for %s", dep.key, ) self.log.append((dep.key, "no workers found", dep.dependents)) self.release_key(dep.key, reason="Handle missing no workers") elif self.address in who_has and dep.state != "memory": still_missing.add(dep) self.batched_stream.send( { "op": "release-worker-data", "keys": [dep.key], "worker": self.address, } ) else: logger.debug("New workers found for %s", dep.key) self.log.append((dep.key, "new workers found")) for dependent in dep.dependents: if dep.key in dependent.waiting_for_data: self.data_needed.append(dependent.key) if still_missing: logger.debug( "Found self referencing who has response from scheduler for keys %s.\n" "Trying again handle_missing", deps, ) await self.handle_missing_dep(*deps) except Exception: logger.error("Handle missing dep failed, retrying", exc_info=True) retries = kwargs.get("retries", 5) self.log.append(("handle-missing-failed", retries, deps)) if retries > 0: await self.handle_missing_dep(*deps, retries=retries - 1) else: raise finally: try: for dep in deps: self._missing_dep_flight.remove(dep.key) except KeyError: pass self.ensure_communicating() async def query_who_has(self, *deps): with log_errors(): response = await retry_operation(self.scheduler.who_has, keys=deps) self.update_who_has(response) return response def update_who_has(self, who_has): try: for dep, workers in who_has.items(): if not workers: continue if dep in self.tasks: if self.address in workers and self.tasks[dep].state != "memory": logger.debug( "Scheduler claims worker %s holds data for task %s which is not true.", self.name, dep, ) # Do not mutate the input dict. That's rude workers = set(workers) - {self.address} self.tasks[dep].who_has.update(workers) for worker in workers: self.has_what[worker].add(dep) except Exception as e: logger.exception(e) if LOG_PDB: import pdb pdb.set_trace() raise def steal_request(self, key): # There may be a race condition between stealing and releasing a task. # In this case the self.tasks is already cleared. The `None` will be # registered as `already-computing` on the other end ts = self.tasks.get(key) if key in self.tasks: state = ts.state else: state = None response = {"op": "steal-response", "key": key, "state": state} self.batched_stream.send(response) if state in ("ready", "waiting", "constrained"): # If task is marked as "constrained" we haven't yet assigned it an # `available_resources` to run on, that happens in # `transition_constrained_executing` ts.scheduler_holds_ref = False self.release_key(ts.key, reason="stolen") if self.validate: assert ts.key not in self.tasks def release_key( self, key: Hashable, cause: TaskState | None = None, reason: str | None = None, report: bool = True, ): try: if self.validate: assert not isinstance(key, TaskState) ts = self.tasks.get(key, None) # If the scheduler holds a reference which is usually the # case when it instructed the task to be computed here or if # data was scattered we must not release it unless the # scheduler allow us to. 
See also handle_delete_data and if ts is None or ts.scheduler_holds_ref: return logger.debug( "Release key %s", {"key": key, "cause": cause, "reason": reason} ) if cause: self.log.append((key, "release-key", {"cause": cause}, reason)) else: self.log.append((key, "release-key", reason)) if key in self.data: try: del self.data[key] except FileNotFoundError: logger.error("Tried to delete %s but no file found", exc_info=True) if key in self.actors: del self.actors[key] for worker in ts.who_has: self.has_what[worker].discard(ts.key) ts.who_has.clear() if key in self.threads: del self.threads[key] if ts.state == "executing": self.executing_count -= 1 if ts.resource_restrictions is not None: if ts.state == "executing": for resource, quantity in ts.resource_restrictions.items(): self.available_resources[resource] += quantity for d in ts.dependencies: d.dependents.discard(ts) if not d.dependents and d.state in ("flight", "fetch"): self.release_key(d.key, reason="Dependent released") if report: # Inform the scheduler of keys which will have gone missing # We are releasing them before they have completed if ts.state in PROCESSING: # This path is only hit with work stealing msg = {"op": "release", "key": key, "cause": cause} else: # This path is only hit when calling release_key manually msg = { "op": "release-worker-data", "keys": [key], "worker": self.address, } self.batched_stream.send(msg) self._notify_plugins("release_key", key, ts.state, cause, reason, report) del self.tasks[key] except CommClosedError: pass except Exception as e: logger.exception(e) if LOG_PDB: import pdb pdb.set_trace() raise ################ # Execute Task # ################ def run(self, comm, function, args=(), wait=True, kwargs=None): return run(self, comm, function=function, args=args, kwargs=kwargs, wait=wait) def run_coroutine(self, comm, function, args=(), kwargs=None, wait=True): return run(self, comm, function=function, args=args, kwargs=kwargs, wait=wait) async def plugin_add(self, comm=None, plugin=None, name=None): with log_errors(pdb=False): if isinstance(plugin, bytes): plugin = pickle.loads(plugin) if name is None: name = _get_plugin_name(plugin) assert name if name in self.plugins: await self.plugin_remove(comm=comm, name=name) self.plugins[name] = plugin logger.info("Starting Worker plugin %s" % name) if hasattr(plugin, "setup"): try: result = plugin.setup(worker=self) if isawaitable(result): result = await result except Exception as e: msg = error_message(e) return msg return {"status": "OK"} async def plugin_remove(self, comm=None, name=None): with log_errors(pdb=False): logger.info(f"Removing Worker plugin {name}") try: plugin = self.plugins.pop(name) if hasattr(plugin, "teardown"): result = plugin.teardown(worker=self) if isawaitable(result): result = await result except Exception as e: msg = error_message(e) return msg return {"status": "OK"} async def actor_execute( self, comm=None, actor=None, function=None, args=(), kwargs: dict | None = None, ): kwargs = kwargs or {} separate_thread = kwargs.pop("separate_thread", True) key = actor actor = self.actors[key] func = getattr(actor, function) name = key_split(key) + "." 
+ function try: if iscoroutinefunction(func): result = await func(*args, **kwargs) elif separate_thread: result = await self.loop.run_in_executor( self.executors["actor"], apply_function_actor, func, args, kwargs, self.execution_state, name, self.active_threads, self.active_threads_lock, ) else: result = func(*args, **kwargs) return {"status": "OK", "result": to_serialize(result)} except Exception as ex: return {"status": "error", "exception": to_serialize(ex)} def actor_attribute(self, comm=None, actor=None, attribute=None): try: value = getattr(self.actors[actor], attribute) return {"status": "OK", "result": to_serialize(value)} except Exception as ex: return {"status": "error", "exception": to_serialize(ex)} def meets_resource_constraints(self, key: str) -> bool: ts = self.tasks[key] if not ts.resource_restrictions: return True for resource, needed in ts.resource_restrictions.items(): if self.available_resources[resource] < needed: return False return True async def _maybe_deserialize_task(self, ts): if not isinstance(ts.runspec, SerializedTask): return ts.runspec try: start = time() # Offload deserializing large tasks if sizeof(ts.runspec) > OFFLOAD_THRESHOLD: function, args, kwargs = await offload(_deserialize, *ts.runspec) else: function, args, kwargs = _deserialize(*ts.runspec) stop = time() if stop - start > 0.010: ts.startstops.append( {"action": "deserialize", "start": start, "stop": stop} ) return function, args, kwargs except Exception: logger.error("Could not deserialize task", exc_info=True) self.log.append((ts.key, "deserialize-error")) raise def ensure_computing(self): if self.paused: return try: while self.constrained and self.executing_count < self.nthreads: key = self.constrained[0] ts = self.tasks.get(key, None) if ts is None or ts.state != "constrained": self.constrained.popleft() continue if self.meets_resource_constraints(key): self.constrained.popleft() self.transition(ts, "executing") else: break while self.ready and self.executing_count < self.nthreads: priority, key = heapq.heappop(self.ready) ts = self.tasks.get(key) if ts is None: # It is possible for tasks to be released while still remaining on `ready` # The scheduler might have re-routed to a new worker and told this worker # to release. 
If the task has "disappeared" just continue through the heap continue elif ts.key in self.data: self.transition(ts, "memory") elif ts.state in READY: self.transition(ts, "executing") except Exception as e: logger.exception(e) if LOG_PDB: import pdb pdb.set_trace() raise async def execute(self, key): if self.status in (Status.closing, Status.closed, Status.closing_gracefully): return if key not in self.tasks: return ts = self.tasks[key] if ts.state != "executing": # This might happen if keys are canceled logger.debug( "Trying to execute a task %s which is not in executing state anymore" % ts ) return try: if self.validate: assert not ts.waiting_for_data assert ts.state == "executing" assert ts.runspec is not None function, args, kwargs = await self._maybe_deserialize_task(ts) args2, kwargs2 = self._prepare_args_for_execution(ts, args, kwargs) if ts.annotations is not None and "executor" in ts.annotations: executor = ts.annotations["executor"] else: executor = "default" assert executor in self.executors assert key == ts.key self.active_keys.add(ts.key) try: e = self.executors[executor] ts.start_time = time() if iscoroutinefunction(function): result = await apply_function_async( function, args2, kwargs2, self.scheduler_delay, ) elif "ThreadPoolExecutor" in str(type(e)): result = await self.loop.run_in_executor( e, apply_function, function, args2, kwargs2, self.execution_state, ts.key, self.active_threads, self.active_threads_lock, self.scheduler_delay, ) else: result = await self.loop.run_in_executor( e, apply_function_simple, function, args2, kwargs2, self.scheduler_delay, ) finally: self.active_keys.discard(ts.key) # We'll need to check again for the task state since it may have # changed since the execution was kicked off. In particular, it may # have been canceled and released already in which case we'll have # to drop the result immediately if ts.key not in self.tasks: logger.debug( "Dropping result for %s since task has already been released." 
% ts.key ) return result["key"] = ts.key value = result.pop("result", None) ts.startstops.append( {"action": "compute", "start": result["start"], "stop": result["stop"]} ) self.threads[ts.key] = result["thread"] if result["op"] == "task-finished": ts.nbytes = result["nbytes"] ts.type = result["type"] self.transition(ts, "memory", value=value) if self.digests is not None: self.digests["task-duration"].add(result["stop"] - result["start"]) elif isinstance(result.pop("actual-exception"), Reschedule): self.batched_stream.send({"op": "reschedule", "key": ts.key}) self.transition(ts, "rescheduled", report=False) self.release_key(ts.key, report=False, reason="Reschedule") else: ts.exception = result["exception"] ts.traceback = result["traceback"] ts.exception_text = result["exception_text"] ts.traceback_text = result["traceback_text"] logger.warning( "Compute Failed\n" "Function: %s\n" "args: %s\n" "kwargs: %s\n" "Exception: %r\n", str(funcname(function))[:1000], convert_args_to_str(args2, max_len=1000), convert_kwargs_to_str(kwargs2, max_len=1000), result["exception"].data, ) self.transition(ts, "error") logger.debug("Send compute response to scheduler: %s, %s", ts.key, result) if self.validate: assert ts.state != "executing" assert not ts.waiting_for_data except Exception as exc: logger.error( "Exception during execution of task %s.", ts.key, exc_info=True ) emsg = error_message(exc) ts.exception = emsg["exception"] ts.traceback = emsg["traceback"] ts.exception_text = emsg["exception_text"] ts.traceback_text = emsg["traceback_text"] self.transition(ts, "error") finally: self.ensure_computing() self.ensure_communicating() def _prepare_args_for_execution(self, ts, args, kwargs): start = time() data = {} for dep in ts.dependencies: k = dep.key try: data[k] = self.data[k] except KeyError: from .actor import Actor # TODO: create local actor data[k] = Actor(type(self.actors[k]), self.address, k, self) args2 = pack_data(args, data, key_types=(bytes, str)) kwargs2 = pack_data(kwargs, data, key_types=(bytes, str)) stop = time() if stop - start > 0.005: ts.startstops.append({"action": "disk-read", "start": start, "stop": stop}) if self.digests is not None: self.digests["disk-load-duration"].add(stop - start) return args2, kwargs2 ################## # Administrative # ################## async def memory_monitor(self): """Track this process's memory usage and act accordingly If we rise above 70% memory use, start dumping data to disk. If we rise above 80% memory use, stop execution of new tasks """ if self._memory_monitoring: return self._memory_monitoring = True total = 0 proc = self.monitor.proc memory = proc.memory_info().rss frac = memory / self.memory_limit def check_pause(memory): frac = memory / self.memory_limit # Pause worker threads if above 80% memory use if self.memory_pause_fraction and frac > self.memory_pause_fraction: # Try to free some memory while in paused state self._throttled_gc.collect() if not self.paused: logger.warning( "Worker is at %d%% memory usage. Pausing worker. " "Process memory: %s -- Worker memory limit: %s", int(frac * 100), format_bytes(memory), format_bytes(self.memory_limit) if self.memory_limit is not None else "None", ) self.paused = True elif self.paused: logger.warning( "Worker is at %d%% memory usage. Resuming worker. 
" "Process memory: %s -- Worker memory limit: %s", int(frac * 100), format_bytes(memory), format_bytes(self.memory_limit) if self.memory_limit is not None else "None", ) self.paused = False self.ensure_computing() check_pause(memory) # Dump data to disk if above 70% if self.memory_spill_fraction and frac > self.memory_spill_fraction: logger.debug( "Worker is at %.0f%% memory usage. Start spilling data to disk.", frac * 100, ) start = time() target = self.memory_limit * self.memory_target_fraction count = 0 need = memory - target while memory > target: if not self.data.fast: logger.warning( "Unmanaged memory use is high. This may indicate a memory leak " "or the memory may not be released to the OS; see " "https://distributed.dask.org/en/latest/worker.html#memtrim " "for more information. " "-- Unmanaged memory: %s -- Worker memory limit: %s", format_bytes(memory), format_bytes(self.memory_limit), ) break k, v, weight = self.data.fast.evict() del k, v total += weight count += 1 # If the current buffer is filled with a lot of small values, # evicting one at a time is very slow and the worker might # generate new data faster than it is able to evict. Therefore, # only pass on control if we spent at least 0.5s evicting if time() - start > 0.5: await asyncio.sleep(0) start = time() memory = proc.memory_info().rss if total > need and memory > target: # Issue a GC to ensure that the evicted data is actually # freed from memory and taken into account by the monitor # before trying to evict even more data. self._throttled_gc.collect() memory = proc.memory_info().rss check_pause(memory) if count: logger.debug( "Moved %d tasks worth %s to disk", count, format_bytes(total), ) self._memory_monitoring = False return total def cycle_profile(self): now = time() + self.scheduler_delay prof, self.profile_recent = self.profile_recent, profile.create() self.profile_history.append((now, prof)) self.profile_keys_history.append((now, dict(self.profile_keys))) self.profile_keys.clear() def trigger_profile(self): """ Get a frame from all actively computing threads Merge these frames into existing profile counts """ if not self.active_threads: # hope that this is thread-atomic? 
return start = time() with self.active_threads_lock: active_threads = self.active_threads.copy() frames = sys._current_frames() frames = {ident: frames[ident] for ident in active_threads} llframes = {} if self.low_level_profiler: llframes = {ident: profile.ll_get_stack(ident) for ident in active_threads} for ident, frame in frames.items(): if frame is not None: key = key_split(active_threads[ident]) llframe = llframes.get(ident) state = profile.process( frame, True, self.profile_recent, stop="distributed/worker.py" ) profile.llprocess(llframe, None, state) profile.process( frame, True, self.profile_keys[key], stop="distributed/worker.py" ) stop = time() if self.digests is not None: self.digests["profile-duration"].add(stop - start) async def get_profile( self, comm=None, start=None, stop=None, key=None, server=False ): now = time() + self.scheduler_delay if server: history = self.io_loop.profile elif key is None: history = self.profile_history else: history = [(t, d[key]) for t, d in self.profile_keys_history if key in d] if start is None: istart = 0 else: istart = bisect.bisect_left(history, (start,)) if stop is None: istop = None else: istop = bisect.bisect_right(history, (stop,)) + 1 if istop >= len(history): istop = None # include end if istart == 0 and istop is None: history = list(history) else: iistop = len(history) if istop is None else istop history = [history[i] for i in range(istart, iistop)] prof = profile.merge(*pluck(1, history)) if not history: return profile.create() if istop is None and (start is None or start < now): if key is None: recent = self.profile_recent else: recent = self.profile_keys[key] prof = profile.merge(prof, recent) return prof async def get_profile_metadata(self, comm=None, start=0, stop=None): add_recent = stop is None now = time() + self.scheduler_delay stop = stop or now start = start or 0 result = { "counts": [ (t, d["count"]) for t, d in self.profile_history if start < t < stop ], "keys": [ (t, {k: d["count"] for k, d in v.items()}) for t, v in self.profile_keys_history if start < t < stop ], } if add_recent: result["counts"].append((now, self.profile_recent["count"])) result["keys"].append( (now, {k: v["count"] for k, v in self.profile_keys.items()}) ) return result def get_call_stack(self, comm=None, keys=None): with self.active_threads_lock: frames = sys._current_frames() active_threads = self.active_threads.copy() frames = {k: frames[ident] for ident, k in active_threads.items()} if keys is not None: frames = {k: frame for k, frame in frames.items() if k in keys} result = {k: profile.call_stack(frame) for k, frame in frames.items()} return result def _notify_plugins(self, method_name, *args, **kwargs): for name, plugin in self.plugins.items(): if hasattr(plugin, method_name): try: getattr(plugin, method_name)(*args, **kwargs) except Exception: logger.info( "Plugin '%s' failed with exception" % name, exc_info=True ) ############## # Validation # ############## def validate_task_memory(self, ts): assert ts.key in self.data or ts.key in self.actors assert isinstance(ts.nbytes, int) assert not ts.waiting_for_data assert ts.key not in self.ready assert ts.state == "memory" def validate_task_executing(self, ts): assert ts.state == "executing" assert ts.runspec is not None assert ts.key not in self.data assert not ts.waiting_for_data assert all( dep.key in self.data or dep.key in self.actors for dep in ts.dependencies ) def validate_task_ready(self, ts): assert ts.key in pluck(1, self.ready) assert ts.key not in self.data assert ts.state != "executing" 
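        # A ready task has no result of its own yet, but every dependency is
        # already available locally (in self.data or self.actors).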
assert not ts.waiting_for_data assert all( dep.key in self.data or dep.key in self.actors for dep in ts.dependencies ) def validate_task_waiting(self, ts): assert ts.key not in self.data assert ts.state == "waiting" if ts.dependencies and ts.runspec: assert not all(dep.key in self.data for dep in ts.dependencies) def validate_task_flight(self, ts): assert ts.key not in self.data assert not any(dep.key in self.ready for dep in ts.dependents) assert ts.coming_from assert ts.coming_from in self.in_flight_workers assert ts.key in self.in_flight_workers[ts.coming_from] def validate_task_fetch(self, ts): assert ts.runspec is None assert ts.key not in self.data assert self.address not in ts.who_has # !!!!!!!! # FIXME This is currently not an invariant since upon comm failure we # remove the erroneous worker from all who_has and correct the state # upon the next ensure_communicate # if not ts.who_has: # # If we do not know who_has for a fetch task, it must be logged in # # the missing dep. There should be a handle_missing_dep running for # # all of these keys # assert ts.key in self._missing_dep_flight, ( # ts.key, # self.story(ts), # self._missing_dep_flight.copy(), # self.in_flight_workers.copy(), # ) assert ts.dependents for w in ts.who_has: assert ts.key in self.has_what[w] def validate_task(self, ts): try: if ts.state == "memory": self.validate_task_memory(ts) elif ts.state == "waiting": self.validate_task_waiting(ts) elif ts.state == "ready": self.validate_task_ready(ts) elif ts.state == "executing": self.validate_task_executing(ts) elif ts.state == "flight": self.validate_task_flight(ts) elif ts.state == "fetch": self.validate_task_fetch(ts) except Exception as e: logger.exception(e) if LOG_PDB: import pdb pdb.set_trace() raise def validate_state(self): if self.status != Status.running: return try: for ts in self.tasks.values(): assert ts.state is not None # check that worker has task for worker in ts.who_has: assert ts.key in self.has_what[worker] # check that deps have a set state and that dependency<->dependent links are there for dep in ts.dependencies: # self.tasks was just a dict of tasks # and this check was originally that the key was in `task_state` # so we may have popped the key out of `self.tasks` but the # dependency can still be in `memory` before GC grabs it...? 
# Might need better bookkeeping assert dep.state is not None assert ts in dep.dependents, ts for key in ts.waiting_for_data: ts_wait = self.tasks[key] assert ( ts_wait.state == "flight" or ts_wait.state == "fetch" or ts_wait.key in self._missing_dep_flight or ts_wait.who_has.issubset(self.in_flight_workers) ) if ts.state == "memory": assert isinstance(ts.nbytes, int) assert not ts.waiting_for_data assert ts.key in self.data or ts.key in self.actors for worker, keys in self.has_what.items(): for k in keys: assert worker in self.tasks[k].who_has for ts in self.tasks.values(): self.validate_task(ts) except Exception as e: self.loop.add_callback(self.close) logger.exception(e) if LOG_PDB: import pdb pdb.set_trace() raise ####################################### # Worker Clients (advanced workloads) # ####################################### @property def client(self) -> Client: with self._lock: if self._client: return self._client else: return self._get_client() def _get_client(self, timeout=None) -> Client: """Get local client attached to this worker If no such client exists, create one See Also -------- get_client """ if timeout is None: timeout = dask.config.get("distributed.comm.timeouts.connect") timeout = parse_timedelta(timeout, "s") try: from .client import default_client client = default_client() except ValueError: # no clients found, need to make a new one pass else: # must be lazy import otherwise cyclic import from distributed.deploy.cluster import Cluster if ( client.scheduler and client.scheduler.address == self.scheduler.address # The below conditions should only happen in case a second # cluster is alive, e.g. if a submitted task spawned its onwn # LocalCluster, see gh4565 or ( isinstance(client._start_arg, str) and client._start_arg == self.scheduler.address or isinstance(client._start_arg, Cluster) and client._start_arg.scheduler_address == self.scheduler.address ) ): self._client = client if not self._client: from .client import Client asynchronous = self.loop is IOLoop.current() self._client = Client( self.scheduler, loop=self.loop, security=self.security, set_as_default=True, asynchronous=asynchronous, direct_to_workers=True, name="worker", timeout=timeout, ) Worker._initialized_clients.add(self._client) if not asynchronous: assert self._client.status == "running" return self._client def get_current_task(self): """Get the key of the task we are currently running This only makes sense to run within a task Examples -------- >>> from dask.distributed import get_worker >>> def f(): ... return get_worker().get_current_task() >>> future = client.submit(f) # doctest: +SKIP >>> future.result() # doctest: +SKIP 'f-1234' See Also -------- get_worker """ return self.active_threads[threading.get_ident()] def get_worker() -> Worker: """Get the worker currently running this task Examples -------- >>> def f(): ... worker = get_worker() # The worker on which this task is running ... return worker.address >>> future = client.submit(f) # doctest: +SKIP >>> future.result() # doctest: +SKIP 'tcp://127.0.0.1:47373' See Also -------- get_client worker_client """ try: return thread_state.execution_state["worker"] except AttributeError: try: return first(w for w in Worker._instances if w.status == Status.running) except StopIteration: raise ValueError("No workers found") def get_client(address=None, timeout=None, resolve_address=True) -> Client: """Get a client while within a task. 
This client connects to the same scheduler to which the worker is connected Parameters ---------- address : str, optional The address of the scheduler to connect to. Defaults to the scheduler the worker is connected to. timeout : int or str Timeout (in seconds) for getting the Client. Defaults to the ``distributed.comm.timeouts.connect`` configuration value. resolve_address : bool, default True Whether to resolve `address` to its canonical form. Returns ------- Client Examples -------- >>> def f(): ... client = get_client(timeout="10s") ... futures = client.map(lambda x: x + 1, range(10)) # spawn many tasks ... results = client.gather(futures) ... return sum(results) >>> future = client.submit(f) # doctest: +SKIP >>> future.result() # doctest: +SKIP 55 See Also -------- get_worker worker_client secede """ if timeout is None: timeout = dask.config.get("distributed.comm.timeouts.connect") timeout = parse_timedelta(timeout, "s") if address and resolve_address: address = comm.resolve_address(address) try: worker = get_worker() except ValueError: # could not find worker pass else: if not address or worker.scheduler.address == address: return worker._get_client(timeout=timeout) from .client import Client try: client = Client.current() # TODO: assumes the same scheduler except ValueError: client = None if client and (not address or client.scheduler.address == address): return client elif address: return Client(address, timeout=timeout) else: raise ValueError("No global client found and no address provided") def secede(): """ Have this task secede from the worker's thread pool This opens up a new scheduling slot and a new thread for a new task. This enables the client to schedule tasks on this node, which is especially useful while waiting for other jobs to finish (e.g., with ``client.gather``). Examples -------- >>> def mytask(x): ... # do some work ... client = get_client() ... futures = client.map(...) # do some remote work ... secede() # while that work happens, remove ourself from the pool ... return client.gather(futures) # return gathered results See Also -------- get_client get_worker """ worker = get_worker() tpe_secede() # have this thread secede from the thread pool duration = time() - thread_state.start_time worker.loop.add_callback( worker.maybe_transition_long_running, worker.tasks[thread_state.key], compute_duration=duration, ) class Reschedule(Exception): """Reschedule this task Raising this exception will stop the current execution of the task and ask the scheduler to reschedule this task, possibly on a different machine. This does not guarantee that the task will move onto a different machine. The scheduler will proceed through its normal heuristics to determine the optimal machine to accept this task. The machine will likely change if the load across the cluster has significantly changed since first scheduling the task. 
""" def parse_memory_limit(memory_limit, nthreads, total_cores=CPU_COUNT) -> int | None: if memory_limit is None: return None if memory_limit == "auto": memory_limit = int(system.MEMORY_LIMIT * min(1, nthreads / total_cores)) with suppress(ValueError, TypeError): memory_limit = float(memory_limit) if isinstance(memory_limit, float) and memory_limit <= 1: memory_limit = int(memory_limit * system.MEMORY_LIMIT) if isinstance(memory_limit, str): memory_limit = parse_bytes(memory_limit) else: memory_limit = int(memory_limit) return min(memory_limit, system.MEMORY_LIMIT) async def get_data_from_worker( rpc, keys, worker, who=None, max_connections=None, serializers=None, deserializers=None, ): """Get keys from worker The worker has a two step handshake to acknowledge when data has been fully delivered. This function implements that handshake. See Also -------- Worker.get_data Worker.gather_deps utils_comm.gather_data_from_workers """ if serializers is None: serializers = rpc.serializers if deserializers is None: deserializers = rpc.deserializers async def _get_data(): comm = await rpc.connect(worker) comm.name = "Ephemeral Worker->Worker for gather" try: response = await send_recv( comm, serializers=serializers, deserializers=deserializers, op="get_data", keys=keys, who=who, max_connections=max_connections, ) try: status = response["status"] except KeyError: raise ValueError("Unexpected response", response) else: if status == "OK": await comm.write("OK") return response finally: rpc.reuse(worker, comm) return await retry_operation(_get_data, operation="get_data_from_worker") job_counter = [0] cache_loads = LRU(maxsize=100) def loads_function(bytes_object): """Load a function from bytes, cache bytes""" if len(bytes_object) < 100000: try: result = cache_loads[bytes_object] except KeyError: result = pickle.loads(bytes_object) cache_loads[bytes_object] = result return result return pickle.loads(bytes_object) def _deserialize(function=None, args=None, kwargs=None, task=no_value): """Deserialize task inputs and regularize to func, args, kwargs""" if function is not None: function = loads_function(function) if args and isinstance(args, bytes): args = pickle.loads(args) if kwargs and isinstance(kwargs, bytes): kwargs = pickle.loads(kwargs) if task is not no_value: assert not function and not args and not kwargs function = execute_task args = (task,) return function, args or (), kwargs or {} def execute_task(task): """Evaluate a nested task >>> inc = lambda x: x + 1 >>> execute_task((inc, 1)) 2 >>> execute_task((sum, [1, 2, (inc, 3)])) 7 """ if istask(task): func, args = task[0], task[1:] return func(*map(execute_task, args)) elif isinstance(task, list): return list(map(execute_task, task)) else: return task cache_dumps = LRU(maxsize=100) _cache_lock = threading.Lock() def dumps_function(func) -> bytes: """Dump a function to bytes, cache functions""" try: with _cache_lock: result = cache_dumps[func] except KeyError: result = pickle.dumps(func, protocol=4) if len(result) < 100000: with _cache_lock: cache_dumps[func] = result except TypeError: # Unhashable function result = pickle.dumps(func, protocol=4) return result def dumps_task(task): """Serialize a dask task Returns a dict of bytestrings that can each be loaded with ``loads`` Examples -------- Either returns a task as a function, args, kwargs dict >>> from operator import add >>> dumps_task((add, 1)) # doctest: +SKIP {'function': b'\x80\x04\x95\x00\x8c\t_operator\x94\x8c\x03add\x94\x93\x94.' 
'args': b'\x80\x04\x95\x07\x00\x00\x00K\x01K\x02\x86\x94.'} Or as a single task blob if it can't easily decompose the result. This happens either if the task is highly nested, or if it isn't a task at all >>> dumps_task(1) # doctest: +SKIP {'task': b'\x80\x04\x95\x03\x00\x00\x00\x00\x00\x00\x00K\x01.'} """ if istask(task): if task[0] is apply and not any(map(_maybe_complex, task[2:])): d = {"function": dumps_function(task[1]), "args": warn_dumps(task[2])} if len(task) == 4: d["kwargs"] = warn_dumps(task[3]) return d elif not any(map(_maybe_complex, task[1:])): return {"function": dumps_function(task[0]), "args": warn_dumps(task[1:])} return to_serialize(task) _warn_dumps_warned = [False] def warn_dumps(obj, dumps=pickle.dumps, limit=1e6): """Dump an object to bytes, warn if those bytes are large""" b = dumps(obj, protocol=4) if not _warn_dumps_warned[0] and len(b) > limit: _warn_dumps_warned[0] = True s = str(obj) if len(s) > 70: s = s[:50] + " ... " + s[-15:] warnings.warn( "Large object of size %s detected in task graph: \n" " %s\n" "Consider scattering large objects ahead of time\n" "with client.scatter to reduce scheduler burden and \n" "keep data on workers\n\n" " future = client.submit(func, big_data) # bad\n\n" " big_future = client.scatter(big_data) # good\n" " future = client.submit(func, big_future) # good" % (format_bytes(len(b)), s) ) return b def apply_function( function, args, kwargs, execution_state, key, active_threads, active_threads_lock, time_delay, ): """Run a function, collect information Returns ------- msg: dictionary with status, result/error, timings, etc.. """ ident = threading.get_ident() with active_threads_lock: active_threads[ident] = key thread_state.start_time = time() thread_state.execution_state = execution_state thread_state.key = key msg = apply_function_simple(function, args, kwargs, time_delay) with active_threads_lock: del active_threads[ident] return msg def apply_function_simple( function, args, kwargs, time_delay, ): """Run a function, collect information Returns ------- msg: dictionary with status, result/error, timings, etc.. """ ident = threading.get_ident() start = time() try: result = function(*args, **kwargs) except Exception as e: msg = error_message(e) msg["op"] = "task-erred" msg["actual-exception"] = e else: msg = { "op": "task-finished", "status": "OK", "result": result, "nbytes": sizeof(result), "type": type(result) if result is not None else None, } finally: end = time() msg["start"] = start + time_delay msg["stop"] = end + time_delay msg["thread"] = ident return msg async def apply_function_async( function, args, kwargs, time_delay, ): """Run a function, collect information Returns ------- msg: dictionary with status, result/error, timings, etc.. """ ident = threading.get_ident() start = time() try: result = await function(*args, **kwargs) except Exception as e: msg = error_message(e) msg["op"] = "task-erred" msg["actual-exception"] = e else: msg = { "op": "task-finished", "status": "OK", "result": result, "nbytes": sizeof(result), "type": type(result) if result is not None else None, } finally: end = time() msg["start"] = start + time_delay msg["stop"] = end + time_delay msg["thread"] = ident return msg def apply_function_actor( function, args, kwargs, execution_state, key, active_threads, active_threads_lock ): """Run a function, collect information Returns ------- msg: dictionary with status, result/error, timings, etc.. 
""" ident = threading.get_ident() with active_threads_lock: active_threads[ident] = key thread_state.execution_state = execution_state thread_state.key = key thread_state.actor = True result = function(*args, **kwargs) with active_threads_lock: del active_threads[ident] return result def get_msg_safe_str(msg): """Make a worker msg, which contains args and kwargs, safe to cast to str: allowing for some arguments to raise exceptions during conversion and ignoring them. """ class Repr: def __init__(self, f, val): self._f = f self._val = val def __repr__(self): return self._f(self._val) msg = msg.copy() if "args" in msg: msg["args"] = Repr(convert_args_to_str, msg["args"]) if "kwargs" in msg: msg["kwargs"] = Repr(convert_kwargs_to_str, msg["kwargs"]) return msg def convert_args_to_str(args, max_len: int | None = None) -> str: """Convert args to a string, allowing for some arguments to raise exceptions during conversion and ignoring them. """ length = 0 strs = ["" for i in range(len(args))] for i, arg in enumerate(args): try: sarg = repr(arg) except Exception: sarg = "< could not convert arg to str >" strs[i] = sarg length += len(sarg) + 2 if max_len is not None and length > max_len: return "({}".format(", ".join(strs[: i + 1]))[:max_len] else: return "({})".format(", ".join(strs)) def convert_kwargs_to_str(kwargs: dict, max_len: int | None = None) -> str: """Convert kwargs to a string, allowing for some arguments to raise exceptions during conversion and ignoring them. """ length = 0 strs = ["" for i in range(len(kwargs))] for i, (argname, arg) in enumerate(kwargs.items()): try: sarg = repr(arg) except Exception: sarg = "< could not convert arg to str >" skwarg = repr(argname) + ": " + sarg strs[i] = skwarg length += len(skwarg) + 2 if max_len is not None and length > max_len: return "{{{}".format(", ".join(strs[: i + 1]))[:max_len] else: return "{{{}}}".format(", ".join(strs)) async def run(server, comm, function, args=(), kwargs=None, is_coro=None, wait=True): kwargs = kwargs or {} function = pickle.loads(function) if is_coro is None: is_coro = iscoroutinefunction(function) else: warnings.warn( "The is_coro= parameter is deprecated. 
" "We now automatically detect coroutines/async functions" ) assert wait or is_coro, "Combination not supported" if args: args = pickle.loads(args) if kwargs: kwargs = pickle.loads(kwargs) if has_arg(function, "dask_worker"): kwargs["dask_worker"] = server if has_arg(function, "dask_scheduler"): kwargs["dask_scheduler"] = server logger.info("Run out-of-band function %r", funcname(function)) try: if not is_coro: result = function(*args, **kwargs) else: if wait: result = await function(*args, **kwargs) else: server.loop.add_callback(function, *args, **kwargs) result = None except Exception as e: logger.warning( "Run Failed\nFunction: %s\nargs: %s\nkwargs: %s\n", str(funcname(function))[:1000], convert_args_to_str(args, max_len=1000), convert_kwargs_to_str(kwargs, max_len=1000), exc_info=True, ) response = error_message(e) else: response = {"status": "OK", "result": to_serialize(result)} return response _global_workers = Worker._instances try: if nvml.device_get_count() < 1: raise RuntimeError except (Exception, RuntimeError): pass else: async def gpu_metric(worker): result = await offload(nvml.real_time) return result DEFAULT_METRICS["gpu"] = gpu_metric def gpu_startup(worker): return nvml.one_time() DEFAULT_STARTUP_INFORMATION["gpu"] = gpu_startup def print(*args, **kwargs): """Dask print function This prints both wherever this function is run, and also in the user's client session """ try: worker = get_worker() except ValueError: pass else: msg = { "args": tuple(stringify(arg) for arg in args), "kwargs": {k: stringify(v) for k, v in kwargs.items()}, } worker.log_event("print", msg) builtins.print(*args, **kwargs) def warn(*args, **kwargs): """Dask warn function This raises a warning both wherever this function is run, and also in the user's client session """ try: worker = get_worker() except ValueError: pass else: worker.log_event("warn", {"args": args, "kwargs": kwargs}) warnings.warn(*args, **kwargs)
[ [ [ 23, 34 ] ], [ [ 43, 50 ], [ 35154, 35161 ], [ 35428, 35435 ], [ 37180, 37187 ], [ 44727, 44734 ], [ 46346, 46353 ], [ 47711, 47718 ], [ 92541, 92548 ], [ 117016, 117023 ] ], [ [ 58, 64 ], [ 119813, 119819 ], [ 119936, 119942 ] ], [ [ 72, 80 ], [ 147870, 147878 ] ], [ [ 88, 106 ], [ 24139, 24149 ] ], [ [ 114, 119 ], [ 42341, 42346 ] ], [ [ 127, 132 ], [ 70002, 70007 ], [ 106619, 106624 ] ], [ [ 140, 147 ], [ 2225, 2232 ] ], [ [ 155, 157 ], [ 19264, 19266 ], [ 19285, 19287 ], [ 19355, 19357 ], [ 19789, 19791 ], [ 23650, 23652 ], [ 34285, 34287 ], [ 39148, 39150 ] ], [ [ 165, 171 ], [ 29509, 29515 ], [ 79056, 79062 ], [ 79136, 79142 ] ], [ [ 179, 182 ], [ 23882, 23885 ], [ 25261, 25264 ], [ 25283, 25286 ], [ 118473, 118476 ], [ 121563, 121566 ] ], [ [ 190, 199 ], [ 138075, 138084 ], [ 15183, 15192 ], [ 15751, 15760 ], [ 130347, 130356 ], [ 140910, 140919 ], [ 141496, 141505 ], [ 142336, 142345 ], [ 143210, 143219 ] ], [ [ 207, 215 ], [ 19051, 19059 ], [ 21551, 21559 ], [ 30669, 30677 ], [ 45026, 45034 ], [ 50028, 50036 ], [ 83579, 83587 ], [ 140067, 140075 ], [ 145741, 145749 ], [ 148226, 148234 ] ], [ [ 223, 230 ], [ 13678, 13685 ], [ 13723, 13730 ], [ 33105, 33112 ] ], [ [ 255, 266 ], [ 15060, 15071 ], [ 15116, 15127 ], [ 15866, 15877 ], [ 18523, 18534 ], [ 18633, 18644 ] ], [ [ 268, 273 ], [ 15128, 15133 ], [ 15228, 15233 ], [ 15930, 15935 ], [ 16027, 16032 ], [ 16131, 16136 ], [ 16274, 16279 ], [ 16442, 16447 ], [ 18183, 18188 ], [ 18273, 18278 ] ], [ [ 275, 285 ], [ 2786, 2796 ] ], [ [ 314, 322 ], [ 98469, 98477 ] ], [ [ 324, 332 ], [ 83920, 83928 ] ], [ [ 334, 348 ], [ 23248, 23262 ] ], [ [ 372, 380 ], [ 47579, 47587 ], [ 48340, 48348 ], [ 134830, 134838 ] ], [ [ 402, 411 ], [ 48416, 48425 ] ], [ [ 432, 443 ], [ 31549, 31560 ], [ 31993, 32004 ], [ 46389, 46400 ], [ 102479, 102490 ], [ 103042, 103053 ] ], [ [ 463, 476 ], [ 80789, 80802 ] ], [ [ 496, 509 ], [ 514, 527 ] ], [ [ 553, 559 ], [ 127651, 127657 ], [ 127853, 127859 ], [ 131134, 131140 ] ], [ [ 577, 582 ], [ 130919, 130924 ] ], [ [ 584, 590 ], [ 31253, 31259 ] ], [ [ 592, 597 ] ], [ [ 599, 604 ], [ 75247, 75252 ], [ 120312, 120317 ], [ 123038, 123043 ] ], [ [ 646, 652 ], [ 24008, 24014 ], [ 129288, 129294 ] ], [ [ 654, 670 ], [ 27454, 27470 ], [ 27556, 27572 ], [ 27999, 28015 ], [ 28549, 28565 ], [ 28683, 28699 ] ], [ [ 679, 683 ], [ 2264, 2268 ], [ 2707, 2711 ], [ 14741, 14745 ], [ 15375, 15379 ], [ 15492, 15496 ], [ 16300, 16304 ], [ 16515, 16519 ], [ 18438, 18442 ], [ 18828, 18832 ], [ 19222, 19226 ], [ 20007, 20011 ], [ 20113, 20117 ], [ 20459, 20463 ], [ 20532, 20536 ], [ 21744, 21748 ], [ 22619, 22623 ], [ 22879, 22883 ], [ 23138, 23142 ], [ 28457, 28461 ], [ 28909, 28913 ], [ 29034, 29038 ], [ 29163, 29167 ], [ 42841, 42845 ], [ 128076, 128080 ], [ 132208, 132212 ] ], [ [ 706, 712 ], [ 137820, 137826 ], [ 139259, 139265 ] ], [ [ 737, 746 ], [ 134633, 134642 ], [ 21680, 21689 ] ], [ [ 776, 781 ], [ 139295, 139300 ] ], [ [ 787, 799 ], [ 44549, 44561 ], [ 116377, 116389 ], [ 116423, 116435 ], [ 117672, 117684 ], [ 140560, 140572 ], [ 114668, 114680 ], [ 114714, 114726 ], [ 115161, 115173 ], [ 115203, 115215 ] ], [ [ 805, 813 ], [ 111691, 111699 ], [ 146257, 146265 ], [ 146703, 146711 ] ], [ [ 819, 830 ], [ 2690, 2701 ], [ 18426, 18437 ], [ 135101, 135112 ] ], [ [ 836, 851 ], [ 18913, 18928 ], [ 21935, 21950 ], [ 27831, 27846 ], [ 28428, 28443 ], [ 29311, 29326 ], [ 29419, 29434 ], [ 128148, 128163 ], [ 132276, 132291 ] ], [ [ 857, 866 ], [ 147715, 147724 ], [ 147774, 147783 ] ], [ [ 872, 880 ], [ 31260, 31268 ], [ 
33908, 33916 ], [ 81281, 81289 ] ], [ [ 899, 903 ], [ 132361, 132365 ] ], [ [ 905, 915 ], [ 20188, 20198 ] ], [ [ 917, 924 ], [ 15878, 15885 ], [ 15979, 15986 ], [ 117904, 117911 ], [ 118652, 118659 ], [ 118913, 118920 ], [ 119047, 119054 ], [ 119103, 119110 ], [ 120297, 120304 ], [ 120375, 120382 ], [ 120612, 120619 ], [ 121847, 121854 ] ], [ [ 926, 932 ], [ 134767, 134773 ], [ 135018, 135024 ], [ 135208, 135214 ] ], [ [ 934, 939 ], [ 24195, 24200 ], [ 48529, 48534 ] ], [ [ 961, 972 ], [ 25010, 25021 ], [ 50335, 50346 ] ], [ [ 991, 998 ], [ 32970, 32977 ], [ 50502, 50509 ] ], [ [ 1000, 1016 ], [ 42124, 42140 ], [ 43479, 43495 ], [ 51210, 51226 ], [ 51249, 51265 ], [ 78882, 78898 ], [ 78964, 78980 ] ], [ [ 1046, 1068 ], [ 41678, 41700 ] ], [ [ 1070, 1083 ], [ 21164, 21177 ] ], [ [ 1108, 1125 ], [ 105323, 105340 ] ], [ [ 1150, 1165 ], [ 37598, 37613 ], [ 73902, 73917 ], [ 101256, 101271 ] ], [ [ 1171, 1177 ], [ 34984, 34990 ], [ 37078, 37084 ], [ 38422, 38428 ], [ 41293, 41299 ], [ 41320, 41326 ], [ 41348, 41354 ], [ 41435, 41441 ], [ 45309, 45315 ], [ 45324, 45330 ], [ 45714, 45720 ], [ 45730, 45736 ], [ 45866, 45872 ], [ 46780, 46786 ], [ 48980, 48986 ], [ 49379, 49385 ], [ 49395, 49401 ], [ 49484, 49490 ], [ 49681, 49687 ], [ 50147, 50153 ], [ 53905, 53911 ], [ 73792, 73798 ], [ 84713, 84719 ], [ 107443, 107449 ], [ 107459, 107465 ], [ 107474, 107480 ], [ 125548, 125554 ], [ 130969, 130975 ] ], [ [ 1183, 1200 ], [ 20635, 20652 ], [ 20710, 20727 ], [ 39961, 39978 ] ], [ [ 1206, 1219 ], [ 73309, 73322 ], [ 91555, 91568 ], [ 92905, 92918 ], [ 102611, 102624 ], [ 103166, 103179 ], [ 112342, 112355 ], [ 141630, 141643 ], [ 142476, 142489 ], [ 146898, 146911 ] ], [ [ 1225, 1233 ], [ 26076, 26084 ] ], [ [ 1239, 1248 ], [ 135974, 135983 ] ], [ [ 1277, 1281 ], [ 147064, 147068 ], [ 24326, 24330 ], [ 40977, 40981 ], [ 147235, 147239 ], [ 147359, 147363 ] ], [ [ 1314, 1330 ], [ 102086, 102102 ] ], [ [ 1354, 1363 ], [ 19779, 19788 ] ], [ [ 1382, 1394 ], [ 42782, 42794 ] ], [ [ 1416, 1420 ], [ 32746, 32750 ], [ 32934, 32938 ], [ 33980, 33984 ], [ 34786, 34790 ], [ 36409, 36413 ], [ 36862, 36866 ], [ 50978, 50982 ], [ 52555, 52559 ], [ 52684, 52688 ], [ 53166, 53170 ], [ 82249, 82253 ], [ 82345, 82349 ], [ 85667, 85671 ], [ 85841, 85845 ], [ 91736, 91740 ], [ 105232, 105236 ], [ 105528, 105532 ], [ 108603, 108607 ], [ 112773, 112777 ], [ 113233, 113237 ], [ 115732, 115736 ], [ 116968, 116972 ], [ 117061, 117065 ], [ 117817, 117821 ], [ 118354, 118358 ], [ 119240, 119244 ], [ 119467, 119471 ], [ 120783, 120787 ], [ 133831, 133835 ], [ 141028, 141032 ], [ 141530, 141534 ], [ 141976, 141980 ], [ 142370, 142374 ], [ 142822, 142826 ] ], [ [ 1439, 1449 ], [ 6242, 6252 ] ], [ [ 1473, 1485 ], [ 28350, 28362 ], [ 44664, 44676 ], [ 46111, 46123 ], [ 49041, 49053 ] ], [ [ 1508, 1514 ], [ 139759, 139765 ], [ 80965, 80971 ], [ 102012, 102018 ], [ 136931, 136937 ], [ 137038, 137044 ], [ 137339, 137345 ], [ 137420, 137426 ], [ 138293, 138299 ], [ 138492, 138498 ], [ 145628, 145634 ], [ 145972, 145978 ], [ 146023, 146029 ] ], [ [ 1516, 1528 ], [ 52421, 52433 ], [ 104297, 104309 ], [ 104403, 104415 ], [ 104610, 104622 ], [ 104715, 104727 ], [ 139681, 139693 ], [ 146971, 146983 ] ], [ [ 1549, 1570 ], [ 2590, 2611 ] ], [ [ 1593, 1601 ], [ 22136, 22144 ], [ 22193, 22201 ], [ 22245, 22253 ] ], [ [ 1622, 1643 ], [ 54816, 54822 ], [ 80616, 80622 ], [ 82575, 82581 ], [ 105302, 105308 ], [ 141857, 141863 ], [ 142703, 142709 ] ], [ [ 1676, 1694 ], [ 24241, 24259 ], [ 24391, 24409 ], [ 24872, 24890 ], [ 48662, 48680 ] ], [ [ 
1727, 1747 ], [ 133755, 133765 ] ], [ [ 1773, 1776 ], [ 136687, 136690 ], [ 138043, 138046 ] ], [ [ 1782, 1794 ], [ 35192, 35204 ], [ 47606, 47618 ], [ 48349, 48361 ] ], [ [ 1800, 1814 ], [ 139317, 139331 ], [ 139555, 139569 ] ], [ [ 1820, 1826 ], [ 42096, 42102 ] ], [ [ 1832, 1839 ], [ 146051, 146058 ], [ 146131, 146138 ] ], [ [ 1845, 1856 ], [ 39592, 39603 ] ], [ [ 1862, 1881 ], [ 103674, 103693 ], [ 108629, 108648 ], [ 145693, 145712 ] ], [ [ 1887, 1903 ], [ 20344, 20360 ] ], [ [ 1909, 1918 ], [ 103613, 103622 ], [ 118809, 118818 ] ], [ [ 1924, 1934 ], [ 45264, 45274 ], [ 84761, 84771 ], [ 96353, 96363 ], [ 101922, 101932 ], [ 102767, 102777 ] ], [ [ 1940, 1947 ], [ 39521, 39528 ], [ 105389, 105396 ], [ 147227, 147234 ] ], [ [ 1953, 1964 ], [ 41593, 41604 ] ], [ [ 1970, 1985 ], [ 22037, 22052 ] ], [ [ 1991, 2003 ], [ 41535, 41547 ], [ 130821, 130833 ], [ 133840, 133852 ], [ 133961, 133973 ], [ 141002, 141014 ], [ 141039, 141051 ], [ 141090, 141102 ], [ 143304, 143316 ], [ 143355, 143367 ], [ 143382, 143394 ] ], [ [ 2009, 2025 ], [ 19420, 19436 ] ], [ [ 2053, 2072 ], [ 40138, 40157 ] ], [ [ 2074, 2083 ], [ 113106, 113115 ], [ 113170, 113179 ] ], [ [ 2085, 2100 ], [ 36445, 36460 ], [ 57392, 57407 ], [ 94012, 94027 ], [ 96396, 96411 ], [ 136590, 136605 ] ], [ [ 2125, 2136 ], [ 28314, 28325 ] ], [ [ 2138, 2158 ], [ 45450, 45470 ] ], [ [ 2160, 2179 ], [ 41505, 41524 ] ], [ [ 2202, 2214 ], [ 34331, 34343 ] ], [ [ 2216, 2222 ], [ 18996, 19002 ], [ 28333, 28339 ], [ 32850, 32856 ], [ 34726, 34732 ], [ 35065, 35071 ], [ 35222, 35228 ], [ 35645, 35651 ], [ 35724, 35730 ], [ 36219, 36225 ], [ 36332, 36338 ], [ 37627, 37633 ], [ 38318, 38324 ], [ 38454, 38460 ], [ 38969, 38975 ], [ 39710, 39716 ], [ 40310, 40316 ], [ 43149, 43155 ], [ 44037, 44043 ], [ 44102, 44108 ], [ 44224, 44230 ], [ 44301, 44307 ], [ 44376, 44382 ], [ 44406, 44412 ], [ 44506, 44512 ], [ 44590, 44596 ], [ 45507, 45513 ], [ 45648, 45654 ], [ 45774, 45780 ], [ 49600, 49606 ], [ 51671, 51677 ], [ 52925, 52931 ], [ 56951, 56957 ], [ 58971, 58977 ], [ 63549, 63555 ], [ 64499, 64505 ], [ 65150, 65156 ], [ 66038, 66044 ], [ 66953, 66959 ], [ 67373, 67379 ], [ 67897, 67903 ], [ 68365, 68371 ], [ 69185, 69191 ], [ 70095, 70101 ], [ 70695, 70701 ], [ 71389, 71395 ], [ 73225, 73231 ], [ 73979, 73985 ], [ 74049, 74055 ], [ 74722, 74728 ], [ 76015, 76021 ], [ 77764, 77770 ], [ 80135, 80141 ], [ 81794, 81800 ], [ 85455, 85461 ], [ 88525, 88531 ], [ 88953, 88959 ], [ 91306, 91312 ], [ 91916, 91922 ], [ 93811, 93817 ], [ 94392, 94398 ], [ 95110, 95116 ], [ 95460, 95466 ], [ 95752, 95758 ], [ 96829, 96835 ], [ 97375, 97381 ], [ 99097, 99103 ], [ 99543, 99549 ], [ 101333, 101339 ], [ 102291, 102297 ], [ 102802, 102808 ], [ 105785, 105791 ], [ 107259, 107265 ], [ 107707, 107713 ], [ 110144, 110150 ], [ 111459, 111465 ], [ 111969, 111975 ], [ 112215, 112221 ], [ 115574, 115580 ], [ 115959, 115965 ], [ 117556, 117562 ], [ 122225, 122231 ], [ 125369, 125375 ], [ 127357, 127363 ], [ 146214, 146220 ], [ 146601, 146607 ], [ 114438, 114444 ], [ 114947, 114953 ] ], [ [ 2254, 2261 ], [ 63584, 63591 ], [ 64534, 64541 ], [ 65185, 65192 ], [ 66073, 66080 ], [ 66988, 66995 ], [ 67408, 67415 ], [ 68400, 68407 ], [ 69220, 69227 ], [ 70130, 70137 ], [ 70730, 70737 ], [ 71424, 71431 ], [ 74084, 74091 ], [ 74757, 74764 ], [ 80170, 80177 ], [ 89016, 89023 ], [ 97410, 97417 ], [ 101368, 101375 ], [ 107294, 107301 ], [ 125404, 125411 ], [ 127392, 127399 ] ], [ [ 2313, 2321 ], [ 58439, 58447 ], [ 71837, 71845 ], [ 72356, 72364 ], [ 137128, 137136 ], [ 71872, 71880 ], 
[ 73063, 73071 ], [ 137461, 137469 ] ], [ [ 2349, 2356 ], [ 59209, 59216 ] ], [ [ 2409, 2416 ] ], [ [ 2455, 2465 ], [ 100638, 100648 ] ], [ [ 2533, 2538 ], [ 71036, 71041 ], [ 107156, 107161 ] ], [ [ 2568, 2586 ], [ 28212, 28230 ] ], [ [ 2614, 2629 ], [ 14414, 14429 ], [ 147278, 147293 ] ], [ [ 2636, 2663 ], [ 14459, 14486 ], [ 147380, 147407 ] ], [ [ 2670, 2687 ], [ 6209, 6226 ] ], [ [ 2769, 2783 ], [ 58682, 58696 ], [ 59901, 59915 ], [ 105152, 105166 ] ], [ [ 2863, 2872 ], [ 54325, 54334 ], [ 59853, 59862 ], [ 61023, 61032 ], [ 75389, 75398 ], [ 83977, 83986 ], [ 98494, 98503 ], [ 98676, 98685 ] ], [ [ 6235, 6241 ], [ 147033, 147039 ], [ 29636, 29642 ], [ 46713, 46719 ], [ 46843, 46849 ], [ 129654, 129660 ], [ 130392, 130398 ], [ 130936, 130942 ] ], [ [ 130376, 130386 ], [ 132417, 132427 ], [ 133738, 133748 ], [ 147614, 147624 ], [ 148095, 148105 ] ], [ [ 131070, 131080 ] ], [ [ 133027, 133033 ] ], [ [ 134029, 134039 ], [ 110969, 110979 ] ], [ [ 134578, 134596 ], [ 22359, 22377 ] ], [ [ 135231, 136650 ], [ 85707, 85727 ] ], [ [ 136653, 136664 ] ], [ [ 136673, 136684 ], [ 39634, 39645 ], [ 136859, 136870 ], [ 136970, 136981 ] ], [ [ 136710, 136724 ], [ 137258, 137272 ] ], [ [ 137071, 137083 ], [ 105397, 105409 ], [ 105483, 105495 ] ], [ [ 137635, 137647 ], [ 137546, 137558 ], [ 137898, 137910 ], [ 137976, 137988 ] ], [ [ 138029, 138040 ], [ 138237, 138248 ], [ 138403, 138414 ] ], [ [ 138061, 138072 ], [ 138203, 138214 ], [ 138374, 138385 ] ], [ [ 138098, 138112 ], [ 80750, 80764 ], [ 139374, 139388 ], [ 139615, 139629 ] ], [ [ 138547, 138557 ] ], [ [ 139702, 139720 ], [ 139892, 139910 ], [ 139942, 139960 ] ], [ [ 139737, 139747 ], [ 139407, 139417 ], [ 139489, 139499 ], [ 139648, 139658 ] ], [ [ 140614, 140628 ], [ 109055, 109069 ] ], [ [ 141268, 141289 ], [ 109545, 109566 ], [ 141124, 141145 ] ], [ [ 142099, 142942 ], [ 108695, 108715 ] ], [ [ 142949, 142969 ], [ 103911, 103931 ] ], [ [ 143538, 143554 ] ], [ [ 144130, 144149 ], [ 111739, 111758 ], [ 143984, 144003 ], [ 146743, 146762 ] ], [ [ 144774, 144795 ], [ 111801, 111822 ], [ 144071, 144092 ], [ 146796, 146817 ] ], [ [ 145501, 147012 ], [ 101603, 101606 ], [ 101769, 101772 ] ], [ [ 147015, 147030 ] ], [ [ 147174, 147272 ], [ 147303, 147313 ] ], [ [ 147323, 147334 ], [ 147417, 147428 ] ], [ [ 147435, 147440 ] ], [ [ 147908, 147912 ] ] ]
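The get_client() and secede() docstrings in the worker module above describe how a task can spawn sub-tasks through the worker's client and hand its thread slot back while it waits. A minimal sketch of that pattern follows; the in-process cluster and the fan_out helper are illustrative assumptions, not part of the file above.

from dask.distributed import Client, get_client, secede

def fan_out(n):
    # Runs inside a task: reuse the worker's client, fan out sub-tasks,
    # then secede so another task can use this thread while we wait.
    client = get_client()
    futures = client.map(lambda x: x + 1, range(n))
    secede()
    return sum(client.gather(futures))

if __name__ == "__main__":
    client = Client(processes=False)  # small local cluster, for illustration only
    print(client.submit(fan_out, 10).result())  # expected output: 55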
# -*- coding: utf-8 -*- # Copyright 2022 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # Generated code. DO NOT EDIT! # # Snippet for CreateUtilizationReport # NOTE: This snippet has been automatically generated for illustrative purposes only. # It may require modifications to work in your environment. # To install the latest published package dependency, execute the following: # python3 -m pip install google-cloud-vm-migration # [START vmmigration_v1_generated_VmMigration_CreateUtilizationReport_async] from google.cloud import vmmigration_v1 async def sample_create_utilization_report(): # Create a client client = vmmigration_v1.VmMigrationAsyncClient() # Initialize request argument(s) request = vmmigration_v1.CreateUtilizationReportRequest( parent="parent_value", utilization_report_id="utilization_report_id_value", ) # Make the request operation = client.create_utilization_report(request=request) print("Waiting for operation to complete...") response = await operation.result() # Handle the response print(response) # [END vmmigration_v1_generated_VmMigration_CreateUtilizationReport_async]
[ [ [ 1052, 1066 ], [ 1150, 1164 ], [ 1242, 1256 ] ], [ [ 1069, 1615 ] ] ]
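The generated sample above only defines an async coroutine. A hypothetical driver for it is sketched below; it assumes the snippet was saved as a module with the coroutine in scope, that google-cloud-vm-migration is installed, that application-default credentials are configured, and that a real parent resource replaces the placeholder value.

import asyncio

# sample_create_utilization_report is the coroutine defined in the snippet above.
if __name__ == "__main__":
    asyncio.run(sample_create_utilization_report())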
"""Class to hold the tracks and cameras of a 3D scene. This can be the output of either data association or of bundle adjustment. Authors: Ayush Baid, John Lambert, Xiaolong Wu """ import itertools from typing import Any, Dict, List, Optional, Tuple import numpy as np from gtsam import PinholeCameraCal3Bundler, Pose3, SfmTrack import gtsfm.utils.graph as graph_utils import gtsfm.utils.logger as logger_utils import gtsfm.utils.reprojection as reproj_utils logger = logger_utils.get_logger() EQUALITY_TOLERANCE = 1e-5 PRINT_NUM_SIG_FIGS = 2 class GtsfmData: """Class containing cameras and tracks, essentially describing the complete 3D scene. This class is needed over GTSAM's SfmData type because GTSAM's type does not allow for non-contiguous cameras. The situation of non-contiguous cameras can exists because of failures in front-end. """ def __init__(self, number_images: int) -> None: """Initializes the class. Args: number_images: number of images/cameras in the scene. """ self._cameras: Dict[int, PinholeCameraCal3Bundler] = {} self._tracks: List[SfmTrack] = [] self._number_images = number_images def __eq__(self, other: object) -> bool: """Checks equality with the other object.""" if not isinstance(other, GtsfmData): return False if self._number_images != other.number_images(): return False for i, cam in self._cameras.items(): other_cam = other.get_camera(i) if not cam.equals(other_cam, EQUALITY_TOLERANCE): return False for j in range(self.number_tracks()): track = self.get_track(j) other_track = other.get_track(j) if track.number_measurements() != other_track.number_measurements(): return False for k in range(track.number_measurements()): i, uv = track.measurement(k) other_i, other_uv = other_track.measurement(k) if i != other_i: return False if not np.allclose(uv, other_uv): return False return True def number_images(self) -> int: """Getter for the number of images. Returns: Number of images. """ return self._number_images def number_tracks(self) -> int: """Getter for the number of tracks. Returns: Number of tracks. """ return len(self._tracks) def get_valid_camera_indices(self) -> List[int]: """Getter for image indices where there is a valid (not None) camera. Returns: List of indices with a valid camera. """ return list(self._cameras.keys()) def get_camera(self, index: int) -> Optional[PinholeCameraCal3Bundler]: """Getter for camera. Args: index: the image index to fetch the camera for. Returns: The camera if it is a valid one, None otherwise. """ return self._cameras.get(index) def get_camera_poses(self) -> List[Optional[Pose3]]: """Getter for camera poses wTi. This function returns the pose for all cameras (equal to number_images in GtsfmData), even if they were not computed by the pipeline. Returns: camera poses as a list, each representing wTi """ cameras = [self.get_camera(i) for i in range(self.number_images())] poses = [camera.pose() if camera is not None else None for camera in cameras] return poses def get_track(self, index: int) -> SfmTrack: """Getter for the track. Args: index: track index to fetch. Returns: Requested track. """ return self._tracks[index] def add_track(self, track: SfmTrack) -> bool: """Add a track, after checking if all the cameras in the track are already added. Args: track: track to add. Returns: Flag indicating the success of adding operation. 
""" # check if all cameras are already added for j in range(track.number_measurements()): i, _ = track.measurement(j) if i not in self._cameras: return False self._tracks.append(track) return True def add_camera(self, index: int, camera: PinholeCameraCal3Bundler) -> None: """Adds a camera. Args: index: the index associated with this camera. camera: camera object to it. Raises: ValueError: if the camera to be added is not a valid camera object. """ if camera is None: raise ValueError("Camera cannot be None, should be a valid camera") self._cameras[index] = camera def get_track_length_statistics(self) -> Tuple[float, float]: """Compute mean and median lengths of all the tracks. Returns: Mean track length. Median track length. """ if self.number_tracks() == 0: return 0, 0 track_lengths = self.get_track_lengths() return np.mean(track_lengths), np.median(track_lengths) def get_track_lengths(self) -> np.ndarray: """Get an array containing the lengths of all tracks. Returns: Array containing all track lengths. """ if self.number_tracks() == 0: return np.array([], dtype=np.uint32) track_lengths = [self.get_track(j).number_measurements() for j in range(self.number_tracks())] return np.array(track_lengths, dtype=np.uint32) def select_largest_connected_component(self) -> "GtsfmData": """Selects the subset of data belonging to the largest connected component of the graph where the edges are between cameras which feature in the same track. Returns: New GtSfmData object with the subset of tracks and cameras. """ camera_edges = [] for sfm_track in self._tracks: cameras_in_use = [] for m_idx in range(sfm_track.number_measurements()): i, _ = sfm_track.measurement(m_idx) cameras_in_use.append(i) # Recreate track connectivity from track information # For example: a track has cameras [0, 2, 5]. In that case we will add pairs (0, 2), (0, 5), (2, 5) camera_edges += list(itertools.combinations(cameras_in_use, 2)) if len(camera_edges) == 0: return GtsfmData(self._number_images) cameras_in_largest_cc = graph_utils.get_nodes_in_largest_connected_component(camera_edges) logger.info( "Largest connected component contains {} of {} cameras returned by front-end (of {} total imgs)".format( len(cameras_in_largest_cc), len(self.get_valid_camera_indices()), self._number_images ) ) return GtsfmData.from_selected_cameras(self, cameras_in_largest_cc) @classmethod def from_selected_cameras(cls, gtsfm_data: "GtsfmData", camera_indices: List[int]) -> "GtsfmData": """Selects the cameras in the input list and the tracks associated with those cameras. Args: gtsfm_data: data to pick the cameras from. camera_indices: camera indices to select and keep in the new data. Returns: New object with the selected cameras and associated tracks. """ new_data = cls(gtsfm_data.number_images()) for i in gtsfm_data.get_valid_camera_indices(): if i in camera_indices: new_data.add_camera(i, gtsfm_data.get_camera(i)) new_camera_indices = new_data.get_valid_camera_indices() # add tracks which have all the camera present in new data for j in range(gtsfm_data.number_tracks()): track = gtsfm_data.get_track(j) is_valid = True for k in range(track.number_measurements()): i, _ = track.measurement(k) if i not in new_camera_indices: is_valid = False break if is_valid: new_data.add_track(track) return new_data def get_scene_reprojection_errors(self) -> np.ndarray: """Get the scene reprojection errors for all 3D points and all associated measurements. Returns: Reprojection errors as a 1D numpy array. 
""" scene_reproj_errors: List[float] = [] for track in self._tracks: track_errors, _ = reproj_utils.compute_track_reprojection_errors(self._cameras, track) scene_reproj_errors.extend(track_errors) return np.array(scene_reproj_errors) def aggregate_metrics(self) -> Dict[str, Any]: """Aggregate metrics about the reprojection errors and 3d track lengths (summary stats). Args: ba_data: bundle adjustment result Returns: dictionary containing metrics of bundle adjustment result """ track_lengths_3d = self.get_track_lengths() scene_reproj_errors = self.get_scene_reprojection_errors() convert_to_rounded_float = lambda x: float(np.round(x, 3)) stats_dict = {} stats_dict["number_tracks"] = self.number_tracks() stats_dict["3d_track_lengths"] = { "min": convert_to_rounded_float(track_lengths_3d.min()), "mean": convert_to_rounded_float(np.mean(track_lengths_3d)), "median": convert_to_rounded_float(np.median(track_lengths_3d)), "max": convert_to_rounded_float(track_lengths_3d.max()), } stats_dict["reprojection_errors"] = { "min": convert_to_rounded_float(np.min(scene_reproj_errors)), "mean": convert_to_rounded_float(np.mean(scene_reproj_errors)), "median": convert_to_rounded_float(np.median(scene_reproj_errors)), "max": convert_to_rounded_float(np.max(scene_reproj_errors)), } return stats_dict def get_avg_scene_reprojection_error(self) -> float: """Get average reprojection error for all 3d points in the entire scene Returns: Average of reprojection errors for every 3d point to its 2d measurements """ scene_reproj_errors = self.get_scene_reprojection_errors() scene_avg_reproj_error = np.mean(scene_reproj_errors) return scene_avg_reproj_error def log_scene_reprojection_error_stats(self) -> None: """Logs reprojection error stats for all 3d points in the entire scene.""" scene_reproj_errors = self.get_scene_reprojection_errors() logger.info("Min scene reproj error: %.3f", np.min(scene_reproj_errors)) logger.info("Avg scene reproj error: %.3f", np.mean(scene_reproj_errors)) logger.info("Median scene reproj error: %.3f", np.median(scene_reproj_errors)) logger.info("Max scene reproj error: %.3f", np.max(scene_reproj_errors)) def __validate_track(self, track: SfmTrack, reproj_err_thresh: float) -> bool: """Validates a track based on reprojection errors and cheirality checks. Args: track: track with 3D landmark and measurements. reproj_err_thresh: reprojection err threshold for each measurement. Returns: validity of the track. """ errors, avg_reproj_error = reproj_utils.compute_track_reprojection_errors(self._cameras, track) # track is valid as all measurements have error below the threshold cheirality_success = np.all(~np.isnan(errors)) return np.all(errors < reproj_err_thresh) and cheirality_success def filter_landmarks(self, reproj_err_thresh: float = 5) -> "GtsfmData": """Filters out landmarks with high reprojection error Args: reproj_err_thresh: reprojection err threshold for each measurement. """ # TODO: move this function to utils or GTSAM filtered_data = GtsfmData(self.number_images()) # add all the cameras for i in self.get_valid_camera_indices(): filtered_data.add_camera(i, self.get_camera(i)) for j in range(self.number_tracks()): track = self.get_track(j) if self.__validate_track(track, reproj_err_thresh): filtered_data.add_track(track) return filtered_data
[ [ [ 189, 198 ], [ 6545, 6554 ] ], [ [ 218, 221 ], [ 8917, 8920 ] ], [ [ 223, 227 ], [ 1073, 1077 ], [ 8907, 8911 ] ], [ [ 229, 233 ], [ 1136, 1140 ], [ 2605, 2609 ], [ 3163, 3167 ], [ 7208, 7212 ], [ 8620, 8624 ] ], [ [ 235, 243 ], [ 2856, 2864 ], [ 3168, 3176 ] ], [ [ 245, 250 ], [ 4950, 4955 ] ], [ [ 259, 270 ], [ 2131, 2133 ], [ 5254, 5256 ], [ 5278, 5280 ], [ 5339, 5341 ], [ 5548, 5550 ], [ 5567, 5569 ], [ 5697, 5699 ], [ 5727, 5729 ], [ 8400, 8402 ], [ 8840, 8842 ], [ 9609, 9611 ], [ 9684, 9686 ], [ 9883, 9885 ], [ 9958, 9960 ], [ 10036, 10038 ], [ 10113, 10115 ], [ 10532, 10534 ], [ 10860, 10862 ], [ 10941, 10943 ], [ 11026, 11028 ], [ 11110, 11112 ], [ 11733, 11735 ], [ 11741, 11743 ], [ 11774, 11776 ], [ 9352, 9354 ] ], [ [ 289, 313 ], [ 1083, 1107 ], [ 2865, 2889 ], [ 4475, 4499 ] ], [ [ 315, 320 ], [ 3177, 3182 ] ], [ [ 322, 330 ], [ 1141, 1149 ], [ 3689, 3697 ], [ 3914, 3922 ], [ 11178, 11186 ] ], [ [ 339, 371 ], [ 6707, 6718 ] ], [ [ 379, 413 ], [ 472, 484 ] ], [ [ 421, 461 ], [ 8702, 8714 ], [ 11559, 11571 ] ], [ [ 463, 469 ], [ 6782, 6788 ], [ 10816, 10822 ], [ 10897, 10903 ], [ 10979, 10985 ], [ 11066, 11072 ] ], [ [ 499, 517 ], [ 1584, 1602 ] ], [ [ 525, 543 ] ], [ [ 556, 565 ], [ 1333, 1342 ], [ 6643, 6652 ], [ 7053, 7062 ], [ 12156, 12165 ] ] ]
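A minimal usage sketch of the GtsfmData container defined above, assuming GtsfmData is importable from that module and that gtsam's Python wrapper exposes the default PinholeCameraCal3Bundler constructor. The default-constructed cameras are placeholders for calibrated estimates; a real pipeline would also add SfmTrack objects before using the statistics methods.

from gtsam import PinholeCameraCal3Bundler

data = GtsfmData(number_images=3)
for i in range(2):  # deliberately leave image 2 without a camera
    data.add_camera(i, PinholeCameraCal3Bundler())

print(data.get_valid_camera_indices())  # [0, 1]
print(data.number_tracks())             # 0
print(data.get_camera_poses())          # [identity Pose3, identity Pose3, None]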
'''n = 99
p = 'garrafas'
while n > 0:
    if n == 1:
        p = 'garrafa'
    print(f'{n} {p} de cerveja no muro!')
    print(f'{n} {p} no muro!')
    print('Se uma garrafa cair no chão')
    print('Quantas restarão?')
    n -= 1
print('Fim da canção!')'''
p = 'garrafas'
for c in range(99, 0, -1):
    if c == 1:
        p = 'garrafa'
    print(f'{c} {p} de cerveja no muro!')
    print(f'{c} {p} no muro!')
    print('Se uma garrafa cair no chão')
    print('Quantas restarão?')
print('Fim da canção!')
[ [ [ 262, 263 ], [ 359, 360 ], [ 401, 402 ] ], [ [ 282, 283 ], [ 312, 313 ], [ 355, 356 ], [ 397, 398 ] ], [ [ 328, 329 ], [ 359, 360 ], [ 401, 402 ] ] ]
# -*- coding: utf-8 -*-
# Generated by Django 1.11.6 on 2018-02-17 21:34
from __future__ import unicode_literals

from django.db import migrations, models
import django.db.models.deletion


class Migration(migrations.Migration):

    dependencies = [
        ('wagtailcommerce_carts', '0004_cart_coupon'),
        ('wagtailcommerce_orders', '0014_auto_20180130_1050'),
    ]

    operations = [
        migrations.AddField(
            model_name='order',
            name='cart',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='orders', to='wagtailcommerce_carts.Cart', verbose_name='cart'),
        ),
    ]
[ [ [ 96, 112 ] ], [ [ 136, 146 ], [ 206, 216 ], [ 403, 413 ] ], [ [ 148, 154 ], [ 499, 505 ] ], [ [ 162, 187 ], [ 550, 556 ] ], [ [ 196, 205 ] ] ]
import os import numpy as np from pyhlm.model import WeakLimitHDPHLM, WeakLimitHDPHLMPython from pyhlm.internals.hlm_states import WeakLimitHDPHLMStates from pyhlm.word_model import LetterHSMM, LetterHSMMPython import pyhsmm import warnings from tqdm import trange warnings.filterwarnings('ignore') import time #%% def load_datas(dataset_dir): data = [] names = np.loadtxt(dataset_dir + "files.txt", dtype=str) files = names for name in names: mfcc = np.loadtxt(dataset_dir + "DATA/" + name + ".txt") delta = np.loadtxt(dataset_dir + "DATA/" + name + "_d.txt") delta_delta = np.loadtxt(dataset_dir + "DATA/" + name + "_dd.txt") data.append(np.hstack((mfcc, np.hstack((delta,delta_delta))))) return data def unpack_durations(dur): unpacked = np.zeros(dur.sum()) d = np.cumsum(dur[:-1]) unpacked[d-1] = 1.0 return unpacked def save_stateseq(model, dataset_dir): # Save sampled states sequences. names = np.loadtxt(dataset_dir + "files.txt", dtype=str) for i, s in enumerate(model.states_list): with open("results/" + names[i] + "_s.txt", "a") as f: np.savetxt(f, s.stateseq) with open("results/" + names[i] + "_l.txt", "a") as f: np.savetxt(f, s.letter_stateseq) with open("results/" + names[i] + "_d.txt", "a") as f: np.savetxt(f, unpack_durations(s.durations_censored)) def save_params(itr_idx, model): with open("parameters/ITR_{0:04d}.txt".format(itr_idx), "w") as f: f.write(str(model.params)) def save_loglikelihood(model): with open("summary_files/log_likelihood.txt", "a") as f: f.write(str(model.log_likelihood()) + "\n") def save_resample_times(resample_time): with open("summary_files/resample_times.txt", "a") as f: f.write(str(resample_time) + "\n") #%% if not os.path.exists('results'): os.mkdir('results') if not os.path.exists('parameters'): os.mkdir('parameters') if not os.path.exists('summary_files'): os.mkdir('summary_files') #%% dataset_dir = "murakami_dataset/" #%% thread_num = 64 pre_train_iter = 1 train_iter = 100 trunc = 120 obs_dim = 9 letter_upper = 50 word_upper = 50 model_hypparams = {'num_states': word_upper, 'alpha': 10, 'gamma': 10, 'init_state_concentration': 10} obs_hypparams = { 'mu_0':np.zeros(obs_dim), 'sigma_0':np.identity(obs_dim), 'kappa_0':0.01, 'nu_0':obs_dim+2 } dur_hypparams = { 'alpha_0':200, 'beta_0':10 } #%% letter_obs_distns = [pyhsmm.distributions.Gaussian(**obs_hypparams) for state in range(letter_upper)] letter_dur_distns = [pyhsmm.distributions.PoissonDuration(**dur_hypparams) for state in range(letter_upper)] dur_distns = [pyhsmm.distributions.PoissonDuration(lmbda=20) for state in range(word_upper)] length_distn = pyhsmm.distributions.PoissonDuration(alpha_0=30, beta_0=10, lmbda=3) #%% letter_hsmm = LetterHSMM(alpha=10, gamma=10, init_state_concentration=10, obs_distns=letter_obs_distns, dur_distns=letter_dur_distns) model = WeakLimitHDPHLM(model_hypparams, letter_hsmm, dur_distns, length_distn) #%% files = np.loadtxt(dataset_dir + "files.txt", dtype=str) datas = load_datas(dataset_dir) #%% Pre training. 
for d in datas:
    letter_hsmm.add_data(d, trunc=trunc)
for t in trange(pre_train_iter):
    letter_hsmm.resample_model(num_procs=1)
letter_hsmm.states_list = []

#%%
print("Add datas...")
for d in datas:
    model.add_data(d, trunc=trunc, generate=False)
model.resample_states(num_procs=thread_num)
# # or
# for d in datas:
#     model.add_data(d, trunc=trunc, initialize_from_prior=False)
print("Done!")

#%% Save init params and hyper params
with open("parameters/hypparams.txt", "w") as f:
    f.write(str(model.hypparams))
save_params(0, model)
save_loglikelihood(model)

#%%
for t in trange(train_iter):
    st = time.time()
    model.resample_model(num_procs=thread_num)
    resample_model_time = time.time() - st
    save_stateseq(model, dataset_dir)
    save_loglikelihood(model)
    save_params(t+1, model)
    save_resample_times(resample_model_time)
    print(model.word_list)
    print(model.word_counts())
    print("log_likelihood:{}".format(model.log_likelihood()))
    print("resample_model:{}".format(resample_model_time))
[ [ [ 7, 9 ], [ 1856, 1858 ], [ 1887, 1889 ], [ 1915, 1917 ], [ 1949, 1951 ], [ 1980, 1982 ], [ 2017, 2019 ] ], [ [ 17, 28 ], [ 2329, 2331 ], [ 2362, 2364 ], [ 3107, 3109 ], [ 371, 373 ], [ 476, 478 ], [ 542, 544 ], [ 616, 618 ], [ 689, 691 ], [ 706, 708 ], [ 799, 801 ], [ 827, 829 ], [ 980, 982 ], [ 1150, 1152 ], [ 1251, 1253 ], [ 1359, 1361 ] ], [ [ 53, 68 ], [ 3022, 3037 ] ], [ [ 70, 91 ] ], [ [ 131, 152 ] ], [ [ 182, 192 ], [ 2894, 2904 ] ], [ [ 194, 210 ] ], [ [ 218, 224 ], [ 2508, 2514 ], [ 2610, 2616 ], [ 2712, 2718 ], [ 2806, 2812 ] ], [ [ 232, 240 ], [ 265, 273 ] ], [ [ 258, 264 ], [ 3273, 3279 ], [ 3798, 3804 ] ], [ [ 306, 310 ], [ 3827, 3831 ], [ 3912, 3916 ] ], [ [ 320, 330 ], [ 3164, 3174 ] ], [ [ 761, 777 ], [ 1373, 1389 ] ], [ [ 896, 909 ], [ 3933, 3946 ] ], [ [ 1418, 1429 ], [ 3736, 3747 ], [ 4001, 4012 ] ], [ [ 1558, 1576 ], [ 3758, 3776 ], [ 3971, 3989 ] ], [ [ 1703, 1722 ], [ 4029, 4048 ] ], [ [ 2048, 2059 ], [ 3118, 3129 ], [ 3175, 3186 ], [ 3954, 3965 ] ], [ [ 2087, 2097 ], [ 3496, 3506 ], [ 3874, 3884 ] ], [ [ 2103, 2117 ], [ 3280, 3294 ] ], [ [ 2122, 2132 ], [ 3805, 3815 ] ], [ [ 2139, 2144 ], [ 3257, 3262 ], [ 3441, 3446 ] ], [ [ 2151, 2158 ], [ 2338, 2345 ], [ 2374, 2381 ], [ 2415, 2422 ] ], [ [ 2163, 2175 ], [ 2574, 2586 ], [ 2683, 2695 ] ], [ [ 2181, 2191 ], [ 2230, 2240 ], [ 2778, 2788 ] ], [ [ 2197, 2212 ], [ 3038, 3053 ] ], [ [ 2300, 2313 ], [ 2540, 2553 ] ], [ [ 2427, 2440 ], [ 2649, 2662 ] ], [ [ 2487, 2504 ], [ 2965, 2982 ] ], [ [ 2589, 2606 ], [ 2995, 3012 ] ], [ [ 2698, 2708 ], [ 3068, 3078 ] ], [ [ 2791, 2803 ], [ 3080, 3092 ] ], [ [ 2880, 2891 ], [ 3055, 3066 ], [ 3227, 3238 ], [ 3301, 3312 ], [ 3341, 3352 ] ], [ [ 3014, 3019 ], [ 3417, 3422 ], [ 3464, 3469 ], [ 3718, 3723 ], [ 3751, 3756 ], [ 3777, 3782 ], [ 3843, 3848 ], [ 3947, 3952 ], [ 3990, 3995 ], [ 4018, 4023 ], [ 4080, 4085 ], [ 4107, 4112 ], [ 4165, 4170 ] ], [ [ 3099, 3104 ] ], [ [ 3156, 3161 ], [ 3216, 3221 ], [ 3406, 3411 ] ], [ [ 3211, 3212 ], [ 3248, 3249 ] ], [ [ 3268, 3269 ] ], [ [ 3401, 3402 ], [ 3432, 3433 ] ], [ [ 3699, 3700 ], [ 3706, 3707 ] ], [ [ 3793, 3794 ], [ 4013, 4014 ] ], [ [ 3822, 3824 ], [ 3926, 3928 ] ], [ [ 3890, 3909 ], [ 4049, 4068 ], [ 4227, 4246 ] ] ]
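A worked example of the unpack_durations() helper defined near the top of the script above; the function body is copied here only so the sketch runs on its own. Each segment duration contributes a 1.0 at its last frame, except for the final segment.

import numpy as np

def unpack_durations(dur):
    # copied from the script above
    unpacked = np.zeros(dur.sum())
    d = np.cumsum(dur[:-1])
    unpacked[d - 1] = 1.0
    return unpacked

print(unpack_durations(np.array([3, 2, 4])))
# -> [0. 0. 1. 0. 1. 0. 0. 0. 0.]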
# # MIT License # # Copyright 2017 Launchpad project contributors (see COPYRIGHT.md) # # Permission is hereby granted, free of charge, to any person obtaining a # copy of this software and associated documentation files (the "Software"), # to deal in the Software without restriction, including without limitation # the rights to use, copy, modify, merge, publish, distribute, sublicense, # and/or sell copies of the Software, and to permit persons to whom the # Software is furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL # THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING # FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER # DEALINGS IN THE SOFTWARE. # """ This is a client implementation. This module is appended to zip file with python interpreter and sent together with bootloader executable to be executed on remote machine. """
[]
from django.shortcuts import render
from django.views.generic import View
from django.core.exceptions import ObjectDoesNotExist
from django.contrib import messages

from cart.models import Order


class Dashboard(View):
    def get(self, *args, **kwargs):
        order_qs = Order.objects.filter(user=self.request.user, orderd=True)
        context = {
            'orders': order_qs,
        }
        return render(self.request, 'dashboard/index.html', context=context)
[ [ [ 29, 35 ], [ 404, 410 ] ], [ [ 69, 73 ], [ 211, 215 ] ], [ [ 109, 127 ] ], [ [ 155, 163 ] ], [ [ 188, 193 ], [ 271, 276 ] ], [ [ 201, 210 ] ] ]
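The Dashboard class-based view above still has to be routed; a hypothetical urls.py entry is sketched below, with the URL prefix and pattern name chosen for illustration rather than taken from the project. Since the view filters orders by self.request.user, in practice it would normally also be protected with LoginRequiredMixin or login_required.

from django.urls import path

from .views import Dashboard  # the view defined above

urlpatterns = [
    path("dashboard/", Dashboard.as_view(), name="dashboard"),
]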
import pytest from testutils.factories import create_test_person from django.contrib.auth.models import User, Permission from openstates.data.models import Person, Organization from people_admin.models import UnmatchedName, NameStatus, DeltaSet from people_admin.views import MATCHER_PERM, EDIT_PERM, RETIRE_PERM import json @pytest.fixture def admin_user(): u = User.objects.create(username="admin") user_permissions = list( Permission.objects.filter( codename__in=[ p.split(".")[1] for p in (MATCHER_PERM, EDIT_PERM, RETIRE_PERM) ] ).values_list("id", flat=True) ) u.user_permissions.set(user_permissions) return u @pytest.mark.django_db def test_apply_match_matches(client, django_assert_num_queries, kansas, admin_user): p = Person.objects.create(name="Samuel L. Jackson") # kansas is a test fixture, it has some fake data attached we can use session = kansas.legislative_sessions.get(identifier="2020") UnmatchedName.objects.create( id=1, session=session, name="Sam Jackson", sponsorships_count=5, votes_count=5 ) apply_data = { "match_data": {"unmatchedId": 1, "button": "Match", "matchedId": p.id} } client.force_login(admin_user) with django_assert_num_queries(6): resp = client.post( "/admin/people/matcher/update/", json.dumps(apply_data), content_type="application/json", ) assert resp.status_code == 200 assert resp.json() == {"status": "success"} # get refreshed object from database matched = UnmatchedName.objects.get() assert matched.status == NameStatus.MATCHED_PERSON assert matched.matched_person_id == p.id @pytest.mark.django_db def test_apply_match_ignore(client, django_assert_num_queries, kansas, admin_user): session = kansas.legislative_sessions.get(identifier="2020") UnmatchedName.objects.create( id=2, session=session, name="Eva Green", sponsorships_count=16, votes_count=7 ) match_data = {"match_data": {"unmatchedId": 2, "button": "Ignore", "matchedId": ""}} client.force_login(admin_user) with django_assert_num_queries(6): # client can be used to mock GET/POST/etc. 
resp = client.post( "/admin/people/matcher/update/", json.dumps(match_data), content_type="application/json", ) assert resp.status_code == 200 assert resp.json() == {"status": "success"} # get refreshed object from database matched = UnmatchedName.objects.get() assert matched.status == NameStatus.IGNORED @pytest.mark.django_db def test_apply_match_source_error( client, django_assert_num_queries, kansas, admin_user ): session = kansas.legislative_sessions.get(identifier="2020") UnmatchedName.objects.create( id=3, session=session, name="David Tennant", sponsorships_count=10, votes_count=2, ) match_data = { "match_data": {"unmatchedId": 3, "button": "Source Error", "matchedId": ""} } client.force_login(admin_user) with django_assert_num_queries(6): resp = client.post( "/admin/people/matcher/update/", json.dumps(match_data), content_type="application/json", ) assert resp.status_code == 200 assert resp.json() == {"status": "success"} # get refreshed object from database matched = UnmatchedName.objects.get() assert matched.status == NameStatus.SOURCE_ERROR @pytest.mark.django_db def test_apply_match_404(client, django_assert_num_queries, admin_user): client.force_login(admin_user) with django_assert_num_queries(5): match_data = { "match_data": {"unmatchedId": 9999, "button": "Match", "matchedId": "1"} } resp = client.post( "/admin/people/matcher/update/", json.dumps(match_data), content_type="application/json", ) assert resp.status_code == 404 @pytest.mark.django_db def test_people_list(client, django_assert_num_queries, admin_user, kansas): house = Organization.objects.get(name="Kansas House") senate = Organization.objects.get(name="Kansas Senate") sam = create_test_person("Sam Jackson", org=house, party="Democratic", district="1") sam.identifiers.create(scheme="twitter", identifier="@SamuelLJackson") sam.contact_details.create( value="555-555-5555", type="voice", note="Capitol Office" ) create_test_person("Bosephorous Fogg", org=house, party="Republican", district="2") create_test_person("Cran Crumble", org=senate, party="Republican", district="A") client.force_login(admin_user) with django_assert_num_queries(7): resp = client.get("/admin/people/ks/") assert resp.status_code == 200 people = resp.context["context"]["current_people"] assert len(people) == 3 sam_data = [p for p in people if p["name"] == "Sam Jackson"][0] assert sam_data["district"] == "1" assert sam_data["twitter"] == "@SamuelLJackson" assert sam_data["capitol_voice"] == "555-555-5555" @pytest.mark.django_db def test_retire_person(client, django_assert_num_queries, admin_user, kansas): house = Organization.objects.get(name="Kansas House") sam = create_test_person("Sam Jackson", org=house, party="Democratic", district="1") retire_data = { "id": sam.id, "name": sam.name, "reason": "ran for new office", "retirementDate": "2021-01-01", "isDead": False, "vacantSeat": True, } client.force_login(admin_user) with django_assert_num_queries(6): resp = client.post( "/admin/people/retire/", json.dumps(retire_data), content_type="application/json", ) assert resp.status_code == 200 ds = DeltaSet.objects.get() assert "retire Sam Jackson" == ds.name assert ds.person_retirements.all().count() == 1 retirement = ds.person_retirements.get() assert retirement.person_id == sam.id assert retirement.reason == "ran for new office" assert retirement.date == "2021-01-01" assert retirement.is_vacant assert retirement.is_dead is False
[ [ [ 7, 13 ], [ 328, 334 ], [ 698, 704 ], [ 1741, 1747 ], [ 2635, 2641 ], [ 3552, 3558 ], [ 4041, 4047 ], [ 5155, 5161 ] ], [ [ 46, 64 ], [ 4268, 4286 ], [ 4530, 4548 ], [ 4618, 4636 ], [ 5324, 5342 ] ], [ [ 104, 108 ], [ 369, 373 ] ], [ [ 110, 120 ], [ 444, 454 ] ], [ [ 156, 162 ], [ 813, 819 ] ], [ [ 164, 176 ], [ 4152, 4164 ], [ 4211, 4223 ], [ 5268, 5280 ] ], [ [ 209, 222 ], [ 1005, 1018 ], [ 1610, 1623 ], [ 1916, 1929 ], [ 2556, 2569 ], [ 2822, 2835 ], [ 3468, 3481 ] ], [ [ 224, 234 ], [ 1667, 1677 ], [ 2613, 2623 ], [ 3525, 3535 ] ], [ [ 236, 244 ], [ 5891, 5899 ] ], [ [ 276, 288 ], [ 540, 552 ] ], [ [ 290, 299 ], [ 554, 563 ] ], [ [ 301, 312 ], [ 565, 576 ] ], [ [ 320, 324 ], [ 1392, 1396 ], [ 2338, 2342 ], [ 3250, 3254 ], [ 3924, 3928 ], [ 5762, 5766 ] ], [ [ 347, 357 ] ], [ [ 724, 748 ] ], [ [ 1767, 1790 ] ], [ [ 2661, 2690 ] ], [ [ 3578, 3598 ] ], [ [ 4067, 4083 ] ], [ [ 5181, 5199 ] ] ]
""" # Copyright 2021 Huawei Technologies Co., Ltd. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ import os from pathlib import Path from imageops.utils import Utils from imageops.logger import Logger class Server(object): """ Backend server for imageops API The request_id is the only input param which used to identify this request """ logger = Logger(__name__).get_logger() def __init__(self, request_id=None): """ Init Server class """ if not request_id: msg = 'Lacking request_id.' self.logger.error(msg) raise ValueError(msg) self.request_id = str(request_id) if not os.getenv('TMP_PATH'): msg = 'No TMP_PATH found in env.' self.logger.error(msg) raise ValueError(msg) self.tmp_path = os.getenv('TMP_PATH') if not os.getenv('IMAGE_PATH'): msg = 'No IMAGE_PATH found in env.' self.logger.error(msg) raise ValueError(msg) self.image_path = os.getenv('IMAGE_PATH') self.check_record_file = 'check_info.json' self.compress_record_file = 'compress_status.log' self.check_rc = {0: 'Check Completed, the image is (now) consistent', 1: 'Check completed, image is corrupted', 2: 'Check completed, image has leaked clusters, but is not corrupted', 3: 'Check failed', 4: 'Check in Progress', 5: 'Check Exiting because of not support this type of image', 6: 'Check Time Out'} self.compress_rc = {0: 'Compress Completed', 1: 'Compress In Progress', 2: 'Compress Failed', 3: 'Compress Exiting because of No enouth space left', 4: 'Compress Time Out'} def check_vm_image(self, input_image=None): """ Check the input vm image to get it's checksum and basic info such as type and size """ self.logger.info('Start to check VM image %s ...', input_image) if not input_image: msg = 'No image is given to do the check.' 
self.logger.error(msg) raise ValueError(msg) image = Path(input_image) if not image.is_file(): msg = 'Given image {} is not exist.'.format(input_image) self.logger.error(msg) raise ValueError(msg) try: check_record_path = os.path.join(self.tmp_path, self.request_id) os.makedirs(check_record_path) check_record_file = os.path.join(check_record_path, self.check_record_file) check_info = {'checkResult': 4} check_info['imageInfo'] = Utils.check_cmd_exec(input_image, check_record_file) check_info['checksum'] = Utils.get_md5_checksum(input_image, check_record_file) Utils.write_json_file(check_record_file, check_info) status = 0 msg = 'Check In Progress' except Exception as exception: status = 1 msg = 'Check Failed' check_info = {'checkResult': 99} Utils.write_json_file(check_record_file, check_info) self.logger.error(exception) self.logger.info('Check image %s, status: %s, msg: %s', input_image, status, msg) return status, msg def get_check_status(self): """ Get the status of one check with the request ID """ self.logger.info('Start to get check status...') check_info = {} try: check_record_file = os.path.join(self.tmp_path, self.request_id, self.check_record_file) check_info = Utils.read_json_file(check_record_file) self.logger.debug(check_info) if not check_info.get('imageInfo'): return 4, self.check_rc[4], check_info image_info = check_info.get('imageInfo') if image_info.get('filename'): file_name = image_info.get('filename').split('/')[-1] check_info['imageInfo']['filename'] = file_name if check_info.get('checkResult') == 4 or not check_info.get('checksum'): return 4, self.check_rc[4], check_info if check_info.get('checkResult') == 99: return 3, self.check_rc[3], check_info if check_info.get('checkResult') == 100: return 6, self.check_rc[6], check_info if check_info.get('checkResult') == 63: return 5, self.check_rc[5], check_info if check_info.get('checkResult') == 0: return 0, self.check_rc[0], check_info if check_info.get('checkResult') == 2: return 1, self.check_rc[1], check_info if check_info.get('checkResult') == 3: return 2, self.check_rc[2], check_info return 3, self.check_rc[3], check_info except IOError as io_exception: self.logger.exception(io_exception) return 3, '{}, {}'.format(self.check_rc[3], 'nonexistent request ID'), check_info except Exception: return 3, self.check_rc[3], check_info def compress_vm_image(self, input_image=None, output_image=None): """ Compress the input vm image to get a slim one which is sparsify Also can transfer raw image to qcow2 one """ self.logger.info('Start to compress vm image %s ...', input_image) if not input_image: msg = 'No image is given.' self.logger.error(msg) raise ValueError(msg) if not output_image: msg = 'No output image path is given.' 
self.logger.error(msg) raise ValueError(msg) image = Path(input_image) if not image.is_file(): msg = 'Image {} is not exist.'.format(input_image) self.logger.error(msg) raise ValueError(msg) try: compress_record_path = os.path.join(self.tmp_path, self.request_id) os.makedirs(compress_record_path) compress_record_file = os.path.join(compress_record_path, self.compress_record_file) self.logger.info('Start to compress image %s ...', input_image) if not Utils.check_compress_requires(input_image, self.tmp_path): self.logger.error('Free disk space under %s is not enough to compress image %s', self.tmp_path, input_image) status = 1 msg = '{}'.format(self.compress_rc.get(3)) Utils.append_write_plain_file(compress_record_file, msg) else: self.logger.info('Free disk space under %s is enough to compress image %s', self.tmp_path, input_image) Utils.compress_cmd_exec(input_image, output_image, compress_record_file) status = 0 msg = '{}'.format('Compress In Progress') except Exception as exception: self.logger.error(exception) status = 1 msg = '{}'.format(self.compress_rc.get(2)) Utils.append_write_plain_file(compress_record_file, msg) self.logger.info('Compress image %s with status: %s and msg: %s', input_image, status, msg) return status, msg def get_compress_status(self): """ Get the status of one compress with the request ID """ self.logger.info('Start to get status of compress image ...') try: compress_record_file = os.path.join(self.tmp_path, self.request_id, self.compress_record_file) with open(compress_record_file, 'r') as compress_file: for line in compress_file: if self.compress_rc[0] in line: self.logger.info(self.compress_rc[0]) return 0, self.compress_rc[0], 1 for item in [2, 3, 4]: if self.compress_rc[item] in line: self.logger.error(self.compress_rc[item]) return item, self.compress_rc[item], 0 except IOError as io_exception: self.logger.exception(io_exception) return 2, '{}, {}'.format(self.compress_rc[2], 'nonexistent request ID'), 0 except Exception as exception: self.logger.exception(exception) return 2, self.compress_rc[2], 0 try: compress_rate = Utils.get_compress_rate(compress_record_file) self.logger.info(self.compress_rc[1]) return 1, self.compress_rc[1], compress_rate except Exception as exception: self.logger.exception(exception) return 2, self.compress_rc[2], 0
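A minimal usage sketch for the Server class above, to make the check/compress flow concrete. It assumes the TMP_PATH and IMAGE_PATH environment variables are set as required by __init__, that the class is importable as imageops.server (the module path is inferred, not stated in the snippet), and that the image path and request id below are placeholders:

import os
import uuid

from imageops.server import Server  # assumed module path

os.environ.setdefault('TMP_PATH', '/tmp/imageops')
os.environ.setdefault('IMAGE_PATH', '/var/lib/images')

server = Server(request_id=str(uuid.uuid4()))

# start an asynchronous consistency check, then poll its status
status, msg = server.check_vm_image('/var/lib/images/example.qcow2')
code, text, info = server.get_check_status()
print(code, text, info.get('checksum'))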
[ [ [ 609, 611 ], [ 1190, 1192 ], [ 1352, 1354 ], [ 1390, 1392 ], [ 1558, 1560 ], [ 3092, 3094 ], [ 3149, 3151 ], [ 3212, 3214 ], [ 4227, 4229 ], [ 6744, 6746 ], [ 6801, 6803 ], [ 6870, 6872 ], [ 8342, 8344 ] ], [ [ 632, 636 ], [ 2858, 2862 ], [ 6513, 6517 ] ], [ [ 665, 670 ], [ 3351, 3356 ], [ 3441, 3446 ], [ 3509, 3514 ], [ 3776, 3781 ], [ 4411, 4416 ], [ 7028, 7033 ], [ 7348, 7353 ], [ 7592, 7597 ], [ 7920, 7925 ], [ 9377, 9382 ] ], [ [ 699, 705 ], [ 874, 880 ] ], [ [ 713, 719 ] ] ]
""" Compute the plane wave decomposition for an incident broadband plane wave on an open circular array using a modal beamformer of finite order. """ import numpy as np import matplotlib.pyplot as plt import micarray from micarray.util import db Nsf = 50 # order of the incident sound field N = 30 # order of modal beamformer/microphone array pw_angle = 1.23 * np.pi # incidence angle of plane wave pol_pwd = np.linspace(0, 2*np.pi, 180, endpoint=False) # angles for plane wave decomposition k = np.linspace(0, 20, 100) # wavenumber vector r = 1 # radius of array # get uniform grid (microphone positions) of order N pol, weights = micarray.modal.angular.grid_equal_polar_angle(N) # pressure on the surface of an open cylinder for an incident plane wave Bn = micarray.modal.radial.circular_pw(Nsf, k, r, setup='open') D = micarray.modal.radial.circ_diagonal_mode_mat(Bn) Psi_p = micarray.modal.angular.cht_matrix(Nsf, pol) Psi_pw = micarray.modal.angular.cht_matrix(Nsf, pw_angle) p = np.matmul(np.matmul(Psi_p, D), np.conj(Psi_pw.T)) p = np.squeeze(p) # incident plane wave exhibiting infinite spatial bandwidth # p = np.exp(1j * k[:, np.newaxis]*r * np.cos(pol - pw_angle)) # plane wave decomposition using modal beamforming Bn = micarray.modal.radial.circular_pw(N, k, r, setup='open') Dn, _ = micarray.modal.radial.regularize(1/Bn, 3000, 'softclip') D = micarray.modal.radial.circ_diagonal_mode_mat(Dn) Psi_p = micarray.modal.angular.cht_matrix(N, pol, weights) Psi_q = micarray.modal.angular.cht_matrix(N, pol_pwd) A_pwd = np.matmul(np.matmul(Psi_q, D), np.conj(Psi_p.T)) q_pwd = np.squeeze(np.matmul(A_pwd, np.expand_dims(p, 2))) q_pwd_t = np.fft.fftshift(np.fft.irfft(q_pwd, axis=0), axes=0) # visualize plane wave decomposition (aka beampattern) plt.figure() plt.pcolormesh(k, pol_pwd/np.pi, db(q_pwd.T), vmin=-40) plt.colorbar() plt.xlabel(r'$kr$') plt.ylabel(r'$\phi / \pi$') plt.title('Plane wave docomposition by modal beamformer (frequency domain)') plt.savefig('modal_beamforming_open_circular_array_fd.png') plt.figure() plt.pcolormesh(range(2*len(k)-2), pol_pwd/np.pi, db(q_pwd_t.T), vmin=-40) plt.colorbar() plt.ylabel(r'$\phi / \pi$') plt.title('Plane wave docomposition by modal beamformer (time domain)') plt.savefig('modal_beamforming_open_circular_array_td.png')
[ [ [ 166, 177 ], [ 373, 375 ], [ 422, 424 ], [ 439, 441 ], [ 510, 512 ], [ 1003, 1005 ], [ 1013, 1015 ], [ 1034, 1036 ], [ 1057, 1059 ], [ 1548, 1550 ], [ 1558, 1560 ], [ 1579, 1581 ], [ 1605, 1607 ], [ 1616, 1618 ], [ 1633, 1635 ], [ 1666, 1668 ], [ 1682, 1684 ], [ 1814, 1816 ], [ 2100, 2102 ] ], [ [ 185, 209 ], [ 1775, 1778 ], [ 1788, 1791 ], [ 1844, 1847 ], [ 1859, 1862 ], [ 1879, 1882 ], [ 1907, 1910 ], [ 1984, 1987 ], [ 2045, 2048 ], [ 2058, 2061 ], [ 2132, 2135 ], [ 2147, 2150 ], [ 2175, 2178 ], [ 2247, 2250 ] ], [ [ 217, 225 ], [ 649, 657 ], [ 777, 785 ], [ 840, 848 ], [ 897, 905 ], [ 950, 958 ], [ 1252, 1260 ], [ 1317, 1325 ], [ 1378, 1386 ], [ 1435, 1443 ], [ 1494, 1502 ] ], [ [ 252, 254 ], [ 1821, 1823 ], [ 2107, 2109 ] ], [ [ 256, 259 ], [ 811, 814 ], [ 931, 934 ], [ 984, 987 ] ], [ [ 302, 303 ], [ 695, 696 ], [ 1286, 1287 ], [ 1469, 1470 ], [ 1528, 1529 ] ], [ [ 355, 363 ], [ 989, 997 ] ], [ [ 412, 419 ], [ 1531, 1538 ], [ 1806, 1813 ], [ 2092, 2099 ] ], [ [ 506, 507 ], [ 816, 817 ], [ 1289, 1290 ], [ 1803, 1804 ], [ 2085, 2086 ] ], [ [ 555, 556 ], [ 819, 820 ], [ 1292, 1293 ] ], [ [ 634, 637 ], [ 936, 939 ], [ 1472, 1475 ] ], [ [ 639, 646 ], [ 1477, 1484 ] ], [ [ 772, 774 ], [ 885, 887 ] ], [ [ 836, 837 ], [ 1030, 1031 ] ], [ [ 889, 894 ], [ 1023, 1028 ] ], [ [ 941, 947 ], [ 1042, 1048 ] ], [ [ 999, 1000 ], [ 1068, 1069 ] ], [ [ 1053, 1054 ], [ 1648, 1649 ] ], [ [ 1247, 1249 ], [ 1352, 1354 ] ], [ [ 1309, 1311 ], [ 1423, 1425 ] ], [ [ 1313, 1314 ] ], [ [ 1374, 1375 ], [ 1575, 1576 ] ], [ [ 1427, 1432 ], [ 1587, 1592 ] ], [ [ 1486, 1491 ], [ 1568, 1573 ] ], [ [ 1540, 1545 ], [ 1626, 1631 ] ], [ [ 1597, 1602 ], [ 1695, 1700 ], [ 1824, 1829 ] ], [ [ 1656, 1663 ], [ 2110, 2117 ] ] ]
# file eulexistdb/manager.py # # Copyright 2010,2011 Emory University Libraries # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from django.conf import settings from eulexistdb.db import ExistDB from eulexistdb.query import QuerySet class Manager(object): """ Connect an :class:`~eulexistdb.models.XmlModel` to an :class:`~eulexistdb.db.ExistDB` for easy querying. Typically each :class:`~eulexistdb.models.XmlModel` will have one or more ``Manager`` members. Like Django ``Manager`` objects these offer a convenient way to access model-based queries. Like Django ``Manager`` objects, developers can `derive a child class`_ and override :meth:`get_query_set` to modify the default ``QuerySet``. Unlike Django, this implementation does not currently provide a default ``Manager`` for every ``XmlModel``. Developers should consult :class:`eulexistdb.query.QuerySet` for a complete list of its methods. ``Manager`` directly exposes these methods, forwarding them to the ``QuerySet`` returned by its own :meth:`get_query_set`. .. _derive a child class: http://docs.djangoproject.com/en/1.1/topics/db/managers/#modifying-initial-manager-querysets """ def __init__(self, xpath): self.xpath = xpath # NOTE: model needs to be patched in to a real XmlModel class after # the fact. currently this is handled by XmlModelType metaclass # logic. self.model = None def get_query_set(self): """ Get the default :class:`eulexistdb.db.QuerySet` returned by this ``Manager``. Typically this returns a ``QuerySet`` based on the ``Manager``'s `xpath`, evaluated in the ``settings.EXISTDB_ROOT_COLLECTION`` on a default :class:`eulexistdb.db.ExistDB`. This is a convenient point for developers to customize an object's managers. Deriving a child class from Manager and overriding or extending this method is a handy way to create custom queries accessible from an :class:`~eulexistdb.models.XmlModel`. """ if hasattr(settings, 'EXISTDB_FULLTEXT_OPTIONS'): fulltext_opts = settings.EXISTDB_FULLTEXT_OPTIONS else: fulltext_opts = {} return QuerySet(model=self.model, xpath=self.xpath, using=ExistDB(), collection=settings.EXISTDB_ROOT_COLLECTION, fulltext_options=fulltext_opts) ####################### # PROXIES TO QUERYSET # ####################### def count(self): return self.get_query_set().count() def filter(self, *args, **kwargs): return self.get_query_set().filter(*args, **kwargs) def or_filter(self, *args, **kwargs): return self.get_query_set().or_filter(*args, **kwargs) def order_by(self, *args, **kwargs): return self.get_query_set().order_by(*args, **kwargs) def only(self, *args, **kwargs): return self.get_query_set().only(*args, **kwargs) def also(self, *args, **kwargs): return self.get_query_set().also(*args, **kwargs) def distinct(self): return self.get_query_set().distinct() def all(self): return self.get_query_set().all() def get(self, *args, **kwargs): return self.get_query_set().get(*args, **kwargs)
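As the docstring above suggests, custom queries are added by deriving from Manager and overriding get_query_set. A sketch of that pattern; the VolumeXml model, its xpath, and the filter argument are hypothetical and only illustrate the override:

from eulexistdb import models
from eulexistdb.manager import Manager


class RecentManager(Manager):
    # narrow the default QuerySet returned by the base class
    def get_query_set(self):
        return super(RecentManager, self).get_query_set().filter(year='2011')


class VolumeXml(models.XmlModel):
    # hypothetical model; field declarations omitted
    objects = Manager('//volume')
    recent = RecentManager('//volume')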
[ [ [ 671, 679 ], [ 2633, 2641 ], [ 2700, 2708 ], [ 2893, 2901 ] ], [ [ 706, 713 ], [ 2847, 2854 ] ], [ [ 743, 751 ], [ 2796, 2804 ] ], [ [ 759, 766 ] ] ]
class Soma: def __init__(self): self.numeroDeCartas = list() def set_numeroDeCartas(self, numero): if numero == '': numero = '1' numero = numero[:] self.numeroDeCartas.extend(numero) def get_numeroDeCartas(self): return self.numeroDeCartas def ConverterPInt(self, converter): convertidos = [] for c in converter: convertidos.append(int(c)) return convertidos def Somar(self): return sum(self.ConverterPInt(self.get_numeroDeCartas()))
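A short usage sketch (values arbitrary) to clarify the intent: set_numeroDeCartas takes a string of card digits, the individual characters are stored, and Somar converts them back to integers and totals them:

soma = Soma()
soma.set_numeroDeCartas('3')      # stores ['3']
soma.set_numeroDeCartas('72')     # extends with ['7', '2']
print(soma.get_numeroDeCartas())  # ['3', '7', '2']
print(soma.Somar())               # 3 + 7 + 2 = 12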
[ [ [ 6, 10 ] ] ]
import torch, math from torch.optim.optimizer import Optimizer # RAdam + LARS class Ralamb(Optimizer): def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8, weight_decay=0): defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay) self.buffer = [[None, None, None] for ind in range(10)] super(Ralamb, self).__init__(params, defaults) def __setstate__(self, state): super(Ralamb, self).__setstate__(state) def step(self, closure=None): loss = None if closure is not None: loss = closure() for group in self.param_groups: for p in group['params']: if p.grad is None: continue grad = p.grad.data.float() if grad.is_sparse: raise RuntimeError('Ralamb does not support sparse gradients') p_data_fp32 = p.data.float() state = self.state[p] if len(state) == 0: state['step'] = 0 state['exp_avg'] = torch.zeros_like(p_data_fp32) state['exp_avg_sq'] = torch.zeros_like(p_data_fp32) else: state['exp_avg'] = state['exp_avg'].type_as(p_data_fp32) state['exp_avg_sq'] = state['exp_avg_sq'].type_as(p_data_fp32) exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq'] beta1, beta2 = group['betas'] # Decay the first and second moment running average coefficient # m_t exp_avg.mul_(beta1).add_(1 - beta1, grad) # v_t exp_avg_sq.mul_(beta2).addcmul_(1 - beta2, grad, grad) state['step'] += 1 buffered = self.buffer[int(state['step'] % 10)] if state['step'] == buffered[0]: N_sma, radam_step_size = buffered[1], buffered[2] else: buffered[0] = state['step'] beta2_t = beta2 ** state['step'] N_sma_max = 2 / (1 - beta2) - 1 N_sma = N_sma_max - 2 * state['step'] * beta2_t / (1 - beta2_t) buffered[1] = N_sma # more conservative since it's an approximated value if N_sma >= 5: radam_step_size = math.sqrt((1 - beta2_t) * (N_sma - 4) / (N_sma_max - 4) * (N_sma - 2) / N_sma * N_sma_max / (N_sma_max - 2)) / (1 - beta1 ** state['step']) else: radam_step_size = 1.0 / (1 - beta1 ** state['step']) buffered[2] = radam_step_size if group['weight_decay'] != 0: p_data_fp32.add_(-group['weight_decay'] * group['lr'], p_data_fp32) # more conservative since it's an approximated value radam_step = p_data_fp32.clone() if N_sma >= 5: denom = exp_avg_sq.sqrt().add_(group['eps']) radam_step.addcdiv_(-radam_step_size * group['lr'], exp_avg, denom) else: radam_step.add_(-radam_step_size * group['lr'], exp_avg) radam_norm = radam_step.pow(2).sum().sqrt() weight_norm = p.data.pow(2).sum().sqrt().clamp(0, 10) if weight_norm == 0 or radam_norm == 0: trust_ratio = 1 else: trust_ratio = weight_norm / radam_norm state['weight_norm'] = weight_norm state['adam_norm'] = radam_norm state['trust_ratio'] = trust_ratio if N_sma >= 5: p_data_fp32.addcdiv_(-radam_step_size * group['lr'] * trust_ratio, exp_avg, denom) else: p_data_fp32.add_(-radam_step_size * group['lr'] * trust_ratio, exp_avg) p.data.copy_(p_data_fp32) return loss
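Since Ralamb subclasses torch.optim.Optimizer, it drops into an ordinary training loop. A minimal sketch; the model, data, and hyperparameter values are placeholders rather than anything prescribed by the implementation above:

import torch
import torch.nn as nn

model = nn.Linear(10, 1)
criterion = nn.MSELoss()
optimizer = Ralamb(model.parameters(), lr=1e-3, betas=(0.9, 0.999), weight_decay=1e-2)

x = torch.randn(32, 10)
y = torch.randn(32, 1)

for step in range(100):
    optimizer.zero_grad()            # clear gradients from the previous step
    loss = criterion(model(x), y)    # forward pass
    loss.backward()                  # populate p.grad for every parameter
    optimizer.step()                 # RAdam-style update scaled by the LARS trust ratio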
[ [ [ 7, 12 ], [ 1096, 1101 ], [ 1168, 1173 ] ], [ [ 14, 18 ], [ 2427, 2431 ] ], [ [ 53, 62 ], [ 92, 101 ] ], [ [ 85, 91 ], [ 350, 356 ], [ 441, 447 ] ] ]
# RUN: %PYTHON %s | FileCheck %s import gc import io import itertools from mlir.ir import * def run(f): print("\nTEST:", f.__name__) f() gc.collect() assert Context._get_live_count() == 0 # Verify iterator based traversal of the op/region/block hierarchy. # CHECK-LABEL: TEST: testTraverseOpRegionBlockIterators def testTraverseOpRegionBlockIterators(): ctx = Context() ctx.allow_unregistered_dialects = True module = Module.parse(r""" func @f1(%arg0: i32) -> i32 { %1 = "custom.addi"(%arg0, %arg0) : (i32, i32) -> i32 return %1 : i32 } """, ctx) op = module.operation assert op.context is ctx # Get the block using iterators off of the named collections. regions = list(op.regions) blocks = list(regions[0].blocks) # CHECK: MODULE REGIONS=1 BLOCKS=1 print(f"MODULE REGIONS={len(regions)} BLOCKS={len(blocks)}") # Should verify. # CHECK: .verify = True print(f".verify = {module.operation.verify()}") # Get the regions and blocks from the default collections. default_regions = list(op) default_blocks = list(default_regions[0]) # They should compare equal regardless of how obtained. assert default_regions == regions assert default_blocks == blocks # Should be able to get the operations from either the named collection # or the block. operations = list(blocks[0].operations) default_operations = list(blocks[0]) assert default_operations == operations def walk_operations(indent, op): for i, region in enumerate(op): print(f"{indent}REGION {i}:") for j, block in enumerate(region): print(f"{indent} BLOCK {j}:") for k, child_op in enumerate(block): print(f"{indent} OP {k}: {child_op}") walk_operations(indent + " ", child_op) # CHECK: REGION 0: # CHECK: BLOCK 0: # CHECK: OP 0: func # CHECK: REGION 0: # CHECK: BLOCK 0: # CHECK: OP 0: %0 = "custom.addi" # CHECK: OP 1: return walk_operations("", op) run(testTraverseOpRegionBlockIterators) # Verify index based traversal of the op/region/block hierarchy. 
# CHECK-LABEL: TEST: testTraverseOpRegionBlockIndices def testTraverseOpRegionBlockIndices(): ctx = Context() ctx.allow_unregistered_dialects = True module = Module.parse(r""" func @f1(%arg0: i32) -> i32 { %1 = "custom.addi"(%arg0, %arg0) : (i32, i32) -> i32 return %1 : i32 } """, ctx) def walk_operations(indent, op): for i in range(len(op.regions)): region = op.regions[i] print(f"{indent}REGION {i}:") for j in range(len(region.blocks)): block = region.blocks[j] print(f"{indent} BLOCK {j}:") for k in range(len(block.operations)): child_op = block.operations[k] print(f"{indent} OP {k}: {child_op}") walk_operations(indent + " ", child_op) # CHECK: REGION 0: # CHECK: BLOCK 0: # CHECK: OP 0: func # CHECK: REGION 0: # CHECK: BLOCK 0: # CHECK: OP 0: %0 = "custom.addi" # CHECK: OP 1: return walk_operations("", module.operation) run(testTraverseOpRegionBlockIndices) # CHECK-LABEL: TEST: testBlockArgumentList def testBlockArgumentList(): with Context() as ctx: module = Module.parse(r""" func @f1(%arg0: i32, %arg1: f64, %arg2: index) { return } """, ctx) func = module.body.operations[0] entry_block = func.regions[0].blocks[0] assert len(entry_block.arguments) == 3 # CHECK: Argument 0, type i32 # CHECK: Argument 1, type f64 # CHECK: Argument 2, type index for arg in entry_block.arguments: print(f"Argument {arg.arg_number}, type {arg.type}") new_type = IntegerType.get_signless(8 * (arg.arg_number + 1)) arg.set_type(new_type) # CHECK: Argument 0, type i8 # CHECK: Argument 1, type i16 # CHECK: Argument 2, type i24 for arg in entry_block.arguments: print(f"Argument {arg.arg_number}, type {arg.type}") run(testBlockArgumentList) # CHECK-LABEL: TEST: testOperationOperands def testOperationOperands(): with Context() as ctx: ctx.allow_unregistered_dialects = True module = Module.parse(r""" func @f1(%arg0: i32) { %0 = "test.producer"() : () -> i64 "test.consumer"(%arg0, %0) : (i32, i64) -> () return }""") func = module.body.operations[0] entry_block = func.regions[0].blocks[0] consumer = entry_block.operations[1] assert len(consumer.operands) == 2 # CHECK: Operand 0, type i32 # CHECK: Operand 1, type i64 for i, operand in enumerate(consumer.operands): print(f"Operand {i}, type {operand.type}") run(testOperationOperands) # CHECK-LABEL: TEST: testOperationOperandsSlice def testOperationOperandsSlice(): with Context() as ctx: ctx.allow_unregistered_dialects = True module = Module.parse(r""" func @f1() { %0 = "test.producer0"() : () -> i64 %1 = "test.producer1"() : () -> i64 %2 = "test.producer2"() : () -> i64 %3 = "test.producer3"() : () -> i64 %4 = "test.producer4"() : () -> i64 "test.consumer"(%0, %1, %2, %3, %4) : (i64, i64, i64, i64, i64) -> () return }""") func = module.body.operations[0] entry_block = func.regions[0].blocks[0] consumer = entry_block.operations[5] assert len(consumer.operands) == 5 for left, right in zip(consumer.operands, consumer.operands[::-1][::-1]): assert left == right # CHECK: test.producer0 # CHECK: test.producer1 # CHECK: test.producer2 # CHECK: test.producer3 # CHECK: test.producer4 full_slice = consumer.operands[:] for operand in full_slice: print(operand) # CHECK: test.producer0 # CHECK: test.producer1 first_two = consumer.operands[0:2] for operand in first_two: print(operand) # CHECK: test.producer3 # CHECK: test.producer4 last_two = consumer.operands[3:] for operand in last_two: print(operand) # CHECK: test.producer0 # CHECK: test.producer2 # CHECK: test.producer4 even = consumer.operands[::2] for operand in even: print(operand) # CHECK: test.producer2 fourth = 
consumer.operands[::2][1::2] for operand in fourth: print(operand) run(testOperationOperandsSlice) # CHECK-LABEL: TEST: testDetachedOperation def testDetachedOperation(): ctx = Context() ctx.allow_unregistered_dialects = True with Location.unknown(ctx): i32 = IntegerType.get_signed(32) op1 = Operation.create( "custom.op1", results=[i32, i32], regions=1, attributes={ "foo": StringAttr.get("foo_value"), "bar": StringAttr.get("bar_value"), }) # CHECK: %0:2 = "custom.op1"() ( { # CHECK: }) {bar = "bar_value", foo = "foo_value"} : () -> (si32, si32) print(op1) # TODO: Check successors once enough infra exists to do it properly. run(testDetachedOperation) # CHECK-LABEL: TEST: testOperationInsertionPoint def testOperationInsertionPoint(): ctx = Context() ctx.allow_unregistered_dialects = True module = Module.parse(r""" func @f1(%arg0: i32) -> i32 { %1 = "custom.addi"(%arg0, %arg0) : (i32, i32) -> i32 return %1 : i32 } """, ctx) # Create test op. with Location.unknown(ctx): op1 = Operation.create("custom.op1") op2 = Operation.create("custom.op2") func = module.body.operations[0] entry_block = func.regions[0].blocks[0] ip = InsertionPoint.at_block_begin(entry_block) ip.insert(op1) ip.insert(op2) # CHECK: func @f1 # CHECK: "custom.op1"() # CHECK: "custom.op2"() # CHECK: %0 = "custom.addi" print(module) # Trying to add a previously added op should raise. try: ip.insert(op1) except ValueError: pass else: assert False, "expected insert of attached op to raise" run(testOperationInsertionPoint) # CHECK-LABEL: TEST: testOperationWithRegion def testOperationWithRegion(): ctx = Context() ctx.allow_unregistered_dialects = True with Location.unknown(ctx): i32 = IntegerType.get_signed(32) op1 = Operation.create("custom.op1", regions=1) block = op1.regions[0].blocks.append(i32, i32) # CHECK: "custom.op1"() ( { # CHECK: ^bb0(%arg0: si32, %arg1: si32): // no predecessors # CHECK: "custom.terminator"() : () -> () # CHECK: }) : () -> () terminator = Operation.create("custom.terminator") ip = InsertionPoint(block) ip.insert(terminator) print(op1) # Now add the whole operation to another op. # TODO: Verify lifetime hazard by nulling out the new owning module and # accessing op1. # TODO: Also verify accessing the terminator once both parents are nulled # out. 
module = Module.parse(r""" func @f1(%arg0: i32) -> i32 { %1 = "custom.addi"(%arg0, %arg0) : (i32, i32) -> i32 return %1 : i32 } """) func = module.body.operations[0] entry_block = func.regions[0].blocks[0] ip = InsertionPoint.at_block_begin(entry_block) ip.insert(op1) # CHECK: func @f1 # CHECK: "custom.op1"() # CHECK: "custom.terminator" # CHECK: %0 = "custom.addi" print(module) run(testOperationWithRegion) # CHECK-LABEL: TEST: testOperationResultList def testOperationResultList(): ctx = Context() module = Module.parse(r""" func @f1() { %0:3 = call @f2() : () -> (i32, f64, index) return } func private @f2() -> (i32, f64, index) """, ctx) caller = module.body.operations[0] call = caller.regions[0].blocks[0].operations[0] assert len(call.results) == 3 # CHECK: Result 0, type i32 # CHECK: Result 1, type f64 # CHECK: Result 2, type index for res in call.results: print(f"Result {res.result_number}, type {res.type}") run(testOperationResultList) # CHECK-LABEL: TEST: testOperationResultListSlice def testOperationResultListSlice(): with Context() as ctx: ctx.allow_unregistered_dialects = True module = Module.parse(r""" func @f1() { "some.op"() : () -> (i1, i2, i3, i4, i5) return } """) func = module.body.operations[0] entry_block = func.regions[0].blocks[0] producer = entry_block.operations[0] assert len(producer.results) == 5 for left, right in zip(producer.results, producer.results[::-1][::-1]): assert left == right assert left.result_number == right.result_number # CHECK: Result 0, type i1 # CHECK: Result 1, type i2 # CHECK: Result 2, type i3 # CHECK: Result 3, type i4 # CHECK: Result 4, type i5 full_slice = producer.results[:] for res in full_slice: print(f"Result {res.result_number}, type {res.type}") # CHECK: Result 1, type i2 # CHECK: Result 2, type i3 # CHECK: Result 3, type i4 middle = producer.results[1:4] for res in middle: print(f"Result {res.result_number}, type {res.type}") # CHECK: Result 1, type i2 # CHECK: Result 3, type i4 odd = producer.results[1::2] for res in odd: print(f"Result {res.result_number}, type {res.type}") # CHECK: Result 3, type i4 # CHECK: Result 1, type i2 inverted_middle = producer.results[-2:0:-2] for res in inverted_middle: print(f"Result {res.result_number}, type {res.type}") run(testOperationResultListSlice) # CHECK-LABEL: TEST: testOperationAttributes def testOperationAttributes(): ctx = Context() ctx.allow_unregistered_dialects = True module = Module.parse(r""" "some.op"() { some.attribute = 1 : i8, other.attribute = 3.0, dependent = "text" } : () -> () """, ctx) op = module.body.operations[0] assert len(op.attributes) == 3 iattr = IntegerAttr(op.attributes["some.attribute"]) fattr = FloatAttr(op.attributes["other.attribute"]) sattr = StringAttr(op.attributes["dependent"]) # CHECK: Attribute type i8, value 1 print(f"Attribute type {iattr.type}, value {iattr.value}") # CHECK: Attribute type f64, value 3.0 print(f"Attribute type {fattr.type}, value {fattr.value}") # CHECK: Attribute value text print(f"Attribute value {sattr.value}") # We don't know in which order the attributes are stored. # CHECK-DAG: NamedAttribute(dependent="text") # CHECK-DAG: NamedAttribute(other.attribute=3.000000e+00 : f64) # CHECK-DAG: NamedAttribute(some.attribute=1 : i8) for attr in op.attributes: print(str(attr)) # Check that exceptions are raised as expected. 
try: op.attributes["does_not_exist"] except KeyError: pass else: assert False, "expected KeyError on accessing a non-existent attribute" try: op.attributes[42] except IndexError: pass else: assert False, "expected IndexError on accessing an out-of-bounds attribute" run(testOperationAttributes) # CHECK-LABEL: TEST: testOperationPrint def testOperationPrint(): ctx = Context() module = Module.parse(r""" func @f1(%arg0: i32) -> i32 { %0 = constant dense<[1, 2, 3, 4]> : tensor<4xi32> return %arg0 : i32 } """, ctx) # Test print to stdout. # CHECK: return %arg0 : i32 module.operation.print() # Test print to text file. f = io.StringIO() # CHECK: <class 'str'> # CHECK: return %arg0 : i32 module.operation.print(file=f) str_value = f.getvalue() print(str_value.__class__) print(f.getvalue()) # Test print to binary file. f = io.BytesIO() # CHECK: <class 'bytes'> # CHECK: return %arg0 : i32 module.operation.print(file=f, binary=True) bytes_value = f.getvalue() print(bytes_value.__class__) print(bytes_value) # Test get_asm with options. # CHECK: value = opaque<"_", "0xDEADBEEF"> : tensor<4xi32> # CHECK: "std.return"(%arg0) : (i32) -> () -:4:7 module.operation.print(large_elements_limit=2, enable_debug_info=True, pretty_debug_info=True, print_generic_op_form=True, use_local_scope=True) run(testOperationPrint) # CHECK-LABEL: TEST: testKnownOpView def testKnownOpView(): with Context(), Location.unknown(): Context.current.allow_unregistered_dialects = True module = Module.parse(r""" %1 = "custom.f32"() : () -> f32 %2 = "custom.f32"() : () -> f32 %3 = addf %1, %2 : f32 """) print(module) # addf should map to a known OpView class in the std dialect. # We know the OpView for it defines an 'lhs' attribute. addf = module.body.operations[2] # CHECK: <mlir.dialects._std_ops_gen._AddFOp object print(repr(addf)) # CHECK: "custom.f32"() print(addf.lhs) # One of the custom ops should resolve to the default OpView. custom = module.body.operations[0] # CHECK: <_mlir.ir.OpView object print(repr(custom)) # Check again to make sure negative caching works. custom = module.body.operations[0] # CHECK: <_mlir.ir.OpView object print(repr(custom)) run(testKnownOpView) # CHECK-LABEL: TEST: testSingleResultProperty def testSingleResultProperty(): with Context(), Location.unknown(): Context.current.allow_unregistered_dialects = True module = Module.parse(r""" "custom.no_result"() : () -> () %0:2 = "custom.two_result"() : () -> (f32, f32) %1 = "custom.one_result"() : () -> f32 """) print(module) try: module.body.operations[0].result except ValueError as e: # CHECK: Cannot call .result on operation custom.no_result which has 0 results print(e) else: assert False, "Expected exception" try: module.body.operations[1].result except ValueError as e: # CHECK: Cannot call .result on operation custom.two_result which has 2 results print(e) else: assert False, "Expected exception" # CHECK: %1 = "custom.one_result"() : () -> f32 print(module.body.operations[2]) run(testSingleResultProperty) # CHECK-LABEL: TEST: testPrintInvalidOperation def testPrintInvalidOperation(): ctx = Context() with Location.unknown(ctx): module = Operation.create("module", regions=2) # This module has two region and is invalid verify that we fallback # to the generic printer for safety. 
block = module.regions[0].blocks.append() # CHECK: // Verification failed, printing generic form # CHECK: "module"() ( { # CHECK: }) : () -> () print(module) # CHECK: .verify = False print(f".verify = {module.operation.verify()}") run(testPrintInvalidOperation) # CHECK-LABEL: TEST: testCreateWithInvalidAttributes def testCreateWithInvalidAttributes(): ctx = Context() with Location.unknown(ctx): try: Operation.create("module", attributes={None:StringAttr.get("name")}) except Exception as e: # CHECK: Invalid attribute key (not a string) when attempting to create the operation "module" print(e) try: Operation.create("module", attributes={42:StringAttr.get("name")}) except Exception as e: # CHECK: Invalid attribute key (not a string) when attempting to create the operation "module" print(e) try: Operation.create("module", attributes={"some_key":ctx}) except Exception as e: # CHECK: Invalid attribute value for the key "some_key" when attempting to create the operation "module" print(e) try: Operation.create("module", attributes={"some_key":None}) except Exception as e: # CHECK: Found an invalid (`None`?) attribute value for the key "some_key" when attempting to create the operation "module" print(e) run(testCreateWithInvalidAttributes) # CHECK-LABEL: TEST: testOperationName def testOperationName(): ctx = Context() ctx.allow_unregistered_dialects = True module = Module.parse(r""" %0 = "custom.op1"() : () -> f32 %1 = "custom.op2"() : () -> i32 %2 = "custom.op1"() : () -> f32 """, ctx) # CHECK: custom.op1 # CHECK: custom.op2 # CHECK: custom.op1 for op in module.body.operations: print(op.operation.name) run(testOperationName) # CHECK-LABEL: TEST: testCapsuleConversions def testCapsuleConversions(): ctx = Context() ctx.allow_unregistered_dialects = True with Location.unknown(ctx): m = Operation.create("custom.op1").operation m_capsule = m._CAPIPtr assert '"mlir.ir.Operation._CAPIPtr"' in repr(m_capsule) m2 = Operation._CAPICreate(m_capsule) assert m2 is m run(testCapsuleConversions)
[ [ [ 41, 43 ], [ 145, 147 ] ], [ [ 51, 53 ], [ 13214, 13216 ], [ 13432, 13434 ] ], [ [ 61, 70 ] ], [ [ 91, 92 ], [ 167, 174 ], [ 374, 381 ], [ 436, 442 ], [ 2221, 2228 ], [ 2283, 2289 ], [ 3245, 3252 ], [ 3276, 3282 ], [ 3728, 3739 ], [ 4117, 4124 ], [ 4191, 4197 ], [ 4810, 4817 ], [ 4884, 4890 ], [ 6434, 6441 ], [ 6492, 6500 ], [ 6525, 6536 ], [ 6562, 6571 ], [ 6665, 6675 ], [ 6713, 6723 ], [ 7077, 7084 ], [ 7139, 7145 ], [ 7318, 7326 ], [ 7351, 7360 ], [ 7392, 7401 ], [ 7514, 7528 ], [ 8022, 8029 ], [ 8080, 8088 ], [ 8113, 8124 ], [ 8150, 8159 ], [ 8432, 8441 ], [ 8479, 8493 ], [ 8791, 8797 ], [ 9037, 9051 ], [ 9350, 9357 ], [ 9371, 9377 ], [ 9954, 9961 ], [ 10028, 10034 ], [ 11458, 11465 ], [ 11520, 11526 ], [ 11760, 11771 ], [ 11815, 11824 ], [ 11869, 11879 ], [ 12922, 12929 ], [ 12943, 12949 ], [ 14020, 14027 ], [ 14031, 14039 ], [ 14055, 14062 ], [ 14119, 14125 ], [ 14991, 14998 ], [ 15002, 15010 ], [ 15026, 15033 ], [ 15090, 15096 ], [ 15907, 15914 ], [ 15924, 15932 ], [ 15960, 15969 ], [ 16503, 16510 ], [ 16520, 16528 ], [ 16558, 16567 ], [ 16602, 16612 ], [ 16785, 16794 ], [ 16827, 16837 ], [ 17010, 17019 ], [ 17234, 17243 ], [ 17574, 17581 ], [ 17636, 17642 ], [ 18013, 18020 ], [ 18071, 18079 ], [ 18102, 18111 ], [ 18240, 18249 ] ], [ [ 98, 101 ], [ 2012, 2015 ], [ 3126, 3129 ], [ 4009, 4012 ], [ 4692, 4695 ], [ 6320, 6323 ], [ 6956, 6959 ], [ 7903, 7906 ], [ 9235, 9238 ], [ 9830, 9833 ], [ 11338, 11341 ], [ 12817, 12820 ], [ 13927, 13930 ], [ 14883, 14886 ], [ 15788, 15791 ], [ 16370, 16373 ], [ 17463, 17466 ], [ 17907, 17910 ], [ 18293, 18296 ] ], [ [ 328, 362 ], [ 2016, 2050 ] ], [ [ 2177, 2209 ], [ 3130, 3162 ] ], [ [ 3213, 3234 ], [ 4013, 4034 ] ], [ [ 4085, 4106 ], [ 4696, 4717 ] ], [ [ 4773, 4799 ], [ 6324, 6350 ] ], [ [ 6401, 6422 ], [ 6960, 6981 ] ], [ [ 7038, 7065 ], [ 7907, 7934 ] ], [ [ 7987, 8010 ], [ 9239, 9262 ] ], [ [ 9315, 9338 ], [ 9834, 9857 ] ], [ [ 9915, 9943 ], [ 11342, 11370 ] ], [ [ 11423, 11446 ], [ 12821, 12844 ] ], [ [ 12892, 12910 ], [ 13931, 13949 ] ], [ [ 13994, 14009 ], [ 14887, 14902 ] ], [ [ 14956, 14980 ], [ 15792, 15816 ] ], [ [ 15870, 15895 ], [ 16374, 16399 ] ], [ [ 16460, 16491 ], [ 17467, 17498 ] ], [ [ 17545, 17562 ], [ 17911, 17928 ] ], [ [ 17979, 18001 ], [ 18297, 18319 ] ] ]
#!/usr/bin/env python3 # -*- coding: utf-8 -*- # # pypepa documentation build configuration file, created by # sphinx-quickstart on Thu Jul 18 15:33:13 2013. # # This file is execfile()d with the current directory set to its containing dir. # # Note that not all possible configuration values are present in this # autogenerated file. # # All configuration values have a default; values that are commented out # serve to show the default. import sys, os # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. # sys.path.insert(0, os.path.abspath('../pypepa')) # -- General configuration ----------------------------------------------------- # If your documentation needs a minimal Sphinx version, state it here. #needs_sphinx = '1.0' # Add any Sphinx extension module names here, as strings. They can be extensions # coming with Sphinx (named 'sphinx.ext.*') or your custom ones. extensions = ['sphinx.ext.mathjax', 'sphinx.ext.autodoc'] # Add any paths that contain templates here, relative to this directory. templates_path = ['_templates'] # The suffix of source filenames. source_suffix = '.rst' # The encoding of source files. #source_encoding = 'utf-8-sig' # The master toctree document. master_doc = 'index' # General information about the project. project = 'pypepa' copyright = '2013, Dariusz Dwornikowski' # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the # built documents. # # The short X.Y version. version = '0.4' # The full version, including alpha/beta/rc tags. release = 'latest' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. #language = None # There are two options for replacing |today|: either, you set today to some # non-false value, then it is used: #today = '' # Else, today_fmt is used as the format for a strftime call. #today_fmt = '%B %d, %Y' # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. exclude_patterns = ['_build'] # The reST default role (used for this markup: `text`) to use for all documents. #default_role = None # If true, '()' will be appended to :func: etc. cross-reference text. #add_function_parentheses = True # If true, the current module name will be prepended to all description # unit titles (such as .. function::). #add_module_names = True # If true, sectionauthor and moduleauthor directives will be shown in the # output. They are ignored by default. #show_authors = False # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'sphinx' # A list of ignored prefixes for module index sorting. #modindex_common_prefix = [] # If true, keep warnings as "system message" paragraphs in the built documents. #keep_warnings = False # -- Options for HTML output --------------------------------------------------- # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. html_theme = 'default' # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. #html_theme_options = {} # Add any paths that contain custom themes here, relative to this directory. 
#html_theme_path = [] # The name for this set of Sphinx documents. If None, it defaults to # "<project> v<release> documentation". #html_title = None # A shorter title for the navigation bar. Default is the same as html_title. #html_short_title = None # The name of an image file (relative to this directory) to place at the top # of the sidebar. #html_logo = None # The name of an image file (within the static path) to use as favicon of the # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 # pixels large. #html_favicon = None # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". html_static_path = ['_static'] # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, # using the given strftime format. #html_last_updated_fmt = '%b %d, %Y' # If true, SmartyPants will be used to convert quotes and dashes to # typographically correct entities. #html_use_smartypants = True # Custom sidebar templates, maps document names to template names. #html_sidebars = {} # Additional templates that should be rendered to pages, maps page names to # template names. #html_additional_pages = {} # If false, no module index is generated. #html_domain_indices = True # If false, no index is generated. #html_use_index = True # If true, the index is split into individual pages for each letter. #html_split_index = False # If true, links to the reST sources are added to the pages. #html_show_sourcelink = True # If true, "Created using Sphinx" is shown in the HTML footer. Default is True. #html_show_sphinx = True # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. #html_show_copyright = True # If true, an OpenSearch description file will be output, and all pages will # contain a <link> tag referring to it. The value of this option must be the # base URL from which the finished HTML is served. #html_use_opensearch = '' # This is the file name suffix for HTML files (e.g. ".xhtml"). #html_file_suffix = None # Output file base name for HTML help builder. htmlhelp_basename = 'pypepadoc' # -- Options for LaTeX output -------------------------------------------------- latex_elements = { # The paper size ('letterpaper' or 'a4paper'). #'papersize': 'letterpaper', # The font size ('10pt', '11pt' or '12pt'). #'pointsize': '10pt', # Additional stuff for the LaTeX preamble. #'preamble': '', } # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, author, documentclass [howto/manual]). latex_documents = [ ('index', 'pypepa.tex', 'pypepa Documentation', 'Dariusz Dwornikowski', 'manual'), ] # The name of an image file (relative to this directory) to place at the top of # the title page. #latex_logo = None # For "manual" documents, if this is true, then toplevel headings are parts, # not chapters. #latex_use_parts = False # If true, show page references after internal links. #latex_show_pagerefs = False # If true, show URL addresses after external links. #latex_show_urls = False # Documents to append as an appendix to all manuals. #latex_appendices = [] # If false, no module index is generated. #latex_domain_indices = True # -- Options for manual page output -------------------------------------------- # One entry per manual page. List of tuples # (source start file, name, description, authors, manual section). 
man_pages = [ ('index', 'pypepa', 'pypepa Documentation', ['Dariusz Dwornikowski'], 1) ] # If true, show URL addresses after external links. #man_show_urls = False # -- Options for Texinfo output ------------------------------------------------ # Grouping the document tree into Texinfo files. List of tuples # (source start file, target name, title, author, # dir menu entry, description, category) texinfo_documents = [ ('index', 'pypepa', 'pypepa Documentation', 'Dariusz Dwornikowski', 'pypepa', 'One line description of project.', 'Miscellaneous'), ] # Documents to append as an appendix to all manuals. #texinfo_appendices = [] # If false, no module index is generated. #texinfo_domain_indices = True # How to display URL addresses: 'footnote', 'no', or 'inline'. #texinfo_show_urls = 'footnote' # If true, do not generate a @detailmenu in the "Top" node's menu. #texinfo_no_detailmenu = False
[ [ [ 447, 450 ] ], [ [ 452, 454 ] ], [ [ 1068, 1078 ] ], [ [ 1200, 1214 ] ], [ [ 1267, 1280 ] ], [ [ 1386, 1396 ] ], [ [ 1449, 1456 ] ], [ [ 1468, 1477 ] ], [ [ 1711, 1718 ] ], [ [ 1777, 1784 ] ], [ [ 2265, 2281 ] ], [ [ 2839, 2853 ] ], [ [ 3242, 3252 ] ], [ [ 4326, 4342 ] ], [ [ 5752, 5769 ] ], [ [ 5868, 5882 ] ], [ [ 6237, 6252 ] ], [ [ 7090, 7099 ] ], [ [ 7504, 7521 ] ] ]
from django.urls import path from . import views urlpatterns = [ path('StatsClass', views.index), path('BasicProbability', views.basic_prob), ]
[ [ [ 24, 28 ], [ 70, 74 ], [ 107, 111 ] ], [ [ 43, 48 ], [ 89, 94 ], [ 132, 137 ] ], [ [ 50, 61 ] ] ]
class DigitalSignatureScheme(object):

    def get_public_key(self):
        return self.public_key

    def sign(self, message):
        raise NotImplementedError

    def verify(self, message, signature):
        raise NotImplementedError
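A toy subclass sketch showing how the abstract interface is meant to be filled in. This is not a real signature scheme (hashing with a shared secret is at best a MAC); it only illustrates which methods a concrete implementation such as RSA or ECDSA would override:

import hashlib


class ToyHashSignatureScheme(DigitalSignatureScheme):
    def __init__(self, secret_key, public_key):
        self.secret_key = secret_key
        self.public_key = public_key

    def sign(self, message):
        # illustrative only: derive a deterministic tag from the secret and the message
        payload = (self.secret_key + message).encode()
        return hashlib.sha256(payload).hexdigest()

    def verify(self, message, signature):
        # a real scheme would verify using only the public key
        return self.sign(message) == signature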
[ [ [ 6, 28 ] ] ]
""" Created by Michele Bianco, 9 July 2021 """ import numpy as np, pkg_resources from tqdm import tqdm import tensorflow as tf from tensorflow.keras.models import load_model from tensorflow.keras import backend as K from tensorflow.python.ops import nn_ops from tensorflow.python.framework import ops from tensorflow.python.ops import array_ops from tensorflow.python.ops import math_ops def sigmoid_balanced_cross_entropy_with_logits(_sentinel=None, labels=None, logits=None, beta=None, name=None): nn_ops._ensure_xent_args("sigmoid_cross_entropy_with_logits", _sentinel,labels, logits) with ops.name_scope(name, "logistic_loss", [logits, labels]) as name: logits = ops.convert_to_tensor(logits, name="logits") labels = ops.convert_to_tensor(labels, name="labels") try: labels.get_shape().merge_with(logits.get_shape()) except ValueError: raise ValueError("logits and labels must have the same shape (%s vs %s)" %(logits.get_shape(), labels.get_shape())) zeros = array_ops.zeros_like(logits, dtype=logits.dtype) cond = (logits >= zeros) relu_logits = array_ops.where(cond, logits, zeros) neg_abs_logits = array_ops.where(cond, -logits, logits) balanced_cross_entropy = relu_logits*(1.-beta)-logits*labels*(1.-beta)+math_ops.log1p(math_ops.exp(neg_abs_logits))*((1.-beta)*(1.-labels)+beta*labels) return tf.reduce_mean(balanced_cross_entropy) def balanced_cross_entropy(y_true, y_pred): """ To decrease the number of false negatives, set beta>1. To decrease the number of false positives, set beta<1. """ beta = tf.maximum(tf.reduce_mean(1 - y_true), tf.keras.backend.epsilon()) y_pred = tf.clip_by_value(y_pred, tf.keras.backend.epsilon(), 1 - tf.keras.backend.epsilon()) y_pred = K.log(y_pred / (1 - y_pred)) return sigmoid_balanced_cross_entropy_with_logits(logits=y_pred, labels=y_true, beta=beta) def iou(y_true, y_pred): """ Return the Intersection over Union (IoU) for a given label. Args: y_true: the expected y values as a one-hot y_pred: the predicted y values as a one-hot or softmax output label: the label to return the IoU for Returns: the IoU for the given label """ intersection = K.sum(K.abs(y_true * y_pred)) #intersection = K.sum(y_true * y_pred) union = K.sum(y_true) + K.sum(y_pred) - intersection # avoid divide by zero - if the union is zero, return 1, otherwise, return the intersection over union return K.switch(K.equal(union, 0), 1.0, intersection / union) def dice_coef(y_true, y_pred, smooth=1): """ Dice = (2*|X & Y|)/ (|X|+ |Y|) = 2*sum(|A*B|)/(sum(A^2)+sum(B^2)) ref: https://arxiv.org/pdf/1606.04797v1.pdf """ intersection = K.sum(K.abs(y_true * y_pred), axis=-1) return (2. * intersection + smooth) / (K.sum(K.square(y_true),-1) + K.sum(K.square(y_pred),-1) + smooth) ################################################################ class segunet21cm: def __init__(self, tta=1, verbose=False): """ SegU-Net: segmentation of 21cm images with U-shape network (Bianco et al. 
2021, https://arxiv.org/abs/2102.06713) - tta (int): default 0 (super-fast, no pixel-error map) implement the error map with time-test aumentated techique in the prediction process - verbose (bool): default False, activate verbosity Description: tta = 0 : fast (~7 sec), it tends to be a few percent less accurate (<2%) then the other two cases, no pixel-error map (no TTA manipulation) tta = 1 : medium (~17 sec), accurate and preferable than tta=0, with pixel-error map (3 samples) tta = 2 : slow (~10 min), accurate, with pixel-error map (~100 samples) Returns: - X_seg (ndarray) : recovered binary field (1 = neutral and 0 = ionized regions) - X_err (ndarray) : pixel-error map of the recovered binary field Example: $ from tools21cm import segmentation $ seg = segmentation.segunet21cm(tta=1, verbose=True) # load model (need to be done once) $ Xseg, Xseg_err = seg.prediction(x=dT3) Print of the Network's Configuration file: [TRAINING] BATCH_SIZE = 64 AUGMENT = NOISESMT IMG_SHAPE = 128, 128 CHAN_SIZE = 256 DROPOUT = 0.05 KERNEL_SIZE = 3 EPOCHS = 100 LOSS = balanced_cross_entropy METRICS = iou, dice_coef, binary_accuracy, binary_crossentropy LR = 1e-3 RECOMP = False GPUS = 2 PATH = /home/michele/Documents/PhD_Sussex/output/ML/dataset/inputs/data2D_128_030920/ [RESUME] RESUME_PATH = /home/michele/Documents/PhD_Sussex/output/ML/dataset/outputs/new/02-10T23-52-36_128slice/ BEST_EPOCH = 56 RESUME_EPOCH = 66 """ self.TTA = tta self.VERBOSE = verbose if(self.TTA == 2): # slow self.MANIP = self.IndependentOperations(verbose=self.VERBOSE) elif(self.TTA == 1): # fast self.MANIP = {'opt0': [lambda a: a, 0, 0]} elif(self.TTA == 0): # super-fast self.MANIP = {'opt0': [lambda a: a, 0, 0]} self.NR_MANIP = len(self.MANIP) # load model MODEL_NAME = pkg_resources.resource_filename('t2c', 'input_data/segunet_02-10T23-52-36_128slice_ep56.h5') if (os.path.exists(MODEL_NAME)): pass else: if(self.VERBOSE): print(' Download network weights: %s' %MODEL_NAME) MODEL_EPOCH = 56 METRICS = {'balanced_cross_entropy':balanced_cross_entropy, 'iou':iou, 'dice_coef':dice_coef} self.MODEL_LOADED = load_model(MODEL_NAME, custom_objects=METRICS) if(self.VERBOSE): print(' Loaded model: %s' %MODEL_NAME) def UniqueRows(self, arr): """ Remove duplicate row array in 2D data - arr (narray): array with duplicate row Example: >> d = np.array([[0,1,2],[0,1,2],[0,0,0],[0,0,2],[0,1,2]]) >> UniqueRows(d) array([[0, 0, 0], [0, 0, 2], [0, 1, 2]]) """ arr = np.array(arr) if(arr.ndim == 2): arr = np.ascontiguousarray(arr) unique_arr = np.unique(arr.view([('', arr.dtype)]*arr.shape[1])) new_arr = unique_arr.view(arr.dtype).reshape((unique_arr.shape[0], arr.shape[1])) elif(arr.ndim == 1): new_arr = np.array(list(dict.fromkeys(arr))) return new_arr def IndependentOperations(self, verbose=False): ''' How many unique manipulations (horzontal and vertical flip, rotation, etc...) can we operate on a cube? 
Each indipendent operation is considered as an additional rappresentation of the same coeval data, so that it can be considered for errorbar with SegU-Net ''' data = np.array(range(3**3)).reshape((3,3,3)) func = [lambda a: a, np.fliplr, np.flipud, lambda a: np.flipud(np.fliplr(a)), lambda a: np.fliplr(np.flipud(a))] axis = [0,1,2] angl_rot = [0,1,2,3] tot_manipl_data_flat = np.zeros((len(func)*len(axis)*len(angl_rot), data.size)) tot_operations = {'opt%d' %k:[] for k in range(0,len(func)*len(axis)*len(angl_rot))} i = 0 for f in func: cube = f(data) for rotax in axis: ax_tup = [0,1,2] ax_tup.remove(rotax) for rot in angl_rot: tot_manipl_data_flat[i] = np.rot90(cube, k=rot, axes=ax_tup).flatten() # function, axis of rotation, angle of rotation, slice index tot_operations['opt%d' %i] = [f, rotax, rot] i += 1 uniq_manipl_data_flat = self.UniqueRows(tot_manipl_data_flat).astype(int) uniq_operations = {} for iumdf, uniq_mdf in enumerate(uniq_manipl_data_flat): for itmdf, tot_mdf in enumerate(tot_manipl_data_flat): if(all(uniq_mdf == tot_mdf)): uniq_operations['opt%d' %iumdf] = tot_operations['opt%d' %itmdf] break assert uniq_manipl_data_flat.shape[0] == len(uniq_operations) if(verbose): print('tot number of (unique) manipulation we can do on a cube: %d' %(len(uniq_operations))) return uniq_operations def prediction(self, x): img_shape = x.shape if(self.TTA == 2): X_tta = np.zeros((np.append(3*len(self.MANIP), img_shape))) elif(self.TTA == 1): X_tta = np.zeros((np.append(3*len(self.MANIP), img_shape))) elif(self.TTA == 0): X_tta = np.zeros((np.append(len(self.MANIP), img_shape))) if(self.VERBOSE): loop = tqdm(range(len(self.MANIP))) else: loop = range(len(self.MANIP)) for iopt in loop: opt, rotax, rot = self.MANIP['opt%d' %iopt] ax_tup = [0,1,2] ax_tup.remove(rotax) cube = np.rot90(opt(x), k=rot, axes=ax_tup) X = cube[np.newaxis, ..., np.newaxis] for j in range(img_shape[0]): if(self.TTA == 0): X_tta[iopt,j,:,:] = self.MODEL_LOADED.predict(X[:,j,:,:,:], verbose=0).squeeze() else: X_tta[iopt,j,:,:] = self.MODEL_LOADED.predict(X[:,j,:,:,:], verbose=0).squeeze() X_tta[iopt+len(self.MANIP),:,j,:] = self.MODEL_LOADED.predict(X[:,:,j,:,:], verbose=0).squeeze() X_tta[iopt+len(self.MANIP)*2,:,:,j] = self.MODEL_LOADED.predict(X[:,:,:,j,:], verbose=0).squeeze() for itta in range(X_tta.shape[0]): opt, rotax, rot = self.MANIP['opt%d' %(itta%len(self.MANIP))] ax_tup = [0,1,2] ax_tup.remove(rotax) X_tta[itta] = opt(np.rot90(X_tta[itta], k=-rot, axes=ax_tup)) X_seg = np.round(np.mean(X_tta, axis=0)) X_err = np.std(X_tta, axis=0) return X_seg, X_err
[ [ [ 55, 66 ], [ 6418, 6420 ], [ 6478, 6480 ], [ 6529, 6531 ], [ 6726, 6728 ], [ 7168, 7170 ], [ 7254, 7256 ], [ 7282, 7284 ], [ 7483, 7485 ], [ 7888, 7890 ], [ 8854, 8856 ], [ 8864, 8866 ], [ 8955, 8957 ], [ 8965, 8967 ], [ 9056, 9058 ], [ 9066, 9068 ], [ 9411, 9413 ], [ 9470, 9472 ], [ 9487, 9489 ], [ 10248, 10250 ], [ 10309, 10311 ], [ 10318, 10320 ], [ 10358, 10360 ], [ 7320, 7322 ], [ 7330, 7332 ], [ 7371, 7373 ], [ 7381, 7383 ] ], [ [ 68, 81 ], [ 5487, 5500 ] ], [ [ 99, 103 ], [ 9160, 9164 ] ], [ [ 112, 128 ], [ 1430, 1432 ], [ 1655, 1657 ], [ 1666, 1668 ], [ 1694, 1696 ], [ 1735, 1737 ], [ 1760, 1762 ], [ 1792, 1794 ] ], [ [ 165, 175 ], [ 5890, 5900 ] ], [ [ 205, 217 ], [ 1833, 1834 ], [ 2311, 2312 ], [ 2317, 2318 ], [ 2396, 2397 ], [ 2412, 2413 ], [ 2559, 2560 ], [ 2568, 2569 ], [ 2820, 2821 ], [ 2826, 2827 ], [ 2902, 2903 ], [ 2908, 2909 ], [ 2931, 2932 ], [ 2937, 2938 ] ], [ [ 252, 258 ], [ 510, 516 ] ], [ [ 299, 302 ], [ 607, 610 ], [ 690, 693 ], [ 753, 756 ] ], [ [ 338, 347 ], [ 1046, 1055 ], [ 1152, 1161 ], [ 1215, 1224 ] ], [ [ 383, 391 ], [ 1334, 1342 ], [ 1349, 1357 ] ], [ [ 398, 440 ], [ 1873, 1915 ] ], [ [ 1474, 1496 ], [ 5803, 5825 ] ], [ [ 1963, 1966 ], [ 5833, 5836 ] ], [ [ 2620, 2629 ], [ 5850, 5859 ] ], [ [ 3043, 3054 ] ] ]
# coding: utf-8 """ Sunshine Conversations API The version of the OpenAPI document: 9.4.5 Generated by: https://openapi-generator.tech """ import pprint import re # noqa: F401 import six from sunshine_conversations_client.configuration import Configuration from sunshine_conversations_client.undefined import Undefined class UserTruncated(object): """NOTE: This class is auto generated by OpenAPI Generator. Ref: https://openapi-generator.tech Do not edit the class manually. """ """ Attributes: openapi_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. """ openapi_types = { 'id': 'str', 'external_id': 'str' } attribute_map = { 'id': 'id', 'external_id': 'externalId' } nulls = set() def __init__(self, id=None, external_id=Undefined(), local_vars_configuration=None): # noqa: E501 """UserTruncated - a model defined in OpenAPI""" # noqa: E501 if local_vars_configuration is None: local_vars_configuration = Configuration() self.local_vars_configuration = local_vars_configuration self._id = None self._external_id = None self.discriminator = None if id is not None: self.id = id self.external_id = external_id @property def id(self): """Gets the id of this UserTruncated. # noqa: E501 The unique ID of the user. # noqa: E501 :return: The id of this UserTruncated. # noqa: E501 :rtype: str """ return self._id @id.setter def id(self, id): """Sets the id of this UserTruncated. The unique ID of the user. # noqa: E501 :param id: The id of this UserTruncated. # noqa: E501 :type: str """ self._id = id @property def external_id(self): """Gets the external_id of this UserTruncated. # noqa: E501 An optional ID that can also be used to retrieve the user. # noqa: E501 :return: The external_id of this UserTruncated. # noqa: E501 :rtype: str """ return self._external_id @external_id.setter def external_id(self, external_id): """Sets the external_id of this UserTruncated. An optional ID that can also be used to retrieve the user. # noqa: E501 :param external_id: The external_id of this UserTruncated. # noqa: E501 :type: str """ if type(external_id) is Undefined: external_id = None self.nulls.discard("external_id") elif external_id is None: self.nulls.add("external_id") else: self.nulls.discard("external_id") self._external_id = external_id def to_dict(self): """Returns the model properties as a dict""" result = {} for attr, _ in six.iteritems(self.openapi_types): value = getattr(self, attr) if isinstance(value, list): result[attr] = list(map( lambda x: x.to_dict() if hasattr(x, "to_dict") else x, value )) elif hasattr(value, "to_dict"): result[attr] = value.to_dict() elif isinstance(value, dict): result[attr] = dict(map( lambda item: (item[0], item[1].to_dict()) if hasattr(item[1], "to_dict") else item, value.items() )) else: result[attr] = value return result def to_str(self): """Returns the string representation of the model""" return pprint.pformat(self.to_dict()) def __repr__(self): """For `print` and `pprint`""" return self.to_str() def __eq__(self, other): """Returns true if both objects are equal""" if not isinstance(other, UserTruncated): return False return self.to_dict() == other.to_dict() def __ne__(self, other): """Returns true if both objects are not equal""" if not isinstance(other, UserTruncated): return True return self.to_dict() != other.to_dict()
[ [ [ 162, 168 ], [ 3886, 3892 ] ], [ [ 176, 178 ] ], [ [ 201, 204 ], [ 3082, 3085 ] ], [ [ 263, 276 ], [ 1240, 1253 ] ], [ [ 329, 338 ], [ 1017, 1026 ], [ 2696, 2705 ] ], [ [ 347, 360 ], [ 4126, 4139 ], [ 4337, 4350 ] ] ]
import cv2 as cv
import os


def preprocess(labels_path, sep_labels_path):
    """Split every label image in labels_path into one binary mask per attribute."""
    # List all label files in labels_path.
    labels_filenames = os.listdir(labels_path)
    count = 0
    for label_filename in labels_filenames:
        label_path = os.path.join(labels_path, label_filename)
        print(f'segmenting {label_filename}')
        masks = segment_labels(label_path)
        for att in masks:
            mask = masks[att]
            path = f"{sep_labels_path}/{label_filename[:-4]}_{att}.png"
            print(f'{count} - writing {path}')
            cv.imwrite(path, mask)
            count += 1


def segment_labels(label_path):
    """Return a dict mapping attribute name -> binary mask for one label image."""
    # Each attribute is encoded in the label image as a fixed color value.
    atts = {
        "background": (0, 0, 0),
        "mouth": (255, 0, 0),
        "eyes": (0, 255, 0),
        "nose": (0, 0, 255),
        "face": (128, 128, 128),
        "hair": (255, 255, 0),
        "eyebrows": (255, 0, 255),
        "ears": (0, 255, 255),
        "teeth": (255, 255, 255),
        "beard": (255, 192, 192),
        "sunglasses": (0, 128, 128),
    }

    label = cv.imread(label_path)
    masks = {}

    for att in atts:
        color = atts[att]
        # cv.inRange gives a single-channel mask: 255 where the pixel equals color, 0 elsewhere.
        mask = cv.inRange(label, color, color)
        masks[att] = mask

    return masks


preprocess("./organized_dataset/labels", "./organized_dataset/segmented_labels")
[ [ [ 7, 16 ], [ 576, 578 ], [ 1105, 1107 ], [ 1256, 1258 ] ], [ [ 24, 35 ], [ 1138, 1140 ], [ 1166, 1168 ] ], [ [ 43, 45 ], [ 152, 154 ], [ 256, 258 ] ], [ [ 51, 61 ], [ 1500, 1510 ] ], [ [ 689, 703 ], [ 361, 375 ] ] ]
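Each row above pairs a code string with a def_use_chains entry. The sketch below shows one way to read such a row; it assumes, based on inspection of the samples in this dump (for the OpenCV sample above, the first chain starts with [7, 16] and code[7:16] is 'cv2 as cv'), that every chain lists the defining span first, followed by its use spans, all given as [start, end) character offsets into the code string. The helper name and the toy code/chains values are illustrative only and are not part of the dataset.

def show_def_use_chains(code, chains):
    """Print each definition and its uses by slicing [start, end) offsets into code."""
    for chain in chains:
        (d_start, d_end), *uses = chain
        print(f"def  {code[d_start:d_end]!r} at [{d_start}, {d_end})")
        for u_start, u_end in uses:
            print(f"  use {code[u_start:u_end]!r} at [{u_start}, {u_end})")


# Toy example; the snippet and its offsets are made up for illustration only.
example_code = "import numpy as np\nx = np.zeros(3)\n"
example_chains = [
    [[7, 18], [23, 25]],  # 'numpy as np' defined, 'np' used once
    [[19, 20]],           # 'x' defined, never used
]
show_def_use_chains(example_code, example_chains)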
"""EPR Socket interface.""" from __future__ import annotations import abc import logging from contextlib import contextmanager from typing import TYPE_CHECKING, Callable, ContextManager, List, Optional, Tuple, Union from netqasm.logging.glob import get_netqasm_logger from netqasm.qlink_compat import ( EPRRole, EPRType, LinkLayerOKTypeK, LinkLayerOKTypeM, LinkLayerOKTypeR, RandomBasis, TimeUnit, ) from netqasm.sdk.build_epr import EprMeasBasis, basis_to_rotation from netqasm.sdk.builder import EntRequestParams, EprKeepResult, EprMeasureResult from netqasm.sdk.futures import RegFuture from .qubit import FutureQubit, Qubit if TYPE_CHECKING: from netqasm.sdk import connection T_LinkLayerOkList = Union[ List[LinkLayerOKTypeK], List[LinkLayerOKTypeM], List[LinkLayerOKTypeR] ] class EPRSocket(abc.ABC): """EPR socket class. Used to generate entanglement with a remote node. An EPR socket represents a connection with a single remote node through which EPR pairs can be generated. Its main interfaces are the `create` and `recv` methods. A typical use case for two nodes is that they both create an EPR socket to the other node, and during the protocol, one of the nodes does `create` operations on its socket while the other node does `recv` operations. A `create` operation asks the network stack to initiate generation of EPR pairs with the remote node. Depending on the type of generation, the result of this operation can be qubit objects or measurement outcomes. A `recv` operation asks the network stack to wait for the remote node to initiate generation of EPR pairs. Again, the result can be qubit objects or measurement outcomes. Each `create` operation on one node must be matched by a `recv` operation on the other node. Since "creating" and "receiving" must happen at the same time, a node that is doing a `create` operation on its socket cannot advance until the other node does the corresponding `recv`. This is different from classical network sockets where a "send" operation (roughly anologous to `create` in an EPR socket) does not block on the remote node receiving it. An EPR socket is identified by a triple consisting of (1) the remote node ID, (2) the local socket ID and (3) the remote socket ID. Two nodes that want to generate EPR pairs with each other should make sure that the IDs in their local sockets match. """ def __init__( self, remote_app_name: str, epr_socket_id: int = 0, remote_epr_socket_id: int = 0, min_fidelity: int = 100, ): """Create an EPR socket. It still needs to be registered with the network stack separately. Registering and opening the EPR socket is currently done automatically by the connection that uses this EPR socket, specifically when a context is opened with that connection. :param remote_app_name: name of the remote party (i.e. the role, like "client", not necessarily the node name like "delft") :param epr_socket_id: local socket ID, defaults to 0 :param remote_epr_socket_id: remote socket ID, defaults to 0. Note that this must match with the local socket ID of the remote node's EPR socket. :param min_fidelity: minimum desired fidelity for EPR pairs generated over this socket, in percentages (i.e. range 0-100). Defaults to 100. 
""" self._conn: Optional[connection.BaseNetQASMConnection] = None self._remote_app_name: str = remote_app_name self._remote_node_id: Optional[ int ] = None # Gets set when the connection is set self._epr_socket_id: int = epr_socket_id self._remote_epr_socket_id: int = remote_epr_socket_id if ( not isinstance(min_fidelity, int) or (min_fidelity < 0) or min_fidelity > 100 ): raise ValueError( f"min_fidelity must be an integer in the range [0, 100], not {min_fidelity}" ) self._min_fidelity: int = min_fidelity self._logger: logging.Logger = get_netqasm_logger( f"{self.__class__.__name__}({self._remote_app_name}, {self._epr_socket_id})" ) @property def conn(self) -> connection.BaseNetQASMConnection: """Get the underlying :class:`NetQASMConnection`""" if self._conn is None: raise RuntimeError("EPRSocket does not have an open connection") return self._conn @conn.setter def conn(self, conn: connection.BaseNetQASMConnection): self._conn = conn self._remote_node_id = self._get_node_id(app_name=self._remote_app_name) @property def remote_app_name(self) -> str: """Get the remote application name""" return self._remote_app_name @property def remote_node_id(self) -> int: """Get the remote node ID""" if self._remote_node_id is None: raise RuntimeError("Remote Node ID has not been initialized") return self._remote_node_id @property def epr_socket_id(self) -> int: """Get the EPR socket ID""" return self._epr_socket_id @property def remote_epr_socket_id(self) -> int: """Get the remote EPR socket ID""" return self._remote_epr_socket_id @property def min_fidelity(self) -> int: """Get the desired minimum fidelity""" return self._min_fidelity def create_keep( self, number: int = 1, post_routine: Optional[Callable] = None, sequential: bool = False, time_unit: TimeUnit = TimeUnit.MICRO_SECONDS, max_time: int = 0, expect_phi_plus: bool = True, min_fidelity_all_at_end: Optional[int] = None, max_tries: Optional[int] = None, ) -> List[Qubit]: """Ask the network stack to generate EPR pairs with the remote node and keep them in memory. A `create_keep` operation must always be matched by a `recv_keep` operation on the remote node. If `sequential` is False (default), this operation returns a list of Qubit objects representing the local qubits that are each one half of the generated pairs. These qubits can then be manipulated locally just like locally initialized qubits, by e.g. applying gates or measuring them. Each qubit also contains information about the entanglement generation that lead to its creation, and can be accessed by its `entanglement_info` property. A typical example for just generating one pair with another node would be: .. code-block:: q = epr_socket.create_keep()[0] # `q` can now be used as a normal qubit If `sequential` is False (default), the all requested EPR pairs are generated at once, before returning the results (qubits or entanglement info objects). If `sequential` is True, a callback function (`post_routine`) should be specified. After generating one EPR pair, this callback will be called, before generating the next pair. This method can e.g. be used to generate many EPR pairs (more than the number of physical qubits available), by measuring (and freeing up) each qubit before the next pair is generated. For example: .. 
code-block:: outcomes = alice.new_array(num) def post_create(conn, q, pair): q.H() outcome = outcomes.get_future_index(pair) q.measure(outcome) epr_socket.create_keep(number=num, post_routine=post_create, sequential=True) :param number: number of EPR pairs to generate, defaults to 1 :param post_routine: callback function for each genated pair. Only used if `sequential` is True. The callback should take three arguments `(conn, q, pair)` where * `conn` is the connection (e.g. `self`) * `q` is the entangled qubit (of type `FutureQubit`) * `pair` is a register holding which pair is handled (0, 1, ...) :param sequential: whether to use callbacks after each pair, defaults to False :param time_unit: which time unit to use for the `max_time` parameter :param max_time: maximum number of time units (see `time_unit`) the Host is willing to wait for entanglement generation of a single pair. If generation does not succeed within this time, the whole subroutine that this request is part of is reset and run again by the quantum node controller. :param expect_phi_plus: whether to assume that the EPR pairs that are created are in the Phi+ (or Phi_00) state. Defaults to True. If True, the compiler will make sure that if the physical link actually produced another Bell state, the behavior seen by the application is still as if a Phi+ state was actually produced. :param min_fidelity_all_at_end: the minimum fidelity that *all* entangled qubits should ideally still have at the moment the last qubit has been generated. For example, when specifying `number=2` and `min_fidelity_all_at_end=80`, the the program will automatically try to make sure that both qubits have a fidelity of at least 80% when the second qubit has been generated. It will attempt to do this by automatically re-trying the entanglement generation if the fidelity constraint is not satisfied. This is however an *attempt*, and not a guarantee!. :param max_tries: maximum number of re-tries should be made to try and achieve the `min_fidelity_all_at_end` constraint. :return: list of qubits created """ qubits, _ = self.conn.builder.sdk_create_epr_keep( params=EntRequestParams( remote_node_id=self.remote_node_id, epr_socket_id=self._epr_socket_id, number=number, post_routine=post_routine, sequential=sequential, time_unit=time_unit, max_time=max_time, expect_phi_plus=expect_phi_plus, min_fidelity_all_at_end=min_fidelity_all_at_end, max_tries=max_tries, ), ) return qubits def create_keep_with_info( self, number: int = 1, post_routine: Optional[Callable] = None, sequential: bool = False, time_unit: TimeUnit = TimeUnit.MICRO_SECONDS, max_time: int = 0, expect_phi_plus: bool = True, min_fidelity_all_at_end: Optional[int] = None, ) -> Tuple[List[Qubit], List[EprKeepResult]]: """Same as create_keep but also return the EPR generation information coming from the network stack. For more information see the documentation of `create_keep`. 
:param number: number of pairs to generate, defaults to 1 :return: tuple with (1) list of qubits created, (2) list of EprKeepResult objects """ qubits, info = self.conn._builder.sdk_create_epr_keep( params=EntRequestParams( remote_node_id=self.remote_node_id, epr_socket_id=self._epr_socket_id, number=number, post_routine=post_routine, sequential=sequential, time_unit=time_unit, max_time=max_time, expect_phi_plus=expect_phi_plus, min_fidelity_all_at_end=min_fidelity_all_at_end, ), ) return qubits, info def create_measure( self, number: int = 1, time_unit: TimeUnit = TimeUnit.MICRO_SECONDS, max_time: int = 0, expect_phi_plus: bool = True, basis_local: EprMeasBasis = None, basis_remote: EprMeasBasis = None, rotations_local: Tuple[int, int, int] = (0, 0, 0), rotations_remote: Tuple[int, int, int] = (0, 0, 0), random_basis_local: Optional[RandomBasis] = None, random_basis_remote: Optional[RandomBasis] = None, ) -> List[EprMeasureResult]: """Ask the network stack to generate EPR pairs with the remote node and measure them immediately (on both nodes). A `create_measure` operation must always be matched by a `recv_measure` operation on the remote node. This operation returns a list of Linklayer response objects. These objects contain information about the entanglement generation and includes the measurement outcome and basis used. Note that all values are `Future` objects. This means that the current subroutine must be flushed before the values become defined. An example for generating 10 pairs with another node that are immediately measured: .. code-block:: # list of Futures that become defined when subroutine is flushed outcomes = [] with NetQASMConnection("alice", epr_sockets=[epr_socket]): ent_infos = epr_socket.create(number=10, tp=EPRType.M) for ent_info in ent_infos: outcomes.append(ent_info.measurement_outcome) The basis to measure in can also be specified. There are 3 ways to specify a basis: * using one of the `EprMeasBasis` variants * by specifying 3 rotation angles, interpreted as an X-rotation, a Y-rotation and another X-rotation. For example, setting `rotations_local` to (8, 0, 0) means that before measuring, an X-rotation of 8*pi/16 = pi/2 radians is applied to the qubit. * using one of the `RandomBasis` variants, in which case one of the bases of that variant is chosen at random just before measuring NOTE: the node that initiates the entanglement generation, i.e. the one that calls `create` on its EPR socket, also controls the measurement bases of the receiving node (by setting e.g. `rotations_remote`). The receiving node cannot change this. :param number: number of EPR pairs to generate, defaults to 1 :param time_unit: which time unit to use for the `max_time` parameter :param max_time: maximum number of time units (see `time_unit`) the Host is willing to wait for entanglement generation of a single pair. If generation does not succeed within this time, the whole subroutine that this request is part of is reset and run again by the quantum node controller. :param expect_phi_plus: whether to assume that the EPR pairs that are created are in the Phi+ (or Phi_00) state. Defaults to True. If True, the compiler will make sure that if the physical link actually produced another Bell state, the behavior seen by the application is still as if a Phi+ state was actually produced. 
:param basis_local: basis to measure in on this node for M-type requests :param basis_remote: basis to measure in on the remote node for M-type requests :param rotations_local: rotations to apply before measuring on this node :param rotations_remote: rotations to apply before measuring on remote node :param random_basis_local: random bases to choose from when measuring on this node :param random_basis_remote: random bases to choose from when measuring on the remote node :return: list of entanglement info objects per created pair. """ if basis_local is not None: rotations_local = basis_to_rotation(basis_local) if basis_remote is not None: rotations_remote = basis_to_rotation(basis_remote) return self.conn.builder.sdk_create_epr_measure( params=EntRequestParams( remote_node_id=self.remote_node_id, epr_socket_id=self._epr_socket_id, number=number, post_routine=None, sequential=False, time_unit=time_unit, max_time=max_time, expect_phi_plus=expect_phi_plus, random_basis_local=random_basis_local, random_basis_remote=random_basis_remote, rotations_local=rotations_local, rotations_remote=rotations_remote, ), ) def create_rsp( self, number: int = 1, time_unit: TimeUnit = TimeUnit.MICRO_SECONDS, max_time: int = 0, expect_phi_plus: bool = True, basis_local: EprMeasBasis = None, rotations_local: Tuple[int, int, int] = (0, 0, 0), random_basis_local: Optional[RandomBasis] = None, min_fidelity_all_at_end: Optional[int] = None, ) -> List[EprMeasureResult]: """Ask the network stack to do remote preparation with the remote node. A `create_rsp` operation must always be matched by a `recv_erp` operation on the remote node. This operation returns a list of Linklayer response objects. These objects contain information about the entanglement generation and includes the measurement outcome and basis used. Note that all values are `Future` objects. This means that the current subroutine must be flushed before the values become defined. An example for generating 10 pairs with another node that are immediately measured: .. code-block:: m: LinkLayerOKTypeM = epr_socket.create_rsp(tp=EPRType.R)[0] print(m.measurement_outcome) # remote node now has a prepared qubit The basis to measure in can also be specified. There are 3 ways to specify a basis: * using one of the `EprMeasBasis` variants * by specifying 3 rotation angles, interpreted as an X-rotation, a Y-rotation and another X-rotation. For example, setting `rotations_local` to (8, 0, 0) means that before measuring, an X-rotation of 8*pi/16 = pi/2 radians is applied to the qubit. * using one of the `RandomBasis` variants, in which case one of the bases of that variant is chosen at random just before measuring :param number: number of EPR pairs to generate, defaults to 1 :param time_unit: which time unit to use for the `max_time` parameter :param max_time: maximum number of time units (see `time_unit`) the Host is willing to wait for entanglement generation of a single pair. If generation does not succeed within this time, the whole subroutine that this request is part of is reset and run again by the quantum node controller. :param expect_phi_plus: whether to assume that the EPR pairs that are created are in the Phi+ (or Phi_00) state. Defaults to True. If True, the compiler will make sure that if the physical link actually produced another Bell state, the behavior seen by the application is still as if a Phi+ state was actually produced. 
:param basis_local: basis to measure in on this node for M-type requests :param basis_remote: basis to measure in on the remote node for M-type requests :param rotations_local: rotations to apply before measuring on this node :param rotations_remote: rotations to apply before measuring on remote node :param random_basis_local: random bases to choose from when measuring on this node :param random_basis_remote: random bases to choose from when measuring on the remote node :return: list of entanglement info objects per created pair. """ if basis_local is not None: rotations_local = basis_to_rotation(basis_local) return self.conn.builder.sdk_create_epr_rsp( params=EntRequestParams( remote_node_id=self.remote_node_id, epr_socket_id=self._epr_socket_id, number=number, post_routine=None, sequential=False, time_unit=time_unit, max_time=max_time, expect_phi_plus=expect_phi_plus, random_basis_local=random_basis_local, rotations_local=rotations_local, min_fidelity_all_at_end=min_fidelity_all_at_end, ) ) def create( self, number: int = 1, post_routine: Optional[Callable] = None, sequential: bool = False, tp: EPRType = EPRType.K, time_unit: TimeUnit = TimeUnit.MICRO_SECONDS, max_time: int = 0, basis_local: EprMeasBasis = None, basis_remote: EprMeasBasis = None, rotations_local: Tuple[int, int, int] = (0, 0, 0), rotations_remote: Tuple[int, int, int] = (0, 0, 0), random_basis_local: Optional[RandomBasis] = None, random_basis_remote: Optional[RandomBasis] = None, ) -> Union[List[Qubit], List[EprMeasureResult], List[LinkLayerOKTypeM]]: """Ask the network stack to generate EPR pairs with the remote node. A `create` operation must always be matched by a `recv` operation on the remote node. If the type of request is Create and Keep (CK, or just K) and if `sequential` is False (default), this operation returns a list of Qubit objects representing the local qubits that are each one half of the generated pairs. These qubits can then be manipulated locally just like locally initialized qubits, by e.g. applying gates or measuring them. Each qubit also contains information about the entanglement generation that lead to its creation, and can be accessed by its `entanglement_info` property. A typical example for just generating one pair with another node would be: .. code-block:: q = epr_socket.create()[0] # `q` can now be used as a normal qubit If the type of request is Measure Directly (MD, or just M), this operation returns a list of Linklayer response objects. These objects contain information about the entanglement generation and includes the measurement outcome and basis used. Note that all values are `Future` objects. This means that the current subroutine must be flushed before the values become defined. An example for generating 10 pairs with another node that are immediately measured: .. code-block:: # list of Futures that become defined when subroutine is flushed outcomes = [] with NetQASMConnection("alice", epr_sockets=[epr_socket]): ent_infos = epr_socket.create(number=10, tp=EPRType.M) for ent_info in ent_infos: outcomes.append(ent_info.measurement_outcome) For "Measure Directly"-type requests, the basis to measure in can also be specified. There are 3 ways to specify a basis: * using one of the `EprMeasBasis` variants * by specifying 3 rotation angles, interpreted as an X-rotation, a Y-rotation and another X-rotation. For example, setting `rotations_local` to (8, 0, 0) means that before measuring, an X-rotation of 8*pi/16 = pi/2 radians is applied to the qubit. 
* using one of the `RandomBasis` variants, in which case one of the bases of that variant is chosen at random just before measuring NOTE: the node that initiates the entanglement generation, i.e. the one that calls `create` on its EPR socket, also controls the measurement bases of the receiving node (by setting e.g. `rotations_remote`). The receiving node cannot change this. If `sequential` is False (default), the all requested EPR pairs are generated at once, before returning the results (qubits or entanglement info objects). If `sequential` is True, a callback function (`post_routine`) should be specified. After generating one EPR pair, this callback will be called, before generating the next pair. This method can e.g. be used to generate many EPR pairs (more than the number of physical qubits available), by measuring (and freeing up) each qubit before the next pair is generated. For example: .. code-block:: outcomes = alice.new_array(num) def post_create(conn, q, pair): q.H() outcome = outcomes.get_future_index(pair) q.measure(outcome) epr_socket.create(number=num, post_routine=post_create, sequential=True) :param number: number of EPR pairs to generate, defaults to 1 :param post_routine: callback function for each genated pair. Only used if `sequential` is True. The callback should take three arguments `(conn, q, pair)` where * `conn` is the connection (e.g. `self`) * `q` is the entangled qubit (of type `FutureQubit`) * `pair` is a register holding which pair is handled (0, 1, ...) :param sequential: whether to use callbacks after each pair, defaults to False :param tp: type of entanglement generation, defaults to EPRType.K. Note that corresponding `recv` of the remote node's EPR socket must specify the same type. :param time_unit: which time unit to use for the `max_time` parameter :param max_time: maximum number of time units (see `time_unit`) the Host is willing to wait for entanglement generation of a single pair. If generation does not succeed within this time, the whole subroutine that this request is part of is reset and run again by the quantum node controller. :param basis_local: basis to measure in on this node for M-type requests :param basis_remote: basis to measure in on the remote node for M-type requests :param rotations_local: rotations to apply before measuring on this node (for M-type requests) :param rotations_remote: rotations to apply before measuring on remote node (for M-type requests) :param random_basis_local: random bases to choose from when measuring on this node (for M-type requests) :param random_basis_remote: random bases to choose from when measuring on the remote node (for M-type requests) :return: For K-type requests: list of qubits created. For M-type requests: list of entanglement info objects per created pair. """ self._logger.warning( "EPRSocket.create() is deprecated. Use one of " "create_keep, create_measure, or create_rsp instead." 
) if tp == EPRType.K: return self.create_keep( number=number, post_routine=post_routine, sequential=sequential, time_unit=time_unit, max_time=max_time, ) elif tp == EPRType.M: return self.create_measure( number=number, time_unit=time_unit, max_time=max_time, basis_local=basis_local, basis_remote=basis_remote, rotations_local=rotations_local, rotations_remote=rotations_remote, random_basis_local=random_basis_local, random_basis_remote=random_basis_remote, ) elif tp == EPRType.R: return self.create_rsp( number=number, time_unit=time_unit, max_time=max_time, basis_local=basis_local, random_basis_local=random_basis_local, ) assert False def create_context( self, number: int = 1, sequential: bool = False, time_unit: TimeUnit = TimeUnit.MICRO_SECONDS, max_time: int = 0, ) -> ContextManager[Tuple[FutureQubit, RegFuture]]: """Create a context that is executed for each generated EPR pair consecutively. Creates EPR pairs with a remote node and handles each pair by the operations defined in a subsequent context. See the example below. .. code-block:: with epr_socket.create_context(number=10) as (q, pair): q.H() m = q.measure() NOTE: even though all pairs are handled consecutively, they are still generated concurrently by the network stack. By setting `sequential` to True, the network stack only generates the next pair after the context for the previous pair has been executed, similar to using a callback (`post_routine`) in the `create` method. :param number: number of EPR pairs to generate, defaults to 1 :param sequential: whether to generate pairs sequentially, defaults to False """ return self.conn.builder.sdk_create_epr_context( params=EntRequestParams( remote_node_id=self.remote_node_id, epr_socket_id=self._epr_socket_id, number=number, post_routine=None, sequential=sequential, time_unit=time_unit, max_time=max_time, ) ) def recv_keep( self, number: int = 1, post_routine: Optional[Callable] = None, sequential: bool = False, min_fidelity_all_at_end: Optional[int] = None, max_tries: Optional[int] = None, ) -> List[Qubit]: """Ask the network stack to wait for the remote node to generate EPR pairs, which are kept in memory. A `recv_keep` operation must always be matched by a `create_keep` operation on the remote node. The number of generated pairs must also match. For more information see the documentation of `create_keep`. :param number: number of pairs to generate, defaults to 1 :param post_routine: callback function used when `sequential` is True :param sequential: whether to call the callback after each pair generation, defaults to False :param min_fidelity_all_at_end: the minimum fidelity that *all* entangled qubits should ideally still have at the moment the last qubit has been generated. For example, when specifying `number=2` and `min_fidelity_all_at_end=80`, the the program will automatically try to make sure that both qubits have a fidelity of at least 80% when the second qubit has been generated. It will attempt to do this by automatically re-trying the entanglement generation if the fidelity constraint is not satisfied. This is however an *attempt*, and not a guarantee!. :param max_tries: maximum number of re-tries should be made to try and achieve the `min_fidelity_all_at_end` constraint. 
:return: list of qubits created """ if self.conn is None: raise RuntimeError("EPRSocket does not have an open connection") qubits, _ = self.conn._builder.sdk_recv_epr_keep( params=EntRequestParams( remote_node_id=self.remote_node_id, epr_socket_id=self._epr_socket_id, number=number, post_routine=post_routine, sequential=sequential, min_fidelity_all_at_end=min_fidelity_all_at_end, max_tries=max_tries, ), ) return qubits def recv_keep_with_info( self, number: int = 1, post_routine: Optional[Callable] = None, sequential: bool = False, min_fidelity_all_at_end: Optional[int] = None, max_tries: Optional[int] = None, ) -> Tuple[List[Qubit], List[EprKeepResult]]: """Same as recv_keep but also return the EPR generation information coming from the network stack. For more information see the documentation of `recv_keep`. :param number: number of pairs to generate, defaults to 1 :return: tuple with (1) list of qubits created, (2) list of EprKeepResult objects """ qubits, info = self.conn._builder.sdk_recv_epr_keep( params=EntRequestParams( remote_node_id=self.remote_node_id, epr_socket_id=self._epr_socket_id, number=number, post_routine=post_routine, sequential=sequential, min_fidelity_all_at_end=min_fidelity_all_at_end, max_tries=max_tries, ), ) return qubits, info def recv_measure( self, number: int = 1, ) -> List[EprMeasureResult]: """Ask the network stack to wait for the remote node to generate EPR pairs, which are immediately measured (on both nodes). A `recv_measure` operation must always be matched by a `create_measure` operation on the remote node. The number and type of generation must also match. For more information see the documentation of `create_measure`. :param number: number of pairs to generate, defaults to 1 :param post_routine: callback function used when `sequential` is True :param sequential: whether to call the callback after each pair generation, defaults to False :return: list of entanglement info objects per created pair. """ if self.conn is None: raise RuntimeError("EPRSocket does not have an open connection") return self.conn.builder.sdk_recv_epr_measure( params=EntRequestParams( remote_node_id=self.remote_node_id, epr_socket_id=self._epr_socket_id, number=number, post_routine=None, sequential=False, ), ) def recv_rsp( self, number: int = 1, min_fidelity_all_at_end: Optional[int] = None, max_tries: Optional[int] = None, ) -> List[Qubit]: """Ask the network stack to wait for remote state preparation from another node. A `recv_rsp` operation must always be matched by a `create_rsp` operation on the remote node. The number and type of generation must also match. For more information see the documentation of `create_rsp`. :param number: number of pairs to generate, defaults to 1 :return: list of qubits created """ if self.conn is None: raise RuntimeError("EPRSocket does not have an open connection") qubits, _ = self.conn.builder.sdk_recv_epr_rsp( params=EntRequestParams( remote_node_id=self.remote_node_id, epr_socket_id=self._epr_socket_id, number=number, post_routine=None, sequential=False, min_fidelity_all_at_end=min_fidelity_all_at_end, max_tries=max_tries, ), ) return qubits def recv_rsp_with_info( self, number: int = 1, min_fidelity_all_at_end: Optional[int] = None, max_tries: Optional[int] = None, ) -> Tuple[List[Qubit], List[EprKeepResult]]: """Same as recv_rsp but also return the EPR generation information coming from the network stack. For more information see the documentation of `recv_rsp`. 
:param number: number of pairs to generate, defaults to 1 :return: tuple with (1) list of qubits created, (2) list of EprKeepResult objects """ if self.conn is None: raise RuntimeError("EPRSocket does not have an open connection") qubits, infos = self.conn.builder.sdk_recv_epr_rsp( params=EntRequestParams( remote_node_id=self.remote_node_id, epr_socket_id=self._epr_socket_id, number=number, post_routine=None, sequential=False, min_fidelity_all_at_end=min_fidelity_all_at_end, max_tries=max_tries, ), ) return qubits, infos def recv( self, number: int = 1, post_routine: Optional[Callable] = None, sequential: bool = False, tp: EPRType = EPRType.K, ) -> Union[List[Qubit], List[EprMeasureResult], List[LinkLayerOKTypeR]]: """Ask the network stack to wait for the remote node to generate EPR pairs. A `recv` operation must always be matched by a `create` operation on the remote node. See also the documentation of `create`. The number and type of generation must also match. In case of Measure Directly requests, it is the initiating node (that calls `create`) which specifies the measurement bases. This should not and cannot be done in `recv`. For more information see the documentation of `create`. :param number: number of pairs to generate, defaults to 1 :param post_routine: callback function used when `sequential` is True :param sequential: whether to call the callback after each pair generation, defaults to False :param tp: type of entanglement generation, defaults to EPRType.K :return: For K-type requests: list of qubits created. For M-type requests: list of entanglement info objects per created pair. """ self._logger.warning( "EPRSocket.recv() is deprecated. Use one of " "recv_keep, recv_measure, or recv_rsp instead." ) if tp == EPRType.K: return self.recv_keep( number=number, post_routine=post_routine, sequential=sequential, ) elif tp == EPRType.M: return self.recv_measure(number=number) elif tp == EPRType.R: return self.recv_rsp(number=number) assert False @contextmanager def recv_context( self, number: int = 1, sequential: bool = False, ): """Receives EPR pair with a remote node (see doc of :meth:`~.create_context`)""" try: # NOTE loop_register is the register used for looping over the generated pairs ( pre_commands, loop_register, ent_results_array, output, pair, ) = self.conn.builder._pre_epr_context( role=EPRRole.RECV, params=EntRequestParams( remote_node_id=self.remote_node_id, epr_socket_id=self._epr_socket_id, number=number, post_routine=None, sequential=sequential, ), ) yield output, pair finally: self.conn.builder._post_epr_context( pre_commands=pre_commands, number=number, loop_register=loop_register, ent_results_array=ent_results_array, pair=pair, ) def _get_node_id(self, app_name: str) -> int: return self.conn.network_info.get_node_id_for_app(app_name=app_name)
[ [ [ 52, 63 ] ], [ [ 72, 75 ], [ 841, 844 ] ], [ [ 83, 90 ], [ 4207, 4214 ] ], [ [ 114, 128 ], [ 38529, 38543 ] ], [ [ 148, 161 ], [ 664, 677 ] ], [ [ 163, 171 ], [ 5657, 5665 ], [ 10652, 10660 ], [ 20807, 20815 ], [ 29851, 29859 ], [ 32146, 32154 ], [ 36792, 36800 ] ], [ [ 173, 187 ], [ 28398, 28412 ] ], [ [ 189, 193 ], [ 750, 754 ], [ 774, 778 ], [ 798, 802 ], [ 5933, 5937 ], [ 10893, 10897 ], [ 10906, 10910 ], [ 12352, 12356 ], [ 17074, 17078 ], [ 21309, 21313 ], [ 21322, 21326 ], [ 21346, 21350 ], [ 30008, 30012 ], [ 32309, 32313 ], [ 32322, 32326 ], [ 33236, 33240 ], [ 34574, 34578 ], [ 35758, 35762 ], [ 35771, 35775 ], [ 36892, 36896 ], [ 36905, 36909 ], [ 36929, 36933 ] ], [ [ 195, 203 ], [ 3534, 3542 ], [ 3667, 3675 ], [ 5648, 5656 ], [ 5861, 5869 ], [ 5902, 5910 ], [ 10643, 10651 ], [ 10856, 10864 ], [ 12254, 12262 ], [ 12313, 12321 ], [ 16980, 16988 ], [ 17043, 17051 ], [ 20798, 20806 ], [ 21205, 21213 ], [ 21264, 21272 ], [ 29842, 29850 ], [ 29936, 29944 ], [ 29977, 29985 ], [ 32137, 32145 ], [ 32231, 32239 ], [ 32272, 32280 ], [ 34502, 34510 ], [ 34543, 34551 ], [ 35680, 35688 ], [ 35721, 35729 ], [ 36783, 36791 ] ], [ [ 205, 210 ], [ 10887, 10892 ], [ 12132, 12137 ], [ 12192, 12197 ], [ 16918, 16923 ], [ 21083, 21088 ], [ 21143, 21148 ], [ 28413, 28418 ], [ 32303, 32308 ], [ 35752, 35757 ] ], [ [ 212, 217 ], [ 739, 744 ], [ 21303, 21308 ], [ 36886, 36891 ] ], [ [ 252, 270 ], [ 4224, 4242 ] ], [ [ 310, 317 ], [ 39068, 39075 ] ], [ [ 323, 330 ], [ 20881, 20888 ], [ 36866, 36873 ], [ 20871, 20878 ], [ 27180, 27187 ], [ 27446, 27453 ], [ 27929, 27936 ], [ 36856, 36863 ], [ 38169, 38176 ], [ 38361, 38368 ], [ 38443, 38450 ] ], [ [ 336, 352 ], [ 755, 771 ] ], [ [ 358, 374 ], [ 779, 795 ], [ 21351, 21367 ] ], [ [ 380, 396 ], [ 803, 819 ], [ 36934, 36950 ] ], [ [ 402, 413 ], [ 12263, 12274 ], [ 12322, 12333 ], [ 16989, 17000 ], [ 21214, 21225 ], [ 21273, 21284 ] ], [ [ 419, 427 ], [ 5739, 5747 ], [ 10734, 10742 ], [ 11933, 11941 ], [ 16762, 16770 ], [ 20922, 20930 ], [ 28338, 28346 ], [ 5728, 5736 ], [ 10723, 10731 ], [ 11922, 11930 ], [ 16751, 16759 ], [ 20911, 20919 ], [ 28327, 28335 ] ], [ [ 465, 477 ], [ 12043, 12055 ], [ 12086, 12098 ], [ 16872, 16884 ], [ 20994, 21006 ], [ 21037, 21049 ] ], [ [ 479, 496 ], [ 15885, 15902 ], [ 15984, 16001 ], [ 20081, 20098 ] ], [ [ 529, 545 ], [ 10046, 10062 ], [ 11366, 11382 ], [ 16093, 16109 ], [ 20185, 20201 ], [ 29439, 29455 ], [ 31663, 31679 ], [ 32776, 32792 ], [ 34165, 34181 ], [ 35209, 35225 ], [ 36330, 36346 ], [ 39105, 39121 ] ], [ [ 547, 560 ], [ 10911, 10924 ], [ 32327, 32340 ], [ 35776, 35789 ] ], [ [ 562, 578 ], [ 12357, 12373 ], [ 17079, 17095 ], [ 21327, 21343 ], [ 33241, 33257 ], [ 36910, 36926 ] ], [ [ 611, 620 ], [ 28432, 28441 ] ], [ [ 641, 652 ], [ 28419, 28430 ] ], [ [ 654, 659 ], [ 5938, 5943 ], [ 10898, 10903 ], [ 21314, 21319 ], [ 30013, 30018 ], [ 32314, 32319 ], [ 34579, 34584 ], [ 35763, 35768 ], [ 36897, 36902 ] ], [ [ 707, 717 ], [ 3543, 3553 ], [ 4380, 4390 ], [ 4651, 4661 ] ], [ [ 719, 736 ] ], [ [ 831, 840 ] ] ]
# Copyright 2020 Google LLC. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """The view layer of logic for the BM gCal Assistant. The logic here defines the behavior of the webhook when messages are received from users messaging through Business Messages. """ import base64 import datetime import hashlib import json import os import uuid from businessmessages import businessmessages_v1_client as bm_client from businessmessages.businessmessages_v1_messages import ( BusinessmessagesConversationsMessagesCreateRequest, BusinessMessagesMessage, BusinessMessagesRepresentative, BusinessMessagesSuggestion, BusinessMessagesSuggestedReply, BusinessmessagesConversationsEventsCreateRequest, BusinessMessagesEvent, BusinessMessagesAuthenticationRequest, BusinessMessagesAuthenticationRequestOauth ) from django.http import HttpResponse from django.views.decorators.csrf import csrf_exempt from google_cal_app.models import Conversation from googleapiclient.discovery import build from oauth2client import client from oauth2client.service_account import ServiceAccountCredentials import requests # The location of the service account credentials SERVICE_ACCOUNT_LOCATION = 'resources/bm-agent-service-account-credentials.json' # Set of commands the bot understands CMD_LOGIN = 'login' CMD_MY_DAY_SUMMARY = 'day-summary' CMD_FOCUS_SPRINT_SLOTS = 'focus-sprints' CMD_CANCEL_ALL_MEETINGS = 'cancel-all-meetings' CMD_YES_CANCEL = 'yes-cancel' CMD_NO_CANCEL = 'no-do-not-cancel' # The representative type that all messages are sent as BOT_REPRESENTATIVE = BusinessMessagesRepresentative( representativeType=BusinessMessagesRepresentative .RepresentativeTypeValueValuesEnum.BOT, displayName='BM gCal Assistant', avatarImage='https://lh3.googleusercontent.com/9PMLInqtfgnRnV-9QUgYj8W-ZAutv-49KsYmHthZayM9YnCsd01P0eNhbqtu9QoIF31tKzgwo-x1oCkVIQas5Q' ) LARGE_DATE = datetime.datetime(9999, 12, 30, 12, 59, 59, 59) DATE_FORMAT = '%Y-%m-%dT%H:%M:%S' @csrf_exempt def callback(request): """Callback URL. Processes messages sent from the user. Args: request (HttpRequest): The request object that django passes to the function Returns: An :HttpResponse: containing browser renderable HTML. 
""" if request.method == 'POST': request_data = request.body.decode('utf8').replace("'", '"') request_body = json.loads(request_data) print('request_body: %s', request_body) # Extract the conversation id and message text conversation_id = request_body.get('conversationId') conv = get_conversation(conversation_id) print('conversation_id: %s', conversation_id) try: display_name = request_body.get('context').get('userInfo').get( 'displayName') except Exception as e: print(e) display_name = None # Check that the message and text body exist if 'message' in request_body and 'text' in request_body['message']: message = request_body['message']['text'] print('message: %s', message) route_message(message, conv) elif 'suggestionResponse' in request_body: message = request_body['suggestionResponse']['postbackData'] print('message: %s', message) route_message(message, conv) elif 'authenticationResponse' in request_body: try: auth_code = request_body.get('authenticationResponse').get('code') redirect_uri = request_body.get('authenticationResponse').get('redirectUri') print(f'redirect_uri extracted from authenticationResponse {redirect_uri}') # Exchange auth_code with OAuth provider and get access_token code_verifier = conv.code_verifier if code_verifier is None or auth_code is None: print('There was an error.') else: access_token = request_access_token(auth_code, code_verifier, redirect_uri) # Save the access token in an encrypted format using save_token send_day_summary_message(conv, access_token) except Exception as e: print(f'Login error: {e}') elif 'userStatus' in request_body: if 'isTyping' in request_body['userStatus']: print('User is typing') elif 'requestedLiveAgent' in request_body['userStatus']: print('User requested transfer to live agent') return HttpResponse('Response.') return HttpResponse('This webhook expects a POST request.') def request_access_token(auth_code, code_verifier, redirect_uri): """Requests access_token from identity provider. Args: auth_code (str): Authorization code to request access_token code_verifier (str): pair of code_challenge and code_verifier for PKCE. """ obj = { 'client_secret': os.environ['OAUTH_CLIENT_SECRET'], 'client_id': os.environ['OAUTH_CLIENT_ID'], 'grant_type': 'authorization_code', 'code': auth_code, 'code_verifier': code_verifier, 'redirect_uri': redirect_uri } res = requests.post('https://oauth2.googleapis.com/token', data=obj) res_dict = json.loads(res.text) access_token = res_dict.get('access_token') if access_token is None: print(f'Could not find the access token: {res.content}') return None print(f'We found the access_token.') return access_token def get_conversation(conversation_id): """Returns a google_cal_app.Conversation object. Args: conversation_id (str): The unique id for this user and agent. """ conv = Conversation.objects.filter(id=conversation_id) if not conv: return Conversation(id=conversation_id).save() else: return conv[0] def route_message(message, conv): """Routes the message received from the user to create a response. Args: message (str): The message text received from the user. conv (Conversation): The unique id for this user and agent. """ normalized_message = message.lower() print(f'Routing message: {normalized_message}') if normalized_message == CMD_LOGIN: invoke_login_chip(conv) else: echo_message(message, conv) def fetch_events(access_token, today): """Fetches events from Calendar API. Args: access_token (str): The user's access_token to query data with. today (datetime.Date): Date object representing todays date. 
Returns: event_items (list): A list of sorted event items. """ credentials = client.AccessTokenCredentials( access_token, 'USER_AGENT') service = build('calendar', 'v3', credentials=credentials) events = service.events().list( calendarId='primary', timeMax=f'{today}T23:59:59-07:00', timeMin=f'{today}T06:00:00-07:00').execute() event_items = events.get('items') event_items.sort( key=lambda x: LARGE_DATE if (x.get('start') is None or x.get('start').get('dateTime') is None) else datetime.datetime.strptime( x.get('start').get('dateTime')[:19], DATE_FORMAT)) print("Returning") return event_items def send_day_summary_message(conv, access_token): """Fetches calendar data with access_token and sends it to the conversation. Args: conv (Conversation): The unique id for this user and agent. """ try: print("Send summary of my day") today = str(datetime.datetime.now().date()) event_items = fetch_events(access_token, today) print(f"Events: {event_items}") event_set = set() event_list_message = '' for event in event_items: try: if event.get('status') == 'confirmed' and today in event.get( 'start').get('dateTime') and event.get( 'summary') not in event_set: event_list_message = event_list_message + '- ' + event.get( 'summary') + '\n' event_set.add(event.get('summary')) except Exception as e: print(f'Exception A: {e}') if len(event_set) > 4: message_obj = BusinessMessagesMessage( messageId=str(uuid.uuid4().int), representative=BOT_REPRESENTATIVE, text='Looks like you have a lot of meetings today!') send_message(message_obj, conv.id) message_obj = BusinessMessagesMessage( messageId=str(uuid.uuid4().int), representative=BOT_REPRESENTATIVE, text='Here\'s the list of items or your calendar...') send_message(message_obj, conv.id) message_obj = BusinessMessagesMessage( messageId=str(uuid.uuid4().int), representative=BOT_REPRESENTATIVE, suggestions=get_suggestions(), text=event_list_message) send_message(message_obj, conv.id) except Exception as e: print(f'Exception B: {e}') def invoke_login_chip(conv, message=None): """Invokes the login chip within the conversation. Args: conv (Conversation): The unique id for this user and agent. message (str): The message text received from the user. """ message = message or 'To see your calendar summary, please sign in!' message_id = str(uuid.uuid4()) # Generate a code_verifier and code_challenge used in the OAuth 2.0 PKCE flow. # code_challenge is shared with Google to send to kick start the auth flow # with the identity provider. Then exchange the auth_code along with the # code_verifier to the identity provider to get an access_token to make # requests on behalf of the user. random_val = str(uuid.uuid1()).encode() base64_random = base64.urlsafe_b64encode(random_val) code_verifier = base64_random.decode('utf-8') hashed_code_verifier = hashlib.sha256(code_verifier.encode('utf-8')).digest() utf8_decoded_verifier = base64.urlsafe_b64encode(hashed_code_verifier).decode( 'utf-8') code_challenge = utf8_decoded_verifier.replace('=', '') message_obj = BusinessMessagesMessage( messageId=str(uuid.uuid4().int), representative=BOT_REPRESENTATIVE, suggestions=get_auth_chip_suggestion( os.environ['OAUTH_CLIENT_ID'], code_challenge, ['profile','https://www.googleapis.com/auth/calendar.readonly']), text=message, fallback='Your device does not support suggestions') send_message(message_obj, conv.id) print(f'The code verifier is: {code_verifier}') conv.code_verifier = code_verifier conv.save() def echo_message(message, conv): """Sends the message received from the user back to the user. 
Args: message (str): The message text received from the user. conv (Conversation): The unique id for this user and agent. """ message_obj = BusinessMessagesMessage( messageId=str(uuid.uuid4().int), representative=BOT_REPRESENTATIVE, text=f"Hey! Here's the message you sent:\n\n{message}" ) send_message(message_obj, conv.id) def send_message(message, conversation_id): """Posts a message to the Business Messages API. Args: message (obj): The message object payload to send to the user. conversation_id (str): The unique id for this user and agent. """ credentials = ServiceAccountCredentials.from_json_keyfile_name( SERVICE_ACCOUNT_LOCATION, scopes=['https://www.googleapis.com/auth/businessmessages']) bm_credentials = bm_client.BusinessmessagesV1(credentials=credentials) # Send the typing started event create_request = BusinessmessagesConversationsEventsCreateRequest( eventId=str(uuid.uuid4().int), businessMessagesEvent=BusinessMessagesEvent( representative=BOT_REPRESENTATIVE, eventType=BusinessMessagesEvent.EventTypeValueValuesEnum.TYPING_STARTED ), parent='conversations/' + conversation_id) bm_client.BusinessmessagesV1.ConversationsEventsService( client=bm_credentials).Create(request=create_request) # Create the message request create_request = BusinessmessagesConversationsMessagesCreateRequest( businessMessagesMessage=message, parent='conversations/' + conversation_id) bm_client.BusinessmessagesV1.ConversationsMessagesService( client=bm_credentials).Create(request=create_request) # Send the typing stopped event create_request = BusinessmessagesConversationsEventsCreateRequest( eventId=str(uuid.uuid4().int), businessMessagesEvent=BusinessMessagesEvent( representative=BOT_REPRESENTATIVE, eventType=BusinessMessagesEvent.EventTypeValueValuesEnum.TYPING_STOPPED ), parent='conversations/' + conversation_id) bm_client.BusinessmessagesV1.ConversationsEventsService( client=bm_credentials).Create(request=create_request) def get_auth_chip_suggestion(client_id, code_challenge, scopes): """Returns an authorization chip Arguments: client_id (str): client_id from your client configuration with the identity provider code_challenge (str): code_challenge generated from the code_verifier for use with PKCE in OAuth 2.0 access_token exchange scopes (List): A list of scopes you want the access token to grant API access to Returns: A :list: BusinessMessagesSuggestions invoking the auth chip """ return [ BusinessMessagesSuggestion( authenticationRequest=BusinessMessagesAuthenticationRequest( oauth=BusinessMessagesAuthenticationRequestOauth( clientId=client_id, codeChallenge=code_challenge, scopes=scopes))), ] def get_suggestions(): """Creates a list of suggestions. Returns: A :list: A list of sample BusinessMessagesSuggestions. """ return [ BusinessMessagesSuggestion( reply=BusinessMessagesSuggestedReply( text='Let\'s do it again!', postbackData=CMD_LOGIN)), ] def landing_placeholder(request): """Creates an HttpResponse for a web request at the root of the project. Args: request (HttpRequest): The django web request object Returns: An :HttpResponse: containing browser renderable HTML. """ return HttpResponse(""" <h1>Welcome to gCal BM Assistant</h1> <br/><br/> Check out the <a href="https://business-communications.sandbox.google.com/console/"> Business Communications Developer Console</a> to access this agent's test URLs. """)
[ [ [ 790, 796 ], [ 9866, 9872 ], [ 10058, 10064 ] ], [ [ 804, 812 ], [ 2417, 2425 ], [ 7731, 7739 ], [ 7328, 7336 ] ], [ [ 820, 827 ], [ 9977, 9984 ] ], [ [ 835, 839 ], [ 2892, 2896 ], [ 5550, 5554 ] ], [ [ 847, 849 ], [ 5235, 5237 ], [ 5289, 5291 ], [ 10362, 10364 ] ], [ [ 857, 861 ], [ 8421, 8425 ], [ 8656, 8660 ], [ 8886, 8890 ], [ 9448, 9452 ], [ 9825, 9829 ], [ 10248, 10252 ], [ 11018, 11022 ], [ 11792, 11796 ], [ 12602, 12606 ] ], [ [ 892, 931 ], [ 11616, 11625 ], [ 12050, 12059 ], [ 12361, 12370 ], [ 12860, 12869 ] ], [ [ 996, 1046 ], [ 12218, 12268 ] ], [ [ 1052, 1075 ], [ 8372, 8395 ], [ 8609, 8632 ], [ 8839, 8862 ], [ 10203, 10226 ], [ 10973, 10996 ] ], [ [ 1077, 1107 ], [ 2095, 2125 ], [ 2150, 2180 ] ], [ [ 1113, 1139 ], [ 13530, 13556 ], [ 13932, 13958 ] ], [ [ 1141, 1171 ], [ 13976, 14006 ] ], [ [ 1177, 1225 ], [ 11724, 11772 ], [ 12534, 12582 ] ], [ [ 1227, 1248 ], [ 11839, 11860 ], [ 11927, 11948 ], [ 12649, 12670 ], [ 12737, 12758 ] ], [ [ 1254, 1291 ], [ 13588, 13625 ] ], [ [ 1293, 1335 ], [ 13645, 13687 ] ], [ [ 1362, 1374 ], [ 4834, 4846 ], [ 4870, 4882 ], [ 14347, 14359 ] ], [ [ 1416, 1427 ], [ 2502, 2513 ] ], [ [ 1462, 1474 ], [ 5967, 5979 ], [ 6041, 6053 ] ], [ [ 1513, 1518 ], [ 6946, 6951 ] ], [ [ 1544, 1550 ], [ 6869, 6875 ] ], [ [ 1592, 1617 ], [ 11447, 11472 ] ], [ [ 1626, 1634 ], [ 5473, 5481 ] ], [ [ 1686, 1710 ], [ 11503, 11527 ] ], [ [ 1806, 1815 ], [ 6474, 6483 ], [ 14063, 14072 ] ], [ [ 1826, 1844 ] ], [ [ 1862, 1884 ] ], [ [ 1903, 1926 ] ], [ [ 1952, 1966 ] ], [ [ 1982, 1995 ] ], [ [ 2074, 2092 ], [ 8465, 8483 ], [ 8698, 8716 ], [ 8928, 8946 ], [ 10288, 10306 ], [ 11058, 11076 ], [ 11887, 11905 ], [ 12697, 12715 ] ], [ [ 2404, 2414 ], [ 7228, 7238 ] ], [ [ 2465, 2476 ], [ 7403, 7414 ] ], [ [ 2518, 2526 ] ], [ [ 4929, 4949 ], [ 4325, 4345 ] ], [ [ 5789, 5805 ], [ 3082, 3098 ] ], [ [ 6114, 6127 ], [ 3554, 3567 ], [ 3740, 3753 ] ], [ [ 6559, 6571 ], [ 7781, 7793 ] ], [ [ 7465, 7489 ], [ 4471, 4495 ] ], [ [ 9123, 9140 ], [ 6489, 6506 ] ], [ [ 10720, 10732 ], [ 6525, 6537 ] ], [ [ 11187, 11199 ], [ 8555, 8567 ], [ 8785, 8797 ], [ 9025, 9037 ], [ 10577, 10589 ], [ 11146, 11158 ] ], [ [ 12983, 13007 ], [ 10326, 10350 ] ], [ [ 13783, 13798 ], [ 8968, 8983 ] ], [ [ 14086, 14105 ] ] ]
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Wrapper to adapt a Distrax bijector for use in TFP.""" from typing import Any, Optional import chex from distrax._src.bijectors import bijector from distrax._src.utils import math import jax import jax.numpy as jnp from tensorflow_probability.substrates import jax as tfp tfb = tfp.bijectors tfd = tfp.distributions Array = chex.Array Bijector = bijector.Bijector def tfp_compatible_bijector( base_bijector: Bijector, name: Optional[str] = None): """Create a TFP-compatible bijector from a Distrax bijector. Given a Distrax bijector, return a wrapped bijector that behaves as a TFP bijector, to be used in TFP meta-bijectors and the TransformedDistribution. In particular, the wrapped bijector implements the methods `[forward|inverse]_event_ndims`, `[forward|inverse]_event_shape`, `[forward|inverse]_event_shape_tensor`, `[forward|inverse]_log_det_jacobian`, and the properties `[forward|inverse]_min_event_ndims`. Other attributes are delegated to the `base_bijector`. The methods of the resulting object do not take a `name` argument, unlike their TFP equivalents. The `shape` methods are implemented by tracing the `forward` and `inverse` methods of the bijector, applied to a zero tensor of the requested dtype. If the `forward` or `inverse` methods are not traceable or cannot be applied to a zero tensor, then we cannot guarantee the correctness of the result. Args: base_bijector: A Distrax bijector. name: The bijector name. Returns: An object that behaves like a TFP bijector. 
""" name_ = name class TFPCompatibleBijector(base_bijector.__class__): """Class to wrap a Distrax bijector.""" def __init__(self): self._is_injective = True self._is_permutation = False self._parts_interact = False self.dtype = None self.has_static_min_event_ndims = True self.forward_min_event_ndims = base_bijector.event_ndims_in self.inverse_min_event_ndims = base_bijector.event_ndims_out def __getattr__(self, name: str): return getattr(base_bijector, name) def forward_and_log_det(self, x: Array) -> Array: """See `Bijector.forward_and_log_det`.""" return base_bijector.forward_and_log_det(x) @property def name(self) -> str: """The name of the wrapped bijector.""" return name_ or f"TFPCompatible{base_bijector.name}" def experimental_batch_shape(self, x_event_ndims=None, y_event_ndims=None): raise NotImplementedError() def experimental_batch_shape_tensor( self, x_event_ndims=None, y_event_ndims=None): raise NotImplementedError() def forward_dtype(self, _: jnp.dtype) -> None: """Returns None, making no promise regarding dtypes.""" return None def inverse_dtype(self, _: jnp.dtype) -> None: """Returns None, making no promise regarding dtypes.""" return None def forward_event_ndims(self, event_ndims: int) -> int: """Returns the number of event dimensions of the output of `forward`.""" extra_event_ndims = self._check_ndims( "Forward", event_ndims, base_bijector.event_ndims_in) return base_bijector.event_ndims_out + extra_event_ndims def inverse_event_ndims(self, event_ndims: int) -> int: """Returns the number of event dimensions of the output of `inverse`.""" extra_event_ndims = self._check_ndims( "Inverse", event_ndims, base_bijector.event_ndims_out) return base_bijector.event_ndims_in + extra_event_ndims def forward_event_shape(self, event_shape) -> tfp.tf2jax.TensorShape: """Returns the shape of the output of `forward` as a `TensorShape`.""" self._check_shape("Forward", event_shape, base_bijector.event_ndims_in) forward_event_shape = jax.eval_shape( base_bijector.forward, jnp.zeros(event_shape)).shape return tfp.tf2jax.TensorShape(forward_event_shape) def inverse_event_shape(self, event_shape) -> tfp.tf2jax.TensorShape: """Returns the shape of the output of `inverse` as a `TensorShape`.""" self._check_shape("Inverse", event_shape, base_bijector.event_ndims_out) inverse_event_shape = jax.eval_shape( base_bijector.inverse, jnp.zeros(event_shape)).shape return tfp.tf2jax.TensorShape(inverse_event_shape) def forward_event_shape_tensor(self, event_shape) -> Array: """Returns the shape of the output of `forward` as a `jnp.array`.""" self._check_shape("Forward", event_shape, base_bijector.event_ndims_in) forward_event_shape = jax.eval_shape( base_bijector.forward, jnp.zeros(event_shape)).shape return jnp.array(forward_event_shape, dtype=jnp.int32) def inverse_event_shape_tensor(self, event_shape) -> Array: """Returns the shape of the output of `inverse` as a `jnp.array`.""" self._check_shape("Inverse", event_shape, base_bijector.event_ndims_out) inverse_event_shape = jax.eval_shape( base_bijector.inverse, jnp.zeros(event_shape)).shape return jnp.array(inverse_event_shape, dtype=jnp.int32) def forward_log_det_jacobian( self, x: Array, event_ndims: Optional[int] = None) -> Array: """See `Bijector.forward_log_det_jacobian`.""" extra_event_ndims = self._check_ndims( "Forward", event_ndims, base_bijector.event_ndims_in) fldj = base_bijector.forward_log_det_jacobian(x) return math.sum_last(fldj, extra_event_ndims) def inverse_log_det_jacobian( self, y: Array, event_ndims: Optional[int] = None) -> Array: 
"""See `Bijector.inverse_log_det_jacobian`.""" extra_event_ndims = self._check_ndims( "Inverse", event_ndims, base_bijector.event_ndims_out) ildj = base_bijector.inverse_log_det_jacobian(y) return math.sum_last(ildj, extra_event_ndims) def _check_ndims( self, direction: str, event_ndims: int, expected_ndims: int) -> int: """Checks that `event_ndims` are correct and returns any extra ndims.""" if event_ndims is not None and event_ndims < expected_ndims: raise ValueError(f"{direction} `event_ndims` of {self.name} must be at " f"least {expected_ndims} but was passed {event_ndims} " f"instead.") return 0 if event_ndims is None else event_ndims - expected_ndims def _check_shape( self, direction: str, event_shape: Any, expected_ndims: int): """Checks that `event_shape` is correct, raising ValueError otherwise.""" if len(event_shape) < expected_ndims: raise ValueError(f"{direction} `event_shape` of {self.name} must have " f"at least {expected_ndims} dimensions, but was " f"{event_shape} which has only {len(event_shape)} " f"dimensions instead.") return TFPCompatibleBijector()
[ [ [ 774, 777 ], [ 7111, 7114 ] ], [ [ 779, 787 ], [ 1137, 1145 ], [ 5852, 5860 ], [ 6225, 6233 ] ], [ [ 796, 800 ], [ 1027, 1031 ] ], [ [ 836, 844 ], [ 1049, 1057 ] ], [ [ 876, 880 ], [ 6114, 6118 ], [ 6488, 6492 ] ], [ [ 888, 891 ], [ 4476, 4479 ], [ 4871, 4874 ], [ 5253, 5256 ], [ 5640, 5643 ] ], [ [ 899, 915 ], [ 3362, 3365 ], [ 3494, 3497 ], [ 4525, 4528 ], [ 4920, 4923 ], [ 5302, 5305 ], [ 5345, 5348 ], [ 5382, 5385 ], [ 5689, 5692 ], [ 5732, 5735 ], [ 5769, 5772 ] ], [ [ 962, 972 ], [ 980, 983 ], [ 1000, 1003 ], [ 4269, 4272 ], [ 4568, 4571 ], [ 4663, 4666 ], [ 4963, 4966 ] ], [ [ 974, 977 ] ], [ [ 994, 997 ] ], [ [ 1019, 1024 ], [ 2832, 2837 ], [ 2822, 2827 ], [ 5065, 5070 ], [ 5451, 5456 ], [ 5877, 5882 ], [ 5832, 5837 ], [ 6250, 6255 ], [ 6205, 6210 ] ], [ [ 1038, 1046 ], [ 1117, 1125 ] ], [ [ 1073, 1096 ] ] ]
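A minimal usage sketch for the wrapper above, assuming the `tfp_compatible_bijector` function from the preceding sample is in scope; the `ScalarAffine` bijector, `Normal` base distribution, and seed are arbitrary illustrative choices, not taken from the original source.

import jax
import jax.numpy as jnp
import distrax
from tensorflow_probability.substrates import jax as tfp

tfd = tfp.distributions

# An arbitrary Distrax bijector: y = 2 * x + 1.
scale_and_shift = distrax.ScalarAffine(shift=1.0, scale=2.0)

# Wrap it so TFP treats it like one of its own bijectors.
wrapped = tfp_compatible_bijector(scale_and_shift)

# Use the wrapped bijector inside a TFP meta-object, as the docstring describes.
dist = tfd.TransformedDistribution(distribution=tfd.Normal(0.0, 1.0), bijector=wrapped)
sample = dist.sample(seed=jax.random.PRNGKey(0))
log_prob = dist.log_prob(jnp.asarray(0.5))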
import pandas
import numpy as np
import matplotlib.pyplot as plt
import scipy.stats as st
from pylab import rcParams

df = pandas.read_csv('rewards_loc3.csv')
ucb,ts,ovr,egr,egr2,agr,agr2,efr,ac,aac,sft = df['ucb'],df['ts'],df['ovr'],\
df['egr'],df['egr2'],df['agr'],df['agr2'],df['efr'],df['ac'],df['aac'],df['sft']

#y1, y2, y3, y4, y5, y6, y7, y8, y9, y10, y11 = np.mean(ucb), np.mean(ts) \
#,np.mean(ovr), np.mean(egr), np.mean(egr2) \
#,np.mean(agr), np.mean(agr2), np.mean(efr) \
#,np.mean(ac), np.mean(aac), np.mean(sft)

# running mean of the reward sequence up to each round
def get_mean_reward(reward_lst):
    mean_rew=list()
    for r in range(len(reward_lst)):
        mean_rew.append(sum(reward_lst[:r+1]) / ((r+1)))
    return mean_rew

y1, y2, y3, y4, y5, y6, y7, y8, y9, y10, y11 = get_mean_reward(ucb), get_mean_reward(ts) \
,get_mean_reward(ovr), get_mean_reward(egr), get_mean_reward(egr2) \
,get_mean_reward(agr), get_mean_reward(agr2), get_mean_reward(efr) \
,get_mean_reward(ac), get_mean_reward(aac), get_mean_reward(sft)

x1, x2 = [index for index in range(len(ucb))], [index for index in range(len(ts))]
x3, x4 = [index for index in range(len(df['ovr']))], [index for index in range(len(df['egr']))]
x5, x6 = [index for index in range(len(df['egr2']))], [index for index in range(len(df['agr']))]
x7, x8 = [index for index in range(len(df['agr2']))], [index for index in range(len(df['efr']))]
x9, x10 = [index for index in range(len(df['ac']))], [index for index in range(len(df['aac']))]
x11 = [index for index in range(len(df['sft']))]

# half-width of the (default 95%) confidence interval via Student's t
def CI_model(y, confidence = 0.95):
    std_err_y = st.sem(y)
    n_y = len(y)
    h_y = std_err_y * st.t.ppf((1 + confidence) / 2, n_y - 1)
    return h_y

h_y1, h_y2, h_y3, h_y4, h_y5, h_y6, h_y7, h_y8, h_y9, h_y10, h_y11 = CI_model(ucb), CI_model(ts), CI_model(ovr),\
CI_model(egr), CI_model(egr2), CI_model(agr), CI_model(agr2), CI_model(efr), CI_model(ac), CI_model(aac), CI_model(sft)

plt.errorbar(x1, y1, yerr= h_y1, label='Bootstrapped Upper-Confidence Bound (C.I.=80%)')
plt.errorbar(x2, y2, yerr= h_y2, label='Bootstrapped Thompson Sampling')
plt.errorbar(x3, y3, yerr= h_y3, label='Separate Classifiers + Beta Prior')
plt.errorbar(x4, y4, yerr= h_y4, label='Epsilon-Greedy (p0=20%, decay=0.9999)')
plt.errorbar(x5, y5, yerr= h_y5, label='Epsilon-Greedy (p0=20%, no decay)')
plt.errorbar(x6, y6, yerr= h_y6, label='Adaptive Greedy (decaying threshold)')
plt.errorbar(x7, y7, yerr= h_y7, label='Adaptive Greedy (p0=30%, decaying percentile)')
plt.errorbar(x8, y8, yerr= h_y8, label='Explore First (n=1,500)')
plt.errorbar(x9, y9, yerr= h_y9, label='Active Explorer')
plt.errorbar(x10, y10, yerr= h_y10, label='Adaptive Active Greedy')
plt.errorbar(x11, y11, yerr= h_y11, label='Softmax Explorer')
#plt.plot(np.repeat(y.mean(axis=0).max(),len(rewards_sft)),linewidth=4,ls='dashed', label='Overall Best Arm (no context)')
ax = plt.subplot(111)
plt.xlabel('Rounds (models were updated every 50 rounds)', size=10)
plt.ylabel('Cumulative Mean Reward', size=10)
plt.title('Comparison of Online Contextual Bandit Policies in location 3')

# Shrink current axis by 20%
box = ax.get_position()
ax.set_position([box.x0, box.y0, box.width * 0.8, box.height])

# Put a legend to the right of the current axis
ax.legend(loc='center left', bbox_to_anchor=(1, 0.5))

plt.savefig("location_3.png", bbox_inches='tight', dpi = 600)
[ [ [ 7, 13 ], [ 124, 130 ] ], [ [ 21, 32 ] ], [ [ 40, 64 ], [ 1905, 1908 ], [ 1994, 1997 ], [ 2067, 2070 ], [ 2143, 2146 ], [ 2222, 2225 ], [ 2297, 2300 ], [ 2376, 2379 ], [ 2464, 2467 ], [ 2530, 2533 ], [ 2588, 2591 ], [ 2656, 2659 ], [ 2847, 2850 ], [ 2866, 2869 ], [ 2934, 2937 ], [ 2981, 2984 ], [ 3276, 3279 ] ], [ [ 72, 89 ], [ 1566, 1568 ], [ 1615, 1617 ] ], [ [ 108, 116 ] ], [ [ 119, 121 ], [ 207, 209 ], [ 217, 219 ], [ 226, 228 ], [ 238, 240 ], [ 248, 250 ], [ 259, 261 ], [ 269, 271 ], [ 280, 282 ], [ 290, 292 ], [ 299, 301 ], [ 309, 311 ], [ 1116, 1118 ], [ 1160, 1162 ], [ 1212, 1214 ], [ 1257, 1259 ], [ 1309, 1311 ], [ 1354, 1356 ], [ 1407, 1409 ], [ 1450, 1452 ], [ 1499, 1501 ] ], [ [ 161, 164 ], [ 762, 765 ], [ 1033, 1036 ], [ 1749, 1752 ] ], [ [ 165, 167 ], [ 784, 786 ], [ 1071, 1073 ], [ 1764, 1766 ] ], [ [ 168, 171 ], [ 807, 810 ], [ 1778, 1781 ] ], [ [ 172, 175 ], [ 829, 832 ], [ 1794, 1797 ] ], [ [ 176, 180 ], [ 851, 855 ], [ 1809, 1813 ] ], [ [ 181, 184 ], [ 876, 879 ], [ 1825, 1828 ] ], [ [ 185, 189 ], [ 898, 902 ], [ 1840, 1844 ] ], [ [ 190, 193 ], [ 921, 924 ], [ 1856, 1859 ] ], [ [ 194, 196 ], [ 945, 947 ], [ 1871, 1873 ] ], [ [ 197, 200 ], [ 966, 969 ], [ 1885, 1888 ] ], [ [ 201, 204 ], [ 988, 991 ], [ 1900, 1903 ] ], [ [ 535, 550 ], [ 746, 761 ], [ 768, 783 ], [ 791, 806 ], [ 813, 828 ], [ 835, 850 ], [ 860, 875 ], [ 882, 897 ], [ 905, 920 ], [ 929, 944 ], [ 950, 965 ], [ 972, 987 ] ], [ [ 699, 701 ], [ 1922, 1924 ] ], [ [ 703, 705 ], [ 2011, 2013 ] ], [ [ 707, 709 ], [ 2084, 2086 ] ], [ [ 711, 713 ], [ 2160, 2162 ] ], [ [ 715, 717 ], [ 2239, 2241 ] ], [ [ 719, 721 ], [ 2314, 2316 ] ], [ [ 723, 725 ], [ 2393, 2395 ] ], [ [ 727, 729 ], [ 2481, 2483 ] ], [ [ 731, 733 ], [ 2547, 2549 ] ], [ [ 735, 738 ], [ 2606, 2609 ] ], [ [ 740, 743 ], [ 2674, 2677 ] ], [ [ 994, 996 ], [ 1918, 1920 ] ], [ [ 998, 1000 ], [ 2007, 2009 ] ], [ [ 1077, 1079 ], [ 2080, 2082 ] ], [ [ 1081, 1083 ], [ 2156, 2158 ] ], [ [ 1173, 1175 ], [ 2235, 2237 ] ], [ [ 1177, 1179 ], [ 2310, 2312 ] ], [ [ 1270, 1272 ], [ 2389, 2391 ] ], [ [ 1274, 1276 ], [ 2477, 2479 ] ], [ [ 1367, 1369 ], [ 2543, 2545 ] ], [ [ 1371, 1374 ], [ 2601, 2604 ] ], [ [ 1463, 1466 ], [ 2669, 2672 ] ], [ [ 1518, 1526 ], [ 1740, 1748 ], [ 1755, 1763 ], [ 1769, 1777 ], [ 1785, 1793 ], [ 1800, 1808 ], [ 1816, 1824 ], [ 1831, 1839 ], [ 1847, 1855 ], [ 1862, 1870 ], [ 1876, 1884 ], [ 1891, 1899 ] ], [ [ 1671, 1675 ], [ 1932, 1936 ] ], [ [ 1677, 1681 ], [ 2021, 2025 ] ], [ [ 1683, 1687 ], [ 2094, 2098 ] ], [ [ 1689, 1693 ], [ 2170, 2174 ] ], [ [ 1695, 1699 ], [ 2249, 2253 ] ], [ [ 1701, 1705 ], [ 2324, 2328 ] ], [ [ 1707, 1711 ], [ 2403, 2407 ] ], [ [ 1713, 1717 ], [ 2491, 2495 ] ], [ [ 1719, 1723 ], [ 2557, 2561 ] ], [ [ 1725, 1730 ], [ 2617, 2622 ] ], [ [ 1732, 1737 ], [ 2685, 2690 ] ], [ [ 2842, 2844 ], [ 3091, 3093 ], [ 3109, 3111 ], [ 3221, 3223 ] ], [ [ 3085, 3088 ], [ 3126, 3129 ], [ 3134, 3137 ], [ 3142, 3145 ], [ 3159, 3162 ] ] ]
from datetime import datetime from dino.config import UserKeys, RedisKeys, SessionKeys from dino.db.rdbms.models import Channels from dino.db.rdbms.models import Rooms from test.base import BaseTest from test.db import BaseDatabaseTest class DatabaseSqliteTest(BaseDatabaseTest): def setUp(self): self.set_up_env('sqlite') def tearDown(self): from dino.db.rdbms.dbman import Database from dino.db.rdbms.dbman import DeclarativeBase db = Database(self.env) con = db.engine.connect() trans = con.begin() for table in reversed(DeclarativeBase.metadata.sorted_tables): con.execute(table.delete()) trans.commit() con.close() self.env.cache._flushall() def test_get_user_infos(self): self.db.set_user_info(BaseTest.USER_ID, {SessionKeys.gender.value: 'm', 'last_login': datetime.utcnow()}) self.db.set_user_info(BaseTest.OTHER_USER_ID, {SessionKeys.gender.value: 'w', 'last_login': datetime.utcnow()}) self.env.auth.redis.delete(RedisKeys.auth_key(BaseTest.USER_ID)) self.env.auth.redis.delete(RedisKeys.auth_key(BaseTest.OTHER_USER_ID)) infos = self.db.get_user_infos({BaseTest.USER_ID, BaseTest.OTHER_USER_ID}) self.assertEqual('m', infos[BaseTest.USER_ID][SessionKeys.gender.value]) self.assertEqual('w', infos[BaseTest.OTHER_USER_ID][SessionKeys.gender.value]) def test_set_two_owners_on_room(self): self._test_set_two_owners_on_room() def test_is_admin_before_create(self): self._test_is_admin_before_create() def test_is_admin_after_create(self): self._test_is_admin_after_create() def test_is_admin_after_create_set_admin(self): self._test_is_admin_after_create_set_admin() def test_channel_for_room_no_channel(self): self._test_channel_for_room_no_channel() def test_channel_for_room_with_channel_without_room(self): self._test_channel_for_room_with_channel_without_room() def test_channel_for_room_with_channel_with_room(self): self._test_channel_for_room_with_channel_with_room() def test_leave_room_not_joined(self): self._test_leave_room_not_joined() def test_leave_room_joined(self): self._test_leave_room_joined() def test_set_moderator_no_room(self): self._test_set_moderator_no_room() def test_set_moderator_with_room(self): self._test_set_moderator_with_room() def test_set_room_owner_no_room(self): self._test_set_room_owner_no_room() def test_set_room_owner_with_room(self): self._test_set_room_owner_with_room() def test_set_channel_owner_no_channel(self): self._test_set_channel_owner_no_channel() def test_set_channel_owner_with_channel(self): self._test_set_channel_owner_with_channel() def test_get_user_status_before_set(self): self._test_get_user_status_before_set(UserKeys.STATUS_UNAVAILABLE) def test_set_user_offline(self): self._test_set_user_offline(UserKeys.STATUS_UNAVAILABLE) def test_set_user_online(self): self._test_set_user_online(UserKeys.STATUS_AVAILABLE) def test_set_user_invisible(self): self._test_set_user_invisible(UserKeys.STATUS_INVISIBLE) def test_remove_current_rooms_for_user_before_joining(self): self._test_remove_current_rooms_for_user_before_joining() def test_remove_current_rooms_for_user_after_joining(self): self._test_remove_current_rooms_for_user_after_joining() def test_rooms_for_user_before_joining(self): self._test_rooms_for_user_before_joining() def test_create_existing_room_name(self): self._test_create_existing_room_name() def test_rooms_for_user_after_joining(self): self._test_rooms_for_user_after_joining() def test_rooms_for_channel_before_create_channel(self): self._test_rooms_for_channel_before_create_channel() def 
test_rooms_for_channel_after_create_channel_before_create_room(self): self._test_rooms_for_channel_after_create_channel_before_create_room() def test_rooms_for_channel_after_create_channel_after_create_room(self): self._test_rooms_for_channel_after_create_channel_after_create_room() def test_get_channels_before_create(self): self._test_get_channels_before_create() def test_get_channels_after_create(self): self._test_get_channels_after_create() def test_room_exists(self): self._test_room_exists() def test_create_room_no_channel(self): self._test_create_room_no_channel() def test_create_existing_channel(self): self._test_create_existing_channel() def test_create_channel(self): self._test_create_channel() def test_create_channel_again_to_make_sure_tables_cleared_after_each_test(self): self._test_create_channel() channels = self.db._session().query(Channels).filter(Channels.uuid == BaseDatabaseTest.CHANNEL_ID).all() self.assertEqual(1, len(channels)) def test_create_channel_blank_name(self): self._test_create_channel_blank_name() def test_create_channel_exists(self): self._test_create_channel_exists() def test_create_room(self): self._test_create_room() rooms = self.db._session().query(Rooms).filter(Rooms.uuid == BaseDatabaseTest.ROOM_ID).all() self.assertEqual(1, len(rooms)) def test_create_room_blank_name(self): self._test_create_room_blank_name() def test_create_existing_room(self): self._test_create_existing_room() def test_channel_exists_after_create(self): self._test_channel_exists_after_create() def test_channel_exists_before_create(self): self._test_channel_exists_before_create() def test_room_name_exists_before_create(self): self._test_room_name_exists_before_create() def test_room_name_exists_after_create(self): self._test_room_name_exists_after_create() def test_delete_one_non_existing_acl(self): self._test_delete_one_non_existing_acl() def test_add_one_extra_acl(self): self._test_add_one_extra_acl() def test_get_acl(self): self._test_get_acl() def test_set_acl(self): self._test_set_acl() def test_delete_one_acl(self): self._test_delete_one_acl() def test_set_room_allows_cross_group_messaging(self): self._test_set_room_allows_cross_group_messaging() def test_get_room_allows_cross_group_messaging_no_room(self): self._test_get_room_allows_cross_group_messaging_no_room() def test_get_room_allows_cross_group_messaging(self): self._test_get_room_allows_cross_group_messaging() def test_get_room_does_not_allow_cross_group_messaging(self): self._test_get_room_does_not_allow_cross_group_messaging() def test_room_allows_cross_group_messaging_no_room(self): self._test_room_allows_cross_group_messaging_no_room() def test_room_allows_cross_group_messaging(self): self._test_room_allows_cross_group_messaging() def test_room_does_not_allow_cross_group_messaging_no_room(self): self._test_room_does_not_allow_cross_group_messaging_no_room() def test_create_admin_room(self): self._test_create_admin_room() def test_is_super_user(self): self._test_is_super_user() def test_get_admin_room(self): self._test_get_admin_room() def test_set_owner_and_moderator(self): self._test_set_owner_and_moderator() def test_remove_channel_role(self): self._test_remove_channel_role() def test_remove_room_role(self): self._test_remove_room_role() def test_remove_super_user(self): self._test_remove_super_user() def test_get_super_users(self): self._test_get_super_users() def test_remove_owner(self): self._test_remove_owner() def test_remove_channel_owner(self): self._test_remove_channel_owner() def test_remove_admin(self): 
self._test_remove_admin() def test_remove_moderator(self): self._test_remove_moderator() def test_set_owner_is_unique(self): self._test_set_owner_is_unique() def test_set_owner_channel_is_unique(self): self._test_set_owner_channel_is_unique() def test_set_moderator_is_unique(self): self._test_set_moderator_is_unique() def test_set_admin_is_unique(self): self._test_set_admin_is_unique() def test_set_super_user_is_unique(self): self._test_set_super_user_is_unique() def test_remove_super_user_without_setting(self): self._test_remove_super_user_without_setting() def test_remove_owner_without_setting(self): self._test_remove_owner_without_setting() def test_remove_channel_owner_without_setting(self): self._test_remove_channel_owner_without_setting() def test_remove_admin_without_setting(self): self._test_remove_admin_without_setting() def test_remove_moderator_without_setting(self): self._test_remove_moderator_without_setting() def test_remove_other_role_channel(self): self._test_remove_other_role_channel() def test_remove_other_role_room(self): self._test_remove_other_role_room() def test_set_admin_no_such_channel(self): self._test_set_admin_no_such_channel() def test_remove_admin_no_such_channel(self): self._test_remove_admin_no_such_room() def test_remove_moderator_no_such_room(self): self._test_remove_moderator_no_such_room() def test_channel_name_exists(self): self._test_channel_name_exists() def test_channel_exists(self): self._test_channel_exists() def test_create_user(self): self._test_create_user() def test_users_in_room(self): self._test_users_in_room() def test_delete_acl_in_channel_for_action(self): self._test_delete_acl_in_channel_for_action() def test_delete_acl_in_room_for_action(self): self._test_delete_acl_in_room_for_action() def test_remove_owner_channel_no_channel(self): self._test_remove_owner_channel_no_channel() def test_remove_owner_channel_not_owner(self): self._test_remove_owner_channel_not_owner() def test_remove_owner_channel_is_owner(self): self._test_remove_owner_channel_is_owner() def test_create_user_exists(self): self._test_create_user_exists() def test_update_acl_in_room_for_action(self): self._test_update_acl_in_room_for_action() def test_update_acl_in_room_for_action_no_channel(self): self._test_update_acl_in_room_for_action_no_channel() def test_update_acl_in_room_for_action_no_room(self): self._test_update_acl_in_room_for_action_no_room() def test_update_acl_in_room_for_action_invalid_action(self): self._test_update_acl_in_room_for_action_invalid_action() def test_update_acl_in_room_for_action_invalid_type(self): self._test_update_acl_in_room_for_action_invalid_type() def test_update_acl_in_room_for_action_invalid_value(self): self._test_update_acl_in_room_for_action_invalid_value() def test_update_acl_in_channel_for_action(self): self._test_update_acl_in_channel_for_action() def test_update_acl_in_channel_for_action_no_channel(self): self._test_update_acl_in_channel_for_action_no_channel() def test_update_acl_in_channel_for_action_invalid_action(self): self._test_update_acl_in_channel_for_action_invalid_action() def test_update_acl_in_channel_for_action_invalid_type(self): self._test_update_acl_in_channel_for_action_invalid_type() def test_update_acl_in_channel_for_action_invalid_value(self): self._test_update_acl_in_channel_for_action_invalid_value() def test_is_banned_from_channel(self): self._test_is_banned_from_channel() def test_is_banned_from_room(self): self._test_is_banned_from_room() def test_is_banned_globally(self): 
self._test_is_banned_globally() def test_remove_global_ban(self): self._test_remove_global_ban() def test_remove_channel_ban(self): self._test_remove_channel_ban() def test_remove_room_ban(self): self._test_remove_room_ban() def test_was_banned_globally(self): self._test_was_banned_globally() def test_was_banned_from_room(self): self._test_was_banned_from_room() def test_was_banned_from_channel(self): self._test_was_banned_from_channel() def test_get_user_ban_status_channel(self): self._test_get_user_ban_status_channel() def test_get_user_ban_status_room(self): self._test_get_user_ban_status_room() def test_get_user_ban_status_global(self): self._test_get_user_ban_status_global() def test_get_banned_users_global_not_empty_after_ban(self): self._test_get_banned_users_global_not_empty_after_ban() def test_get_banned_users_global_is_empty(self): self._test_get_banned_users_global_is_empty() def test_get_banned_users_global_is_empty_if_expired(self): self._test_get_banned_users_global_is_empty_if_expired() def test_get_banned_users_channel_not_empty_after_ban(self): self._test_get_banned_users_channel_not_empty_after_ban() def test_get_banned_users_channel_is_empty(self): self._test_get_banned_users_channel_is_empty() def test_get_banned_users_channel_is_empty_if_expired(self): self._test_get_banned_users_channel_is_empty_if_expired() def test_get_banned_users_room_not_empty_after_ban(self): self._test_get_banned_users_room_not_empty_after_ban() def test_get_banned_users_room_is_empty(self): self._test_get_banned_users_room_is_empty() def test_get_banned_users_room_is_empty_if_expired(self): self._test_get_banned_users_room_is_empty_if_expired() def test_get_banned_users_is_empty(self): self._test_get_banned_users_is_empty() def test_get_banned_users_for_room(self): self._test_get_banned_users_for_room() def test_get_banned_users_for_channel(self): self._test_get_banned_users_for_channel() def test_get_banned_users_globally(self): self._test_get_banned_users_globally() def test_get_global_ban_timestamp_is_none(self): self._test_get_global_ban_timestamp_is_none() def test_get_global_ban_timestamp_not_none(self): self._test_get_global_ban_timestamp_not_none() def test_get_global_ban_timestamp_empty_if_expired(self): self._test_get_global_ban_timestamp_not_empty_if_expired() def test_get_channel_ban_timestamp_is_none(self): self._test_get_channel_ban_timestamp_is_none() def test_get_channel_ban_timestamp_not_none(self): self._test_get_channel_ban_timestamp_not_none() def test_get_channel_ban_timestamp_empty_if_expired(self): self._test_get_channel_ban_timestamp_not_empty_if_expired() def test_get_room_ban_timestamp_is_none(self): self._test_get_room_ban_timestamp_is_none() def test_get_room_ban_timestamp_not_none(self): self._test_get_room_ban_timestamp_not_none() def test_get_room_ban_timestamp_empty_if_expired(self): self._test_get_room_ban_timestamp_not_empty_if_expired() def test_get_acls_in_channel_for_action_no_channel(self): self._test_get_acls_in_channel_for_action_no_channel() def test_get_acls_in_channel_for_action_no_room(self): self._test_get_acls_in_channel_for_action_no_room() def test_get_all_acls_channel_is_empty(self): self._test_get_all_acls_channel_is_empty() def test_get_all_acls_channel_not_empty(self): self._test_get_all_acls_channel_not_empty() def test_get_all_acls_room_is_empty(self): self._test_get_all_acls_room_is_empty() def test_get_all_acls_room_not_empty(self): self._test_get_all_acls_room_not_empty() def test_channel_for_room_blank_room_id(self): 
self._test_channel_for_room_blank_room_id() def test_channel_for_room_before_create(self): self._test_channel_for_room_before_create() def test_channel_for_room_after_create(self): self._test_channel_for_room_after_create() def test_channel_for_room_cache(self): self._test_channel_for_room_cache() def test_get_username_before_set(self): self._test_get_username_before_set() def test_get_username_after_set(self): self._test_get_username_after_set() def test_rename_channel(self): self._test_rename_channel() def test_rename_channel_before_create(self): self._test_rename_channel_before_create() def test_rename_channel_empty_name(self): self._test_rename_channel_empty_name() def test_rename_room(self): self._test_rename_room() def test_rename_room_before_create_channel(self): self._test_rename_room_before_create_channel() def test_rename_room_before_create_room(self): self._test_rename_room_before_create_room() def test_rename_room_empty_name(self): self._test_rename_room_empty_name() def test_rename_room_already_exists(self): self._test_rename_room_already_exists() def test_remove_room(self): self._test_remove_room() def test_remove_room_before_create_channel(self): self._test_remove_room_before_create_channel() def test_remove_room_before_create_room(self): self._test_remove_room_before_create_room() def test_admin_room_for_channel_before_exists(self): self._test_admin_room_before_exists() def test_admin_room_for_channel_get_from_cache(self): self._test_admin_room_get_from_cache() def test_room_exists_from_cache(self): self._test_room_exists_from_cache() def test_get_user_status_from_cache(self): self._test_get_user_status_from_cache() def test_get_user_status_after_set(self): self._test_get_user_status_after_set() def test_set_user_invisible_twice_ignores_second(self): self._test_set_user_invisible_twice_ignores_second() def test_set_user_offline_twice_ignores_second(self): self._test_set_user_offline_twice_ignores_second() def test_set_user_online_twice_ignores_second(self): self._test_set_user_online_twice_ignores_second() def test_users_in_room_after_join(self): self._test_users_in_room_after_join() def test_set_user_offline_after_online(self): self._test_set_user_offline_after_online() def test_room_contains_before_create_channel(self): self._test_room_contains_before_create_channel() def test_room_contains_before_create_room(self): self._test_room_contains_before_create_room() def test_room_contains_after_create(self): self._test_room_contains_after_create() def test_room_contains_after_join(self): self._test_room_contains_after_join() def test_room_name_exists_from_cache_after_create(self): self._test_room_name_exists_from_cache_after_create() def test_rename_channel_exists(self): self._test_rename_channel_exists() def test_channel_for_room_from_cache(self): self._test_channel_for_room_from_cache() def test_leave_room_before_create(self): self._test_leave_room_before_create() def test_remove_moderator_twice(self): self._test_remove_moderator_twice() def test_set_owner_channel_after_removing_owner(self): self._test_set_owner_channel_after_removing_owner() def test_delete_acl_in_channel_for_action_invalid_action(self): self._test_delete_acl_in_channel_for_action_invalid_action() def test_delete_acl_in_room_for_action_invalid_action(self): self._test_delete_acl_in_room_for_action_invalid_action() def test_delete_acl_in_channel_for_action_after_create(self): self._test_delete_acl_in_channel_for_action_after_create() def test_delete_acl_in_room_for_action_after_create(self): 
self._test_delete_acl_in_room_for_action_after_create() def test_update_acl(self): self._test_update_acl() def test_get_all_acls_channel(self): self._test_get_all_acls_channel() def test_get_all_acls_channel_before_create(self): self._test_get_all_acls_channel_before_create() def test_get_all_acls_room(self): self._test_get_all_acls_room() def test_get_all_acls_room_before_create(self): self._test_get_all_acls_room_before_create() def test_update_last_read_for(self): self._test_update_last_read_for() def test_update_username(self): self._test_update_username() def test_get_room_name_from_cache(self): self._test_get_room_name_from_cache() def test_get_channel_name_from_cache(self): self._test_get_channel_name_from_cache() def test_is_banned_globally_after_clearing_cache(self): self._test_is_banned_globally_after_clearing_cache() def test_is_banned_globally_after_clearing_cache_if_expired(self): self._test_is_banned_globally_after_clearing_cache_if_expired() def test_is_banned_from_room_after_clearing_cache(self): self._test_is_banned_from_room_after_clearing_cache() def test_is_banned_from_room_after_clearing_cache_if_expired(self): self._test_is_banned_from_room_after_clearing_cache_if_expired() def test_is_banned_from_channel_after_clearing_cache(self): self._test_is_banned_from_channel_after_clearing_cache() def test_is_banned_from_channel_after_clearing_cache_if_expired(self): self._test_is_banned_from_channel_after_clearing_cache_if_expired()
[ [ [ 21, 29 ], [ 882, 890 ], [ 1002, 1010 ] ], [ [ 55, 63 ], [ 2962, 2970 ], [ 3065, 3073 ], [ 3166, 3174 ], [ 3271, 3279 ] ], [ [ 65, 74 ], [ 1058, 1067 ], [ 1131, 1140 ] ], [ [ 76, 87 ], [ 837, 848 ], [ 957, 968 ], [ 1314, 1325 ], [ 1401, 1412 ] ], [ [ 121, 129 ], [ 4964, 4972 ], [ 4981, 4989 ] ], [ [ 163, 168 ], [ 5363, 5368 ], [ 5377, 5382 ] ], [ [ 191, 199 ], [ 818, 826 ], [ 932, 940 ], [ 1077, 1085 ], [ 1150, 1158 ], [ 1216, 1224 ], [ 1234, 1242 ], [ 1296, 1304 ], [ 1377, 1385 ] ], [ [ 220, 236 ], [ 264, 280 ], [ 4998, 5014 ], [ 5391, 5407 ] ], [ [ 245, 263 ] ] ]
import pandas as pd __author__ = 'slei' class AddHeuristicTSP: """ Finds the shortest path using a heuristic method """ def __init__(self, cities_df): self.df = cities_df self.edges = list((t.origin, t.destination) for t in df.itertuples()) self.distance = dict([((t.origin, t.destination), t.distance) for t in df.itertuples()]) self.cities = list(set(df['destination'])) self.cities_lst = [] self.tour_lst = [] self.distance_lst = [] self.tour_leg_distances_lst = [] self._final_df = None self._shortest_distance = None self._shortest_tour = None def find_subtour(self, starting_city): """ Given a starting city, finds a tour by selecting next shortest distance from list of unvisited cities """ tour = [] tour_distance_lst = [0] cities_unvisited = list(set(self.df['destination'])) initial_city = starting_city current_city = initial_city tour.append(current_city) cities_unvisited.pop(0) total_distance = 0 count = 0 while len(cities_unvisited) > 0: # remove any city that has already been visited from consideration df_unvisited = self.df[self.df['destination'].isin(cities_unvisited)] # filter for rows based on first criterion is_current = df_unvisited['origin'] == current_city df2 = df_unvisited[is_current] # find the nearest city index_min = df2['distance'].idxmin() min_row = df2.loc[index_min] d = min_row.distance destination = min_row.destination # update next city and tour and total distance current_city = destination total_distance = total_distance + d tour_distance_lst.append(d) # update city tracker lists tour.append(current_city) index_i = cities_unvisited.index(current_city) cities_unvisited.pop(index_i) count = count + 1 # check print("next destination: ", destination) print("distance: ", d) print("total_distance: ", total_distance) print("tour: ", tour) print("tour_distance_lst: ", tour_distance_lst) print("cities_unvisited: ", cities_unvisited) print() # adding the distance from last city back to initial city last_city = tour[-1] last_mile = (initial_city, last_city) last_mile_distance = self.distance[last_mile] tour.append(initial_city) total_distance = total_distance + last_mile_distance tour_distance_lst.append(last_mile_distance) # check print("last_mile: ", last_mile) print("last_mile_distance: ", last_mile_distance) print("tour: ", tour) print("total_distance: ", total_distance) print("tour_leg_distances_lst: ", tour_distance_lst) # update lists self.tour_lst.append(tour) self.distance_lst.append(total_distance) self.tour_leg_distances_lst.append(tour_distance_lst) @property def final_df(self): """ Add description here""" if self._final_df is None: self._final_df = self._generate_final_df() return self._final_df def _generate_final_df(self): for c in self.cities: # for every city in the dataset print("city: ", c) # generate a tour for each print("--------------------------------------------------------------------------------") self.find_subtour(c) print('********************************************************************************') print() soln_dict = {'city': self.cities, 'tour': self.tour_lst, 'tour_leg_distances': self.tour_leg_distances_lst, 'distance': self.distance_lst} return pd.DataFrame(soln_dict) @property def shortest_distance(self): """ Add description here""" if self._shortest_distance is None: return self._calculate_shortest_distance() def _calculate_shortest_distance(self): # find the tour with the lowest distance index_min_final = self.final_df['distance'].idxmin() # returns the index location of min value min_row_final = self.final_df.loc[index_min_final] 
        return min_row_final.distance

    @property
    def shortest_tour(self):
        """ Tour (ordered list of cities) with the lowest total distance. """
        if self._shortest_tour is None:
            self._shortest_tour = self._generate_shortest_tour()
        return self._shortest_tour

    def _generate_shortest_tour(self):
        index_min_final = self.final_df['distance'].idxmin()  # returns the index location of min value
        min_row_final = self.final_df.loc[index_min_final]
        return min_row_final.tour


# ********************************************************************************
# ********************************************************************************

if __name__ == '__main__':
    df = pd.read_csv('city_data_add.csv')
    tsp = AddHeuristicTSP(df)
    tsp.final_df

    print("final_df")
    print(tsp.final_df)
    print()

    print("shortest_distance_final", tsp.shortest_distance)
    print("shortest_tour_final", tsp.shortest_tour)
[ [ [ 7, 19 ], [ 5080, 5082 ], [ 3973, 3975 ] ], [ [ 21, 31 ] ], [ [ 49, 64 ], [ 5123, 5138 ] ], [ [ 5075, 5077 ], [ 5139, 5141 ], [ 252, 254 ], [ 348, 350 ], [ 397, 399 ] ], [ [ 5117, 5120 ], [ 5148, 5151 ], [ 5193, 5196 ], [ 5257, 5260 ], [ 5313, 5316 ] ] ]
import numpy as np from scipy.spatial.distance import euclidean from typing import Union import pandas class CLOSE(object): def __init__(self, data: pandas.DataFrame, measure: Union[str, callable] = 'mse', minPts: int = None, output: bool = False, jaccard: bool = False, weighting: bool = False, exploitation_term: bool = False): """ Params: data (pandas.DataFrame) - pandas dataframe with columns order 'object_id', 'time', 'cluster_id' containing cluster belongings, features .. Note: outliers should have negative labels/cluster_ids, these should be different for different times Optional: measure (str or callable) - for used quality measure, possible measures: 'sse', 'mse', 'mae', 'max', 'dbi', 'exploit' minPts (int) - used minPts for density-based quality measure output (boolean) - whether intermediate results should be printed jaccard (boolean) - whether the jaccard index should be used for proportion weighting (boolean) - whether the weighting function should be used for subsequence_score exploitation_term (boolean) - whether the exploitation term should be included in CLOSE calculation """ self._data = data self._column_names = data.columns.values self._object_column_name = self._column_names[0] self._time_column_name = self._column_names[1] self._cluster_column_name = self._column_names[2] self._jaccard = jaccard self._weighting = weighting self._exp_term = exploitation_term self._minPts = minPts self._output = output self.pos_measures = {### Measures for Clusters 'sse': self.calc_sse, # NOTE: sse is not between 0 and 1 'mse': self.calc_mse, # NOTE: mse is only between 0 and 1, if data is normalized 'mae': self.calc_mae, # NOTE: mae is only between 0 and 1, if data is normalized 'max': self.calc_max_dist, 'dbi': self.calc_min_pts, 'None': self.return_zero, ### Measures for Time Clusterings 'exploit': self.calc_exploit_at_t} if measure in self.pos_measures: self.measure = self.pos_measures[measure] elif callable(measure): self.measure = measure else: self.measure = self.pos_measures['mse'] def rate_clustering(self, start_time: int = None, end_time: int = None, return_measures: bool = False) -> Union[float, dict]: """ Optional: start_time (int) - time that should be considered as beginning end_time (int) - time which should be rated up to return_measures (boolean) - whether additional information such as average stability and quality should be returned Returns: CLOSE score (float): rating of clustering regarding all clusters (dict): with key 'stability_evaluation', 'stability', 'quality', 'pre-factor' with additional information if 'return_measures' is True """ cluster_ratings = self.rate_clusters(start_time, end_time) gr_clusters = self._data.groupby(self._cluster_column_name) score = 0 avg_quality = 0 avg_stab = 0 for cluster in cluster_ratings: cluster_objects = gr_clusters.get_group(cluster)[self._object_column_name].unique() cluster_time = gr_clusters.get_group(cluster)[self._time_column_name].iloc[0] feature_list = self.get_feature_list(cluster_objects, cluster_time) measure = self.measure(feature_list) avg_quality += measure avg_stab += cluster_ratings[cluster] score += (cluster_ratings[cluster] * (1 - measure)) num_clusters = len(cluster_ratings) num_timestamps = self.get_num_timestamps(start_time, end_time) if num_clusters <= 0: if self._output: print('Clustering has no Clusters!!') return 0 avg_quality /= num_clusters if self._output: print('Average Quality: ', str(avg_quality)) avg_stab /= num_clusters if self._output: print('Average Stability: ', str(avg_stab)) if 
self._exp_term: exp_term = self.calc_exploit() factor = (1 / num_clusters) * (1 - (num_timestamps / num_clusters) ** 2) * exp_term else: factor = (1 / num_clusters) * (1 - (num_timestamps / num_clusters)**2) if not return_measures: return score * factor else: return {'stability_evaluation': score * factor, 'stability': avg_stab, 'quality': avg_quality, 'pre-factor': (1 - (num_timestamps / num_clusters) ** 2)} def rate_time_clustering(self, start_time: int = None, end_time: int = None, return_measures: bool = False) -> Union[float, dict]: """ Optional: start_time (optional) - int: time that should be considered as beginning end_time (optional) - int: time which should be rated up to return_measures (boolean) - whether additional information such as average stability and quality should be returned Returns: CLOSE score (float) - rating of clustering regarding all time clusterings (dict): with key 'stability_evaluation', 'stability', 'quality', 'pre-factor' with additional information if 'return_measures' is True """ cluster_ratings = self.rate_clusters(start_time, end_time) num_timestamps, timestamps = self.get_num_timestamps(start_time, end_time, return_timestamps=True) score = 0 if return_measures: quality = 0 stability = 0 for time in timestamps: if not return_measures: score += self.calc_t_clustering_rating(cluster_ratings, time) else: cur_scores = self.calc_t_clustering_rating(cluster_ratings, time, return_measures=True) score += cur_scores['score'] quality += cur_scores['quality'] stability += cur_scores['stability'] if return_measures: quality /= num_timestamps stability /= num_timestamps num_clusters = len(cluster_ratings) if num_clusters <= 0: if self._output: print('Over-Time Clustering has no Clusters!!') return 0 if self._exp_term: exp_term = self.calc_exploit() factor = (1 / num_timestamps) * (1 - (num_timestamps / num_clusters) ** 2) * exp_term else: factor = (1 / num_timestamps) * (1 - (num_timestamps / num_clusters) ** 2) if not return_measures: return score * factor else: return {'stability_evaluation': score * factor, 'stability': stability, 'quality': quality, 'pre-factor': factor} def calc_t_clustering_rating(self, cluster_ratings: dict, time: int, return_measures: bool = False) -> Union[float, dict]: """ Params: cluster_ratings (dict) - {<object_id>: <rating>} with ratings of objects time (int) - time that should be considered Optional: return_measures (boolean) - whether additional information such as average stability and quality should be returned Output: CLOSE score (float) - rating of clustering at considered time (dict): with key 'score', 'stability', 'quality' with additional information if 'return_measures' is True """ avg_stab = 0 clusters_at_time = self._data[self._data[self._time_column_name] == time][self._cluster_column_name].unique() clusters_at_time = np.delete(clusters_at_time, np.where(clusters_at_time < 0)) for cluster in clusters_at_time: try: avg_stab += cluster_ratings[cluster] except: continue num_clusters = len(clusters_at_time) if num_clusters <= 0: if self._output: print('Time Clustering at Time ', str(time), ' has no Clusters!!') return 0 avg_stab /= num_clusters if self._output: print('Average Stability at Time ', str(time), ' : ', str(avg_stab)) quality = self.measure(time) if self._output: print('Quality of Clustering at Time ' , str(time), ' : ', str(quality)) t_clustering_score = avg_stab * quality if not return_measures: return t_clustering_score else: return { 'score': t_clustering_score, 'stability': avg_stab, 'quality': quality } def 
rate_clusters(self, start_time: int = None, end_time: int = None, id: Union[int, str, list] = None) -> dict: """ Optional: start_time (int) - time that should be considered as beginning end_time (int) - time which should be rated up to id (int, str, list or None) - representing the cluster_ids that should be rated. If id is None, all objects are rated Returns: ratings (dict) - {<cluster_id>: <rating>} with ratings of clusters """ ids_to_rate = self.get_ids_to_rate(id, self._cluster_column_name, start_time, end_time) ids = ids_to_rate[:] # don't rate outliers for i in ids_to_rate: if int(i) < 0: ids.remove(i) ratings = self.calc_cluster_rating(ids, start_time) return ratings def calc_cluster_rating(self, ids_to_rate: Union[list, np.ndarray], start_time: int = None) -> dict: """ Params: ids_to_rate (array-like) - list of clusters that should be rated Optional: start_time (int) - time that should be considered as beginning Returns: ratings - dict {<cluster_id>: <rating>} with ratings of clusters """ if start_time is None: start_time = np.min(self._data[self._time_column_name].unique()) ratings = {} cluster_compositions = self.obtain_cluster_compositions() gr_clusters = self._data.groupby(self._cluster_column_name) # iterate over all cluster ids for id in ids_to_rate: time = gr_clusters.get_group(id)[self._time_column_name].iloc[0] # rate the clusters of all timestamps except of the first one if time != start_time: num_merged_clusters = len(cluster_compositions[id]) obj_list = gr_clusters.get_group(id)[self._object_column_name].unique().tolist() obj_ratings = self.calc_object_rating(cluster_compositions, obj_list, time) score = 0 for obj in obj_ratings: score += obj_ratings[obj] try: score /= len(obj_ratings) except ZeroDivisionError: if self._output: print('Cluster ', str(id), ' has no non-outlier members.') else: continue clusters = list(cluster_compositions[id].keys()) num_timestamps = len(self._data.loc[self._data[self._cluster_column_name].isin(clusters)] [self._time_column_name].unique()) try: div = num_merged_clusters / num_timestamps score /= div except ZeroDivisionError: if self._output: print("<<ZeroDivisionError - Cluster Score>> Cluster ID: ", str(id), " Merged Clusters: ", str(num_merged_clusters), " Num Timestamps: ", str(num_timestamps)) else: continue ratings[id] = score # clusters of the first timestamp have a stability of 1.0 else: ratings[id] = 1.0 return ratings def rate_object(self, id: Union[int, str, list] = None, start_time: int = None, end_time: int = None) -> dict: """ Optional: id (int, str, list or None) - representing the data points that should be rated. 
If id is None, all objects are rated start_time (int) - time that should be considered as beginning end_time (int) - representing the timestamp which should be rated up to Returns: ratings (dict) - {<object_id>: <rating>} with ratings of objects """ ids_to_rate = self.get_ids_to_rate(id, self._object_column_name) if end_time is None: end_time = np.max(self._data[self._time_column_name].unique()) cluster_compositions = self.obtain_cluster_compositions() ratings = self.calc_object_rating(cluster_compositions, ids_to_rate, end_time, start_time) return ratings def calc_object_rating(self, cluster_composition: dict, ids_to_rate: Union[list, np.ndarray], end_time: int, start_time: int = None) -> dict: """ Params: cluster_composition (dict) - {<cluster_id>: {<contained_cluster_id>: <proportion>}} containing the proportions of clusters (contained_cluster_id) that belong to cluster (cluster_id) ids_to_rate (array-like) - list of data points that should be rated end_time (int) - representing the timestamp which should be rated up to Optional: start_time (int) - time that should be considered as beginning Returns: ratings - dict {<object_id>: <rating>} with ratings of objects """ ratings = {} gr_clusters = self._data.groupby(self._object_column_name) # iterate over object ids for id in ids_to_rate: cur_group = gr_clusters.get_group(id) cur_group = cur_group[cur_group[self._time_column_name] <= end_time] if start_time is not None: cur_group = cur_group[cur_group[self._time_column_name] >= start_time] try: # id of the cluster of the last considered timestamp last_cluster = cur_group[cur_group[self._time_column_name] == end_time][self._cluster_column_name].iloc[ 0] except IndexError: print(">>INDEXERROR - LAST CLUSTER<< ID: ", str(id), ", Start Time: ", str(start_time), ", End Time: ", str(end_time)) continue # if object is an outlier for the considered timestamp, it is skipped if int(last_cluster) < 0: continue cluster_ids = cur_group[self._cluster_column_name].unique() object_ratings = [] num_clusters = 0 has_outlier = False for cluster in cluster_ids: if cluster == last_cluster: continue # Add the proportion of clusters before last timestamp, that merged in last cluster else: # outliers get worst rating of 0.0 if int(cluster) < 0: object_ratings.append(0.0) has_outlier = True else: object_ratings.append(cluster_composition[last_cluster][cluster]) num_clusters += 1 if not has_outlier and len(object_ratings) == 0: # print(str(id) + " has no data before t=" + str(end_time)) continue if self._weighting: try: weighting_denominator = 0 for i in range(1, num_clusters + 1): weighting_denominator += i if num_clusters > 0: object_rating = 0 for i in range(num_clusters): object_rating += object_ratings[i] * ((i + 1) / weighting_denominator) else: continue except (TypeError, ZeroDivisionError): # print(str(id) + " is not assigned to any cluster before t=" + str(end_time)) continue else: try: object_rating = np.sum(object_ratings) object_rating /= num_clusters except (TypeError, ZeroDivisionError): # print(str(id) + " is not assigned to any cluster before t=" + str(end_time)) continue ratings[id] = round(object_rating, 3) return ratings def calc_exploit(self) -> float: """ Returns: exploitation_term (float) - exploitation term for whole clustering """ num_objects = len(self._data[self._object_column_name].unique()) num_no_outliers = len(self._data[self._data[self._cluster_column_name] >= 0][self._object_column_name].unique()) return num_no_outliers / num_objects ######## HELPER FUNCTIONS ######## def 
get_feature_list(self, objects: Union[list, np.ndarray], time: int) -> np.ndarray: """ Params: objects (array-like) - list of objects_ids that belong to considered cluster time (int) - time of cluster that is considered Output: feature_list (list) - list of lists containing the features of objects in the considered cluster """ feature_list = [] for obj in objects: features = self._data[ (self._data[self._object_column_name] == obj) & (self._data[self._time_column_name] == time)] try: features = \ features.drop([self._object_column_name, self._cluster_column_name, self._time_column_name], axis=1).iloc[0].tolist() except IndexError: print(">>INDEXERROR - FEATURE LIST<< ID: ", str(obj), ", Time: ", str(time)) continue if len(features) <= 0: print("No features found for object ", str(obj)) continue feature_list.append(features) return np.array(feature_list) def get_num_timestamps(self, start_time: int, end_time: int, return_timestamps: bool = False) -> int: """ Params: start_time (int) - first timestamp to be considered end_time (int) - last timestamp to be considered Optional: return_timestamps (boolean) - list of all timestamps Returns: num_timestamps (int) - number of timestamps between start_time and end_time """ timestamp_list = self._data[self._time_column_name].unique() if start_time is not None: timestamp_list = [i for i in timestamp_list if i >= start_time] if end_time is not None: timestamp_list = [i for i in timestamp_list if i <= end_time] num_timestamps = len(timestamp_list) if not return_timestamps: return num_timestamps else: return num_timestamps, timestamp_list def get_ids_to_rate(self, id: Union[int, str, list], id_name: str, start_time: int = None, end_time: int = None) -> list: """ Params: id (int, str, list or None) - representing the data points that should be rated. 
If id is None, all objects are rated id_name (str) - either self._cluster_column_name or self._object_column_name, which ids to extract Optional: start_time (int) - first timestamp to be considered end_time (int) - last timestamp to be considered Returns: ids_to_rate (list) - list of ids that should be rated """ if id is None: data = self._data.copy() if start_time is not None: data = data[data[self._time_column_name] >= start_time] if end_time is not None: data = data[data[self._time_column_name] <= end_time] ids_to_rate = data[id_name].unique().tolist() elif isinstance(id, int) or isinstance(id, str): ids_to_rate = [id] elif isinstance(id, list): ids_to_rate = id[:] else: raise Exception('id has to be int, str, list or None') return ids_to_rate def obtain_cluster_compositions(self) -> dict: """ Returns: cluster_compositions (dict) - dict of dicts {<cluster_id>: {<cluster_id>: <proportion>}} with cluster compositions Example: {5: {1: 1.0, 2: 0.1, 4: 0.5}} describes that 100% of cluster 1, 10% of cluster 2 and 50% of cluster 4 belong to cluster 5 """ cluster_compositions = {} g_clusters = self._data.groupby([self._time_column_name, self._cluster_column_name]) if not self._jaccard: cluster_members = self._data.groupby(self._cluster_column_name).count() # iterate over all clusters - 'group' contains the time and cluster_id # and 'objects' is the corresponding dataframe for group, objects in g_clusters: # Ignore outliers if int(group[1]) < 0: continue objects = objects[self._object_column_name].values.tolist() # temporal intersection # select considered clusters with later timestamps than the current one to check which clusters the # current one merged into and count, how many objects of the current cluster are in the considered clusters # example of a series from the dataframe: [cluster_id, count] with [2, 10] # meaning: 10 objects of the current cluster merged into the cluster with the id 2 temp_intersection = (self._data.loc[(self._data[self._object_column_name].isin(objects)) & (self._data[self._time_column_name] > group[0])]).groupby(self._cluster_column_name).count() # iterate over all clusters which the current cluster has merged into # 'cluster' contains the cluster_id # and 'con_objects' is the corresponding number of objects of the temporal intersection for cluster, num_objects in temp_intersection.iterrows(): # Ignore outliers if int(cluster) < 0: continue # for all considered clusters save the proportion of the current cluster that merged into the considered # one # example: {3: {2: 0.3}, 4: {2: 0.1}} # meaning: 30% of (current) cluster 2 merged into (considered) cluster 3 and 10% into (considered) cluster 4 if cluster not in cluster_compositions: cluster_compositions[cluster] = {} if self._jaccard: # cardinality of the union of both considered clusters card_union = len(self._data.loc[(self._data[self._cluster_column_name] == cluster) | (self._data[self._cluster_column_name] == group[1])] [self._object_column_name].unique()) # jaccard distance cluster_compositions[cluster][group[1]] = round(float(num_objects.values[1]) / float(card_union), 3) else: cluster_compositions[cluster][group[1]] = round(float(num_objects.values[1]) / float(cluster_members.loc[group[1]].values[1]), 3) if group[1] not in cluster_compositions: cluster_compositions[group[1]] = {} return cluster_compositions ######## QUALITY MEASURES ######## @staticmethod def calc_sse(feature_list: list) -> float: """ Params: feature_list (list) - list of lists containing the features of objects in the considered cluster 
Returns: sse (float) - sum of squared errors to centroid of cluster """ centroid = np.average(feature_list, axis=0) sse = np.sum(np.power(feature_list - centroid[None, :], 2)) return sse def calc_mse(self, feature_list: list) -> float: """ Params: feature_list (list) - list of lists containing the features of objects in the considered cluster Returns: mse (float) - mean squared error of cluster """ sse = self.calc_sse(feature_list) return sse / len(feature_list) @staticmethod def calc_mae(feature_list: list) -> float: """ Params: feature_list (list) - list of lists containing the features of objects in the considered cluster Returns: mae (float) - mean average errors to centroid of cluster """ centroid = np.average(feature_list, axis=0) mae = np.average(np.abs(feature_list - centroid[None, :])) return mae @staticmethod def calc_max_dist(feature_list: list) -> float: """ Params: feature_list (list) - list of lists containing the features of objects in the considered cluster Returns: max_dist (float) - maximal distance of cluster member to centroid of cluster """ max_dist = 0 for i in range(len(feature_list) - 1): for j in range(i + 1, len(feature_list)): cur_dist = euclidean(np.array(feature_list[i]), np.array(feature_list[j])) if cur_dist > max_dist: max_dist = cur_dist max_dist /= 2 ** (1 / 2) return max_dist def calc_min_pts(self, feature_list: list) -> float: """ Params: feature_list (list) - list of lists containing the features of objects in the considered cluster Returns: avg_dist (float) - average distance of cluster members to their minPts neighbor """ avg_dist = 0 for i in range(len(feature_list)): dist_list = [10] * self._minPts for j in range(len(feature_list)): if i == j: continue cur_dist = euclidean(np.array(feature_list[i]), np.array(feature_list[j])) for k in range(len(dist_list)): if cur_dist < dist_list[k]: dist_list.insert(k, cur_dist) dist_list.pop(self._minPts) avg_dist += dist_list[self._minPts - 1] avg_dist /= len(feature_list) return avg_dist @staticmethod def return_zero(): """ Function is used if no quality measure should be used in CLOSE This is the case when only the exploitation term is considered Returns: 0 """ return 0 def calc_exploit_at_t(self, time: int) -> float: """ Params: time (int) - time to be considered Returns: rating (float) - exploitation rating of time clustering """ num_objects_at_t = len(self._data[self._data[self._time_column_name] == time][self._object_column_name].unique()) num_no_outliers = len(self._data[(self._data[self._time_column_name] == time) & (self._data[self._cluster_column_name] >= 0)][self._object_column_name].unique()) return num_no_outliers / num_objects_at_t
[ [ [ 7, 18 ], [ 8215, 8217 ], [ 8243, 8245 ], [ 10159, 10161 ], [ 10565, 10567 ], [ 13273, 13275 ], [ 13599, 13601 ], [ 16950, 16952 ], [ 17798, 17800 ], [ 17771, 17773 ], [ 18874, 18876 ], [ 24915, 24917 ], [ 24962, 24964 ], [ 24969, 24971 ], [ 25712, 25714 ], [ 25759, 25761 ], [ 25770, 25772 ], [ 26316, 26318 ], [ 26343, 26345 ], [ 27071, 27073 ], [ 27098, 27100 ] ], [ [ 54, 63 ], [ 26306, 26315 ], [ 27061, 27070 ] ], [ [ 83, 88 ], [ 183, 188 ], [ 2741, 2746 ], [ 5251, 5256 ], [ 7493, 7498 ], [ 9300, 9305 ], [ 10147, 10152 ], [ 12610, 12615 ], [ 13587, 13592 ], [ 17759, 17764 ], [ 19856, 19861 ] ], [ [ 96, 102 ], [ 156, 162 ] ], [ [ 111, 116 ] ] ]
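A minimal usage sketch for CLOSE, assuming the class above is in scope; the toy DataFrame follows the column layout the class docstring describes (object_id, time, cluster_id, then feature columns), and all names and values are made up for illustration.

import pandas as pd

# Two timestamps, two clusters per timestamp, one feature column 'x'.
toy = pd.DataFrame({
    'object_id':  ['a', 'b', 'c', 'd', 'a', 'b', 'c', 'd'],
    'time':       [0, 0, 0, 0, 1, 1, 1, 1],
    'cluster_id': [0, 0, 1, 1, 2, 2, 3, 3],
    'x':          [0.1, 0.2, 0.8, 0.9, 0.15, 0.25, 0.85, 0.95],
})

# Column order matters: the constructor reads the object, time and cluster ids
# from the first three columns; everything else is treated as a feature.
close = CLOSE(toy, measure='mse', output=False)
print(close.rate_clustering())  # single CLOSE score for the whole over-time clustering
print(close.rate_clusters())    # per-cluster stability ratings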
import discord from discord.ext import commands from Modules import CONSTANT from Modules.Checks import check_if_role_or_bot_spam class Roles(commands.Cog): def __init__(self, bot: commands.Bot): self.bot = bot @commands.command() @check_if_role_or_bot_spam() async def role(self, ctx: commands.Context, role_type: str, *role_names): """ Add a role. <role_type>: Use 'main', 'sub', or 'unit' to indicate which type of role you want. Your main role will control your nametag colour. [role_names...]: The name of the roles you want to add, names are not case-sensitive. You can enter as many names as you want to. Examples: ">role main Sally" --- will add Sally as your main role and make your nametag yellow. ">role sub Mizzy" --- will add Mizzy as a sub role without affecting your nametag colour. Unit examples: >role unit Cider >role unit Bench >role unit Keikoto Unit roles work the same as sub roles, you can have many of them. You can enter multiple role names for this command. If you enter ">role main" with more than one role name, you will get the first valid role you entered. Examples: ">role sub Sally Sakura Ruri Jun" --- will add all these four sub roles to you. ">role main Sally Sakura Ruri Jun" --- will only add Sally as the main role, if you had Sally as your main role, the operation does nothing. Only the following roles may be added for 'main' and 'sub' roles: Sally, Sakura, Ruri, Jun, Mizzy, Miyako, Kanaeru, Akane, Nagomin, Miu, Meimei, Uta, Nicole, Chiharun, Reika, Reinyan, Ayaka, Moe, Mikami, Rettan, Yuki, Ainacchi, Tsubomi, Tamago, Gouda, Kaoruko, Nana, Miko, Komiya, Aida, Mukai Only the following roles may be added for 'unit' roles: >> Hareta Hi no Bench (use the word "Bench" to add), >> Keikoto saisei keikaku (use the word "Keikoto"), >> Ki no Nuketa Cider (use the word "Cider") """ role_names: list[str] = [x.capitalize() for x in role_names] if not role_names: await ctx.reply("Missing required arguments. ") result_msgs: list[str] = [] if role_type in CONSTANT.ROLES_ID.keys(): for role_name in role_names: if role_name in CONSTANT.GENERAL_ROLEABLES: if role_type == "main": role_ids: list[int] = [role.id for role in ctx.author.roles] main_roles = list(set(role_ids) & set(CONSTANT.ROLES_ID["main"].values())) role: discord.Role = ctx.guild.get_role(CONSTANT.ROLES_ID["main"][role_name]) if role in ctx.author.roles: result_msgs.append("You already have that role!") elif main_roles: result_msgs.append("You can't have more than one main role!") else: await ctx.author.add_roles(role) result_msgs.append("Role added.") break elif role_type == "sub": role: discord.Role = ctx.guild.get_role(CONSTANT.ROLES_ID["sub"][role_name]) if role in ctx.author.roles: result_msgs.append("You already have that role!") else: await ctx.author.add_roles(role) result_msgs.append("Role added.") else: await ctx.reply("Illegal operation. Check your <role_type> input. ") return elif role_name in CONSTANT.UNIT_ROLABLES: if role_type == "unit": # verify that the type is actually unit role: discord.Role = ctx.guild.get_role(CONSTANT.ROLES_ID["unit"][role_name]) if role in ctx.author.roles: result_msgs.append("You already have that role!") else: await ctx.author.add_roles(role) result_msgs.append("Role added.") else: await ctx.reply("Illegal operation. Check your <role_type> input. ") return else: result_msgs.append("Illegal role name. Type `>help role` for a list of acceptable role names. ") else: await ctx.reply("Illegal operation. Check your <role_type> input. 
") return final_msg: str = "" for name, result in zip(role_names, result_msgs): final_msg += "**{}**: {} \n".format(name, result) await ctx.reply(final_msg) @commands.command() @check_if_role_or_bot_spam() async def unrole(self, ctx: commands.Context, role_type: str, *role_names): """ Delete a role. <role_type>: Use 'main' or 'sub' to indicate which type of role you wish to delete. If you delete your main role, your nametag colour will change to that of your highest sub role until you add a new main role. [role_names...]: The name of the role you want to delete, names are not case-sensitive. You can enter as many names as you want to. Example: ">unrole main Sally" --- will remove Sally as your main role. --- If you have Meimei as a sub role, your nametag colour will then be light blue until you add a new main role. Multiple role deletion works similarly as >role does, for more help, send ">help role". """ role_names: list[str] = [x.capitalize() for x in role_names] if not role_names: await ctx.reply("Missing required argument. ") result_msgs: list[str] = [] for role_name in role_names: if role_name in CONSTANT.GENERAL_ROLEABLES: if role_type == 'main': if role_name in CONSTANT.ROLES_ID["main"].keys(): role: discord.Role = ctx.guild.get_role(CONSTANT.ROLES_ID["main"][role_name]) else: result_msgs.append("Illegal role name for main roles. ") continue elif role_type == 'sub': if role_name in CONSTANT.ROLES_ID["sub"].keys(): role: discord.Role = ctx.guild.get_role(CONSTANT.ROLES_ID["sub"][role_name]) else: result_msgs.append("Illegal role name for sub roles. ") continue elif role_type == 'unit': if role_name in CONSTANT.ROLES_ID["unit"].keys(): role: discord.Role = ctx.guild.get_role(CONSTANT.ROLES_ID["unit"][role_name]) else: result_msgs.append("Illegal role name for unit roles. ") continue else: await ctx.send("Invalid selection. ") return if role not in ctx.author.roles: result_msgs.append("You don't have that role!") else: await ctx.author.remove_roles(role) result_msgs.append("Role removed.") else: result_msgs.append("Illegal role name. Type `>help unrole` for a list of acceptable role names. ") final_msg: str = "" for name, result in zip(role_names, result_msgs): final_msg += "**{}**: {} \n".format(name, result) await ctx.reply(final_msg)
[ [ [ 7, 14 ], [ 2800, 2807 ], [ 3393, 3400 ], [ 4072, 4079 ], [ 6351, 6358 ], [ 6703, 6710 ], [ 7055, 7062 ] ], [ [ 39, 47 ], [ 145, 153 ], [ 232, 240 ], [ 5023, 5031 ], [ 188, 196 ], [ 314, 322 ], [ 5107, 5115 ] ], [ [ 69, 77 ], [ 2414, 2422 ], [ 2513, 2521 ], [ 2732, 2740 ], [ 2834, 2842 ], [ 3427, 3435 ], [ 3933, 3941 ], [ 4106, 4114 ], [ 6183, 6191 ], [ 6287, 6295 ], [ 6385, 6393 ], [ 6640, 6648 ], [ 6737, 6745 ], [ 6991, 6999 ], [ 7089, 7097 ] ], [ [ 105, 130 ], [ 256, 281 ], [ 5047, 5072 ] ], [ [ 139, 144 ] ] ]
from django.db import models
from django.forms import ModelForm
from django.forms import TextInput
from .models import agendamento
#import datetime

#class frm_agendamento(forms.ModelForm):
#
#    data_agendamento = forms.DateField(label="Data", initial=datetime.date.today)
#    horario_inicio = forms.TimeField(label="Inicio", initial=datetime.datetime.now().strftime('%H:%M'))
#    horario_fim = forms.TimeField(label="Fim", initial=datetime.datetime.now().strftime('%H:%M'))
#
#    motivo = forms.CharField(
#        label='Motivo', widget=forms.Textarea
#    )


class frm_agendamento(ModelForm):  # form based on a model
    class Meta:
        model = agendamento
        exclude = ('criado_em', 'google_link')  # fields not used in the form
        widgets = {
            'data_agendamento': TextInput(
                attrs={'class': 'form-control datepicker', 'data-date-format': 'dd/mm/yyyy'}),
            'horario_inicio': TextInput(
                attrs={'class': 'form-control'}),
            'horario_fim': TextInput(
                attrs={'class': 'form-control'}),
            'motivo': TextInput(
                attrs={'class': 'form-control'})
        }
[ [ [ 22, 28 ] ], [ [ 54, 63 ], [ 586, 595 ] ], [ [ 89, 98 ], [ 811, 820 ], [ 929, 938 ], [ 1000, 1009 ], [ 1066, 1075 ] ], [ [ 119, 130 ], [ 661, 672 ] ], [ [ 570, 585 ] ] ]
# Copyright 2020 Google LLC # # Licensed under the Apache License, Version 2.0 (the License); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an AS IS BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Classes and methods for working with entity types in the ontology.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import re import typing from typing import Optional, Tuple from yamlformat.validator import base_lib from yamlformat.validator import config_folder_lib from yamlformat.validator import field_lib from yamlformat.validator import findings_lib ENTITY_TYPE_NAME_REGEX = re.compile(r'^[a-zA-Z][a-zA-Z0-9]*(?:_[a-zA-Z0-9]+)*$') FIELD_INCREMENT_STRIPPER_REGEX = re.compile( r'(^[a-z][a-z0-9]*(?:_[a-z][a-z0-9]*)*)((?:_[0-9]+)+)$') FieldParts = typing.NamedTuple('FieldParts', [('namespace', str), ('field', str), ('increment', str)]) OptWrapper = typing.NamedTuple('OptWrapper', [('field', FieldParts), ('optional', bool)]) TypeParts = typing.NamedTuple('TypeParts', [('namespace', str), ('typename', str)]) EntityIdByEntry = typing.NamedTuple('EntityIdByEntry', [('namespace', str), ('typename', str)]) def SeparateFieldNamespace(qualified_field_name: str) -> Tuple[str, str]: """Returns the namespace and its field name as separate values or an Error. Args: qualified_field_name: a qualified field string like `HVAC/run_status` Throws: TypeError: if the field is not qualified """ fqf_parsed = qualified_field_name.split('/') if len(fqf_parsed) == 1: raise TypeError('Type improperly formatted, a namespace is missing: ', fqf_parsed) if len(fqf_parsed) > 2: raise ValueError('Type improperly formatted, too many separators: ', fqf_parsed) return fqf_parsed[0], fqf_parsed[1] def SeparateFieldIncrement(field_name) -> Tuple[str, str]: """Takes as an input a field_name (string) and returns a tuple of strings. The first element is the standard field name and its increment when available. For example: zone_occupancy_status_1 -> [zone_occupancy_status, 1] Args: field_name: the field name to parse. Returns: A tuple of string, the standard field name and its increment if available. """ field_name_part = field_name increment_part = '' match = FIELD_INCREMENT_STRIPPER_REGEX.match(field_name) if match: field_name_part = match.group(1) increment_part = match.group(2) return field_name_part, increment_part class EntityTypeUniverse(findings_lib.Findings): """Helper class to represent the defined universe of EntityTypes. Only contains valid EntityTypes. Attributes; namespace_folder_map: a map of namespace names to EntityTypeFolders. type_namespaces_map: a map of type names to TypeNamespaces. type_ids_map: maps type IDs to entity types. Contains all valid types w/IDs. """ def __init__(self, entity_type_folders): """Init. Args: entity_type_folders: list of EntityTypeFolder objects parsed from files. 
""" super(EntityTypeUniverse, self).__init__() self.namespace_folder_map = {} self.type_namespaces_map = {} self.type_ids_map = {} self._BuildNamespaceFolderMap(entity_type_folders) self._BuildTypeMaps( [folder.local_namespace for folder in entity_type_folders]) def GetEntityType(self, namespace_name, typename): """Finds entity_type by namespace and typename and returns it or None.""" if namespace_name not in self.type_namespaces_map: return None return self.type_namespaces_map[namespace_name].GetType(typename) def GetNamespace(self, namespace_name): """Finds namespace in the universe by name and returns it or None.""" return self.type_namespaces_map.get(namespace_name, None) def GetNamespaces(self): """Get the entity type namespace objects in this universe. Returns: A list of EntityTypeNamespace objects """ return list(self.type_namespaces_map.values()) def _GetDynamicFindings(self, filter_old_warnings): findings = [] for folder in self.namespace_folder_map.values(): findings += folder.GetFindings(filter_old_warnings) return findings def _BuildTypeMaps(self, type_namespaces): """Creates a dict mapping namespace strings to TypeNamespace objects. Sets the self.type_namespaces_map attribute of the class. Args: type_namespaces: a list of TypeNamespace objects. Raises: RuntimeError: if assumptions about internal data structures are violated. """ for type_namespace in type_namespaces: self.type_namespaces_map[type_namespace.namespace] = type_namespace for entity_type in type_namespace.valid_types_map.values(): if entity_type.uid: if entity_type.uid in self.type_ids_map: dup_id_entry = self.type_ids_map[entity_type.uid] dup_id_type = self.GetEntityType(dup_id_entry.namespace, dup_id_entry.typename) if dup_id_type is None: raise RuntimeError('Duplicate type with uid ' + entity_type.uid + ' should always be mapped') entity_type.AddFinding( findings_lib.DuplicateIdsError(type_namespace.namespace, entity_type, dup_id_type)) dup_id_type.AddFinding( findings_lib.DuplicateIdsError(dup_id_entry.namespace, dup_id_type, entity_type)) self.type_ids_map[entity_type.uid] = EntityIdByEntry( namespace=type_namespace.namespace, typename=entity_type.typename) def _BuildNamespaceFolderMap(self, type_folders): """Creates a dict mapping namespace strings to EntityTypeFolder objects. Sets the self.namespace_folder_map attribute of the class. Args: type_folders: a list of EntityTypeFolder objects. """ for folder in type_folders: self.namespace_folder_map[folder.local_namespace.namespace] = folder class EntityTypeFolder(config_folder_lib.ConfigFolder): """Class representing a namespace folder of entity types. Class fully validates all entity types defined within the namespace folder, collects issues found, and stores all valid entity types. Attributes: local_namespace: TypeNamespace object representing this namespace. """ def __init__(self, folderpath, field_universe=None): """Init. Args: folderpath: required string with full path to the folder containing entity type files. Path should be relative to google3/ and have no leading or trailing /. field_universe: optional FieldsUniverse object. 
""" super(EntityTypeFolder, self).__init__(folderpath, base_lib.ComponentType.ENTITY_TYPE) self.local_namespace = TypeNamespace(self._namespace_name, field_universe) def Finalize(self): """Call to complete entity creation after all types are added.""" self.local_namespace.QualifyParentNames() def _AddFromConfigHelper(self, document, context): for type_name in document: new_type = self._ConstructType(type_name, document[type_name], context.filepath) self._AddType(new_type) def _ConstructField(self, local_field_names, optional, output_array): for qualified_field_name in local_field_names: field_ns, raw_field_name = field_lib.SplitFieldName(qualified_field_name) std_field_name, increment = SeparateFieldIncrement(raw_field_name) # Field will look local if undefined, but we'll catch the error later # Because we do explict existence checks and it will fail # TODO(berkoben) refactor so validation happens in an order that # prevents this logic lint field_ns = self.local_namespace.GetQualifiedNamespace( field_ns, std_field_name) output_array.append( OptWrapper( field=FieldParts( namespace=field_ns, field=std_field_name, increment=increment), optional=optional)) def _ConstructType(self, type_name, type_contents, filepath): """Reads a entity type config block and generates an EntityType object.""" description = '' parents = None local_field_names = None opt_local_field_names = None is_abstract = False is_canonical = False uid = None expected_keys = set([ 'description', 'implements', 'uses', 'opt_uses', 'is_abstract', 'id', 'is_canonical' ]) if 'description' in type_contents: description = type_contents['description'] if 'implements' in type_contents: parents = type_contents['implements'] if 'uses' in type_contents: local_field_names = type_contents['uses'] if 'opt_uses' in type_contents: opt_local_field_names = type_contents['opt_uses'] if 'is_abstract' in type_contents: is_abstract = type_contents['is_abstract'] if 'is_canonical' in type_contents: is_canonical = type_contents['is_canonical'] if 'id' in type_contents: uid = type_contents['id'] # Generate tuples to represent each field fq_lfn = [] if local_field_names: self._ConstructField(local_field_names, False, fq_lfn) if opt_local_field_names: self._ConstructField(opt_local_field_names, True, fq_lfn) entity_type = EntityType( filepath=filepath, typename=type_name, description=description, parents=parents, local_field_tuples=fq_lfn, is_abstract=is_abstract, inherited_fields_expanded=False, is_canonical=is_canonical, uid=uid, namespace=self.local_namespace) # Add errors to type if there's anything extra in the block. We add to the # entity type because an extra key here is likely a typo in a real key name # that would result in information being lost from the type. for key in type_contents: if key not in expected_keys: entity_type.AddFinding( findings_lib.UnrecognizedKeyError(key, entity_type.file_context)) return entity_type def _AddType(self, entity_type): """Adds entity_type if it is fully valid. If formatting is correct, continues on to field validation. Records all findings in object. Args: entity_type: EntityType object. Returns: True if the entity type was successfully validated and added. False otherwise. """ if not entity_type.IsValid(): self.AddFindings(entity_type.GetFindings()) return False return self.local_namespace.InsertType(entity_type) class TypeNamespace(findings_lib.Findings): """Class representing a namespace of entity types. Attributes: namespace: string valid_types_map: Dict mapping typename strings to EntityType objects. 
""" def __init__(self, namespace, field_universe=None): super(TypeNamespace, self).__init__() self.namespace = namespace self._field_universe = field_universe self.valid_types_map = {} self._parents_qualified = False def _GetDynamicFindings(self, filter_old_warnings): findings = [] for entity_type in self.valid_types_map.values(): findings += entity_type.GetFindings(filter_old_warnings) return findings def GetType(self, typename): return self.valid_types_map.get(typename, None) def InsertType(self, entity_type): """Validate that declared fields are defined. Adds type if valid and unique. Findings for non-validated fields are applied to this TypeNamespace. Args: entity_type: entity to attempt to add. Returns: True if entity was added successfully. Raises: RuntimeError: if this is called after qualifying parent names """ if self._parents_qualified: raise RuntimeError('Cannot add types after Qualifying parents') if self._ValidateFields(entity_type): typename = entity_type.typename mapped_entity_type = self.valid_types_map.get(typename) if mapped_entity_type is None: self.valid_types_map[typename] = entity_type return True # entity_type is a duplicate type self.AddFinding( findings_lib.DuplicateEntityTypeDefinitionError( self, entity_type, mapped_entity_type.file_context)) return False return False def GetQualifiedNamespace(self, field_ns, field_name): """Returns the namespace name for this field. Args: field_ns: namespace of field as parsed from the config field_name: unqualified field name string Returns: The fully qualified field string. """ if not field_ns and self.IsLocalField(field_name): return self.namespace return field_ns def _BuildQualifiedParentTuple(self, parent_name): """Creates the two-part parent tuple with a fully-qualified namespace. Args: parent_name: string as specified in the config file. Returns: A TypeParts tuple representing this parent. """ namespace_name = self.namespace split = parent_name.split('/') if len(split) != 2: if not self.GetType(parent_name): # parent is in the global namespace namespace_name = '' else: namespace_name = split[0] parent_name = split[1] return TypeParts(namespace=namespace_name, typename=parent_name) def QualifyParentNames(self): """Sets parents attribute of this namespace with fully qualified names.""" if self._parents_qualified: return for entity_type in self.valid_types_map.values(): fq_tuplemap = {} for parent in entity_type.unqualified_parent_names: fq_tuple = self._BuildQualifiedParentTuple(parent) fq_name = '{0}/{1}'.format(fq_tuple.namespace, fq_tuple.typename) fq_tuplemap[fq_name] = fq_tuple entity_type.parent_names = fq_tuplemap self._parents_qualified = True def IsLocalField(self, field_name): """Returns true if this unqualified field is defined in the namespace. Args: field_name: an unqualified field name with no leading '/' """ if not self._field_universe: return False return self._field_universe.IsFieldDefined(field_name, self.namespace) def _ValidateFields(self, entity): """Validates that all fields declared by entity are defined.""" # if field_universe is not defined just return true if not self._field_universe: return True valid = True for field_tuple in entity.local_field_names.values(): if not self._ValidateField(field_tuple.field, entity): valid = False return valid def _ValidateField(self, field_tuple, entity): """Validates that field declared by entity is defined. Field formatting has already been validated. Findings are saved on the TypeNamespace. 
Args: field_tuple: tuple representing a fully qualified field entity: EntityType Returns: True if field is defined. """ if not self._field_universe.IsFieldDefined(field_tuple.field, field_tuple.namespace): self.AddFinding( findings_lib.UndefinedFieldError(entity, field_tuple.field)) return False return True def BuildQualifiedField(opt_tuple): field_tuple = opt_tuple.field return '{0}/{1}{2}'.format(field_tuple.namespace, field_tuple.field, field_tuple.increment) class EntityType(findings_lib.Findings): """Creates an EntityType object from a set of values describing the type. Attributes: file_context: FileContext object containing file info. typename: string. description: string. parent_names: a list of parent typename strings. local_field_names: the local set of standard field names inherited_field_names: the set of inherited field names. Is always assigned to an empty set at init, to be expanded later. inherited_fields_expanded: boolean. is_canonical: boolean indicating if this is a curated canonical type. uid: the database ID string of this type if uploaded namespace: a reference to the namespace object the entity belongs to Returns: An instance of the EntityType class. """ def __init__(self, begin_line_number=0, filepath='', typename='', description='', parents=None, local_field_tuples=None, is_abstract=False, inherited_fields_expanded=False, is_canonical=False, uid=None, namespace=None): """Init. Args: begin_line_number: int. Starting line number for the entity type definition. filepath: string. google3 path to the file defining the type. typename: required string. description: required string. parents: list of parent typename strings. local_field_tuples: list of OptWrapper tuples is_abstract: boolean indicating if this is an abstract type. inherited_fields_expanded: boolean. Should be false at init. is_canonical: boolean indicating if this is a curated canonical type. uid: the database ID string of this type if uploaded namespace: a reference to the namespace object the entity belongs to """ super(EntityType, self).__init__() self.file_context = findings_lib.FileContext( begin_line_number=begin_line_number, filepath=filepath) self.typename = typename self.description = description self.namespace = namespace self.local_field_names = {} local_field_names = [] if local_field_tuples: local_field_names = [ BuildQualifiedField(opt_parts) for opt_parts in local_field_tuples ] for i, lfn in enumerate(local_field_names): self.local_field_names[lfn] = local_field_tuples[i] self.inherited_field_names = {} self.inherited_fields_expanded = inherited_fields_expanded if parents is None: parents = [] self.parent_names = None self.parent_name_tuples = None self.unqualified_parent_names = parents self._all_fields = None self._has_optional_fields = None self.is_abstract = is_abstract self.is_canonical = is_canonical self.uid = uid # TODO(berkoben) update this method to use tuples if possible self._ValidateType(local_field_names) def HasOptionalFields(self, run_unsafe=False): if not (self.inherited_fields_expanded or run_unsafe): raise RuntimeError('Type has not been expanded') if self._has_optional_fields is not None: return self._has_optional_fields fields = self.GetAllFields() for field in fields.values(): if field.optional: self._has_optional_fields = True return self._has_optional_fields self._has_optional_fields = False return self._has_optional_fields def GetAllFields(self, run_unsafe=False): """Returns the expanded set of fields for this type. 
Args: run_unsafe: set true to run against a type before fields are fully expanded. Running in this mode does not memoize the result. Returns: A dictionary of fully qualified strings representing fields in the type to OptWrapper tuples representing the contents of the field. Raises: RuntimeError: if fields have not yet been expanded. """ if not (self.inherited_fields_expanded or run_unsafe): raise RuntimeError('Type {0} has not been expanded'.format(self.typename)) if self._all_fields is None: tmp = self.local_field_names.copy() tmp.update(self.inherited_field_names) if run_unsafe: return tmp self._all_fields = tmp return self._all_fields def HasFieldAsWritten(self, fieldname_as_written: str, run_unsafe: bool = False) -> bool: """Returns true if a valid config file value maps to a field in the type. Accepts a field name as written in a configuration file referencing this type. The method applies context-aware namespace omission (i.e. referencing a field without its namespace) to identify the field regardless of the namespace and syntax variation. Note: to minimize redundancy, this method simply wraps. `GetFieldFromConfigText()`. If your application also needs the `Field` use that method instead to eliminate redundant processing. Args: fieldname_as_written: string verbatim from a building or ontology config run_unsafe: set true to allow calls before parent type fields are expanded Returns: True if the Field is defined on the type. False otherwise. """ return self.GetFieldFromConfigText(fieldname_as_written, run_unsafe) is not None def GetFieldFromConfigText(self, fieldname_as_written: str, run_unsafe: bool = False) -> Optional[OptWrapper]: """Returns `OptWrapper` provided string validates against the entity. Accepts a field name as written in a configuration file referencing this type. The method applies all shorthanding rules to identify the field regardless of the namespace and syntax variation. Args: fieldname_as_written: string verbatim from a building or ontology config run_unsafe: set true to allow calls before parent type fields are expanded Returns: `OptWrapper` if field is present, None otherwise """ try: # Check the field as if it's fully qualified. return self.GetField(fieldname_as_written, run_unsafe) except TypeError: pass # Field is unqualified so it is either global or type-namespace-local # Check for a locally defined field first using type's namespace field = self._GetField( self.namespace.namespace + '/' + fieldname_as_written, run_unsafe) if not field: # Check field as if it's in the global namespace field = self._GetField('/' + fieldname_as_written, run_unsafe) return field def HasField(self, fully_qualified_fieldname: str, run_unsafe: bool = False) -> bool: """Returns True if field string validates against the entity's fields. Args: fully_qualified_fieldname: a fully qualified names for example: "HVAC/run_status_1". run_unsafe: set true to run against a type before fields are fully expanded. Running in this mode does not memoize the result. Throws: TypeError: if the field is not fully qualified """ return self.GetField(fully_qualified_fieldname, run_unsafe) is not None def GetField(self, fully_qualified_fieldname: str, run_unsafe: bool = False) -> Optional[OptWrapper]: """Returns `OptWrapper` if field string validates against the entity. Args: fully_qualified_fieldname: a fully qualified names for example: "HVAC/run_status_1". run_unsafe: set true to run against a type before fields are fully expanded. Running in this mode does not memoize the result. 
Returns: `OptWrapper` if field is present, None otherwise Throws: TypeError: if the field is not fully qualified """ # Throws an error in the case that this isn't a fully qualified field _, _ = SeparateFieldNamespace(fully_qualified_fieldname) return self._GetField(fully_qualified_fieldname, run_unsafe) def _GetField(self, fully_qualified_fieldname: str, run_unsafe: bool = False) -> Optional[OptWrapper]: return self.GetAllFields(run_unsafe).get(fully_qualified_fieldname) def _ValidateType(self, local_field_names): """Validates that the entity type is formatted correctly. Checks for formatting and duplicate fields and parents. Records any errors found. Args: local_field_names: list of local field names for the type. """ # Make sure the typename is non-empty. if not self.typename: self.AddFinding(findings_lib.MissingTypenameError(self)) elif not isinstance(self.typename, str): self.AddFinding( findings_lib.IllegalKeyTypeError(self.typename, self.file_context)) elif not ENTITY_TYPE_NAME_REGEX.match(self.typename): self.AddFinding( findings_lib.InvalidTypenameError(self.typename, self.file_context)) # Make sure the type description is non-empty. if not self.description: self.AddFinding(findings_lib.MissingEntityTypeDescriptionWarning(self)) # Check for duplicate local fields. # this check is case insensitive to catch dupes earlier in the event that # we stop explicitly rejecting upper case characters check_fields = set() for field in local_field_names: field_lower = field.lower() if field_lower in check_fields: self.AddFinding(findings_lib.DuplicateFieldError(self, field)) continue check_fields.add(field_lower) # TODO(berkoben): Add more checks to validate fields in isolation # (in case we don't have a field set to check against) # (i.e. check for chirality, formatting. Could use actual Field objects) # Check formatting of field name if len(field.split('/')) > 2: self.AddFinding(findings_lib.UnrecognizedFieldFormatError(self, field)) # Check for duplicate parent names. parent_names_check = set() for parent_name in self.unqualified_parent_names: if parent_name in parent_names_check: self.AddFinding(findings_lib.DuplicateParentError(self, parent_name)) continue parent_names_check.add(parent_name) # Check formatting of parent name if len(parent_name.split('/')) > 2: self.AddFinding( findings_lib.UnrecognizedParentFormatError(self, parent_name)) # Enforce that the inherited_fields_expanded field is not set if self.inherited_fields_expanded: self.AddFinding(findings_lib.InheritedFieldsSetError(self))
[ [ [ 667, 682 ] ], [ [ 706, 714 ] ], [ [ 738, 752 ] ], [ [ 761, 763 ], [ 1022, 1024 ], [ 1111, 1113 ] ], [ [ 771, 777 ], [ 1198, 1204 ], [ 1364, 1370 ], [ 1499, 1505 ], [ 1633, 1639 ] ], [ [ 797, 805 ], [ 21679, 21687 ], [ 23498, 23506 ], [ 24304, 24312 ] ], [ [ 807, 812 ], [ 1826, 1831 ], [ 2463, 2468 ] ], [ [ 847, 855 ], [ 7445, 7453 ] ], [ [ 889, 906 ], [ 6699, 6716 ] ], [ [ 940, 949 ], [ 8095, 8104 ] ], [ [ 983, 995 ], [ 3120, 3132 ], [ 11343, 11355 ], [ 16156, 16168 ], [ 5841, 5853 ], [ 6024, 6036 ], [ 10725, 10737 ], [ 12896, 12908 ], [ 15848, 15860 ], [ 18093, 18105 ], [ 24774, 24786 ], [ 24893, 24905 ], [ 25052, 25064 ], [ 25224, 25236 ], [ 25613, 25625 ], [ 26026, 26038 ], [ 26276, 26288 ], [ 26509, 26521 ], [ 26700, 26712 ] ], [ [ 997, 1019 ], [ 24974, 24996 ] ], [ [ 1078, 1108 ], [ 2918, 2948 ] ], [ [ 1185, 1195 ], [ 1407, 1417 ], [ 8625, 8635 ] ], [ [ 1351, 1361 ], [ 8593, 8603 ], [ 21688, 21698 ], [ 23507, 23517 ], [ 24313, 24323 ] ], [ [ 1487, 1496 ], [ 14005, 14014 ] ], [ [ 1615, 1630 ], [ 6200, 6215 ] ], [ [ 1773, 1795 ], [ 24073, 24095 ] ], [ [ 2425, 2447 ], [ 8176, 8198 ] ], [ [ 3101, 3119 ], [ 3652, 3670 ] ], [ [ 6682, 6698 ], [ 7357, 7373 ] ], [ [ 11329, 11342 ], [ 7508, 7521 ], [ 11602, 11615 ] ], [ [ 15950, 15969 ], [ 18403, 18422 ] ], [ [ 16145, 16155 ], [ 10064, 10074 ], [ 18039, 18049 ] ] ]
# a = 2

print("check this file")
[]
# technical
from .base_output import BaseOutput

# default
from .matplotlib_plot import MatplotlibPlot
from .extrema_printer import ExtremaPrinter

# with external dependencies
# imports are in the respective __init__ methods
# hack-ish, but works (and I am not aware of a more proper way to do so)
from .bokeh_plot import BokehPlot
from .neptune_logger import NeptuneLogger
from .tensorboard_logger import TensorboardLogger
from .tensorboard_tf_logger import TensorboardTFLogger

# with external dependencies
from . import matplotlib_subplots
[ [ [ 38, 48 ] ], [ [ 90, 104 ] ], [ [ 134, 148 ] ], [ [ 318, 327 ] ], [ [ 356, 369 ] ], [ [ 402, 419 ] ], [ [ 455, 474 ] ], [ [ 520, 539 ] ] ]
#!/usr/bin/env python3 # Copyright (c) 2016 The Sikacoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. """Test Hierarchical Deterministic wallet function.""" from test_framework.test_framework import SikacoinTestFramework from test_framework.util import ( assert_equal, connect_nodes_bi, ) import shutil class WalletHDTest(SikacoinTestFramework): def set_test_params(self): self.setup_clean_chain = True self.num_nodes = 2 self.extra_args = [['-usehd=0'], ['-usehd=1', '-keypool=0']] def run_test (self): tmpdir = self.options.tmpdir # Make sure can't switch off usehd after wallet creation self.stop_node(1) self.assert_start_raises_init_error(1, ['-usehd=0'], 'already existing HD wallet') self.start_node(1) connect_nodes_bi(self.nodes, 0, 1) # Make sure we use hd, keep masterkeyid masterkeyid = self.nodes[1].getwalletinfo()['hdmasterkeyid'] assert_equal(len(masterkeyid), 40) # create an internal key change_addr = self.nodes[1].getrawchangeaddress() change_addrV= self.nodes[1].validateaddress(change_addr) assert_equal(change_addrV["hdkeypath"], "m/0'/1'/0'") #first internal child key # Import a non-HD private key in the HD wallet non_hd_add = self.nodes[0].getnewaddress() self.nodes[1].importprivkey(self.nodes[0].dumpprivkey(non_hd_add)) # This should be enough to keep the master key and the non-HD key self.nodes[1].backupwallet(tmpdir + "/hd.bak") #self.nodes[1].dumpwallet(tmpdir + "/hd.dump") # Derive some HD addresses and remember the last # Also send funds to each add self.nodes[0].generate(101) hd_add = None num_hd_adds = 300 for i in range(num_hd_adds): hd_add = self.nodes[1].getnewaddress() hd_info = self.nodes[1].validateaddress(hd_add) assert_equal(hd_info["hdkeypath"], "m/0'/0'/"+str(i+1)+"'") assert_equal(hd_info["hdmasterkeyid"], masterkeyid) self.nodes[0].sendtoaddress(hd_add, 1) self.nodes[0].generate(1) self.nodes[0].sendtoaddress(non_hd_add, 1) self.nodes[0].generate(1) # create an internal key (again) change_addr = self.nodes[1].getrawchangeaddress() change_addrV= self.nodes[1].validateaddress(change_addr) assert_equal(change_addrV["hdkeypath"], "m/0'/1'/1'") #second internal child key self.sync_all() assert_equal(self.nodes[1].getbalance(), num_hd_adds + 1) self.log.info("Restore backup ...") self.stop_node(1) # we need to delete the complete regtest directory # otherwise node1 would auto-recover all funds in flag the keypool keys as used shutil.rmtree(tmpdir + "/node1/regtest/blocks") shutil.rmtree(tmpdir + "/node1/regtest/chainstate") shutil.copyfile(tmpdir + "/hd.bak", tmpdir + "/node1/regtest/wallet.dat") self.start_node(1) # Assert that derivation is deterministic hd_add_2 = None for _ in range(num_hd_adds): hd_add_2 = self.nodes[1].getnewaddress() hd_info_2 = self.nodes[1].validateaddress(hd_add_2) assert_equal(hd_info_2["hdkeypath"], "m/0'/0'/"+str(_+1)+"'") assert_equal(hd_info_2["hdmasterkeyid"], masterkeyid) assert_equal(hd_add, hd_add_2) connect_nodes_bi(self.nodes, 0, 1) self.sync_all() # Needs rescan self.stop_node(1) self.start_node(1, extra_args=self.extra_args[1] + ['-rescan']) assert_equal(self.nodes[1].getbalance(), num_hd_adds + 1) # send a tx and make sure its using the internal chain for the changeoutput txid = self.nodes[1].sendtoaddress(self.nodes[0].getnewaddress(), 1) outs = self.nodes[1].decoderawtransaction(self.nodes[1].gettransaction(txid)['hex'])['vout'] keypath = "" for out in outs: if out['value'] != 1: keypath = 
self.nodes[1].validateaddress(out['scriptPubKey']['addresses'][0])['hdkeypath'] assert_equal(keypath[0:7], "m/0'/1'") if __name__ == '__main__': WalletHDTest().main ()
[ [ [ 308, 329 ], [ 440, 461 ] ], [ [ 368, 380 ], [ 1071, 1083 ], [ 1271, 1283 ], [ 2059, 2071 ], [ 2131, 2143 ], [ 2530, 2542 ], [ 2644, 2656 ], [ 3386, 3398 ], [ 3460, 3472 ], [ 3522, 3534 ], [ 3750, 3762 ], [ 4274, 4286 ] ], [ [ 386, 402 ], [ 910, 926 ], [ 3561, 3577 ] ], [ [ 413, 419 ], [ 2928, 2934 ], [ 2984, 2990 ], [ 3044, 3050 ] ], [ [ 427, 439 ], [ 4344, 4356 ] ] ]
import torch.nn as nn import torch from torch.autograd import Variable import math import torch.utils.model_zoo as model_zoo from commons.siam_mask.models.features import Features __all__ = ['ResNet', 'resnet18', 'resnet34', 'resnet50', 'resnet101', 'resnet152'] model_urls = { 'resnet18': 'https://download.pytorch.org/models/resnet18-5c106cde.pth', 'resnet34': 'https://download.pytorch.org/models/resnet34-333f7ec4.pth', 'resnet50': 'https://download.pytorch.org/models/resnet50-19c8e357.pth', 'resnet101': 'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth', 'resnet152': 'https://download.pytorch.org/models/resnet152-b121ed2d.pth', } def conv3x3(in_planes, out_planes, stride=1): "3x3 convolution with padding" return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride, padding=1, bias=False) class BasicBlock(nn.Module): expansion = 1 def __init__(self, inplanes, planes, stride=1, downsample=None): super(BasicBlock, self).__init__() self.conv1 = conv3x3(inplanes, planes, stride) self.bn1 = nn.BatchNorm2d(planes) self.relu = nn.ReLU(inplace=True) self.conv2 = conv3x3(planes, planes) self.bn2 = nn.BatchNorm2d(planes) self.downsample = downsample self.stride = stride def forward(self, x): residual = x out = self.conv1(x) out = self.bn1(out) out = self.relu(out) out = self.conv2(out) out = self.bn2(out) if self.downsample is not None: residual = self.downsample(x) out += residual out = self.relu(out) return out class Bottleneck(Features): expansion = 4 def __init__(self, inplanes, planes, stride=1, downsample=None, dilation=1): super(Bottleneck, self).__init__() self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False) self.bn1 = nn.BatchNorm2d(planes) # padding = (2 - stride) + (dilation // 2 - 1) padding = 2 - stride assert stride==1 or dilation==1, "stride and dilation must have one equals to zero at least" if dilation > 1: padding = dilation self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride, padding=padding, bias=False, dilation=dilation) self.bn2 = nn.BatchNorm2d(planes) self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False) self.bn3 = nn.BatchNorm2d(planes * 4) self.relu = nn.ReLU(inplace=True) self.downsample = downsample self.stride = stride def forward(self, x): residual = x out = self.conv1(x) out = self.bn1(out) out = self.relu(out) out = self.conv2(out) out = self.bn2(out) out = self.relu(out) out = self.conv3(out) out = self.bn3(out) if self.downsample is not None: residual = self.downsample(x) if out.size() != residual.size(): print(out.size(), residual.size()) out += residual out = self.relu(out) return out class ResNet(nn.Module): def __init__(self, block, layers, layer4=False, layer3=False): self.inplanes = 64 super(ResNet, self).__init__() self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=0, # 3 bias=False) self.bn1 = nn.BatchNorm2d(64) self.relu = nn.ReLU(inplace=True) self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1) self.layer1 = self._make_layer(block, 64, layers[0]) self.layer2 = self._make_layer(block, 128, layers[1], stride=2) # 31x31, 15x15 self.feature_size = 128 * block.expansion if layer3: self.layer3 = self._make_layer(block, 256, layers[2], stride=1, dilation=2) # 15x15, 7x7 self.feature_size = (256 + 128) * block.expansion else: self.layer3 = lambda x:x # identity if layer4: self.layer4 = self._make_layer(block, 512, layers[3], stride=1, dilation=4) # 7x7, 3x3 self.feature_size = 512 * block.expansion else: self.layer4 = lambda x:x # identity for m in self.modules(): if 
isinstance(m, nn.Conv2d): n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels m.weight.data.normal_(0, math.sqrt(2. / n)) elif isinstance(m, nn.BatchNorm2d): m.weight.data.fill_(1) m.bias.data.zero_() def _make_layer(self, block, planes, blocks, stride=1, dilation=1): downsample = None dd = dilation if stride != 1 or self.inplanes != planes * block.expansion: if stride == 1 and dilation == 1: downsample = nn.Sequential( nn.Conv2d(self.inplanes, planes * block.expansion, kernel_size=1, stride=stride, bias=False), nn.BatchNorm2d(planes * block.expansion), ) else: if dilation > 1: dd = dilation // 2 padding = dd else: dd = 1 padding = 0 downsample = nn.Sequential( nn.Conv2d(self.inplanes, planes * block.expansion, kernel_size=3, stride=stride, bias=False, padding=padding, dilation=dd), nn.BatchNorm2d(planes * block.expansion), ) layers = [] # layers.append(block(self.inplanes, planes, stride, downsample, dilation=dilation)) layers.append(block(self.inplanes, planes, stride, downsample, dilation=dd)) self.inplanes = planes * block.expansion for i in range(1, blocks): layers.append(block(self.inplanes, planes, dilation=dilation)) return nn.Sequential(*layers) def forward(self, x): x = self.conv1(x) x = self.bn1(x) p0 = self.relu(x) x = self.maxpool(p0) p1 = self.layer1(x) p2 = self.layer2(p1) p3 = self.layer3(p2) return p0, p1, p2, p3 def resnet18(pretrained=False, **kwargs): """Constructs a ResNet-18 model. Args: pretrained (bool): If True, returns a model pre-trained on ImageNet """ model = ResNet(BasicBlock, [2, 2, 2, 2], **kwargs) if pretrained: model.load_state_dict(model_zoo.load_url(model_urls['resnet18'])) return model def resnet34(pretrained=False, **kwargs): """Constructs a ResNet-34 model. Args: pretrained (bool): If True, returns a model pre-trained on ImageNet """ model = ResNet(BasicBlock, [3, 4, 6, 3], **kwargs) if pretrained: model.load_state_dict(model_zoo.load_url(model_urls['resnet34'])) return model def resnet50(pretrained=False, **kwargs): """Constructs a ResNet-50 model. Args: pretrained (bool): If True, returns a model pre-trained on ImageNet """ model = ResNet(Bottleneck, [3, 4, 6, 3], **kwargs) if pretrained: model.load_state_dict(model_zoo.load_url(model_urls['resnet50'])) return model def resnet101(pretrained=False, **kwargs): """Constructs a ResNet-101 model. Args: pretrained (bool): If True, returns a model pre-trained on ImageNet """ model = ResNet(Bottleneck, [3, 4, 23, 3], **kwargs) if pretrained: model.load_state_dict(model_zoo.load_url(model_urls['resnet101'])) return model def resnet152(pretrained=False, **kwargs): """Constructs a ResNet-152 model. Args: pretrained (bool): If True, returns a model pre-trained on ImageNet """ model = ResNet(Bottleneck, [3, 8, 36, 3], **kwargs) if pretrained: model.load_state_dict(model_zoo.load_url(model_urls['resnet152'])) return model if __name__ == '__main__': net = resnet50() print(net) net = net.cuda() var = torch.FloatTensor(1,3,127,127).cuda() var = Variable(var) net(var) print('*************') var = torch.FloatTensor(1,3,255,255).cuda() var = Variable(var) net(var)
[ [ [ 7, 21 ], [ 903, 905 ], [ 3194, 3196 ], [ 777, 779 ], [ 1120, 1122 ], [ 1163, 1165 ], [ 1249, 1251 ], [ 1882, 1884 ], [ 1956, 1958 ], [ 2241, 2243 ], [ 2396, 2398 ], [ 2440, 2442 ], [ 2516, 2518 ], [ 2563, 2565 ], [ 3361, 3363 ], [ 3480, 3482 ], [ 3519, 3521 ], [ 3564, 3566 ], [ 4356, 4358 ], [ 4532, 4534 ], [ 4889, 4891 ], [ 4924, 4926 ], [ 5068, 5070 ], [ 5361, 5363 ], [ 5396, 5398 ], [ 5600, 5602 ], [ 6034, 6036 ] ], [ [ 29, 34 ], [ 8117, 8122 ], [ 8230, 8235 ] ], [ [ 62, 70 ], [ 8165, 8173 ], [ 8278, 8286 ] ], [ [ 78, 82 ], [ 4482, 4486 ] ], [ [ 90, 124 ], [ 6587, 6596 ], [ 6928, 6937 ], [ 7269, 7278 ], [ 7613, 7622 ], [ 7958, 7967 ] ], [ [ 171, 179 ], [ 1707, 1715 ] ], [ [ 181, 188 ] ], [ [ 277, 287 ], [ 6606, 6616 ], [ 6947, 6957 ], [ 7288, 7298 ], [ 7632, 7642 ], [ 7977, 7987 ] ], [ [ 689, 696 ], [ 1067, 1074 ], [ 1206, 1213 ] ], [ [ 892, 902 ], [ 1017, 1027 ], [ 6502, 6512 ], [ 6843, 6853 ] ], [ [ 1696, 1706 ], [ 1832, 1842 ], [ 7184, 7194 ], [ 7527, 7537 ], [ 7872, 7882 ] ], [ [ 3187, 3193 ], [ 3315, 3321 ], [ 6495, 6501 ], [ 6836, 6842 ], [ 7177, 7183 ], [ 7520, 7526 ], [ 7865, 7871 ] ], [ [ 6313, 6321 ] ], [ [ 6654, 6662 ] ], [ [ 6995, 7003 ], [ 8059, 8067 ] ], [ [ 7336, 7345 ] ], [ [ 7681, 7690 ] ], [ [ 8053, 8056 ], [ 8080, 8083 ], [ 8095, 8098 ] ], [ [ 8089, 8092 ], [ 8184, 8187 ], [ 8297, 8300 ] ], [ [ 8111, 8114 ], [ 8174, 8177 ] ], [ [ 8159, 8162 ], [ 8188, 8191 ] ], [ [ 8224, 8227 ], [ 8287, 8290 ] ], [ [ 8272, 8275 ], [ 8301, 8304 ] ] ]
import tensorflow as tf
from tensorflow.python.client import device_lib


def get_available_gpus():
    local_device_protos = device_lib.list_local_devices()
    return [x.name for x in local_device_protos if x.device_type == 'GPU']


gpus = get_available_gpus


def split_nest(nest, num_or_size_splits, axis=0):
    """Split nested structure.

    Examples
    --------
    >>> split_nest({'a': shape(10, 20), 'b': shape(4, 15)}, 2, axis=0)
    >>> [{'a': shape(5, 20), 'b': shape(2, 15)}, {'a': shape(5, 20), 'b': shape(2, 15)}]
    """
    flatten = tf.nest.flatten(nest)
    split_flatten = [tf.split(x, num_or_size_splits, axis=axis) for x in flatten]
    return [tf.nest.pack_sequence_as(nest, x) for x in zip(*split_flatten)]


def parameter_server_strategy_run(devices, fn, split_args, split_kwargs=None):
    split_kwargs = [{}] * len(devices) if split_kwargs is None else split_kwargs
    assert len(devices) == len(split_args) == len(split_kwargs)
    split_returns = []
    for device, args, kwargs in zip(devices, split_args, split_kwargs):
        with tf.device(device):
            args = args if isinstance(args, (list, tuple)) else (args,)
            split_returns.append(fn(*args, **kwargs))
    return split_returns


parellel_run = parameter_server_strategy_run


def average_gradients(tower_grads):
    """Calculate the average gradient for each shared variable across all towers.

    Note that this function provides a synchronization point across all towers.

    Parameters
    ----------
    tower_grads: List of lists of (gradient, variable) tuples. The outer list
        is over individual gradients. The inner list is over the gradient
        calculation for each tower.

    Returns
    -------
    List of pairs of (gradient, variable) where the gradient has been averaged
    across all towers.
    """
    average_grads = []
    for grad_and_vars in zip(*tower_grads):
        # Note that each grad_and_vars looks like the following:
        # ((grad0_gpu0, var0_gpu0), ... , (grad0_gpuN, var0_gpuN))
        grads = []
        for g, _ in grad_and_vars:
            # Add 0 dimension to the gradients to represent the tower.
            expanded_g = tf.expand_dims(g, 0)
            # Append on a 'tower' dimension which we will average over below.
            grads.append(expanded_g)
        # Average over the 'tower' dimension.
        grad = tf.concat(axis=0, values=grads)
        grad = tf.reduce_mean(grad, 0)
        # Keep in mind that the Variables are redundant because they are shared
        # across towers. So .. we will just return the first tower's pointer to
        # the Variable.
        v = grad_and_vars[0][1]
        grad_and_var = (grad, v)
        average_grads.append(grad_and_var)
    return average_grads
[ [ [ 7, 23 ], [ 554, 556 ], [ 597, 599 ], [ 670, 672 ], [ 1070, 1072 ], [ 2208, 2210 ], [ 2407, 2409 ], [ 2454, 2456 ] ], [ [ 62, 72 ], [ 127, 137 ] ], [ [ 79, 97 ], [ 242, 260 ] ], [ [ 235, 239 ] ], [ [ 267, 277 ] ], [ [ 740, 769 ], [ 1257, 1286 ] ], [ [ 1242, 1254 ] ], [ [ 1293, 1310 ] ] ]
# import gevent.monkey
# gevent.monkey.patch_socket()
from pyEtherCAT import MasterEtherCAT
import time
import os

#============================================================================#
# A simple EtherCAT package for C95.
# Ideally the mechanism would be built after developing a deeper understanding
# of the individual packets, but documentation and experiments could not keep
# up, so for now only the part that can toggle GPIO ON/OFF at high speed is
# collected here.
# It runs with Python3 on Linux (including Raspberry Pi).
# sudo python3 test03.py
#============================================================================#
# Simple library starts here
#============================================================================#


def EtherCAT_Init(nic):
    cat = MasterEtherCAT.MasterEtherCAT(nic)  # pass the network card address here
    return cat


def EtherCAT_SetUp(cat):
    cat.EEPROM_SetUp(cat.ADP)  # EEPROM setup, normally no changes needed
    cat.EEPROM_Stasus(enable=0x00, command=0x04)  # EEPROM setup, normally no changes needed

    ADDR = 0x0120  # AL control register
    data = 0x0002  # 2h: request the Pre-Operational state
    cat.APWR(IDX=0x00, ADP=cat.ADP, ADO=ADDR, DATA=[
             data & 0xFF, (data >> 8) & 0xFF])
    (DATA, WKC) = cat.socket_read()
    print("[0x{:04x}]= 0x{:04x}".format(ADDR, DATA[0] | DATA[1] << 8))

    ADDR = 0x0120  # AL control register
    data = 0x0002  # 2h: request the Pre-Operational state
    cat.APWR(IDX=0x00, ADP=cat.ADP, ADO=ADDR, DATA=[
             data & 0xFF, (data >> 8) & 0xFF])
    (DATA, WKC) = cat.socket_read()
    print("[0x{:04x}]= 0x{:04x}".format(ADDR, DATA[0] | DATA[1] << 8))

    ADDR = 0x0120  # AL control register
    data = 0x0004  # 4h: request the Safe-Operational state
    cat.APWR(IDX=0x00, ADP=cat.ADP, ADO=ADDR, DATA=[
             data & 0xFF, (data >> 8) & 0xFF])
    (DATA, WKC) = cat.socket_read()
    print("[0x{:04x}]= 0x{:04x}".format(ADDR, DATA[0] | DATA[1] << 8))

    ADDR = 0x0120  # AL control register
    data = 0x0008  # 8h: request the Operational state
    cat.APWR(IDX=0x00, ADP=cat.ADP, ADO=ADDR, DATA=[
             data & 0xFF, (data >> 8) & 0xFF])
    (DATA, WKC) = cat.socket_read()
    print("[0x{:04x}]= 0x{:04x}".format(ADDR, DATA[0] | DATA[1] << 8))


def EtherCAT_GPIOMode(cat, data):
    ADDR = 0x0F00  # digital I/O output data register
    # data = 0x00FF  # output data
    cat.APWR(IDX=0x00, ADP=cat.ADP, ADO=ADDR, DATA=[
             data & 0xFF, (data >> 8) & 0xFF])
    (DATA, WKC) = cat.socket_read()
    print("[0x{:04x}]= 0x{:04x}".format(ADDR, DATA[0] | DATA[1] << 8))


def EtherCAT_GPIO_Out(cat, data):
    ADDR = 0x0F10
    cat.APWR(IDX=0x00, ADP=cat.ADP, ADO=ADDR, DATA=[data & 0xFF, (data >> 8) & 0xFF])
    #(DATA,WKC) = cat.socket_read()

#============================================================================#
# End of the simple library
#============================================================================#


def main():
    cat = EtherCAT_Init("eth0")  # initial setup of the EtherCAT network

    #-- Move the EtherCAT state machine into operation
    cat.ADP = 0x0000  # the 1st slave seen from the PC is 0; subtract 1 for each further slave
    EtherCAT_SetUp(cat)  # initial setup of the EtherCAT slave
    EtherCAT_GPIOMode(cat, 0xFFFF)  # GPIO direction of the EtherCAT slave, 0: input, 1: output

    #-- Move the EtherCAT state machine into operation
    cat.ADP = 0x0000 - 1  # example: this is the 2nd slave; not needed if it is not connected
    EtherCAT_SetUp(cat)  # initial setup of the EtherCAT slave
    EtherCAT_GPIOMode(cat, 0xFFFF)  # GPIO direction of the EtherCAT slave, 0: input, 1: output

    #-- Move the EtherCAT state machine into operation
    cat.ADP = 0x0000 - 2  # example: this is the 3rd slave; not needed if it is not connected
    EtherCAT_SetUp(cat)  # initial setup of the EtherCAT slave
    EtherCAT_GPIOMode(cat, 0xFFFF)  # GPIO direction of the EtherCAT slave, 0: input, 1: output

    # -- Shift the LEDs of the 1st slave
    TIME = 0.1
    cat.ADP = 0x0000
    flag = 0
    CNT = 0
    try:
        while 1:
            # time.sleep(TIME)
            cat.ADP = 0x0000 - 0
            EtherCAT_GPIO_Out(cat, 0xFFFF)
            time.sleep(TIME)
            cat.ADP = 0x0000 - 1
            EtherCAT_GPIO_Out(cat, 0xFFFF)
            time.sleep(TIME)
            cat.ADP = 0x0000 - 2
            EtherCAT_GPIO_Out(cat, 0xFFFF)
            time.sleep(TIME)

            # for i in range(16):
            #     time.sleep(TIME)
            #     EtherCAT_GPIO_Out(cat,0x0001<<i);
            # for i in range(3):
            cat.ADP = 0x0000 - 0
            EtherCAT_GPIO_Out(cat, 0x0000)
            time.sleep(TIME)
            cat.ADP = 0x0000 - 1
            EtherCAT_GPIO_Out(cat, 0x0000)
            time.sleep(TIME)
            cat.ADP = 0x0000 - 2
            EtherCAT_GPIO_Out(cat, 0x0000)
            time.sleep(TIME)
            # EtherCAT_GPIO_Out(cat,0x0000);
            # for i in range(0xFFFF):
            #     EtherCAT_GPIO_Out(cat,i);
    except KeyboardInterrupt:
        EtherCAT_GPIO_Out(cat, 0x0000)
        print("")
        print("End.")


if __name__ == "__main__":
    main()
[ [ [ 77, 91 ], [ 584, 598 ] ], [ [ 99, 103 ], [ 3502, 3506 ], [ 3607, 3611 ], [ 3712, 3716 ], [ 3963, 3967 ], [ 4068, 4072 ], [ 4173, 4177 ] ], [ [ 111, 113 ] ], [ [ 554, 567 ], [ 2588, 2601 ] ], [ [ 660, 674 ], [ 2719, 2733 ], [ 2932, 2946 ], [ 3145, 3159 ] ], [ [ 1910, 1927 ], [ 2771, 2788 ], [ 2984, 3001 ], [ 3197, 3214 ] ], [ [ 2220, 2237 ], [ 3459, 3476 ], [ 3564, 3581 ], [ 3669, 3686 ], [ 3920, 3937 ], [ 4025, 4042 ], [ 4130, 4147 ], [ 4355, 4372 ] ], [ [ 2569, 2573 ], [ 4459, 4463 ] ] ]
"""Porcupine is a simple editor. You are probably reading this because you want to learn how Porcupine works or write fun plugins for it. I recommend getting started with the plugin API documentation: https://akuli.github.io/porcupine/ """ import sys import appdirs version_info = (0, 99, 2) # this is updated with scripts/release.py __version__ = "%d.%d.%d" % version_info __author__ = "Akuli" __copyright__ = "Copyright (c) 2017-2022 Akuli" __license__ = "MIT" if sys.platform in {"win32", "darwin"}: # these platforms like path names like "Program Files" or "Application Support" dirs = appdirs.AppDirs("Porcupine", "Akuli") else: dirs = appdirs.AppDirs("porcupine", "akuli") # Must be after creating dirs from porcupine import _state # TODO: document get_*_panedwindow get_main_window = _state.get_main_window get_parsed_args = _state.get_parsed_args get_horizontal_panedwindow = _state.get_horizontal_panedwindow get_vertical_panedwindow = _state.get_vertical_panedwindow get_tab_manager = _state.get_tab_manager filedialog_kwargs = _state.filedialog_kwargs quit = _state.quit
[ [ [ 254, 257 ], [ 477, 480 ] ], [ [ 266, 273 ], [ 609, 616 ], [ 664, 671 ] ], [ [ 275, 287 ], [ 371, 383 ] ], [ [ 344, 355 ] ], [ [ 384, 394 ] ], [ [ 405, 418 ] ], [ [ 453, 464 ] ], [ [ 602, 606 ] ], [ [ 657, 661 ] ], [ [ 755, 761 ], [ 816, 822 ], [ 857, 863 ], [ 909, 915 ], [ 970, 976 ], [ 1020, 1026 ], [ 1063, 1069 ], [ 1095, 1101 ] ], [ [ 798, 813 ] ], [ [ 839, 854 ] ], [ [ 880, 906 ] ], [ [ 943, 967 ] ], [ [ 1002, 1017 ] ], [ [ 1043, 1060 ] ], [ [ 1088, 1092 ] ] ]
from .PCA import PCA
from .InvariantsMiner import InvariantsMiner
from .LogClustering import LogClustering
from .LR import LR
from .SVM import SVM
from .DecisionTree import DecisionTree
from .IsolationForest import IsolationForest
from .DeepLog import DeepLog
from .Autoencoder import Autoencoder
from .AutoencoderLSTM import AutoencoderLSTM
from .AutoencoderCascade import AutoencoderCascade
from .AutoencoderConvolutional import AutoencoderConv
[ [ [ 17, 20 ] ], [ [ 50, 65 ] ], [ [ 93, 106 ] ], [ [ 123, 125 ] ], [ [ 143, 146 ] ], [ [ 173, 185 ] ], [ [ 215, 230 ] ], [ [ 252, 259 ] ], [ [ 285, 296 ] ], [ [ 326, 341 ] ], [ [ 374, 392 ] ], [ [ 431, 446 ] ] ]
import abc


class AbstractClassifier:
    """
    Abstract class with specific methods for classifier models (training, validation and test)
    """

    def __init__(self):
        pass

    @abc.abstractmethod
    def train(self, config, train_data):
        """
        Classifier training.
        :param config: Model configuration.
        :param train_data: Train dataset with the textual information of each item and its label.
        :return: A model trained with train_data according to config.
        """
        pass

    @abc.abstractmethod
    def validation(self, config, val_data):
        """
        :param config: Model configuration.
        :param val_data: Validation dataset with the textual information of each item and its label.
        :return: Validation metrics
        """
        pass

    @abc.abstractmethod
    def test(self, config, test_data):
        """
        Classifier testing.
        :param config: Model configuration.
        :param test_data: Test dataset with the textual information of each item and its label.
        :return: Predictions of the model in the test_data, according to config.
        """
        pass
[ [ [ 7, 10 ], [ 186, 189 ], [ 531, 534 ], [ 819, 822 ] ], [ [ 19, 37 ] ] ]
import frappe


def execute():
    # there is no more status called "Submitted"; an old issue used to set it
    # as Submitted, fixed in this commit
    frappe.db.sql("""
        update `tabPurchase Receipt`
        set status = 'To Bill'
        where status = 'Submitted'""")
[ [ [ 7, 13 ], [ 159, 165 ] ], [ [ 19, 26 ] ] ]
import os import unittest import tempfile from unittest import mock import uuid import mlflow import mlflow.db import mlflow.store.db.base_sql_model from mlflow.entities.model_registry import ( RegisteredModel, ModelVersion, RegisteredModelTag, ModelVersionTag, ) from mlflow.exceptions import MlflowException from mlflow.protos.databricks_pb2 import ( ErrorCode, RESOURCE_DOES_NOT_EXIST, INVALID_PARAMETER_VALUE, RESOURCE_ALREADY_EXISTS, ) from mlflow.store.model_registry.sqlalchemy_store import SqlAlchemyStore from tests.helper_functions import random_str DB_URI = "sqlite:///" class TestSqlAlchemyStoreSqlite(unittest.TestCase): def _get_store(self, db_uri=""): return SqlAlchemyStore(db_uri) def setUp(self): self.maxDiff = None # print all differences on assert failures fd, self.temp_dbfile = tempfile.mkstemp() # Close handle immediately so that we can remove the file later on in Windows os.close(fd) self.db_url = "%s%s" % (DB_URI, self.temp_dbfile) self.store = self._get_store(self.db_url) def tearDown(self): mlflow.store.db.base_sql_model.Base.metadata.drop_all(self.store.engine) os.remove(self.temp_dbfile) def _rm_maker(self, name, tags=None, description=None): return self.store.create_registered_model(name, tags, description) def _mv_maker( self, name, source="path/to/source", run_id=uuid.uuid4().hex, tags=None, run_link=None, description=None, ): return self.store.create_model_version( name, source, run_id, tags, run_link=run_link, description=description ) def _extract_latest_by_stage(self, latest_versions): return {mvd.current_stage: mvd.version for mvd in latest_versions} def test_create_registered_model(self): name = random_str() + "abCD" rm1 = self._rm_maker(name) self.assertEqual(rm1.name, name) self.assertEqual(rm1.description, None) # error on duplicate with self.assertRaisesRegex( MlflowException, rf"Registered Model \(name={name}\) already exists" ) as exception_context: self._rm_maker(name) assert exception_context.exception.error_code == ErrorCode.Name(RESOURCE_ALREADY_EXISTS) # slightly different name is ok for name2 in [name + "extra", name.lower(), name.upper(), name + name]: rm2 = self._rm_maker(name2) self.assertEqual(rm2.name, name2) # test create model with tags name2 = random_str() + "tags" tags = [ RegisteredModelTag("key", "value"), RegisteredModelTag("anotherKey", "some other value"), ] rm2 = self._rm_maker(name2, tags) rmd2 = self.store.get_registered_model(name2) self.assertEqual(rm2.name, name2) self.assertEqual(rm2.tags, {tag.key: tag.value for tag in tags}) self.assertEqual(rmd2.name, name2) self.assertEqual(rmd2.tags, {tag.key: tag.value for tag in tags}) # create with description name3 = random_str() + "-description" description = "the best model ever" rm3 = self._rm_maker(name3, description=description) rmd3 = self.store.get_registered_model(name3) self.assertEqual(rm3.name, name3) self.assertEqual(rm3.description, description) self.assertEqual(rmd3.name, name3) self.assertEqual(rmd3.description, description) # invalid model name will fail with self.assertRaisesRegex( MlflowException, r"Registered model name cannot be empty" ) as exception_context: self._rm_maker(None) assert exception_context.exception.error_code == ErrorCode.Name(INVALID_PARAMETER_VALUE) with self.assertRaisesRegex( MlflowException, r"Registered model name cannot be empty" ) as exception_context: self._rm_maker("") assert exception_context.exception.error_code == ErrorCode.Name(INVALID_PARAMETER_VALUE) def test_get_registered_model(self): name = "model_1" tags = [ 
RegisteredModelTag("key", "value"), RegisteredModelTag("anotherKey", "some other value"), ] # use fake clock with mock.patch("time.time") as mock_time: mock_time.return_value = 1234 rm = self._rm_maker(name, tags) self.assertEqual(rm.name, name) rmd = self.store.get_registered_model(name=name) self.assertEqual(rmd.name, name) self.assertEqual(rmd.creation_timestamp, 1234000) self.assertEqual(rmd.last_updated_timestamp, 1234000) self.assertEqual(rmd.description, None) self.assertEqual(rmd.latest_versions, []) self.assertEqual(rmd.tags, {tag.key: tag.value for tag in tags}) def test_update_registered_model(self): name = "model_for_update_RM" rm1 = self._rm_maker(name) rmd1 = self.store.get_registered_model(name=name) self.assertEqual(rm1.name, name) self.assertEqual(rmd1.description, None) # update description rm2 = self.store.update_registered_model(name=name, description="test model") rmd2 = self.store.get_registered_model(name=name) self.assertEqual(rm2.name, "model_for_update_RM") self.assertEqual(rmd2.name, "model_for_update_RM") self.assertEqual(rmd2.description, "test model") def test_rename_registered_model(self): original_name = "original name" new_name = "new name" self._rm_maker(original_name) self._mv_maker(original_name) self._mv_maker(original_name) rm = self.store.get_registered_model(original_name) mv1 = self.store.get_model_version(original_name, 1) mv2 = self.store.get_model_version(original_name, 2) self.assertEqual(rm.name, original_name) self.assertEqual(mv1.name, original_name) self.assertEqual(mv2.name, original_name) # test renaming registered model also updates its model versions self.store.rename_registered_model(original_name, new_name) rm = self.store.get_registered_model(new_name) mv1 = self.store.get_model_version(new_name, 1) mv2 = self.store.get_model_version(new_name, 2) self.assertEqual(rm.name, new_name) self.assertEqual(mv1.name, new_name) self.assertEqual(mv2.name, new_name) # test accessing the model with the old name will fail with self.assertRaisesRegex( MlflowException, rf"Registered Model with name={original_name} not found" ) as exception_context: self.store.get_registered_model(original_name) assert exception_context.exception.error_code == ErrorCode.Name(RESOURCE_DOES_NOT_EXIST) # test name another model with the replaced name is ok self._rm_maker(original_name) # cannot rename model to conflict with an existing model with self.assertRaisesRegex( MlflowException, rf"Registered Model \(name={original_name}\) already exists" ) as exception_context: self.store.rename_registered_model(new_name, original_name) assert exception_context.exception.error_code == ErrorCode.Name(RESOURCE_ALREADY_EXISTS) # invalid model name will fail with self.assertRaisesRegex( MlflowException, r"Registered model name cannot be empty" ) as exception_context: self.store.rename_registered_model(original_name, None) assert exception_context.exception.error_code == ErrorCode.Name(INVALID_PARAMETER_VALUE) with self.assertRaisesRegex( MlflowException, r"Registered model name cannot be empty" ) as exception_context: self.store.rename_registered_model(original_name, "") assert exception_context.exception.error_code == ErrorCode.Name(INVALID_PARAMETER_VALUE) def test_delete_registered_model(self): name = "model_for_delete_RM" self._rm_maker(name) self._mv_maker(name) rm1 = self.store.get_registered_model(name=name) mv1 = self.store.get_model_version(name, 1) self.assertEqual(rm1.name, name) self.assertEqual(mv1.name, name) # delete model self.store.delete_registered_model(name=name) 
# cannot get model with self.assertRaisesRegex( MlflowException, rf"Registered Model with name={name} not found" ) as exception_context: self.store.get_registered_model(name=name) assert exception_context.exception.error_code == ErrorCode.Name(RESOURCE_DOES_NOT_EXIST) # cannot update a delete model with self.assertRaisesRegex( MlflowException, rf"Registered Model with name={name} not found" ) as exception_context: self.store.update_registered_model(name=name, description="deleted") assert exception_context.exception.error_code == ErrorCode.Name(RESOURCE_DOES_NOT_EXIST) # cannot delete it again with self.assertRaisesRegex( MlflowException, rf"Registered Model with name={name} not found" ) as exception_context: self.store.delete_registered_model(name=name) assert exception_context.exception.error_code == ErrorCode.Name(RESOURCE_DOES_NOT_EXIST) # model versions are cascade deleted with the registered model with self.assertRaisesRegex( MlflowException, rf"Model Version \(name={name}, version=1\) not found" ) as exception_context: self.store.get_model_version(name, 1) assert exception_context.exception.error_code == ErrorCode.Name(RESOURCE_DOES_NOT_EXIST) def _list_registered_models(self, page_token=None, max_results=10): result = self.store.list_registered_models(max_results, page_token) for idx in range(len(result)): result[idx] = result[idx].name return result def test_list_registered_model(self): self._rm_maker("A") registered_models = self.store.list_registered_models(max_results=10, page_token=None) self.assertEqual(len(registered_models), 1) self.assertEqual(registered_models[0].name, "A") self.assertIsInstance(registered_models[0], RegisteredModel) self._rm_maker("B") self.assertEqual(set(self._list_registered_models()), set(["A", "B"])) self._rm_maker("BB") self._rm_maker("BA") self._rm_maker("AB") self._rm_maker("BBC") self.assertEqual( set(self._list_registered_models()), set(["A", "B", "BB", "BA", "AB", "BBC"]) ) # list should not return deleted models self.store.delete_registered_model(name="BA") self.store.delete_registered_model(name="B") self.assertEqual(set(self._list_registered_models()), set(["A", "BB", "AB", "BBC"])) def test_list_registered_model_paginated_last_page(self): rms = [self._rm_maker("RM{:03}".format(i)).name for i in range(50)] # test flow with fixed max_results returned_rms = [] result = self._list_registered_models(page_token=None, max_results=25) returned_rms.extend(result) while result.token: result = self._list_registered_models(page_token=result.token, max_results=25) self.assertEqual(len(result), 25) returned_rms.extend(result) self.assertEqual(result.token, None) self.assertEqual(set(rms), set(returned_rms)) def test_list_registered_model_paginated_returns_in_correct_order(self): rms = [self._rm_maker("RM{:03}".format(i)).name for i in range(50)] # test that pagination will return all valid results in sorted order # by name ascending result = self._list_registered_models(max_results=5) self.assertNotEqual(result.token, None) self.assertEqual(result, rms[0:5]) result = self._list_registered_models(page_token=result.token, max_results=10) self.assertNotEqual(result.token, None) self.assertEqual(result, rms[5:15]) result = self._list_registered_models(page_token=result.token, max_results=20) self.assertNotEqual(result.token, None) self.assertEqual(result, rms[15:35]) result = self._list_registered_models(page_token=result.token, max_results=100) # assert that page token is None self.assertEqual(result.token, None) self.assertEqual(result, rms[35:]) def 
test_list_registered_model_paginated_errors(self): rms = [self._rm_maker("RM{:03}".format(i)).name for i in range(50)] # test that providing a completely invalid page token throws with self.assertRaisesRegex( MlflowException, r"Invalid page token, could not base64-decode" ) as exception_context: self._list_registered_models(page_token="evilhax", max_results=20) assert exception_context.exception.error_code == ErrorCode.Name(INVALID_PARAMETER_VALUE) # test that providing too large of a max_results throws with self.assertRaisesRegex( MlflowException, r"Invalid value for request parameter max_results" ) as exception_context: self._list_registered_models(page_token="evilhax", max_results=1e15) assert exception_context.exception.error_code == ErrorCode.Name(INVALID_PARAMETER_VALUE) # list should not return deleted models self.store.delete_registered_model(name="RM{0:03}".format(0)) self.assertEqual(set(self._list_registered_models(max_results=100)), set(rms[1:])) def test_get_latest_versions(self): name = "test_for_latest_versions" self._rm_maker(name) rmd1 = self.store.get_registered_model(name=name) self.assertEqual(rmd1.latest_versions, []) mv1 = self._mv_maker(name) self.assertEqual(mv1.version, 1) rmd2 = self.store.get_registered_model(name=name) self.assertEqual(self._extract_latest_by_stage(rmd2.latest_versions), {"None": 1}) # add a bunch more mv2 = self._mv_maker(name) self.assertEqual(mv2.version, 2) self.store.transition_model_version_stage( name=mv2.name, version=mv2.version, stage="Production", archive_existing_versions=False ) mv3 = self._mv_maker(name) self.assertEqual(mv3.version, 3) self.store.transition_model_version_stage( name=mv3.name, version=mv3.version, stage="Production", archive_existing_versions=False ) mv4 = self._mv_maker(name) self.assertEqual(mv4.version, 4) self.store.transition_model_version_stage( name=mv4.name, version=mv4.version, stage="Staging", archive_existing_versions=False ) # test that correct latest versions are returned for each stage rmd4 = self.store.get_registered_model(name=name) self.assertEqual( self._extract_latest_by_stage(rmd4.latest_versions), {"None": 1, "Production": 3, "Staging": 4}, ) self.assertEqual( self._extract_latest_by_stage(self.store.get_latest_versions(name=name, stages=None)), {"None": 1, "Production": 3, "Staging": 4}, ) self.assertEqual( self._extract_latest_by_stage(self.store.get_latest_versions(name=name, stages=[])), {"None": 1, "Production": 3, "Staging": 4}, ) self.assertEqual( self._extract_latest_by_stage( self.store.get_latest_versions(name=name, stages=["Production"]) ), {"Production": 3}, ) self.assertEqual( self._extract_latest_by_stage( self.store.get_latest_versions(name=name, stages=["production"]) ), {"Production": 3}, ) # The stages are case insensitive. self.assertEqual( self._extract_latest_by_stage( self.store.get_latest_versions(name=name, stages=["pROduction"]) ), {"Production": 3}, ) # The stages are case insensitive. 
self.assertEqual( self._extract_latest_by_stage( self.store.get_latest_versions(name=name, stages=["None", "Production"]) ), {"None": 1, "Production": 3}, ) # delete latest Production, and should point to previous one self.store.delete_model_version(name=mv3.name, version=mv3.version) rmd5 = self.store.get_registered_model(name=name) self.assertEqual( self._extract_latest_by_stage(rmd5.latest_versions), {"None": 1, "Production": 2, "Staging": 4}, ) self.assertEqual( self._extract_latest_by_stage(self.store.get_latest_versions(name=name, stages=None)), {"None": 1, "Production": 2, "Staging": 4}, ) self.assertEqual( self._extract_latest_by_stage( self.store.get_latest_versions(name=name, stages=["Production"]) ), {"Production": 2}, ) def test_set_registered_model_tag(self): name1 = "SetRegisteredModelTag_TestMod" name2 = "SetRegisteredModelTag_TestMod 2" initial_tags = [ RegisteredModelTag("key", "value"), RegisteredModelTag("anotherKey", "some other value"), ] self._rm_maker(name1, initial_tags) self._rm_maker(name2, initial_tags) new_tag = RegisteredModelTag("randomTag", "not a random value") self.store.set_registered_model_tag(name1, new_tag) rm1 = self.store.get_registered_model(name=name1) all_tags = initial_tags + [new_tag] self.assertEqual(rm1.tags, {tag.key: tag.value for tag in all_tags}) # test overriding a tag with the same key overriding_tag = RegisteredModelTag("key", "overriding") self.store.set_registered_model_tag(name1, overriding_tag) all_tags = [tag for tag in all_tags if tag.key != "key"] + [overriding_tag] rm1 = self.store.get_registered_model(name=name1) self.assertEqual(rm1.tags, {tag.key: tag.value for tag in all_tags}) # does not affect other models with the same key rm2 = self.store.get_registered_model(name=name2) self.assertEqual(rm2.tags, {tag.key: tag.value for tag in initial_tags}) # can not set tag on deleted (non-existed) registered model self.store.delete_registered_model(name1) with self.assertRaisesRegex( MlflowException, rf"Registered Model with name={name1} not found" ) as exception_context: self.store.set_registered_model_tag(name1, overriding_tag) assert exception_context.exception.error_code == ErrorCode.Name(RESOURCE_DOES_NOT_EXIST) # test cannot set tags that are too long long_tag = RegisteredModelTag("longTagKey", "a" * 5001) with self.assertRaisesRegex( MlflowException, r"Registered model value '.+' had length \d+, which exceeded length limit of 5000", ) as exception_context: self.store.set_registered_model_tag(name2, long_tag) assert exception_context.exception.error_code == ErrorCode.Name(INVALID_PARAMETER_VALUE) # test can set tags that are somewhat long long_tag = RegisteredModelTag("longTagKey", "a" * 4999) self.store.set_registered_model_tag(name2, long_tag) # can not set invalid tag with self.assertRaisesRegex( MlflowException, r"Tag name cannot be None" ) as exception_context: self.store.set_registered_model_tag(name2, RegisteredModelTag(key=None, value="")) assert exception_context.exception.error_code == ErrorCode.Name(INVALID_PARAMETER_VALUE) # can not use invalid model name with self.assertRaisesRegex( MlflowException, r"Registered model name cannot be empty" ) as exception_context: self.store.set_registered_model_tag(None, RegisteredModelTag(key="key", value="value")) assert exception_context.exception.error_code == ErrorCode.Name(INVALID_PARAMETER_VALUE) def test_delete_registered_model_tag(self): name1 = "DeleteRegisteredModelTag_TestMod" name2 = "DeleteRegisteredModelTag_TestMod 2" initial_tags = [ RegisteredModelTag("key", 
"value"), RegisteredModelTag("anotherKey", "some other value"), ] self._rm_maker(name1, initial_tags) self._rm_maker(name2, initial_tags) new_tag = RegisteredModelTag("randomTag", "not a random value") self.store.set_registered_model_tag(name1, new_tag) self.store.delete_registered_model_tag(name1, "randomTag") rm1 = self.store.get_registered_model(name=name1) self.assertEqual(rm1.tags, {tag.key: tag.value for tag in initial_tags}) # testing deleting a key does not affect other models with the same key self.store.delete_registered_model_tag(name1, "key") rm1 = self.store.get_registered_model(name=name1) rm2 = self.store.get_registered_model(name=name2) self.assertEqual(rm1.tags, {"anotherKey": "some other value"}) self.assertEqual(rm2.tags, {tag.key: tag.value for tag in initial_tags}) # delete tag that is already deleted does nothing self.store.delete_registered_model_tag(name1, "key") rm1 = self.store.get_registered_model(name=name1) self.assertEqual(rm1.tags, {"anotherKey": "some other value"}) # can not delete tag on deleted (non-existed) registered model self.store.delete_registered_model(name1) with self.assertRaisesRegex( MlflowException, rf"Registered Model with name={name1} not found" ) as exception_context: self.store.delete_registered_model_tag(name1, "anotherKey") assert exception_context.exception.error_code == ErrorCode.Name(RESOURCE_DOES_NOT_EXIST) # can not delete tag with invalid key with self.assertRaisesRegex( MlflowException, r"Tag name cannot be None" ) as exception_context: self.store.delete_registered_model_tag(name2, None) assert exception_context.exception.error_code == ErrorCode.Name(INVALID_PARAMETER_VALUE) # can not use invalid model name with self.assertRaisesRegex( MlflowException, r"Registered model name cannot be empty" ) as exception_context: self.store.delete_registered_model_tag(None, "key") assert exception_context.exception.error_code == ErrorCode.Name(INVALID_PARAMETER_VALUE) def test_create_model_version(self): name = "test_for_update_MV" self._rm_maker(name) run_id = uuid.uuid4().hex with mock.patch("time.time") as mock_time: mock_time.return_value = 456778 mv1 = self._mv_maker(name, "a/b/CD", run_id) self.assertEqual(mv1.name, name) self.assertEqual(mv1.version, 1) mvd1 = self.store.get_model_version(mv1.name, mv1.version) self.assertEqual(mvd1.name, name) self.assertEqual(mvd1.version, 1) self.assertEqual(mvd1.current_stage, "None") self.assertEqual(mvd1.creation_timestamp, 456778000) self.assertEqual(mvd1.last_updated_timestamp, 456778000) self.assertEqual(mvd1.description, None) self.assertEqual(mvd1.source, "a/b/CD") self.assertEqual(mvd1.run_id, run_id) self.assertEqual(mvd1.status, "READY") self.assertEqual(mvd1.status_message, None) self.assertEqual(mvd1.tags, {}) # new model versions for same name autoincrement versions mv2 = self._mv_maker(name) mvd2 = self.store.get_model_version(name=mv2.name, version=mv2.version) self.assertEqual(mv2.version, 2) self.assertEqual(mvd2.version, 2) # create model version with tags return model version entity with tags tags = [ModelVersionTag("key", "value"), ModelVersionTag("anotherKey", "some other value")] mv3 = self._mv_maker(name, tags=tags) mvd3 = self.store.get_model_version(name=mv3.name, version=mv3.version) self.assertEqual(mv3.version, 3) self.assertEqual(mv3.tags, {tag.key: tag.value for tag in tags}) self.assertEqual(mvd3.version, 3) self.assertEqual(mvd3.tags, {tag.key: tag.value for tag in tags}) # create model versions with runLink run_link = "http://localhost:3000/path/to/run/" mv4 = 
self._mv_maker(name, run_link=run_link) mvd4 = self.store.get_model_version(name, mv4.version) self.assertEqual(mv4.version, 4) self.assertEqual(mv4.run_link, run_link) self.assertEqual(mvd4.version, 4) self.assertEqual(mvd4.run_link, run_link) # create model version with description description = "the best model ever" mv5 = self._mv_maker(name, description=description) mvd5 = self.store.get_model_version(name, mv5.version) self.assertEqual(mv5.version, 5) self.assertEqual(mv5.description, description) self.assertEqual(mvd5.version, 5) self.assertEqual(mvd5.description, description) # create model version without runId mv6 = self._mv_maker(name, run_id=None) mvd6 = self.store.get_model_version(name, mv6.version) self.assertEqual(mv6.version, 6) self.assertEqual(mv6.run_id, None) self.assertEqual(mvd6.version, 6) self.assertEqual(mvd6.run_id, None) def test_update_model_version(self): name = "test_for_update_MV" self._rm_maker(name) mv1 = self._mv_maker(name) mvd1 = self.store.get_model_version(name=mv1.name, version=mv1.version) self.assertEqual(mvd1.name, name) self.assertEqual(mvd1.version, 1) self.assertEqual(mvd1.current_stage, "None") # update stage self.store.transition_model_version_stage( name=mv1.name, version=mv1.version, stage="Production", archive_existing_versions=False ) mvd2 = self.store.get_model_version(name=mv1.name, version=mv1.version) self.assertEqual(mvd2.name, name) self.assertEqual(mvd2.version, 1) self.assertEqual(mvd2.current_stage, "Production") self.assertEqual(mvd2.description, None) # update description self.store.update_model_version( name=mv1.name, version=mv1.version, description="test model version" ) mvd3 = self.store.get_model_version(name=mv1.name, version=mv1.version) self.assertEqual(mvd3.name, name) self.assertEqual(mvd3.version, 1) self.assertEqual(mvd3.current_stage, "Production") self.assertEqual(mvd3.description, "test model version") # only valid stages can be set with self.assertRaisesRegex( MlflowException, r"Invalid Model Version stage unknown" ) as exception_context: self.store.transition_model_version_stage( mv1.name, mv1.version, stage="unknown", archive_existing_versions=False ) assert exception_context.exception.error_code == ErrorCode.Name(INVALID_PARAMETER_VALUE) # stages are case-insensitive and auto-corrected to system stage names for stage_name in ["STAGING", "staging", "StAgInG"]: self.store.transition_model_version_stage( name=mv1.name, version=mv1.version, stage=stage_name, archive_existing_versions=False, ) mvd5 = self.store.get_model_version(name=mv1.name, version=mv1.version) self.assertEqual(mvd5.current_stage, "Staging") def test_transition_model_version_stage_when_archive_existing_versions_is_false(self): name = "model" self._rm_maker(name) mv1 = self._mv_maker(name) mv2 = self._mv_maker(name) mv3 = self._mv_maker(name) # test that when `archive_existing_versions` is False, transitioning a model version # to the inactive stages ("Archived" and "None") does not throw. 
for stage in ["Archived", "None"]: self.store.transition_model_version_stage(name, mv1.version, stage, False) self.store.transition_model_version_stage(name, mv1.version, "Staging", False) self.store.transition_model_version_stage(name, mv2.version, "Production", False) self.store.transition_model_version_stage(name, mv3.version, "Staging", False) mvd1 = self.store.get_model_version(name=name, version=mv1.version) mvd2 = self.store.get_model_version(name=name, version=mv2.version) mvd3 = self.store.get_model_version(name=name, version=mv3.version) self.assertEqual(mvd1.current_stage, "Staging") self.assertEqual(mvd2.current_stage, "Production") self.assertEqual(mvd3.current_stage, "Staging") self.store.transition_model_version_stage(name, mv3.version, "Production", False) mvd1 = self.store.get_model_version(name=name, version=mv1.version) mvd2 = self.store.get_model_version(name=name, version=mv2.version) mvd3 = self.store.get_model_version(name=name, version=mv3.version) self.assertEqual(mvd1.current_stage, "Staging") self.assertEqual(mvd2.current_stage, "Production") self.assertEqual(mvd3.current_stage, "Production") def test_transition_model_version_stage_when_archive_existing_versions_is_true(self): name = "model" self._rm_maker(name) mv1 = self._mv_maker(name) mv2 = self._mv_maker(name) mv3 = self._mv_maker(name) msg = ( r"Model version transition cannot archive existing model versions " r"because .+ is not an Active stage" ) # test that when `archive_existing_versions` is True, transitioning a model version # to the inactive stages ("Archived" and "None") throws. for stage in ["Archived", "None"]: with self.assertRaisesRegex(MlflowException, msg): self.store.transition_model_version_stage(name, mv1.version, stage, True) self.store.transition_model_version_stage(name, mv1.version, "Staging", False) self.store.transition_model_version_stage(name, mv2.version, "Production", False) self.store.transition_model_version_stage(name, mv3.version, "Staging", True) mvd1 = self.store.get_model_version(name=name, version=mv1.version) mvd2 = self.store.get_model_version(name=name, version=mv2.version) mvd3 = self.store.get_model_version(name=name, version=mv3.version) self.assertEqual(mvd1.current_stage, "Archived") self.assertEqual(mvd2.current_stage, "Production") self.assertEqual(mvd3.current_stage, "Staging") self.assertEqual(mvd1.last_updated_timestamp, mvd3.last_updated_timestamp) self.store.transition_model_version_stage(name, mv3.version, "Production", True) mvd1 = self.store.get_model_version(name=name, version=mv1.version) mvd2 = self.store.get_model_version(name=name, version=mv2.version) mvd3 = self.store.get_model_version(name=name, version=mv3.version) self.assertEqual(mvd1.current_stage, "Archived") self.assertEqual(mvd2.current_stage, "Archived") self.assertEqual(mvd3.current_stage, "Production") self.assertEqual(mvd2.last_updated_timestamp, mvd3.last_updated_timestamp) for uncanonical_stage_name in ["STAGING", "staging", "StAgInG"]: self.store.transition_model_version_stage(mv1.name, mv1.version, "Staging", False) self.store.transition_model_version_stage(mv2.name, mv2.version, "None", False) # stage names are case-insensitive and auto-corrected to system stage names self.store.transition_model_version_stage( mv2.name, mv2.version, uncanonical_stage_name, True ) mvd1 = self.store.get_model_version(name=mv1.name, version=mv1.version) mvd2 = self.store.get_model_version(name=mv2.name, version=mv2.version) self.assertEqual(mvd1.current_stage, "Archived") 
self.assertEqual(mvd2.current_stage, "Staging") def test_delete_model_version(self): name = "test_for_delete_MV" initial_tags = [ ModelVersionTag("key", "value"), ModelVersionTag("anotherKey", "some other value"), ] self._rm_maker(name) mv = self._mv_maker(name, tags=initial_tags) mvd = self.store.get_model_version(name=mv.name, version=mv.version) self.assertEqual(mvd.name, name) self.store.delete_model_version(name=mv.name, version=mv.version) # cannot get a deleted model version with self.assertRaisesRegex( MlflowException, rf"Model Version \(name={mv.name}, version={mv.version}\) not found" ) as exception_context: self.store.get_model_version(name=mv.name, version=mv.version) assert exception_context.exception.error_code == ErrorCode.Name(RESOURCE_DOES_NOT_EXIST) # cannot update a delete with self.assertRaisesRegex( MlflowException, rf"Model Version \(name={mv.name}, version={mv.version}\) not found" ) as exception_context: self.store.update_model_version(mv.name, mv.version, description="deleted!") assert exception_context.exception.error_code == ErrorCode.Name(RESOURCE_DOES_NOT_EXIST) # cannot delete it again with self.assertRaisesRegex( MlflowException, rf"Model Version \(name={mv.name}, version={mv.version}\) not found" ) as exception_context: self.store.delete_model_version(name=mv.name, version=mv.version) assert exception_context.exception.error_code == ErrorCode.Name(RESOURCE_DOES_NOT_EXIST) def test_delete_model_version_redaction(self): name = "test_for_delete_MV_redaction" run_link = "http://localhost:5000/path/to/run" run_id = "12345" source = "path/to/source" self._rm_maker(name) mv = self._mv_maker(name, source=source, run_id=run_id, run_link=run_link) mvd = self.store.get_model_version(name=name, version=mv.version) self.assertEqual(mvd.run_link, run_link) self.assertEqual(mvd.run_id, run_id) self.assertEqual(mvd.source, source) # delete the MV now self.store.delete_model_version(name, mv.version) # verify that the relevant fields are redacted mvd_deleted = self.store._get_sql_model_version_including_deleted( name=name, version=mv.version ) self.assertIn("REDACTED", mvd_deleted.run_link) self.assertIn("REDACTED", mvd_deleted.source) self.assertIn("REDACTED", mvd_deleted.run_id) def test_get_model_version_download_uri(self): name = "test_for_update_MV" self._rm_maker(name) source_path = "path/to/source" mv = self._mv_maker(name, source=source_path, run_id=uuid.uuid4().hex) mvd1 = self.store.get_model_version(name=mv.name, version=mv.version) self.assertEqual(mvd1.name, name) self.assertEqual(mvd1.source, source_path) # download location points to source self.assertEqual( self.store.get_model_version_download_uri(name=mv.name, version=mv.version), source_path ) # download URI does not change even if model version is updated self.store.transition_model_version_stage( name=mv.name, version=mv.version, stage="Production", archive_existing_versions=False ) self.store.update_model_version( name=mv.name, version=mv.version, description="Test for Path" ) mvd2 = self.store.get_model_version(name=mv.name, version=mv.version) self.assertEqual(mvd2.source, source_path) self.assertEqual( self.store.get_model_version_download_uri(name=mv.name, version=mv.version), source_path ) # cannot retrieve download URI for deleted model versions self.store.delete_model_version(name=mv.name, version=mv.version) with self.assertRaisesRegex( MlflowException, rf"Model Version \(name={mv.name}, version={mv.version}\) not found" ) as exception_context: 
self.store.get_model_version_download_uri(name=mv.name, version=mv.version) assert exception_context.exception.error_code == ErrorCode.Name(RESOURCE_DOES_NOT_EXIST) def test_search_model_versions(self): # create some model versions name = "test_for_search_MV" self._rm_maker(name) run_id_1 = uuid.uuid4().hex run_id_2 = uuid.uuid4().hex run_id_3 = uuid.uuid4().hex mv1 = self._mv_maker(name=name, source="A/B", run_id=run_id_1) self.assertEqual(mv1.version, 1) mv2 = self._mv_maker(name=name, source="A/C", run_id=run_id_2) self.assertEqual(mv2.version, 2) mv3 = self._mv_maker(name=name, source="A/D", run_id=run_id_2) self.assertEqual(mv3.version, 3) mv4 = self._mv_maker(name=name, source="A/D", run_id=run_id_3) self.assertEqual(mv4.version, 4) def search_versions(filter_string): return [mvd.version for mvd in self.store.search_model_versions(filter_string)] # search using name should return all 4 versions self.assertEqual(set(search_versions("name='%s'" % name)), set([1, 2, 3, 4])) # search using run_id_1 should return version 1 self.assertEqual(set(search_versions("run_id='%s'" % run_id_1)), set([1])) # search using run_id_2 should return versions 2 and 3 self.assertEqual(set(search_versions("run_id='%s'" % run_id_2)), set([2, 3])) # search using the IN operator should return all versions self.assertEqual( set( search_versions( "run_id IN ('{run_id_1}','{run_id_2}')".format( run_id_1=run_id_1, run_id_2=run_id_2 ) ) ), set([1, 2, 3]), ) # search using the IN operator with bad lists should return exceptions with self.assertRaisesRegex( MlflowException, ( r"While parsing a list in the query, " r"expected string value or punctuation, " r"but got different type in list" ), ) as exception_context: search_versions("run_id IN (1,2,3)") assert exception_context.exception.error_code == ErrorCode.Name(INVALID_PARAMETER_VALUE) # search using the IN operator with empty lists should return exceptions with self.assertRaisesRegex( MlflowException, ( r"While parsing a list in the query, " r"expected a non-empty list of string values, " r"but got empty list" ), ) as exception_context: search_versions("run_id IN ()") assert exception_context.exception.error_code == ErrorCode.Name(INVALID_PARAMETER_VALUE) # search using an ill-formed IN operator correctly throws exception with self.assertRaisesRegex( MlflowException, r"Invalid clause\(s\) in filter string" ) as exception_context: search_versions("run_id IN (") assert exception_context.exception.error_code == ErrorCode.Name(INVALID_PARAMETER_VALUE) with self.assertRaisesRegex(MlflowException, r"Invalid filter '.+'") as exception_context: search_versions("run_id IN") assert exception_context.exception.error_code == ErrorCode.Name(INVALID_PARAMETER_VALUE) with self.assertRaisesRegex( MlflowException, ( r"While parsing a list in the query, " r"expected a non-empty list of string values, " r"but got ill-formed list" ), ) as exception_context: search_versions("run_id IN (,)") assert exception_context.exception.error_code == ErrorCode.Name(INVALID_PARAMETER_VALUE) with self.assertRaisesRegex( MlflowException, ( r"While parsing a list in the query, " r"expected a non-empty list of string values, " r"but got ill-formed list" ), ) as exception_context: search_versions("run_id IN ('runid1',,'runid2')") assert exception_context.exception.error_code == ErrorCode.Name(INVALID_PARAMETER_VALUE) # search using the IN operator is not allowed with other additional filters with self.assertRaisesRegex( MlflowException, r"Search filter '.+' contains multiple expressions" ) as 
exception_context: search_versions( "name='{name}]' AND run_id IN ('{run_id_1}','{run_id_2}')".format( name=name, run_id_1=run_id_1, run_id_2=run_id_2 ) ) assert exception_context.exception.error_code == ErrorCode.Name(INVALID_PARAMETER_VALUE) # search using source_path "A/D" should return version 3 and 4 self.assertEqual(set(search_versions("source_path = 'A/D'")), set([3, 4])) # search using source_path "A" should not return anything self.assertEqual(len(search_versions("source_path = 'A'")), 0) self.assertEqual(len(search_versions("source_path = 'A/'")), 0) self.assertEqual(len(search_versions("source_path = ''")), 0) # delete mv4. search should not return version 4 self.store.delete_model_version(name=mv4.name, version=mv4.version) self.assertEqual(set(search_versions("")), set([1, 2, 3])) self.assertEqual(set(search_versions(None)), set([1, 2, 3])) self.assertEqual(set(search_versions("name='%s'" % name)), set([1, 2, 3])) self.assertEqual(set(search_versions("source_path = 'A/D'")), set([3])) self.store.transition_model_version_stage( name=mv1.name, version=mv1.version, stage="production", archive_existing_versions=False ) self.store.update_model_version( name=mv1.name, version=mv1.version, description="Online prediction model!" ) mvds = self.store.search_model_versions("run_id = '%s'" % run_id_1) assert 1 == len(mvds) assert isinstance(mvds[0], ModelVersion) assert mvds[0].current_stage == "Production" assert mvds[0].run_id == run_id_1 assert mvds[0].source == "A/B" assert mvds[0].description == "Online prediction model!" def _search_registered_models( self, filter_string, max_results=10, order_by=None, page_token=None ): result = self.store.search_registered_models( filter_string=filter_string, max_results=max_results, order_by=order_by, page_token=page_token, ) return [registered_model.name for registered_model in result], result.token def test_search_registered_models(self): # create some registered models prefix = "test_for_search_" names = [prefix + name for name in ["RM1", "RM2", "RM3", "RM4", "RM4A", "RM4a"]] for name in names: self._rm_maker(name) # search with no filter should return all registered models rms, _ = self._search_registered_models(None) self.assertEqual(rms, names) # equality search using name should return exactly the 1 name rms, _ = self._search_registered_models("name='{}'".format(names[0])) self.assertEqual(rms, [names[0]]) # equality search using name that is not valid should return nothing rms, _ = self._search_registered_models("name='{}'".format(names[0] + "cats")) self.assertEqual(rms, []) # case-sensitive prefix search using LIKE should return all the RMs rms, _ = self._search_registered_models("name LIKE '{}%'".format(prefix)) self.assertEqual(rms, names) # case-sensitive prefix search using LIKE with surrounding % should return all the RMs rms, _ = self._search_registered_models("name LIKE '%RM%'") self.assertEqual(rms, names) # case-sensitive prefix search using LIKE with surrounding % should return all the RMs # _e% matches test_for_search_ , so all RMs should match rms, _ = self._search_registered_models("name LIKE '_e%'") self.assertEqual(rms, names) # case-sensitive prefix search using LIKE should return just rm4 rms, _ = self._search_registered_models("name LIKE '{}%'".format(prefix + "RM4A")) self.assertEqual(rms, [names[4]]) # case-sensitive prefix search using LIKE should return no models if no match rms, _ = self._search_registered_models("name LIKE '{}%'".format(prefix + "cats")) self.assertEqual(rms, []) # confirm that LIKE is not 
case-sensitive rms, _ = self._search_registered_models("name lIkE '%blah%'") self.assertEqual(rms, []) rms, _ = self._search_registered_models("name like '{}%'".format(prefix + "RM4A")) self.assertEqual(rms, [names[4]]) # case-insensitive prefix search using ILIKE should return both rm5 and rm6 rms, _ = self._search_registered_models("name ILIKE '{}%'".format(prefix + "RM4A")) self.assertEqual(rms, names[4:]) # case-insensitive postfix search with ILIKE rms, _ = self._search_registered_models("name ILIKE '%RM4a'") self.assertEqual(rms, names[4:]) # case-insensitive prefix search using ILIKE should return both rm5 and rm6 rms, _ = self._search_registered_models("name ILIKE '{}%'".format(prefix + "cats")) self.assertEqual(rms, []) # confirm that ILIKE is not case-sensitive rms, _ = self._search_registered_models("name iLike '%blah%'") self.assertEqual(rms, []) # confirm that ILIKE works for empty query rms, _ = self._search_registered_models("name iLike '%%'") self.assertEqual(rms, names) rms, _ = self._search_registered_models("name ilike '%RM4a'") self.assertEqual(rms, names[4:]) # cannot search by invalid comparator types with self.assertRaisesRegex( MlflowException, r"Expected a quoted string value for attributes" ) as exception_context: self._search_registered_models("name!=something") assert exception_context.exception.error_code == ErrorCode.Name(INVALID_PARAMETER_VALUE) # cannot search by run_id with self.assertRaisesRegex( MlflowException, r"Invalid attribute key '.+' specified" ) as exception_context: self._search_registered_models("run_id='%s'" % "somerunID") assert exception_context.exception.error_code == ErrorCode.Name(INVALID_PARAMETER_VALUE) # cannot search by source_path with self.assertRaisesRegex( MlflowException, r"Invalid attribute key '.+' specified" ) as exception_context: self._search_registered_models("source_path = 'A/D'") assert exception_context.exception.error_code == ErrorCode.Name(INVALID_PARAMETER_VALUE) # cannot search by other params with self.assertRaisesRegex( MlflowException, r"Invalid clause\(s\) in filter string" ) as exception_context: self._search_registered_models("evilhax = true") assert exception_context.exception.error_code == ErrorCode.Name(INVALID_PARAMETER_VALUE) # delete last registered model. 
search should not return the first 5 self.store.delete_registered_model(name=names[-1]) self.assertEqual(self._search_registered_models(None, max_results=1000), (names[:-1], None)) # equality search using name should return no names self.assertEqual(self._search_registered_models("name='{}'".format(names[-1])), ([], None)) # case-sensitive prefix search using LIKE should return all the RMs self.assertEqual( self._search_registered_models("name LIKE '{}%'".format(prefix)), (names[0:5], None) ) # case-insensitive prefix search using ILIKE should return both rm5 and rm6 self.assertEqual( self._search_registered_models("name ILIKE '{}%'".format(prefix + "RM4A")), ([names[4]], None), ) def test_parse_search_registered_models_order_by(self): # test that "registered_models.name ASC" is returned by default parsed = SqlAlchemyStore._parse_search_registered_models_order_by([]) self.assertEqual([str(x) for x in parsed], ["registered_models.name ASC"]) # test that the given 'name' replaces the default one ('registered_models.name ASC') parsed = SqlAlchemyStore._parse_search_registered_models_order_by(["name DESC"]) self.assertEqual([str(x) for x in parsed], ["registered_models.name DESC"]) # test that an exception is raised when order_by contains duplicate fields msg = "`order_by` contains duplicate fields:" with self.assertRaisesRegex(MlflowException, msg): SqlAlchemyStore._parse_search_registered_models_order_by( ["last_updated_timestamp", "last_updated_timestamp"] ) with self.assertRaisesRegex(MlflowException, msg): SqlAlchemyStore._parse_search_registered_models_order_by(["timestamp", "timestamp"]) with self.assertRaisesRegex(MlflowException, msg): SqlAlchemyStore._parse_search_registered_models_order_by( ["timestamp", "last_updated_timestamp"], ) with self.assertRaisesRegex(MlflowException, msg): SqlAlchemyStore._parse_search_registered_models_order_by( ["last_updated_timestamp ASC", "last_updated_timestamp DESC"], ) with self.assertRaisesRegex(MlflowException, msg): SqlAlchemyStore._parse_search_registered_models_order_by( ["last_updated_timestamp", "last_updated_timestamp DESC"], ) def test_search_registered_model_pagination(self): rms = [self._rm_maker("RM{:03}".format(i)).name for i in range(50)] # test flow with fixed max_results returned_rms = [] query = "name LIKE 'RM%'" result, token = self._search_registered_models(query, page_token=None, max_results=5) returned_rms.extend(result) while token: result, token = self._search_registered_models(query, page_token=token, max_results=5) returned_rms.extend(result) self.assertEqual(rms, returned_rms) # test that pagination will return all valid results in sorted order # by name ascending result, token1 = self._search_registered_models(query, max_results=5) self.assertNotEqual(token1, None) self.assertEqual(result, rms[0:5]) result, token2 = self._search_registered_models(query, page_token=token1, max_results=10) self.assertNotEqual(token2, None) self.assertEqual(result, rms[5:15]) result, token3 = self._search_registered_models(query, page_token=token2, max_results=20) self.assertNotEqual(token3, None) self.assertEqual(result, rms[15:35]) result, token4 = self._search_registered_models(query, page_token=token3, max_results=100) # assert that page token is None self.assertEqual(token4, None) self.assertEqual(result, rms[35:]) # test that providing a completely invalid page token throws with self.assertRaisesRegex( MlflowException, r"Invalid page token, could not base64-decode" ) as exception_context: self._search_registered_models(query, 
page_token="evilhax", max_results=20) assert exception_context.exception.error_code == ErrorCode.Name(INVALID_PARAMETER_VALUE) # test that providing too large of a max_results throws with self.assertRaisesRegex( MlflowException, r"Invalid value for request parameter max_results" ) as exception_context: self._search_registered_models(query, page_token="evilhax", max_results=1e15) assert exception_context.exception.error_code == ErrorCode.Name(INVALID_PARAMETER_VALUE) self.assertIn( "Invalid value for request parameter max_results", exception_context.exception.message ) def test_search_registered_model_order_by(self): rms = [] # explicitly mock the creation_timestamps because timestamps seem to be unstable in Windows for i in range(50): with mock.patch("mlflow.store.model_registry.sqlalchemy_store.now", return_value=i): rms.append(self._rm_maker("RM{:03}".format(i)).name) # test flow with fixed max_results and order_by (test stable order across pages) returned_rms = [] query = "name LIKE 'RM%'" result, token = self._search_registered_models( query, page_token=None, order_by=["name DESC"], max_results=5 ) returned_rms.extend(result) while token: result, token = self._search_registered_models( query, page_token=token, order_by=["name DESC"], max_results=5 ) returned_rms.extend(result) # name descending should be the opposite order of the current order self.assertEqual(rms[::-1], returned_rms) # last_updated_timestamp descending should have the newest RMs first result, _ = self._search_registered_models( query, page_token=None, order_by=["last_updated_timestamp DESC"], max_results=100 ) self.assertEqual(rms[::-1], result) # timestamp returns same result as last_updated_timestamp result, _ = self._search_registered_models( query, page_token=None, order_by=["timestamp DESC"], max_results=100 ) self.assertEqual(rms[::-1], result) # last_updated_timestamp ascending should have the oldest RMs first result, _ = self._search_registered_models( query, page_token=None, order_by=["last_updated_timestamp ASC"], max_results=100 ) self.assertEqual(rms, result) # timestamp returns same result as last_updated_timestamp result, _ = self._search_registered_models( query, page_token=None, order_by=["timestamp ASC"], max_results=100 ) self.assertEqual(rms, result) # timestamp returns same result as last_updated_timestamp result, _ = self._search_registered_models( query, page_token=None, order_by=["timestamp"], max_results=100 ) self.assertEqual(rms, result) # name ascending should have the original order result, _ = self._search_registered_models( query, page_token=None, order_by=["name ASC"], max_results=100 ) self.assertEqual(rms, result) # test that no ASC/DESC defaults to ASC result, _ = self._search_registered_models( query, page_token=None, order_by=["last_updated_timestamp"], max_results=100 ) self.assertEqual(rms, result) with mock.patch("mlflow.store.model_registry.sqlalchemy_store.now", return_value=1): rm1 = self._rm_maker("MR1").name rm2 = self._rm_maker("MR2").name with mock.patch("mlflow.store.model_registry.sqlalchemy_store.now", return_value=2): rm3 = self._rm_maker("MR3").name rm4 = self._rm_maker("MR4").name query = "name LIKE 'MR%'" # test with multiple clauses result, _ = self._search_registered_models( query, page_token=None, order_by=["last_updated_timestamp ASC", "name DESC"], max_results=100, ) self.assertEqual([rm2, rm1, rm4, rm3], result) result, _ = self._search_registered_models( query, page_token=None, order_by=["timestamp ASC", "name DESC"], max_results=100 ) self.assertEqual([rm2, 
rm1, rm4, rm3], result) # confirm that name ascending is the default, even if ties exist on other fields result, _ = self._search_registered_models( query, page_token=None, order_by=[], max_results=100 ) self.assertEqual([rm1, rm2, rm3, rm4], result) # test default tiebreak with descending timestamps result, _ = self._search_registered_models( query, page_token=None, order_by=["last_updated_timestamp DESC"], max_results=100 ) self.assertEqual([rm3, rm4, rm1, rm2], result) # test timestamp parsing result, _ = self._search_registered_models( query, page_token=None, order_by=["timestamp\tASC"], max_results=100 ) self.assertEqual([rm1, rm2, rm3, rm4], result) result, _ = self._search_registered_models( query, page_token=None, order_by=["timestamp\r\rASC"], max_results=100 ) self.assertEqual([rm1, rm2, rm3, rm4], result) result, _ = self._search_registered_models( query, page_token=None, order_by=["timestamp\nASC"], max_results=100 ) self.assertEqual([rm1, rm2, rm3, rm4], result) result, _ = self._search_registered_models( query, page_token=None, order_by=["timestamp ASC"], max_results=100 ) self.assertEqual([rm1, rm2, rm3, rm4], result) # validate order by key is case-insensitive result, _ = self._search_registered_models( query, page_token=None, order_by=["timestamp asc"], max_results=100 ) self.assertEqual([rm1, rm2, rm3, rm4], result) result, _ = self._search_registered_models( query, page_token=None, order_by=["timestamp aSC"], max_results=100 ) self.assertEqual([rm1, rm2, rm3, rm4], result) result, _ = self._search_registered_models( query, page_token=None, order_by=["timestamp desc", "name desc"], max_results=100 ) self.assertEqual([rm4, rm3, rm2, rm1], result) result, _ = self._search_registered_models( query, page_token=None, order_by=["timestamp deSc", "name deSc"], max_results=100 ) self.assertEqual([rm4, rm3, rm2, rm1], result) def test_search_registered_model_order_by_errors(self): query = "name LIKE 'RM%'" # test that invalid columns throw even if they come after valid columns with self.assertRaisesRegex( MlflowException, r"Invalid order by key '.+' specified" ) as exception_context: self._search_registered_models( query, page_token=None, order_by=["name ASC", "creation_timestamp DESC"], max_results=5, ) assert exception_context.exception.error_code == ErrorCode.Name(INVALID_PARAMETER_VALUE) # test that invalid columns with random text throw even if they come after valid columns with self.assertRaisesRegex( MlflowException, r"Invalid order_by clause '.+'" ) as exception_context: self._search_registered_models( query, page_token=None, order_by=["name ASC", "last_updated_timestamp DESC blah"], max_results=5, ) assert exception_context.exception.error_code == ErrorCode.Name(INVALID_PARAMETER_VALUE) def test_set_model_version_tag(self): name1 = "SetModelVersionTag_TestMod" name2 = "SetModelVersionTag_TestMod 2" initial_tags = [ ModelVersionTag("key", "value"), ModelVersionTag("anotherKey", "some other value"), ] self._rm_maker(name1) self._rm_maker(name2) run_id_1 = uuid.uuid4().hex run_id_2 = uuid.uuid4().hex run_id_3 = uuid.uuid4().hex self._mv_maker(name1, "A/B", run_id_1, initial_tags) self._mv_maker(name1, "A/C", run_id_2, initial_tags) self._mv_maker(name2, "A/D", run_id_3, initial_tags) new_tag = ModelVersionTag("randomTag", "not a random value") self.store.set_model_version_tag(name1, 1, new_tag) all_tags = initial_tags + [new_tag] rm1mv1 = self.store.get_model_version(name1, 1) self.assertEqual(rm1mv1.tags, {tag.key: tag.value for tag in all_tags}) # test overriding a tag 
with the same key overriding_tag = ModelVersionTag("key", "overriding") self.store.set_model_version_tag(name1, 1, overriding_tag) all_tags = [tag for tag in all_tags if tag.key != "key"] + [overriding_tag] rm1mv1 = self.store.get_model_version(name1, 1) self.assertEqual(rm1mv1.tags, {tag.key: tag.value for tag in all_tags}) # does not affect other model versions with the same key rm1mv2 = self.store.get_model_version(name1, 2) rm2mv1 = self.store.get_model_version(name2, 1) self.assertEqual(rm1mv2.tags, {tag.key: tag.value for tag in initial_tags}) self.assertEqual(rm2mv1.tags, {tag.key: tag.value for tag in initial_tags}) # can not set tag on deleted (non-existed) model version self.store.delete_model_version(name1, 2) with self.assertRaisesRegex( MlflowException, rf"Model Version \(name={name1}, version=2\) not found" ) as exception_context: self.store.set_model_version_tag(name1, 2, overriding_tag) assert exception_context.exception.error_code == ErrorCode.Name(RESOURCE_DOES_NOT_EXIST) # test cannot set tags that are too long long_tag = ModelVersionTag("longTagKey", "a" * 5001) with self.assertRaisesRegex( MlflowException, r"Model version value '.+' had length \d+, which exceeded length limit of 5000", ) as exception_context: self.store.set_model_version_tag(name1, 1, long_tag) assert exception_context.exception.error_code == ErrorCode.Name(INVALID_PARAMETER_VALUE) # test can set tags that are somewhat long long_tag = ModelVersionTag("longTagKey", "a" * 4999) self.store.set_model_version_tag(name1, 1, long_tag) # can not set invalid tag with self.assertRaisesRegex( MlflowException, r"Tag name cannot be None" ) as exception_context: self.store.set_model_version_tag(name2, 1, ModelVersionTag(key=None, value="")) assert exception_context.exception.error_code == ErrorCode.Name(INVALID_PARAMETER_VALUE) # can not use invalid model name or version with self.assertRaisesRegex( MlflowException, r"Registered model name cannot be empty" ) as exception_context: self.store.set_model_version_tag(None, 1, ModelVersionTag(key="key", value="value")) assert exception_context.exception.error_code == ErrorCode.Name(INVALID_PARAMETER_VALUE) with self.assertRaisesRegex( MlflowException, r"Model version must be an integer" ) as exception_context: self.store.set_model_version_tag( name2, "I am not a version", ModelVersionTag(key="key", value="value") ) assert exception_context.exception.error_code == ErrorCode.Name(INVALID_PARAMETER_VALUE) def test_delete_model_version_tag(self): name1 = "DeleteModelVersionTag_TestMod" name2 = "DeleteModelVersionTag_TestMod 2" initial_tags = [ ModelVersionTag("key", "value"), ModelVersionTag("anotherKey", "some other value"), ] self._rm_maker(name1) self._rm_maker(name2) run_id_1 = uuid.uuid4().hex run_id_2 = uuid.uuid4().hex run_id_3 = uuid.uuid4().hex self._mv_maker(name1, "A/B", run_id_1, initial_tags) self._mv_maker(name1, "A/C", run_id_2, initial_tags) self._mv_maker(name2, "A/D", run_id_3, initial_tags) new_tag = ModelVersionTag("randomTag", "not a random value") self.store.set_model_version_tag(name1, 1, new_tag) self.store.delete_model_version_tag(name1, 1, "randomTag") rm1mv1 = self.store.get_model_version(name1, 1) self.assertEqual(rm1mv1.tags, {tag.key: tag.value for tag in initial_tags}) # testing deleting a key does not affect other model versions with the same key self.store.delete_model_version_tag(name1, 1, "key") rm1mv1 = self.store.get_model_version(name1, 1) rm1mv2 = self.store.get_model_version(name1, 2) rm2mv1 = self.store.get_model_version(name2, 1) 
self.assertEqual(rm1mv1.tags, {"anotherKey": "some other value"}) self.assertEqual(rm1mv2.tags, {tag.key: tag.value for tag in initial_tags}) self.assertEqual(rm2mv1.tags, {tag.key: tag.value for tag in initial_tags}) # delete tag that is already deleted does nothing self.store.delete_model_version_tag(name1, 1, "key") rm1mv1 = self.store.get_model_version(name1, 1) self.assertEqual(rm1mv1.tags, {"anotherKey": "some other value"}) # can not delete tag on deleted (non-existed) model version self.store.delete_model_version(name2, 1) with self.assertRaisesRegex( MlflowException, rf"Model Version \(name={name2}, version=1\) not found" ) as exception_context: self.store.delete_model_version_tag(name2, 1, "key") assert exception_context.exception.error_code == ErrorCode.Name(RESOURCE_DOES_NOT_EXIST) # can not delete tag with invalid key with self.assertRaisesRegex( MlflowException, r"Tag name cannot be None" ) as exception_context: self.store.delete_model_version_tag(name1, 2, None) assert exception_context.exception.error_code == ErrorCode.Name(INVALID_PARAMETER_VALUE) # can not use invalid model name or version with self.assertRaisesRegex( MlflowException, r"Registered model name cannot be empty" ) as exception_context: self.store.delete_model_version_tag(None, 2, "key") assert exception_context.exception.error_code == ErrorCode.Name(INVALID_PARAMETER_VALUE) with self.assertRaisesRegex( MlflowException, r"Model version must be an integer" ) as exception_context: self.store.delete_model_version_tag(name1, "I am not a version", "key") assert exception_context.exception.error_code == ErrorCode.Name(INVALID_PARAMETER_VALUE)
[ [ [ 7, 9 ], [ 985, 987 ], [ 1220, 1222 ] ], [ [ 17, 25 ], [ 651, 659 ] ], [ [ 34, 42 ], [ 872, 880 ] ], [ [ 64, 68 ], [ 4374, 4378 ], [ 23086, 23090 ], [ 53302, 53306 ], [ 55885, 55889 ], [ 56068, 56072 ] ], [ [ 76, 80 ], [ 1480, 1484 ], [ 23056, 23060 ], [ 35466, 35470 ], [ 37118, 37122 ], [ 37154, 37158 ], [ 37190, 37194 ], [ 60542, 60546 ], [ 60578, 60582 ], [ 60614, 60618 ], [ 64419, 64423 ], [ 64455, 64459 ], [ 64491, 64495 ] ], [ [ 89, 95 ] ], [ [ 103, 112 ] ], [ [ 120, 150 ], [ 1139, 1145 ] ], [ [ 200, 215 ], [ 10448, 10463 ] ], [ [ 221, 233 ], [ 42946, 42958 ] ], [ [ 239, 257 ], [ 2676, 2694 ], [ 2724, 2742 ], [ 4224, 4242 ], [ 4272, 4290 ], [ 17518, 17536 ], [ 17566, 17584 ], [ 17736, 17754 ], [ 18105, 18123 ], [ 19129, 19147 ], [ 19600, 19618 ], [ 19920, 19938 ], [ 20291, 20309 ], [ 20624, 20642 ], [ 20672, 20690 ], [ 20842, 20860 ] ], [ [ 263, 278 ], [ 24289, 24304 ], [ 24322, 24337 ], [ 32751, 32766 ], [ 32796, 32811 ], [ 60357, 60372 ], [ 60402, 60417 ], [ 60832, 60847 ], [ 61199, 61214 ], [ 62374, 62389 ], [ 62839, 62854 ], [ 63156, 63171 ], [ 63535, 63550 ], [ 63900, 63915 ], [ 64234, 64249 ], [ 64279, 64294 ], [ 64709, 64724 ] ], [ [ 312, 327 ], [ 2132, 2147 ], [ 3641, 3656 ], [ 3910, 3925 ], [ 6660, 6675 ], [ 7138, 7153 ], [ 7505, 7520 ], [ 7809, 7824 ], [ 8548, 8563 ], [ 8886, 8901 ], [ 9244, 9259 ], [ 9617, 9632 ], [ 12939, 12954 ], [ 13325, 13340 ], [ 18795, 18810 ], [ 19223, 19238 ], [ 19789, 19804 ], [ 20147, 20162 ], [ 21992, 22007 ], [ 22354, 22369 ], [ 22681, 22696 ], [ 27222, 27237 ], [ 30421, 30436 ], [ 33227, 33242 ], [ 33600, 33615 ], [ 33987, 34002 ], [ 36651, 36666 ], [ 38720, 38735 ], [ 39238, 39253 ], [ 39740, 39755 ], [ 40006, 40021 ], [ 40257, 40272 ], [ 40689, 40704 ], [ 41222, 41237 ], [ 46882, 46897 ], [ 47223, 47238 ], [ 47570, 47585 ], [ 47912, 47927 ], [ 49744, 49759 ], [ 49957, 49972 ], [ 50114, 50129 ], [ 50315, 50330 ], [ 50538, 50553 ], [ 52272, 52287 ], [ 52667, 52682 ], [ 59245, 59260 ], [ 59787, 59802 ], [ 62033, 62048 ], [ 62465, 62480 ], [ 63025, 63040 ], [ 63391, 63406 ], [ 63724, 63739 ], [ 66005, 66020 ], [ 66367, 66382 ], [ 66705, 66720 ], [ 67005, 67020 ] ], [ [ 375, 384 ], [ 2323, 2332 ], [ 3821, 3830 ], [ 4088, 4097 ], [ 6882, 6891 ], [ 7377, 7386 ], [ 7720, 7729 ], [ 8022, 8031 ], [ 8757, 8766 ], [ 9121, 9130 ], [ 9456, 9465 ], [ 9828, 9837 ], [ 13171, 13180 ], [ 13563, 13572 ], [ 19021, 19030 ], [ 19490, 19499 ], [ 20017, 20026 ], [ 20394, 20403 ], [ 22219, 22228 ], [ 22551, 22560 ], [ 22892, 22901 ], [ 27524, 27533 ], [ 33477, 33486 ], [ 33864, 33873 ], [ 34240, 34249 ], [ 36914, 36923 ], [ 39067, 39076 ], [ 39574, 39583 ], [ 39929, 39938 ], [ 40167, 40176 ], [ 40599, 40608 ], [ 41048, 41057 ], [ 41592, 41601 ], [ 47099, 47108 ], [ 47441, 47450 ], [ 47782, 47791 ], [ 48119, 48128 ], [ 52513, 52522 ], [ 52914, 52923 ], [ 59601, 59610 ], [ 60145, 60154 ], [ 62266, 62275 ], [ 62729, 62738 ], [ 63250, 63259 ], [ 63635, 63644 ], [ 64013, 64022 ], [ 66232, 66241 ], [ 66564, 66573 ], [ 66916, 66925 ], [ 67231, 67240 ] ], [ [ 390, 413 ], [ 6897, 6920 ], [ 8772, 8795 ], [ 9136, 9159 ], [ 9471, 9494 ], [ 9843, 9866 ], [ 19036, 19059 ], [ 22234, 22257 ], [ 33492, 33515 ], [ 33879, 33902 ], [ 34255, 34278 ], [ 36929, 36952 ], [ 62281, 62304 ], [ 66247, 66270 ] ], [ [ 419, 442 ], [ 3836, 3859 ], [ 4103, 4126 ], [ 7735, 7758 ], [ 8037, 8060 ], [ 13186, 13209 ], [ 13578, 13601 ], [ 19505, 19528 ], [ 20032, 20055 ], [ 20409, 20432 ], [ 22566, 22589 ], [ 22907, 22930 ], [ 27539, 27562 ], [ 39082, 39105 ], [ 39589, 39612 ], [ 39944, 39967 ], [ 
40182, 40205 ], [ 40614, 40637 ], [ 41063, 41086 ], [ 41607, 41630 ], [ 47114, 47137 ], [ 47456, 47479 ], [ 47797, 47820 ], [ 48134, 48157 ], [ 52528, 52551 ], [ 52929, 52952 ], [ 59616, 59639 ], [ 60160, 60183 ], [ 62744, 62767 ], [ 63265, 63288 ], [ 63650, 63673 ], [ 64028, 64051 ], [ 66579, 66602 ], [ 66931, 66954 ], [ 67246, 67269 ] ], [ [ 448, 471 ], [ 2338, 2361 ], [ 7392, 7415 ] ], [ [ 532, 547 ], [ 723, 738 ], [ 49159, 49174 ], [ 49414, 49429 ], [ 49779, 49794 ], [ 49992, 50007 ], [ 50149, 50164 ], [ 50350, 50365 ], [ 50573, 50588 ] ], [ [ 583, 593 ], [ 1907, 1917 ], [ 2625, 2635 ], [ 3167, 3177 ] ], [ [ 595, 601 ], [ 1030, 1036 ] ], [ [ 625, 650 ] ] ]
# Copyright (c) 2014-2017, NVIDIA CORPORATION. All rights reserved. import itertools import os import shutil import tempfile import mock from nose.tools import raises, assert_raises try: from . import parse_s3 from digits.tools.mock_s3_walker import MockS3Walker import_failed = False except ImportError: import_failed = True from digits import test_utils test_utils.skipIfNotFramework('none') class TestUnescape(): @classmethod def setUpClass(cls): if import_failed: test_utils.skipTest('Could not import parse_s3, most likely cause is Boto not installed') def test_hello(self): assert parse_s3.unescape('hello') == 'hello' def test_space(self): assert parse_s3.unescape('%20') == ' ' class TestValidateS3(): @classmethod def setUpClass(cls): if import_failed: test_utils.skipTest('Could not import parse_s3, most likely cause is Boto not installed') cls.mock_walker = MockS3Walker() def test_non_existent_bucket(self): result = parse_s3.validate_s3(self.mock_walker, 'nonexistentbucket', '') assert not result def test_empty_bucket(self): result = parse_s3.validate_s3(self.mock_walker, 'emptybucket', '') assert not result def test_valid_endpoint(self): result = parse_s3.validate_s3(self.mock_walker, 'validbucket', '') assert result class TestValidateOutputFile(): @classmethod def setUpClass(cls): if import_failed: test_utils.skipTest('Could not import parse_s3, most likely cause is Boto not installed') cls.tmpdir = tempfile.mkdtemp() _handle, cls.tmpfile = tempfile.mkstemp(dir=cls.tmpdir) @classmethod def tearDownClass(cls): try: shutil.rmtree(cls.tmpdir) except IOError: pass def test_missing_file(self): assert parse_s3.validate_output_file(None) is True, 'all new files should be valid' def test_file(self): assert parse_s3.validate_output_file(os.path.join(self.tmpdir, 'output.txt')) is True @mock.patch('os.access') def test_local_file(self, mock_access): mock_access.return_value = True assert parse_s3.validate_output_file('not-a-file.txt') is True, 'relative paths should be accepted' @mock.patch('os.access') def test_not_writeable(self, mock_access): mock_access.return_value = False assert parse_s3.validate_output_file(self.tmpfile) is False, 'should not succeed without write permission' def test_existing_file(self): assert parse_s3.validate_output_file(self.tmpfile) is False def test_nonexistent_dir(self): assert parse_s3.validate_output_file( os.path.join( os.path.abspath('not-a-dir'), 'output.txt' ) ) is False class TestValidateInputFile(): @classmethod def setUpClass(cls): if import_failed: test_utils.skipTest('Could not import parse_s3, most likely cause is Boto not installed') _handle, cls.tmpfile = tempfile.mkstemp() os.close(_handle) @classmethod def tearDownClass(cls): os.remove(cls.tmpfile) def test_missing_file(self): assert parse_s3.validate_input_file('not-a-file.txt') is False, 'should not pass on missing file' @mock.patch('os.access') def test_not_readable(self, mock_access): mock_access.return_value = False assert parse_s3.validate_input_file(self.tmpfile) is False, 'should not succeed without read permission' class TestValidateRange(): @classmethod def setUpClass(cls): if import_failed: test_utils.skipTest('Could not import parse_s3, most likely cause is Boto not installed') def test_no_range(self): assert parse_s3.validate_range(0) is True def test_min_less(self): assert parse_s3.validate_range(-1, min_value=0) is False def test_min_equal(self): assert parse_s3.validate_range(0, min_value=0) is True def test_min_more(self): assert parse_s3.validate_range(1, min_value=0) is True def 
test_max_less(self): assert parse_s3.validate_range(9, max_value=10) is True def test_max_equal(self): assert parse_s3.validate_range(10, max_value=10) is True def test_max_more(self): assert parse_s3.validate_range(11, max_value=10) is False def test_allow_none_true(self): assert parse_s3.validate_range(None, allow_none=True) is True def test_allow_none_false(self): assert parse_s3.validate_range(None, allow_none=False) is False def test_string(self): assert parse_s3.validate_range('foo') is False @mock.patch('digits.tools.parse_s3.validate_output_file') @mock.patch('digits.tools.parse_s3.validate_input_file') class TestCalculatePercentages(): @classmethod def setUpClass(cls): if import_failed: test_utils.skipTest('Could not import parse_s3, most likely cause is Boto not installed') @raises(AssertionError) def test_making_0(self, mock_input, mock_output): parse_s3.calculate_percentages(None, None, None, None, None, None, None) def test_making_1(self, mock_input, mock_output): mock_input.return_value = True mock_output.return_value = True expected_outputs = [ ('train_file', (100, 0, 0)), ('val_file', (0, 100, 0)), ('test_file', (0, 0, 100)) ] for supplied, expected in expected_outputs: args = {k: None for k in ['labels_file', 'train_file', 'percent_train', 'val_file', 'percent_val', 'test_file', 'percent_test']} args.update({supplied: ''}) output = parse_s3.calculate_percentages(**args) assert output == expected, 'expected output of {}, got {}'.format(output, expected) def test_making_2(self, mock_input, mock_output): mock_input.return_value = True mock_output.return_value = True permutes = itertools.combinations(['train', 'val', 'test'], 2) expected_outputs = itertools.izip(permutes, itertools.repeat((32, 68))) for supplied, expected in expected_outputs: args = {k: None for k in ['labels_file', 'train_file', 'percent_train', 'val_file', 'percent_val', 'test_file', 'percent_test']} args.update({k + '_file': '' for k in supplied}) args.update({'percent_' + k: v for k, v in itertools.izip(supplied, expected)}) # Tricky line. itertools returns combinations in sorted order, always. # The order of the returned non-zero values should always be correct. 
output = [x for x in parse_s3.calculate_percentages(**args) if x != 0] assert output == list(expected), 'expected output of {}, got {}'.format(output, expected) def test_making_3_all_given(self, mock_input, mock_output): mock_input.return_value = True mock_output.return_value = True expected = (25, 30, 45) assert parse_s3.calculate_percentages( labels_file='not-a-file.txt', train_file='not-a-file.txt', percent_train=25, val_file='not-a-file.txt', percent_val=30, test_file='not-a-file.txt', percent_test=45 ) == expected, 'Calculate percentages should return identical values of {}'.format(expected) def test_making_3_2_given(self, mock_input, mock_output): mock_input.return_value = True mock_output.return_value = True expected = 45 assert parse_s3.calculate_percentages( labels_file='not-a-file.txt', train_file='not-a-file.txt', percent_train=25, val_file='not-a-file.txt', percent_val=30, test_file='not-a-file.txt', percent_test=None )[2] == expected, 'Calculate percentages should calculate third value of {}'.format(expected) @raises(AssertionError) def test_making_out_of_range(self, mock_input, mock_output): mock_input.return_value = True mock_output.return_value = True # should raise AssertionError because percentages not between 0-100 are invalid parse_s3.calculate_percentages( labels_file='not-a-file.txt', train_file='not-a-file.txt', percent_train=-1, val_file=None, percent_val=None, test_file=None, percent_test=None ) class TestParseWebListing(): @classmethod def setUpClass(cls): if import_failed: test_utils.skipTest('Could not import parse_s3, most likely cause is Boto not installed') def test_non_url(self): for url in ['not-a-url', 'http://not-a-url', 'https://not-a-url']: yield self.check_url_raises, url def check_url_raises(self, url): assert_raises(Exception, parse_s3.parse_web_listing, url) def test_mock_url(self): for content, dirs, files in [ # Nothing ('', [], []), # Apache 2.2.22 ( '<head></head><body><table>\n \ <tr><td><a href="/home/">Parent</a></td></tr>\n \ <tr><td><a href="cat1/">cat1/</a></td><td>01-Jan-2015 12:34</td><td> - </td></tr>\n \ <tr><td><a href="cat2/">cat2/</a></td><td>02-Feb-2015 23:45</td><td> - </td></tr>\n \ <tr><td><a href="cat.jpg">cat.jpg</a></td><td>03-Mar-2015 1:23</td><td> 1 </td></tr>\n \ </table</body>\n', ['cat1/', 'cat2/'], ['cat.jpg'], ), # Apache 2.4.7 ( '<html><head></head><body><table>\n \ <tr><td><a href="/home/">Parent</a></td></tr>\n \ <tr><td><a href="dog/">dog/</a></td><td>01-01-2015 12:34</td><td> - </td></tr>\n \ <tr><td><a href="dog1.jpeg">dog1.jpeg</a></td><td>02-02-2015 23:45</td><td> 1 </td></tr>\n \ <tr><td><a href="dog2.png">dog2.png</a></td><td>03-03-2015 1:23</td><td> 2 </td></tr>\n \ </table</body></html>\n', ['dog/'], ['dog1.jpeg', 'dog2.png'], ), # Nginx ( '<html><head></head><body>\n \ <a href="bird.jpg">bird.jpg</a> 01-Jan-1999 01:23 1\n \ <a href="birds/">birds/</a> 02-Feb-1999 12:34 -', ['birds/'], ['bird.jpg'], ), ]: with mock.patch('digits.tools.parse_s3.requests') as mock_requests: response = mock.Mock() response.status_code = mock_requests.codes.ok response.content = content mock_requests.get.return_value = response yield self.check_listing, (dirs, files) def check_listing(self, rc): assert parse_s3.parse_web_listing('any_url') == rc class TestSplitIndices(): @classmethod def setUpClass(cls): if import_failed: test_utils.skipTest('Could not import parse_s3, most likely cause is Boto not installed') def test_indices(self): for size in [5, 22, 32]: for percent_b in range(0, 100, 31): for percent_c in range(0, 100 - 
percent_b, 41): yield self.check_split, size, percent_b, percent_c def check_split(self, size, pct_b, pct_c): ideala = size * float(100 - pct_b - pct_c) / 100.0 idealb = size * float(100 - pct_c) / 100.0 idxa, idxb = parse_s3.three_way_split_indices(size, pct_b, pct_c) assert abs(ideala - idxa) <= 2, 'split should be close to {}, is {}'.format(ideala, idxa) assert abs(idealb - idxb) <= 2, 'split should be close to {}, is {}'.format(idealb, idxb) class TestParseS3(): @classmethod def setUpClass(cls): if import_failed: test_utils.skipTest('Could not import parse_s3, most likely cause is Boto not installed') def test_all_train(self): classes = range(10) mock_walker = MockS3Walker(classes) try: tmpdir = tempfile.mkdtemp() labels_file = tempfile.mkstemp(dir=tmpdir) train_file = tempfile.mkstemp(dir=tmpdir) parse_s3.parse_s3(mock_walker, 'validbucket', 'train/', labels_file[1], percent_train=100, train_file=train_file[1], percent_val=0, percent_test=0) with open(labels_file[1]) as infile: parsed_classes = [line.strip() for line in infile] expected_classes = [str(i) for i in classes] assert parsed_classes == expected_classes, '%s != %s' % (parsed_classes, classes) finally: shutil.rmtree(tmpdir) def test_neg_all_train(self): try: classes = range(1) mock_walker = MockS3Walker(classes) tmpdir = tempfile.mkdtemp() labels_file = tempfile.mkstemp(dir=tmpdir) train_file = tempfile.mkstemp(dir=tmpdir) assert not parse_s3.parse_s3(mock_walker, 'invalidbucket', 'train/', labels_file[1], percent_train=100, train_file=train_file[1], percent_val=0, percent_test=0) finally: shutil.rmtree(tmpdir)
[ [ [ 77, 86 ], [ 6125, 6134 ], [ 6204, 6213 ], [ 6229, 6238 ], [ 6605, 6614 ] ], [ [ 94, 96 ], [ 2064, 2066 ], [ 2766, 2768 ], [ 2796, 2798 ], [ 3150, 3152 ], [ 3222, 3224 ] ], [ [ 104, 110 ], [ 1800, 1806 ], [ 12764, 12770 ], [ 13305, 13311 ] ], [ [ 118, 126 ], [ 1646, 1654 ], [ 1696, 1704 ], [ 3123, 3131 ], [ 12140, 12148 ], [ 12185, 12193 ], [ 12239, 12247 ], [ 12934, 12942 ], [ 12979, 12987 ], [ 13033, 13041 ] ], [ [ 135, 139 ], [ 2119, 2123 ], [ 2341, 2345 ], [ 3391, 3395 ], [ 4766, 4770 ], [ 4824, 4828 ], [ 10522, 10526 ], [ 10612, 10616 ] ], [ [ 163, 169 ], [ 5091, 5097 ], [ 8064, 8070 ] ], [ [ 171, 184 ], [ 8959, 8972 ] ], [ [ 209, 217 ], [ 654, 662 ], [ 734, 742 ], [ 1062, 1070 ], [ 1203, 1211 ], [ 1340, 1348 ], [ 1916, 1924 ], [ 2034, 2042 ], [ 2242, 2250 ], [ 2468, 2476 ], [ 2618, 2626 ], [ 2723, 2731 ], [ 3294, 3302 ], [ 3517, 3525 ], [ 3860, 3868 ], [ 3940, 3948 ], [ 4036, 4044 ], [ 4129, 4137 ], [ 4222, 4230 ], [ 4317, 4325 ], [ 4412, 4420 ], [ 4515, 4523 ], [ 4623, 4631 ], [ 4723, 4731 ], [ 5176, 5184 ], [ 5836, 5844 ], [ 6841, 6849 ], [ 7185, 7193 ], [ 7710, 7718 ], [ 8328, 8336 ], [ 8984, 8992 ], [ 10892, 10900 ], [ 11559, 11567 ], [ 12281, 12289 ], [ 13085, 13093 ] ], [ [ 262, 274 ], [ 989, 1001 ], [ 12084, 12096 ], [ 12891, 12903 ] ], [ [ 279, 292 ], [ 495, 508 ], [ 846, 859 ], [ 1508, 1521 ], [ 2975, 2988 ], [ 3698, 3711 ], [ 4968, 4981 ], [ 8647, 8660 ], [ 11018, 11031 ], [ 11886, 11899 ] ], [ [ 325, 338 ], [ 495, 508 ], [ 846, 859 ], [ 1508, 1521 ], [ 2975, 2988 ], [ 3698, 3711 ], [ 4968, 4981 ], [ 8647, 8660 ], [ 11018, 11031 ], [ 11886, 11899 ] ], [ [ 366, 376 ], [ 379, 389 ], [ 522, 532 ], [ 873, 883 ], [ 1535, 1545 ], [ 3002, 3012 ], [ 3725, 3735 ], [ 4995, 5005 ], [ 8674, 8684 ], [ 11045, 11055 ], [ 11913, 11923 ] ], [ [ 425, 437 ] ], [ [ 774, 788 ] ], [ [ 1428, 1450 ] ], [ [ 2896, 2917 ] ], [ [ 3623, 3640 ] ], [ [ 4886, 4910 ] ], [ [ 8570, 8589 ] ], [ [ 10944, 10960 ] ], [ [ 11817, 11828 ] ] ]
import re import time import typing import logging from calendar import monthrange from datetime import datetime from collections import Iterable from heapq import heappush, heappop from . import types # noqa from . exceptions import BrokerError from . interfaces import App, Plugin, Logger from . utils import cached_property, rewind class MasterLogger(Plugin): def __init__(self, logger: typing.Union[logging.Logger, Logger], **kwargs) -> None: self.logger = logger def register_master_handlers(self): level = self.logger.level ret = {} if level <= logging.ERROR: ret['task_exception'] = self.on_task_exception ret['task_unknown'] = self.on_task_unknown ret['worker_error'] = self.on_worker_error ret['broker_error'] = self.on_broker_error if level <= logging.INFO: ret['worker_start'] = self.on_worker_start ret['task_start'] = self.on_task_start ret['task_done'] = self.on_task_done ret['task_interrupt'] = self.on_task_interrupt if level <= logging.DEBUG: ret['task_expires'] = self.on_task_expires return ret def on_worker_start(self, w, **kwargs): self.logger.info('worker process [%d] started.', w.pid) def on_task_start(self, w, task_name, task_request, **kwargs): self.logger.info('[%d] - received task: %s[%s].', w.pid, task_name, task_request['id']) def on_task_done(self, w, task_name, task_request, running_time, **kwargs): self.logger.info('[%d] - task %s[%s] successed in %ss.', w.pid, task_name, task_request['id'], running_time) def on_task_interrupt(self, w, task_name, task_request, running_time, **kwargs): self.logger.info('[%d] - task %s[%s] killed in %ss.', w.pid, task_name, task_request['id'], running_time) def on_task_expires(self, w, task_name, task_request, **kwargs): self.logger.debug('[%d] - task %s[%s] expires.', w.pid, task_name, task_request['id']) def on_task_unknown(self, w, task_name, **kwargs): self.logger.error('[%d] - received unregistered task `%s`.', w.pid, task_name) def on_task_exception(self, w, task_name, task_request, exc, traceback, running_time, **kwargs): self.logger.error('[%d] - task %s[%s] raised exception: %s\n%s', w.pid, task_name, task_request['id'], repr(exc), traceback) def on_broker_error(self, w, **kwargs): self.logger.error('[%d] - broker error', w.pid) def on_worker_error(self, w, exc, traceback, **kwargs): self.logger.error('[%d] - got exception: %s\n%s', w.pid, repr(exc), traceback) class TaskKiller(Plugin): def __init__(self, app: App, logger: typing.Union[logging.Logger, Logger], **kwargs) -> None: self.app = app self.logger = logger self.running_tasks = set() # type: typing.Set[types.TaskId] self.heap = [] # type: typing.List[typing.Tuple[float, types.TaskId]] def run_in_master(self, curtime): if not self.heap: return while self.heap and self.heap[0][0] <= curtime: tm, task_id = heappop(self.heap) if task_id not in self.running_tasks: continue self.logger.debug('[taskkiller] - kill task %s due to time limit', task_id) if self.heap: return self.heap[0][0] - time.time() def on_task_start(self, w, task_name, task_id, task_headers, start_time, **kwargs): limit = task_headers.get('time_limit') if limit is None: return self.running_tasks.add(task_id) heappush(self.heap, (start_time + limit, task_id)) def on_task_done(self, w, task_name, task_id): if task_id not in self.running_tasks: return self.running_tasks.remove(task_id) class CronBeat(Plugin): def __init__(self, app: App, schedule: str, error_timeout: int, logger: typing.Union[logging.Logger, Logger], **kwargs) -> None: self.app = app self.logger = logger self.error_timeout = error_timeout 
self.schedule = schedule self.next_run = 0 @classmethod def add_console_args(cls, parser) -> None: parser.add_argument('--schedule', dest='schedule', default='schedule.py', help='Schedule rules') def get_applied_conf(self): return { 'schedule': self.schedule } @cached_property def heap(self): dct = {'crontab': crontab} with open(self.schedule, 'rt') as f: rules = eval(f.read(), dct) if not isinstance(rules, dict): raise TypeError('Must be a dict') start = datetime.now() heap = [] for key, entry in rules.items(): if not entry.get('task'): raise TypeError('`task` must be set') schedule = entry.get('schedule') if not isinstance(schedule, crontab): raise TypeError('`schedule` must be a crontab') schedule = schedule.start(start) heappush(heap, (next(schedule).timestamp(), schedule, entry)) return heap def master_idle(self, curtime): if not self.heap: return if self.next_run > curtime: return self.next_run - curtime task_sent = False while self.heap and self.heap[0][0] <= curtime: _, schedule, entry = self.heap[0] try: self.app.send_task(entry['task'], args=entry.get('args', ()), kwargs=entry.get('kwargs', {})) except BrokerError: self.logger.error('[beat] - cant send task, retry in %ss.', self.error_timeout) self.next_run = self.error_timeout + curtime return self.error_timeout else: self.logger.debug('[beat] - %s sent.', entry['task']) heappop(self.heap) heappush(self.heap, ( next(schedule).timestamp(), schedule, entry)) task_sent = True if self.heap: self.next_run = self.heap[0][0] timeout = self.next_run - curtime if task_sent: self.logger.debug('[beat] - next task in %fs.', timeout) return timeout class crontab_parser: """Parser for Crontab expressions.""" _range = r'(\d+?)-(\d+)' _steps = r'/(\d+)' _number = r'(\d+)' _star = r'\*' def __init__(self, min_, max_): self.max_ = max_ self.min_ = min_ self.pats = ( (re.compile('^' + self._range + self._steps + '$'), self._range_steps), (re.compile('^' + self._range + '$'), self._expand_range), (re.compile('^' + self._star + self._steps + '$'), self._star_steps), (re.compile('^' + self._star + '$'), self._expand_star), (re.compile('^' + self._number + '$'), self._expand_range) ) def parse(self, spec): acc = set() for part in spec.split(','): if not part: raise ValueError('empty part') acc |= set(self._parse_part(part)) return sorted(acc) def _parse_part(self, part): for regex, handler in self.pats: m = regex.match(part) if m: return handler(m.groups()) raise ValueError('invalid filter: %r' % part) def _expand_range(self, toks): fr = self._expand_number(toks[0]) if len(toks) > 1: to = self._expand_number(toks[1]) if to < fr: raise ValueError('invalid range') return list(range(fr, to + 1)) return [fr] def _range_steps(self, toks): if len(toks) != 3 or not toks[2]: raise ValueError('empty filter') return self._expand_range(toks[:2])[::int(toks[2])] def _star_steps(self, toks): if not toks or not toks[0]: raise ValueError('empty filter') return self._expand_star()[::int(toks[0])] def _expand_star(self, *args): return list(range(self.min_, self.max_ + self.min_ + 1)) def _expand_number(self, s): try: i = int(s) except ValueError: raise ValueError('invalid number: %r' % s) if i > self.max_: raise ValueError( 'invalid end range: {0} > {1}.'.format(i, self.max_)) if i < self.min_: raise ValueError( 'invalid beginning range: {0} < {1}.'.format(i, self.min_)) return i class crontab: def __init__(self, minute='*', hour='*', day_of_month='*', month_of_year='*', day_of_week='*'): self._orig_minute = minute self._orig_hour = hour self._orig_day_of_week = 
day_of_week self._orig_day_of_month = day_of_month self._orig_month_of_year = month_of_year self.hour = self._expand_spec(hour, 0, 23) self.minute = self._expand_spec(minute, 0, 59) self.day_of_week = self._expand_spec(day_of_week, 0, 6) self.day_of_month = self._expand_spec(day_of_month, 1, 31) self.month_of_year = self._expand_spec(month_of_year, 1, 12) def __repr__(self): return ('<crontab: {0._orig_minute} {0._orig_hour} ' '{0._orig_day_of_week} {0._orig_day_of_month} ' '{0._orig_month_of_year}>').format(self) @staticmethod def _expand_spec(cronspec, min_, max_): """Expand cron specification.""" if isinstance(cronspec, int): result = [cronspec] elif isinstance(cronspec, str): result = crontab_parser(min_, max_).parse(cronspec) elif isinstance(cronspec, (list, tuple, set)): result = sorted(cronspec) elif isinstance(cronspec, Iterable): result = sorted(cronspec) else: raise TypeError("Argument cronspec needs to be of any of the " "following types: int, str, or an iterable type. " "%r was given." % type(cronspec)) for number in result: if not isinstance(number, int): raise ValueError("Argument cronspec needs to be an int: " "%r was given." % type(number)) for number in [result[0], result[-1]]: if result[0] < min_ or result[0] > max_: raise ValueError( "Invalid crontab pattern. Valid range is {%d}-{%d}. " "'%r' was found." % (min_, max_, result[0])) return result def start(self, start_date=None): y = start_date.year complete, (month_of_year, day_of_month, hour, minute) = rewind( start_date.timetuple()[1:5], ( self.month_of_year, self.day_of_month, self.hour, self.minute )) if complete: y += 1 while 1: for m in month_of_year: max_d = monthrange(y, m)[1] for d in day_of_month: if d > max_d: break for h in hour: for mi in minute: yield datetime(y, m, d, h, mi) minute = self.minute hour = self.hour day_of_month = self.day_of_month month_of_year = self.month_of_year y += 1
[ [ [ 7, 9 ], [ 7338, 7340 ], [ 7438, 7440 ], [ 7525, 7527 ], [ 7623, 7625 ], [ 7708, 7710 ] ], [ [ 17, 21 ], [ 3766, 3770 ] ], [ [ 29, 35 ], [ 415, 421 ], [ 3053, 3059 ], [ 4499, 4505 ] ], [ [ 43, 50 ], [ 428, 435 ], [ 630, 637 ], [ 890, 897 ], [ 1139, 1146 ], [ 3066, 3073 ], [ 4512, 4519 ] ], [ [ 72, 82 ], [ 11968, 11978 ] ], [ [ 104, 112 ], [ 5343, 5351 ], [ 12202, 12210 ] ], [ [ 137, 145 ], [ 10739, 10747 ] ], [ [ 164, 172 ], [ 4117, 4125 ], [ 5725, 5733 ], [ 6700, 6708 ] ], [ [ 174, 181 ], [ 3494, 3501 ], [ 6665, 6672 ] ], [ [ 196, 201 ] ], [ [ 235, 246 ], [ 6315, 6326 ] ], [ [ 272, 275 ], [ 3023, 3026 ], [ 4400, 4403 ] ], [ [ 277, 283 ], [ 357, 363 ], [ 2968, 2974 ], [ 4345, 4351 ] ], [ [ 285, 291 ], [ 444, 450 ], [ 3082, 3088 ], [ 4528, 4534 ] ], [ [ 312, 327 ], [ 5085, 5100 ] ], [ [ 329, 335 ], [ 11657, 11663 ] ], [ [ 344, 356 ] ], [ [ 2957, 2967 ] ], [ [ 4336, 4344 ] ], [ [ 7064, 7078 ], [ 10569, 10583 ] ], [ [ 9418, 9425 ], [ 5147, 5154 ], [ 5594, 5601 ] ] ]
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.

from .api import RendezvousHandler, RendezvousParameters
from .api import rendezvous_handler_registry as handler_registry
from .dynamic_rendezvous import create_handler


def _create_static_handler(params: RendezvousParameters) -> RendezvousHandler:
    from . import static_tcp_rendezvous

    return static_tcp_rendezvous.create_rdzv_handler(params)


def _create_etcd_handler(params: RendezvousParameters) -> RendezvousHandler:
    from . import etcd_rendezvous

    return etcd_rendezvous.create_rdzv_handler(params)


def _create_etcd_v2_handler(params: RendezvousParameters) -> RendezvousHandler:
    from .etcd_rendezvous_backend import create_backend
    from .etcd_store import EtcdStore

    backend = create_backend(params)

    store = EtcdStore(backend.client, "/torch/elastic/store")

    return create_handler(store, backend, params)


def _create_c10d_handler(params: RendezvousParameters) -> RendezvousHandler:
    from .c10d_rendezvous_backend import create_backend

    backend = create_backend(params)

    return create_handler(backend.store, backend, params)


def _register_default_handlers() -> None:
    handler_registry.register("etcd", _create_etcd_handler)
    handler_registry.register("etcd-v2", _create_etcd_v2_handler)
    handler_registry.register("c10d", _create_c10d_handler)
    handler_registry.register("static", _create_static_handler)


def get_rendezvous_handler(params: RendezvousParameters) -> RendezvousHandler:
    """
    This method is used to obtain a reference to a :py:class`RendezvousHandler`.

    Custom rendezvous handlers can be registered by

    ::

      from torch.distributed.elastid.rendezvous import rendezvous_handler_registry
      from torch.distributed.elastic.rendezvous.registry import get_rendezvous_handler

      def create_my_rdzv(params: RendezvousParameters):
        return MyCustomRdzv(params)

      rendezvous_handler_registry.register("my_rdzv_backend_name", create_my_rdzv)

      my_rdzv_handler = get_rendezvous_handler("my_rdzv_backend_name", RendezvousParameters)
    """
    return handler_registry.create_handler(params)
[ [ [ 224, 241 ], [ 438, 455 ], [ 619, 636 ], [ 791, 808 ], [ 1116, 1133 ], [ 1644, 1661 ] ], [ [ 243, 263 ], [ 413, 433 ], [ 594, 614 ], [ 766, 786 ], [ 1091, 1111 ], [ 1619, 1639 ] ], [ [ 281, 328 ], [ 1336, 1352 ], [ 1396, 1412 ], [ 1462, 1478 ], [ 1522, 1538 ], [ 2273, 2289 ] ], [ [ 361, 375 ], [ 1017, 1031 ], [ 1241, 1255 ] ], [ [ 382, 404 ], [ 1558, 1580 ] ], [ [ 565, 585 ], [ 1370, 1390 ] ], [ [ 734, 757 ], [ 1433, 1456 ] ], [ [ 1062, 1082 ], [ 1496, 1516 ] ], [ [ 1294, 1320 ] ], [ [ 1588, 1610 ] ] ]
#!/usr/bin/env python3 from app import app, db, functions from app.functions import Color import subprocess import os import shlex import shutil from config import Config from datetime import datetime from cryptography.fernet import InvalidToken from app.cipher import CipherTest, Cipher, new_cipher_key, encrypt, decrypt from app.workspace import Workspace from app.role import Role from app.user import User from app.profile import Profile from app.list import List, Person from app.email import Email from app.page import Page from app.domain import Domain from app.campaign import Campaign, Campaignpages, WorkerCampaignSchema from app.result import Result, Form, Event from app.server import Server from app.apikey import APIKey import urllib3 # suppress insecure requests warning urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning) # objects to initialize 'flask shell' with @app.shell_context_processor def make_shell_context(): return { 'db': db, 'User': User, 'Profile': Profile, 'Role': Role, 'Workspace': Workspace, 'List': List, 'Person': Person, 'Email': Email, 'Page': Page, 'Domain': Domain, 'Campaign': Campaign, 'Result': Result, 'Server': Server, 'APIKey': APIKey, 'Form': Form, 'Campaignpages': Campaignpages, 'Event': Event } def init_cipher(): passphrase = '' print(f'{Color.gray}[*] redlure encrypts sensitive database fields{Color.end}') print(f'{Color.gray}[*] Enter a passphrase that will be used in generating the key\n{Color.end}') while passphrase == '': passphrase = input(f'{Color.gray}[+] Passphrase: {Color.red}').encode() print(f'\n[!] WARNING: Do not lose your passphrase - doing so will result in losing access to parts of your database{Color.end}') new_cipher_key(passphrase) input(f'\n{Color.gray}[+] Press enter to continue: {Color.end}') def get_cipher(): cipher_text = CipherTest.query.first().value str = cipher_text.decode() print(f'{Color.gray}{str[:len(str)//2]}\n{str[len(str)//2:]}{Color.end}\n') passphrase = input(f'{Color.gray}[+] Enter the cipher passphrase: {Color.red}').encode() new_cipher_key(passphrase) try: plain_text = decrypt(cipher_text) print(f'[+] {plain_text.decode()}\n{Color.end}') except InvalidToken: print(f'\n[!] Decryption failed - invalid passphrase{Color.end}') exit() def init_db(): if os.path.isdir('migrations'): shutil.rmtree('migrations') print(f'\n{Color.red}[*] Creating database{Color.end}') proc = subprocess.Popen(shlex.split('flask db init'), stdout=subprocess.PIPE, stderr=subprocess.DEVNULL) proc.wait() proc = subprocess.Popen(shlex.split('flask db migrate'), stdout=subprocess.PIPE, stderr=subprocess.DEVNULL) proc.wait() proc = subprocess.Popen(shlex.split('flask db upgrade'), stdout=subprocess.PIPE, stderr=subprocess.DEVNULL) proc.wait() print(f'{Color.red}[+] Initializing database values\n{Color.end}') general_ws = Workspace(name='General') db.session.add(general_ws) db.session.commit() administrator = Role(name='redlure admin', role_type='Administrator') general_ws = Workspace.query.filter_by(id=1, name='General').first() if general_ws is not None: administrator.workspaces.append(general_ws) db.session.add(administrator) db.session.commit() admin = User(username='admin', role_id=1) admin.set_password('redlure') db.session.add(admin) db.session.commit() encrypted_val = encrypt(b'Bingo. 
Welcome to redlure') cipher_test = CipherTest(value=encrypted_val) db.session.add(cipher_test) db.session.commit() key = APIKey() # check for scheduled campaigns that need to be rentered into the queue def check_campaigns(): campaigns = Campaign.query.filter_by(status='Scheduled').all() for campaign in campaigns: if datetime.now() < campaign.start_time: #schema = WorkerCampaignSchema() #campaign_data = schema.dump(campaign) campaign.cast() else: campaign.status = 'Start time missed (server outage)' db.session.commit() def gen_certs(): proc = subprocess.Popen(shlex.split('openssl req -x509 -newkey rsa:4096 -nodes -subj "/" -out redlure-cert.pem -keyout redlure-key.pem -days 365')) proc.wait() def banner(): print(f''' {Color.red} .___{Color.gray}.__ {Color.end} {Color.red}_______ ____ __| _/{Color.gray}| | __ _________ ____ {Color.end} {Color.red}\_ __ \_/ __ \ / __ | {Color.gray}| | | | \_ __ \_/ __ \ {Color.end} {Color.red} | | \/\ ___// /_/ | {Color.gray}| |_| | /| | \/\ ___/ {Color.end} {Color.red} |__| \___ >____ | {Color.gray}|____/____/ |__| \___ >{Color.end} {Color.red} \/ \/ {Color.gray} \/ {Color.end} ''') if __name__ == '__main__': banner() # SECRET_KEY is required if Config.SECRET_KEY == '': print('[!] A secret key is required - set the SECRET_KEY attribute in config.py') print(f'[!] New suggested random secret key: {os.urandom(24)}') exit() # check if db exists yet if not os.path.isfile('redlure.db'): init_cipher() init_db() else: get_cipher() check_campaigns() # generate certs if they dont exist if Config.CERT_PATH == 'redlure-cert.pem' and Config.KEY_PATH == 'redlure-key.pem': if not os.path.isfile('redlure-cert.pem') or not os.path.isfile('redlure-key.pem'): gen_certs() # start the server app.logger.info('redlure-console starting up') #server = subprocess.Popen(['gunicorn', 'app:app', '-b 0.0.0.0:5000', '--certfile', Config.CERT_PATH, '--keyfile', Config.KEY_PATH]) #server.wait() app.run(host='0.0.0.0', ssl_context=(Config.CERT_PATH, Config.KEY_PATH), use_reloader=False)
[ [ [ 39, 42 ], [ 900, 903 ], [ 5758, 5761 ], [ 5965, 5968 ] ], [ [ 44, 46 ], [ 981, 983 ], [ 3157, 3159 ], [ 3188, 3190 ], [ 3444, 3446 ], [ 3478, 3480 ], [ 3583, 3585 ], [ 3609, 3611 ], [ 3742, 3744 ], [ 3774, 3776 ], [ 4274, 4276 ] ], [ [ 48, 57 ] ], [ [ 84, 89 ], [ 1462, 1467 ], [ 1520, 1525 ], [ 1546, 1551 ], [ 1622, 1627 ], [ 1694, 1699 ], [ 1722, 1727 ], [ 1865, 1870 ], [ 1926, 1931 ], [ 1967, 1972 ], [ 2093, 2098 ], [ 2145, 2150 ], [ 2186, 2191 ], [ 2231, 2236 ], [ 2379, 2384 ], [ 2478, 2483 ], [ 2610, 2615 ], [ 2642, 2647 ], [ 3051, 3056 ], [ 3096, 3101 ], [ 4512, 4517 ], [ 4546, 4551 ], [ 4585, 4590 ], [ 4602, 4607 ], [ 4636, 4641 ], [ 4675, 4680 ], [ 4688, 4693 ], [ 4722, 4727 ], [ 4761, 4766 ], [ 4773, 4778 ], [ 4807, 4812 ], [ 4846, 4851 ], [ 4858, 4863 ], [ 4892, 4897 ], [ 4931, 4936 ], [ 4943, 4948 ], [ 4977, 4982 ], [ 5017, 5022 ] ], [ [ 97, 107 ], [ 2667, 2677 ], [ 2721, 2731 ], [ 2745, 2755 ], [ 2792, 2802 ], [ 2849, 2859 ], [ 2873, 2883 ], [ 2920, 2930 ], [ 2977, 2987 ], [ 3001, 3011 ], [ 4323, 4333 ] ], [ [ 115, 117 ], [ 5284, 5286 ], [ 5358, 5360 ], [ 5629, 5631 ], [ 5671, 5673 ], [ 2529, 2531 ] ], [ [ 125, 130 ], [ 2684, 2689 ], [ 2809, 2814 ], [ 2937, 2942 ], [ 4340, 4345 ] ], [ [ 138, 144 ], [ 2566, 2572 ] ], [ [ 164, 170 ], [ 5115, 5121 ], [ 5533, 5539 ], [ 5576, 5582 ], [ 6002, 6008 ], [ 6020, 6026 ] ], [ [ 192, 200 ], [ 4019, 4027 ] ], [ [ 233, 245 ], [ 2403, 2415 ] ], [ [ 269, 279 ], [ 2018, 2028 ], [ 3706, 3716 ] ], [ [ 281, 287 ] ], [ [ 289, 303 ], [ 1883, 1897 ], [ 2257, 2271 ] ], [ [ 305, 312 ], [ 3650, 3657 ] ], [ [ 314, 321 ], [ 2314, 2321 ] ], [ [ 348, 357 ], [ 1078, 1087 ], [ 3127, 3136 ], [ 3300, 3309 ] ], [ [ 379, 383 ], [ 1051, 1055 ], [ 3229, 3233 ] ], [ [ 405, 409 ], [ 1001, 1005 ], [ 3511, 3515 ] ], [ [ 434, 441 ], [ 1026, 1033 ] ], [ [ 463, 467 ], [ 1105, 1109 ] ], [ [ 469, 475 ], [ 1129, 1135 ] ], [ [ 498, 503 ], [ 1154, 1159 ] ], [ [ 525, 529 ], [ 1177, 1181 ] ], [ [ 553, 559 ], [ 1201, 1207 ] ], [ [ 585, 593 ], [ 1229, 1237 ], [ 3926, 3934 ] ], [ [ 595, 608 ], [ 1364, 1377 ] ], [ [ 610, 630 ] ], [ [ 654, 660 ], [ 1257, 1263 ] ], [ [ 662, 666 ], [ 1333, 1337 ] ], [ [ 668, 673 ], [ 1396, 1401 ] ], [ [ 697, 703 ], [ 1283, 1289 ] ], [ [ 727, 733 ], [ 1309, 1315 ], [ 3805, 3811 ] ], [ [ 741, 748 ], [ 787, 794 ], [ 812, 819 ] ], [ [ 932, 950 ] ], [ [ 1414, 1425 ], [ 5396, 5407 ] ], [ [ 1986, 1996 ], [ 5446, 5456 ] ], [ [ 2511, 2518 ], [ 5418, 5425 ] ], [ [ 3891, 3906 ], [ 5467, 5482 ] ], [ [ 4299, 4308 ], [ 5718, 5727 ] ], [ [ 4486, 4492 ], [ 5070, 5076 ] ] ]
#!/usr/bin/env python # -*- coding: utf-8 -*- # Software License Agreement (BSD License) # # Copyright (c) 2021, Kei Okada # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions # are met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following # disclaimer in the documentation and/or other materials provided # with the distribution. # * Neither the name of the Copyright holder. nor the # names of its contributors may be used to endorse or promote products # derived from this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS # FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE # COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, # INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, # BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; # LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT # LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN # ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE # POSSIBILITY OF SUCH DAMAGE. # import argparse import sys import time import rospy import rostest import unittest from selenium import webdriver from selenium.webdriver.common.keys import Keys from selenium.webdriver.common.by import By from selenium.webdriver.support import expected_conditions as EC from selenium.webdriver.support.ui import Select from std_msgs.msg import Float64 CLASSNAME = 'rwt_robot_monitor' class TestRwtRobotMonitor(unittest.TestCase): def sin_cb(self, msg): self.sin_msg = msg self.sin_msg_received = self.sin_msg_received + 1 def __init__(self, *args): super(TestRwtRobotMonitor, self).__init__(*args) rospy.init_node('test_rwt_robot_monitor') def setUp(self): parser = argparse.ArgumentParser() parser.add_argument('--no-headless', action='store_true', help='start webdriver with headless mode') args, unknown = parser.parse_known_args() self.sin_msg = None self.sin_msg_received = 0 rospy.Subscriber('/sin', Float64, self.sin_cb) self.url_base = rospy.get_param("url_roswww_testserver") opts = webdriver.firefox.options.Options() if not args.no_headless: opts.add_argument('-headless') self.browser = webdriver.Firefox(options=opts) self.wait = webdriver.support.ui.WebDriverWait(self.browser, 10) # maximize screen self.browser.find_element_by_tag_name("html").send_keys(Keys.F11) def tearDown(self): try: self.browser.close() self.browser.quit() except: pass def set_ros_websocket_port_settings(self): self.wait.until(EC.presence_of_element_located((By.ID, "button-ros-master-settings"))) settings = self.browser.find_element_by_id("button-ros-master-settings") self.assertIsNotNone(settings, "Object id=button-ros-master-settings not found") settings.click() self.wait.until(EC.presence_of_element_located((By.ID, "input-ros-master-uri"))) uri = self.browser.find_element_by_id("input-ros-master-uri") self.assertIsNotNone(uri, "Object id=input-ros-master-uri not found") uri.clear(); uri.send_keys('ws://localhost:9090/') 
self.wait.until(EC.presence_of_element_located((By.ID, "button-ros-master-connect"))) connect = self.browser.find_element_by_id("button-ros-master-connect") self.assertIsNotNone(connect, "Object id=button-ros-master-connect") connect.click() def test_rwt_robot_monitor_plotter(self): url = '%s/rwt_robot_monitor/plotter.html' % (self.url_base) rospy.logwarn("Accessing to %s" % url) self.browser.get(url) # check settings self.set_ros_websocket_port_settings() # wait for /First/pref1a topic topic_text = '' while topic_text == '': time.sleep(1) self.wait.until(EC.presence_of_element_located((By.ID, "name-select"))) topic = self.browser.find_element_by_id("name-select") self.assertIsNotNone(topic, "Object id=name-select not found") topic_text = topic.text self.assertTrue(u'/First/pref1a' in topic_text) Select(topic).select_by_value('/First/pref1a') # wait for test topic topic_text = '' while topic_text == '': time.sleep(1) self.wait.until(EC.presence_of_element_located((By.ID, "plot-field-select"))) topic = self.browser.find_element_by_id("plot-field-select") self.assertIsNotNone(topic, "Object id=plot-field-select not found") topic_text = topic.text self.assertTrue(u'test' in topic_text) Select(topic).select_by_value('test') self.wait.until(EC.presence_of_element_located((By.ID, "add-button"))) add = self.browser.find_element_by_id("add-button") self.assertIsNotNone(add, "Object id=add-button") add.click() # check plot is updated self.wait.until(EC.presence_of_element_located((By.CSS_SELECTOR, "g.y"))) y_axis = self.browser.find_element_by_css_selector("g.y") self.assertIsNotNone(y_axis, "Object id=y_axis") y_axis_value = y_axis.text loop = 0 y_axis_value_updated = 0 while loop < 60: loop = loop + 1 time.sleep(1) y_axis = self.browser.find_element_by_css_selector("g.y") rospy.logwarn("check if tick updated {} < {} ({})".format(y_axis_value, y_axis.text, y_axis_value_updated)) if y_axis_value != y_axis.text: y_axis_value_updated = y_axis_value_updated + 1 if y_axis_value_updated >= 2: break y_axis_value = y_axis.text self.assertNotEqual(y_axis_value, y_axis.text) if __name__ == '__main__': try: rostest.run('rwt_robot_monitor', CLASSNAME, TestRwtRobotMonitor, sys.argv) except KeyboardInterrupt: pass print("{} exiting".format(CLASSNAME))
[ [ [ 1653, 1661 ], [ 2372, 2380 ] ], [ [ 1669, 1672 ], [ 6636, 6639 ] ], [ [ 1680, 1684 ], [ 4554, 4558 ], [ 5040, 5044 ], [ 6033, 6037 ] ], [ [ 1692, 1697 ], [ 2291, 2296 ], [ 2657, 2662 ], [ 2728, 2733 ], [ 4303, 4308 ], [ 6129, 6134 ] ], [ [ 1705, 1712 ], [ 6571, 6578 ] ], [ [ 1720, 1728 ], [ 2061, 2069 ] ], [ [ 1751, 1760 ], [ 2785, 2794 ], [ 2920, 2929 ], [ 2973, 2982 ] ], [ [ 1804, 1808 ], [ 3116, 3120 ] ], [ [ 1850, 1852 ], [ 3366, 3368 ], [ 3657, 3659 ], [ 3962, 3964 ], [ 4628, 4630 ], [ 5114, 5116 ], [ 5484, 5486 ], [ 5734, 5736 ] ], [ [ 1892, 1917 ], [ 3334, 3336 ], [ 3625, 3627 ], [ 3930, 3932 ], [ 4596, 4598 ], [ 5082, 5084 ], [ 5452, 5454 ], [ 5702, 5704 ] ], [ [ 1960, 1966 ], [ 4894, 4900 ], [ 5389, 5395 ] ], [ [ 1993, 2000 ], [ 2682, 2689 ] ], [ [ 2002, 2011 ], [ 6604, 6613 ], [ 6719, 6728 ] ], [ [ 2041, 2060 ], [ 6615, 6634 ], [ 2240, 2259 ] ] ]
import unittest
from almdrlib.session import Session
import re

MOCK_AUTH = {
    "authentication": {
        "user": {
            "id": "589B64BB-AE91-4FA9-A6D8-37AC6759BB5D",
            "account_id": "2",
            "created": {
                "at": 1443713420,
                "by": "693BA145-78C0-4C77-AC1A-5385461839CD"
            },
            "modified": {
                "at": 1610707251,
                "by": "system"
            }
        },
        "account": {
            "id": "2",
            "name": "Alert Logic, Inc."
        },
        "token": "123",
    }
}


class NameSpace:
    def __init__(self, **kwargs):
        self.__dict__ = kwargs


class MockResponse():
    elapsed = NameSpace(total_seconds=lambda: 123)

    def __init__(self, code_body):
        (self.code, self.body) = code_body

    def json(self):
        return self.body

    def status_code(self):
        return self.code

    def raise_for_status(self):
        return None


class MockSession():
    def __init__(self, map):
        self.map = map

    def post(self, url):
        return MockResponse(self.get_status_body(url))

    def request(self, method, url, **kwargs):
        print("URL", url)
        return MockResponse(self.get_status_body(url))

    def get_status_body(self, url):
        for k, v in self.map.items():
            if re.match(k, url):
                return v
        return 200, {}


class TestConf(unittest.TestCase):
    def test_globalep(self):
        session = Session(global_endpoint="http://api.aesolo.com:8100")
        assert session.get_url("aetuner", "1234567") == "http://api.aesolo.com:8100"

        session = Session(global_endpoint="production")
        session._session = MockSession({
            r".*aims/v1/authenticate": (200, MOCK_AUTH),
            r".*residency/default/services/aetuner/endpoint": (200, {"aetuner":"api.alertlogic.com"})
        })
        assert session.get_url("aetuner", "1234567") == "https://api.alertlogic.com"


if __name__ == '__main__':
    unittest.main()
[ [ [ 7, 15 ], [ 1434, 1442 ], [ 2119, 2127 ] ], [ [ 45, 52 ], [ 1501, 1508 ], [ 1658, 1665 ] ], [ [ 60, 62 ], [ 1351, 1353 ] ], [ [ 64, 73 ], [ 1813, 1822 ] ], [ [ 595, 604 ], [ 709, 718 ] ], [ [ 679, 691 ], [ 1093, 1105 ], [ 1221, 1233 ] ], [ [ 985, 996 ], [ 1723, 1734 ] ], [ [ 1425, 1433 ] ] ]
# -*- coding: utf-8 -*-
# Copyright 2014-2016 OpenMarket Ltd
# Copyright 2018-9 New Vector Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

""" This is a reference implementation of a Matrix homeserver.
"""

import json
import os
import sys

# Check that we're not running on an unsupported Python version.
if sys.version_info < (3, 5):
    print("Synapse requires Python 3.5 or above.")
    sys.exit(1)

# Twisted and canonicaljson will fail to import when this file is executed to
# get the __version__ during a fresh install. That's OK and subsequent calls to
# actually start Synapse will import these libraries fine.
try:
    from twisted.internet import protocol
    from twisted.internet.protocol import Factory
    from twisted.names.dns import DNSDatagramProtocol

    protocol.Factory.noisy = False
    Factory.noisy = False
    DNSDatagramProtocol.noisy = False
except ImportError:
    pass

# Use the standard library json implementation instead of simplejson.
try:
    from canonicaljson import set_json_library

    set_json_library(json)
except ImportError:
    pass

__version__ = "1.18.0"

if bool(os.environ.get("SYNAPSE_TEST_PATCH_LOG_CONTEXTS", False)):
    # We import here so that we don't have to install a bunch of deps when
    # running the packaging tox test.
    from synapse.util.patch_inline_callbacks import do_patch

    do_patch()
[ [ [ 717, 721 ], [ 1549, 1553 ] ], [ [ 729, 731 ], [ 1617, 1619 ] ], [ [ 739, 742 ], [ 812, 815 ], [ 894, 897 ] ], [ [ 1162, 1170 ], [ 1280, 1288 ] ], [ [ 1213, 1220 ], [ 1315, 1322 ] ], [ [ 1255, 1274 ], [ 1341, 1360 ] ], [ [ 1510, 1526 ], [ 1532, 1548 ] ], [ [ 1585, 1596 ] ], [ [ 1841, 1849 ], [ 1855, 1863 ] ] ]
import numpy as np import sys import gpflow import VFF from time import time from config import * dim = sys.argv[1] rep = sys.argv[2] print('vff: dimension {}, replicate {}'.format(dim, r)) # data data = np.load('data/data_dim{}_rep{}.npz'.format(dim, 0)) # full_gp def prodkern(dim): return gpflow.kernels.Prod([gpflow.kernels.Matern32(1, active_dims=[i], lengthscales=lengthscale) for i in range(dim)]) k = prodkern(dim) m = gpflow.gpr.GPR(data['Xtrain'], data['Ytrain'], kern=k) m.likelihood.variance = noise_var data = np.load('data/data_dim{}_rep{}.npz'.format(dim, r)) marg_lik = m.compute_log_likelihood().squeeze() mean_log_pred = np.mean(m.predict_density(data['Xtest'], data['Ytest'])) file = open("results/full.csv","a") file.write("{}, {}, {}, {}".format(dim, rep, marg_lik, mean_log_pred)) file.close() ########################## # kron results = pd.DataFrame() for dim in dimensions: a, b = -1.5 * np.ones(dim), 1.5 * np.ones(dim) k = prodkern(dim) for r in range(repeats): print('kron replicate ',r,'/',repeats) data = np.load('data/data_dim{}_rep{}.npz'.format(dim, r)) for M in num_freqs: if (2*M-1)**dim: a, b = -0.5 * np.ones(dim), 1.5 * np.ones(dim) m = VFF.vgp.VGP_kron(data['Xtrain'], data['Ytrain'], np.arange(M), a, b, kerns=prodkern(dim).kern_list, likelihood=gpflow.likelihoods.Gaussian(), use_two_krons=True) m.likelihood.variance = noise_var # only optimize q(u) m.kerns.fixed = True m.likelihood.fixed = True start = time() m.optimize() marg_lik = m.compute_log_likelihood().squeeze() mean_log_pred = np.mean(m.predict_density(data['Xtest'], data['Ytest'])) t = time() - start results = results.append(dict(dim=dim, rep=r, marg_lik=marg_lik, mean_log_pred=mean_log_pred, time=t, num_inducing=M), ignore_index=True) # do this inside the loop so we can get partial results if something crashes results.to_csv('results/kron.csv') ########################## # kron_opt results = pd.DataFrame() for dim in dimensions: a, b = -1.5 * np.ones(dim), 1.5 * np.ones(dim) k = prodkern(dim) for r in range(repeats): print('kron_opt replicate ',r,'/',repeats) data = np.load('data/data_dim{}_rep{}.npz'.format(dim, r)) for M in num_freqs: if (2*M-1)**dim: m = VFF.vgp.VGP_kron(data['Xtrain'], data['Ytrain'], np.arange(M), a, b, kerns=k.kern_list, likelihood=gpflow.likelihoods.Gaussian(), use_two_krons=True) m.likelihood.variance = noise_var # build kronecker GP model start = time() m.optimize() marg_lik = m.compute_log_likelihood().squeeze() mean_log_pred = np.mean(m.predict_density(data['Xtest'], data['Ytest'])) t = time() - start results = results.append(dict(dim=dim, rep=r, marg_lik=marg_lik, mean_log_pred=mean_log_pred, time=t, num_inducing=M), ignore_index=True) results.to_csv('results/kron_opt.csv') ########################## # Sparse results = pd.DataFrame() for dim in dimensions: for r in range(repeats): print('Sparse replicate ',r,'/',repeats) data = np.load('data/data_dim{}_rep{}.npz'.format(dim, r)) num_inducing = (2*num_freqs-1)**dim for M in num_inducing: if M < 500: # build sparse GP model Z = KMeans(n_clusters=M).fit(data['Xtrain']).cluster_centers_ m = gpflow.sgpr.SGPR(data['Xtrain'], data['Ytrain'], Z=Z, kern=prodkern(dim)) m.likelihood.variance = noise_var start = time() marg_lik = m.compute_log_likelihood().squeeze() mean_log_pred = np.mean(m.predict_density(data['Xtest'], data['Ytest'])) t = time() - start results = results.append(dict(dim=dim, rep=r, marg_lik=marg_lik, mean_log_pred=mean_log_pred, time=t, num_inducing=M), ignore_index=True) # do this inside the loop so we can get partial 
results if something crashes results.to_csv('results/sparse_kmeans.csv') ########################## # Sparse GP opt results = pd.DataFrame() for dim in dimensions: for r in range(repeats): print('sparse opt replicate ',r,'/',repeats) data = np.load('data/data_dim{}_rep{}.npz'.format(dim, r)) num_inducing = (2*num_freqs-1)**dim for M in num_inducing: if M < 500: # build sparse GP model Z = KMeans(n_clusters=M).fit(data['Xtrain']).cluster_centers_ m = gpflow.sgpr.SGPR(data['Xtrain'], data['Ytrain'], Z=Z, kern=prodkern(dim)) m.likelihood.variance = noise_var # only optimize Z m.kern.fixed = True m.likelihood.fixed = True start = time() m.optimize() marg_lik = m.compute_log_likelihood().squeeze() mean_log_pred = np.mean(m.predict_density(data['Xtest'], data['Ytest'])) t = time() - start results = results.append(dict(dim=dim, rep=r, marg_lik=marg_lik, mean_log_pred=mean_log_pred, time=t, num_inducing=M), ignore_index=True) # do this inside the loop so we can get partial results if something crashes results.to_csv('results/sparse_opt.csv') ########################## #
[ [ [ 7, 18 ], [ 209, 211 ], [ 565, 567 ], [ 681, 683 ], [ 965, 967 ], [ 985, 987 ], [ 1111, 1113 ], [ 1251, 1253 ], [ 1271, 1273 ], [ 1353, 1355 ], [ 1901, 1903 ], [ 2532, 2534 ], [ 2552, 2554 ], [ 2682, 2684 ], [ 2860, 2862 ], [ 3321, 3323 ], [ 3938, 3940 ], [ 4480, 4482 ], [ 5207, 5209 ], [ 5891, 5893 ] ], [ [ 26, 29 ], [ 107, 110 ], [ 125, 128 ] ], [ [ 37, 43 ], [ 469, 475 ], [ 1489, 1495 ], [ 2984, 2990 ], [ 4228, 4234 ], [ 5497, 5503 ], [ 302, 308 ], [ 323, 329 ] ], [ [ 51, 54 ], [ 1304, 1307 ], [ 2811, 2814 ] ], [ [ 73, 77 ], [ 1769, 1773 ], [ 1978, 1982 ], [ 3189, 3193 ], [ 3398, 3402 ], [ 4377, 4381 ], [ 4557, 4561 ], [ 5759, 5763 ], [ 5968, 5972 ] ], [ [ 98, 99 ], [ 190, 191 ], [ 548, 557 ], [ 613, 614 ], [ 908, 910 ], [ 935, 945 ], [ 1039, 1046 ], [ 1087, 1094 ], [ 1180, 1189 ], [ 1617, 1626 ], [ 2475, 2477 ], [ 2502, 2512 ], [ 2606, 2613 ], [ 2658, 2665 ], [ 2751, 2760 ], [ 3112, 3121 ], [ 3806, 3808 ], [ 3833, 3843 ], [ 3864, 3871 ], [ 3914, 3921 ], [ 4016, 4025 ], [ 4150, 4156 ], [ 4342, 4351 ], [ 5071, 5073 ], [ 5098, 5108 ], [ 5129, 5136 ], [ 5183, 5190 ], [ 5285, 5294 ], [ 5419, 5425 ], [ 5611, 5620 ], [ 380, 391 ] ], [ [ 101, 104 ], [ 185, 188 ], [ 252, 255 ], [ 460, 463 ], [ 608, 611 ], [ 811, 814 ] ], [ [ 119, 122 ], [ 816, 819 ] ], [ [ 202, 206 ], [ 484, 488 ], [ 500, 504 ] ], [ [ 276, 284 ], [ 451, 459 ], [ 1006, 1014 ], [ 1416, 1424 ], [ 2573, 2581 ], [ 4287, 4295 ], [ 5556, 5564 ] ], [ [ 447, 448 ], [ 521, 522 ] ], [ [ 465, 466 ], [ 524, 525 ], [ 628, 629 ], [ 689, 690 ] ], [ [ 558, 562 ], [ 707, 711 ], [ 722, 726 ] ], [ [ 617, 625 ], [ 821, 829 ] ], [ [ 665, 678 ], [ 831, 844 ] ], [ [ 739, 743 ], [ 776, 780 ], [ 848, 852 ] ], [ [ 898, 905 ], [ 2020, 2027 ] ], [ [ 928, 931 ], [ 973, 976 ], [ 993, 996 ], [ 1015, 1018 ], [ 1154, 1157 ], [ 1215, 1218 ], [ 1259, 1262 ], [ 1279, 1282 ], [ 1425, 1428 ], [ 2044, 2047 ] ], [ [ 951, 952 ] ], [ [ 954, 955 ] ], [ [ 1002, 1003 ] ], [ [ 1028, 1029 ], [ 1081, 1082 ], [ 1159, 1160 ], [ 2053, 2054 ] ], [ [ 1104, 1108 ], [ 1321, 1325 ], [ 1337, 1341 ], [ 1927, 1931 ], [ 1942, 1946 ] ], [ [ 1175, 1176 ], [ 1209, 1210 ], [ 1363, 1364 ], [ 2217, 2218 ] ], [ [ 1237, 1238 ], [ 1367, 1368 ] ], [ [ 1240, 1241 ], [ 1370, 1371 ] ], [ [ 1300, 1301 ], [ 1593, 1594 ], [ 1681, 1682 ], [ 1718, 1719 ], [ 1792, 1793 ], [ 1832, 1833 ], [ 1909, 1910 ] ], [ [ 1761, 1766 ], [ 1987, 1992 ] ], [ [ 1821, 1829 ], [ 2065, 2073 ] ], [ [ 1885, 1898 ], [ 2135, 2148 ] ], [ [ 1974, 1975 ], [ 2155, 2156 ] ], [ [ 2010, 2017 ], [ 2391, 2398 ], [ 2020, 2027 ] ], [ [ 2465, 2472 ], [ 3440, 3447 ] ], [ [ 2495, 2498 ], [ 2540, 2543 ], [ 2560, 2563 ], [ 2582, 2585 ], [ 2725, 2728 ], [ 2786, 2789 ], [ 3464, 3467 ] ], [ [ 2518, 2519 ], [ 2874, 2875 ] ], [ [ 2521, 2522 ], [ 2877, 2878 ] ], [ [ 2569, 2570 ], [ 2923, 2924 ] ], [ [ 2595, 2596 ], [ 2652, 2653 ], [ 2730, 2731 ], [ 3473, 3474 ] ], [ [ 2675, 2679 ], [ 2828, 2832 ], [ 2844, 2848 ], [ 3347, 3351 ], [ 3362, 3366 ] ], [ [ 2746, 2747 ], [ 2780, 2781 ], [ 2870, 2871 ], [ 3637, 3638 ] ], [ [ 2807, 2808 ], [ 3088, 3089 ], [ 3212, 3213 ], [ 3252, 3253 ], [ 3329, 3330 ] ], [ [ 3181, 3186 ], [ 3407, 3412 ] ], [ [ 3241, 3249 ], [ 3485, 3493 ] ], [ [ 3305, 3318 ], [ 3555, 3568 ] ], [ [ 3394, 3395 ], [ 3575, 3576 ] ], [ [ 3430, 3437 ], [ 3718, 3725 ], [ 3440, 3447 ] ], [ [ 3796, 3803 ], [ 4599, 4606 ] ], [ [ 3826, 3829 ], [ 3981, 3984 ], [ 4030, 4033 ], [ 4296, 4299 ], [ 4623, 4626 ] ], [ [ 3853, 3854 ], [ 3908, 3909 ], [ 3986, 3987 ], [ 4632, 4633 ] ], [ [ 3931, 3935 ], [ 4175, 4179 ], [ 4245, 4249 ], [ 4261, 4265 ], [ 4506, 
4510 ], [ 4521, 4525 ] ], [ [ 3998, 4010 ], [ 4051, 4063 ] ], [ [ 4046, 4047 ], [ 4080, 4081 ], [ 4168, 4169 ], [ 4796, 4797 ] ], [ [ 4146, 4147 ], [ 4279, 4280 ] ], [ [ 4224, 4225 ], [ 4318, 4319 ], [ 4411, 4412 ], [ 4488, 4489 ] ], [ [ 4369, 4374 ], [ 4566, 4571 ] ], [ [ 4400, 4408 ], [ 4644, 4652 ] ], [ [ 4464, 4477 ], [ 4714, 4727 ] ], [ [ 4553, 4554 ], [ 4734, 4735 ] ], [ [ 4589, 4596 ], [ 4970, 4977 ], [ 4599, 4606 ] ], [ [ 5061, 5068 ], [ 6010, 6017 ] ], [ [ 5091, 5094 ], [ 5250, 5253 ], [ 5299, 5302 ], [ 5565, 5568 ], [ 6034, 6037 ] ], [ [ 5118, 5119 ], [ 5177, 5178 ], [ 5255, 5256 ], [ 6043, 6044 ] ], [ [ 5200, 5204 ], [ 5444, 5448 ], [ 5514, 5518 ], [ 5530, 5534 ], [ 5917, 5921 ], [ 5932, 5936 ] ], [ [ 5267, 5279 ], [ 5320, 5332 ] ], [ [ 5315, 5316 ], [ 5349, 5350 ], [ 5437, 5438 ], [ 6207, 6208 ] ], [ [ 5415, 5416 ], [ 5548, 5549 ] ], [ [ 5493, 5494 ], [ 5587, 5588 ], [ 5672, 5673 ], [ 5708, 5709 ], [ 5782, 5783 ], [ 5822, 5823 ], [ 5899, 5900 ] ], [ [ 5751, 5756 ], [ 5977, 5982 ] ], [ [ 5811, 5819 ], [ 6055, 6063 ] ], [ [ 5875, 5888 ], [ 6125, 6138 ] ], [ [ 5964, 5965 ], [ 6145, 6146 ] ], [ [ 6000, 6007 ], [ 6381, 6388 ], [ 6010, 6017 ] ] ]
import pandas as pd from datanator.util import rna_halflife_util import datetime import datanator.config.core import datetime from pymongo.collation import Collation, CollationStrength class Halflife(rna_halflife_util.RnaHLUtil): def __init__(self, cache_dir=None, server=None, src_db=None, protein_col=None, authDB=None, readPreference=None, username=None, password=None, verbose=None, max_entries=None, des_db=None, rna_col=None): """Init Args: cache_dir (:obj:`str`, optional): Cache directory for logs. Defaults to None. server (:obj:`str`, optional): MongoDB server address. Defaults to None. db (:obj:`str`, optional): Database where initial uniprot collection resides. Defaults to None. collection_str (:obj:`str`, optional): name of collection. Defaults to None. authDB (:obj:`str`, optional): MongoDB authentication database. Defaults to None. readPreference (:obj:`str`, optional): MongoDB read preference. Defaults to None. username (:obj:`str`, optional): MongoDB username. Defaults to None. password (:obj:`str`, optional): MongoDB password. Defaults to None. verbose (:obj:`bool`, optional): Wheter to display verbose messages. Defaults to None. max_entries (:obj:`int`, optional): Number of records to be processed. Defaults to None. uniprot_col_db (:obj:`int`, optional): Database to which new uniprot records will be inserted. Defaults to None. """ super().__init__(server=server, username=username, password=password, src_db=src_db, des_db=des_db, protein_col=protein_col, rna_col=rna_col, authDB=authDB, readPreference=readPreference, max_entries=max_entries, verbose=verbose) self.collation = Collation('en', strength=CollationStrength.SECONDARY) self.max_entries = max_entries self.verbose = verbose def fill_uniprot(self, url, sheet_name, usercols='B:D', skiprows=[0,1,2], insertion=True): """Fill uniprot colleciton with ordered_locus_name from excel sheet Args: url (:obj:`str`): URL for Excel sheet. sheet_name (:obj:`str`): sheet name within Excel. usecols (:obj:`int` or :obj:`list` or :obj:`str`): Return a subset of the columns. skiprows (:obj:`list`): rows to skip (0-indexed) insertion (:obj:`bool`): whether to insert new records to uniprot collection. Return: (:obj:`pandas.DataFrame`): Dataframe """ df = self.make_df(url, sheet_name, usecols=usercols, skiprows=skiprows, names=['ordered_locus_name', 'half_life', 'r_squared']) row_count = len(df.index) if insertion: for index, row in df.iterrows(): if index == self.max_entries: break if index % 10 == 0 and self.verbose: print("Inserting locus {} out of {} into uniprot collection.".format(index, row_count)) oln = row['ordered_locus_name'] self.fill_uniprot_by_oln(oln) return df def fill_rna_halflife(self, df, species): """load data into rna_halflife collection Args: df (:obj:`pandas.DataFrame`): dataframe to be loaded into the database species (:obj:`list`): species name and ncbi_id """ row_count = len(df.index) for i, row in df.iterrows(): if i == self.max_entries: break if i % 10 == 0 and self.verbose: print("Processing locus {} out {}".format(i, row_count)) halflives = {} oln = row['ordered_locus_name'] halflives['halflife'] = row['half_life'] * 60 halflives['r_squared'] = row['r_squared'] halflives['unit'] = 's' halflives['reference'] = [{'doi': '10.1093/nar/gks1019', 'pubmed_id': '23125364'}] halflives['growth_medium'] = 'Middlebrook 7H9 with the ADC supplement (Difco) and 0.05% Tween80, at 37 degree celcius.' 
halflives['ordered_locus_name'] = oln halflives['species'] = species[0] halflives['ncbi_taxonomy_id'] = species[1] gene_name, protein_name = self.uniprot_query_manager.get_gene_protein_name_by_oln(oln) if gene_name is not None: # record exists in uniprot collection with gene_name self.rna_hl_collection.update_one({'gene_name': gene_name}, {'$set': {'modified': datetime.datetime.utcnow()}, '$addToSet': {'halflives': halflives, 'protein_synonyms': protein_name}}, collation=self.collation, upsert=True) elif (gene_name is None and protein_name is not None and protein_name != 'Uncharacterized protein'): # record exists in uniprot collection with non-filler protein_name self.rna_hl_collection.update_one({'protein_name': protein_name}, {'$set': {'modified': datetime.datetime.utcnow(), 'gene_name': gene_name}, '$addToSet': {'halflives': halflives, 'protein_synonyms': protein_name}}, collation=self.collation, upsert=True) else: query = {'halflives.ordered_locus_name': oln} doc = self.rna_hl_collection.find_one(filter=query, collation=self.collation) if doc is not None: self.rna_hl_collection.update_one({'halflives.ordered_locus_name': oln}, {'$set': {'modified': datetime.datetime.utcnow(), 'gene_name': gene_name}, '$addToSet': {'halflives': halflives, 'protein_synonyms': protein_name}}, collation=self.collation, upsert=True) else: doc = {'halflives': [halflives], 'modified': datetime.datetime.utcnow(), 'gene_name': gene_name, 'protein_name': protein_name} self.rna_hl_collection.insert_one(doc) def main(): src_db = 'datanator' des_db = 'datanator' rna_col = 'rna_halflife' protein_col = 'uniprot' username = datanator.config.core.get_config()[ 'datanator']['mongodb']['user'] password = datanator.config.core.get_config( )['datanator']['mongodb']['password'] server = datanator.config.core.get_config( )['datanator']['mongodb']['server'] src = Halflife(server=server, src_db=src_db, protein_col=protein_col, authDB='admin', readPreference='nearest', username=username, password=password, verbose=True, max_entries=float('inf'), des_db=des_db, rna_col=rna_col) url = 'https://oup.silverchair-cdn.com/oup/backfile/Content_public/Journal/nar/41/1/10.1093/nar/gks1019/2/gks1019-nar-00676-a-2012-File003.xlsx?Expires=1578425844&Signature=ZRFUxLdn4-vaBt5gQci~0o56KqyR9nJj9i32ig5X6YcfqiJeV3obEq8leHGdDxx6w~KABgewiQ66HTB7gmuG~2GL-YgxPKYSjt17WrYMkc-0ibw6TMlTvWZZfvw-lPe~wvpmVfNEXnTbP7jHyNLu9jeJ6yhoXvgIyQtzA5PbEI1fyXEgeZzOKMltmITqL3g3APsPsagCTC66rwrBT23Aghh6D314uilT2DZHCc68MH2nyV~qAhFqIQiOj-7VTEKqkDPvPYvuE2KNKXdvW23gk100YV~58ozbt8ijRz5Gr5gPtE~f1Ab5l260EIbWHJNabMRleInJQqUIDPFN4C38PQ__&Key-Pair-Id=APKAIE5G5CRDK6RD3PGA' # df = src.fill_uniprot(url, 'Supplementary Table 1', insertion=False) # src.fill_rna_halflife(df, ['Mycobacterium tuberculosis H37Rv', 83332]) df = src.fill_uniprot(url, 'Supplementary Table 2', skiprows=list(range(0,6))) src.fill_rna_halflife(df, ['Mycolicibacterium smegmatis MC2 155', 246196]) if __name__ == '__main__': main()
[ [ [ 7, 19 ] ], [ [ 47, 64 ], [ 202, 219 ] ], [ [ 72, 80 ] ], [ [ 88, 109 ], [ 6905, 6914 ], [ 6996, 7005 ], [ 7085, 7094 ] ], [ [ 117, 125 ], [ 4682, 4690 ], [ 5344, 5352 ], [ 6114, 6122 ], [ 6600, 6608 ] ], [ [ 156, 165 ], [ 1849, 1858 ] ], [ [ 167, 184 ], [ 1874, 1891 ] ], [ [ 193, 201 ], [ 7169, 7177 ] ], [ [ 6775, 6779 ], [ 8311, 8315 ] ] ]
import os
from pathlib import Path

import numpy as np
import pytest
from jina import Flow, Document
from jina.clients import Client
from jina.logging.profile import TimeContext
from jina.parsers import set_client_cli_parser
from typing import Dict

from jina import DocumentArray, Executor, requests


class DumpExecutor(Executor):
    @requests
    def dump(self, docs: DocumentArray, parameters: Dict, **kwargs):
        shards = int(parameters['shards'])
        dump_path = parameters['dump_path']
        shard_size = len(docs) / shards
        os.makedirs(dump_path, exist_ok=True)
        for i in range(shards):
            dump_file = f'{dump_path}/{i}.ndjson'
            docs_to_be_dumped = docs[int(i * shard_size) : int((i + 1) * shard_size)]
            docs_to_be_dumped.save(dump_file)


class ErrorExecutor(Executor):
    @requests
    def dump(self, docs: DocumentArray, **kwargs):
        if len(docs) > 0:
            assert False


class ReloadExecutor(Executor):
    def __init__(self, dump_path=None, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # backwards compatibility
        assert 'dump_path' in kwargs['runtime_args'].keys()
        if dump_path is not None:
            shard_id = getattr(self.runtime_args, 'pea_id', None)
            shard_dump_path = os.path.join(dump_path, f'{shard_id}.ndjson')
            self._docs = DocumentArray.load(shard_dump_path)
        else:
            self._docs = DocumentArray()

    @requests
    def search(self, docs: DocumentArray, **kwargs):
        docs.clear()
        docs.extend(self._docs)


class MergeExecutor(Executor):
    @requests
    def merge(self, docs_matrix: DocumentArray, **kwargs):
        merged_docs = DocumentArray()
        for docs in docs_matrix:
            merged_docs.extend(docs)
        return merged_docs


def get_client(port):
    args = set_client_cli_parser().parse_args(
        ['--host', 'localhost', '--port', str(port)]
    )
    return Client(args)


def get_documents(count=10, emb_size=7):
    for i in range(count):
        yield Document(
            id=i,
            text=f'hello world {i}',
            embedding=np.random.random(emb_size),
            tags={'tag_field': f'tag data {i}'},
        )


def path_size(dump_path):
    return (
        sum(
            f.stat().st_size
            for f in Path(dump_path).glob('**/*')
            if f.is_file()
        )
        / 1e6
    )


@pytest.mark.repeat(20)
@pytest.mark.parametrize('shards', [5, 3, 1])
@pytest.mark.parametrize('nr_docs', [7])
@pytest.mark.parametrize('emb_size', [10])
def test_dump_reload(tmpdir, shards, nr_docs, emb_size, times_to_index=2):
    """showcases using replicas + dump + rolling update with independent clients"""
    with Flow().add(uses=DumpExecutor, name='dump_exec').add(
        uses=ErrorExecutor, name='error_exec'
    ) as flow_dump:
        merge_executor = MergeExecutor if shards > 1 else None
        with Flow().add(
            uses=ReloadExecutor,
            name='reload_exec',
            replicas=2,
            shards=shards,
            uses_after=merge_executor,
        ) as flow_reload:
            for run_number in range(times_to_index):
                dump_path = os.path.join(tmpdir, f'dump-{run_number}')
                client_dbms = get_client(flow_dump.port_expose)
                client_query = get_client(flow_reload.port_expose)
                docs = list(
                    get_documents(
                        count=nr_docs * (run_number + 1),
                        emb_size=emb_size,
                    )
                )
                with TimeContext(f'### dumping {len(docs)} docs'):
                    client_dbms.post(
                        on='/dump',
                        inputs=docs,
                        target_peapod='dump_exec',
                        parameters={'dump_path': dump_path, 'shards': shards},
                    )
                print(f'### dump path size: {path_size(dump_path)} MBs')

                with TimeContext(f'### rolling update on {len(docs)}'):
                    # flow object is used for ctrl requests
                    flow_reload.rolling_update('reload_exec', dump_path)
                for _ in range(5):
                    result = client_query.post(
                        on='/search', inputs=[Document()], return_results=True
                    )
                    assert len(docs) == len(result[0].docs)
[ [ [ 7, 9 ], [ 551, 553 ], [ 1309, 1311 ], [ 3286, 3288 ] ], [ [ 30, 34 ], [ 2350, 2354 ] ], [ [ 43, 54 ], [ 2159, 2161 ] ], [ [ 62, 68 ], [ 2439, 2445 ], [ 2463, 2469 ], [ 2509, 2515 ], [ 2550, 2556 ] ], [ [ 87, 91 ], [ 2761, 2765 ], [ 2964, 2968 ] ], [ [ 93, 101 ], [ 2072, 2080 ], [ 4406, 4414 ] ], [ [ 127, 133 ], [ 1975, 1981 ] ], [ [ 167, 178 ], [ 3687, 3698 ], [ 4092, 4103 ] ], [ [ 204, 225 ], [ 1868, 1889 ] ], [ [ 245, 249 ], [ 399, 403 ] ], [ [ 267, 280 ], [ 372, 385 ], [ 875, 888 ], [ 1380, 1393 ], [ 1455, 1468 ], [ 1513, 1526 ], [ 1672, 1685 ], [ 1720, 1733 ] ], [ [ 282, 290 ], [ 322, 330 ], [ 825, 833 ], [ 975, 983 ], [ 1614, 1622 ] ], [ [ 292, 300 ], [ 338, 346 ], [ 841, 849 ], [ 1477, 1485 ], [ 1630, 1638 ] ], [ [ 309, 321 ], [ 2777, 2789 ] ], [ [ 811, 824 ], [ 2831, 2844 ] ], [ [ 960, 974 ], [ 3001, 3015 ] ], [ [ 1600, 1613 ], [ 2913, 2926 ] ], [ [ 1839, 1849 ], [ 3359, 3369 ], [ 3424, 3434 ] ], [ [ 1994, 2007 ], [ 3509, 3522 ] ], [ [ 2252, 2261 ], [ 4042, 4051 ] ], [ [ 2596, 2612 ] ] ]
# (C) Datadog, Inc. 2010-2016
# All rights reserved
# Licensed under Simplified BSD License (see LICENSE)
from threading import Thread
import functools

_thread_by_func = {}


class TimeoutException(Exception):
    """
    Raised when a function runtime exceeds the limit set.
    """
    pass


class ThreadMethod(Thread):
    """
    Descendant of `Thread` class.
    Run the specified target method with the specified arguments.
    Store result and exceptions.
    From: https://code.activestate.com/recipes/440569/
    """
    def __init__(self, target, args, kwargs):
        Thread.__init__(self)
        self.setDaemon(True)
        self.target, self.args, self.kwargs = target, args, kwargs
        self.start()

    def run(self):
        try:
            self.result = self.target(*self.args, **self.kwargs)
        except Exception as e:
            self.exception = e
        else:
            self.exception = None


def timeout(timeout):
    """
    A decorator to timeout a function. Decorated method calls are executed in a
    separate new thread with a specified timeout.
    Also check if a thread for the same function already exists before creating
    a new one.
    Note: Compatible with Windows (thread based).
    """
    def decorator(func):
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            key = "{0}:{1}:{2}:{3}".format(id(func), func.__name__, args, kwargs)

            if key in _thread_by_func:
                # A thread for the same function already exists.
                worker = _thread_by_func[key]
            else:
                worker = ThreadMethod(func, args, kwargs)
                _thread_by_func[key] = worker

            worker.join(timeout)

            if worker.is_alive():
                raise TimeoutException()

            del _thread_by_func[key]

            if worker.exception:
                raise worker.exception
            else:
                return worker.result

        return wrapper

    return decorator
[ [ [ 129, 135 ], [ 316, 322 ], [ 585, 591 ] ], [ [ 143, 152 ], [ 1278, 1287 ] ], [ [ 154, 169 ], [ 1443, 1458 ], [ 1550, 1565 ], [ 1663, 1678 ], [ 1819, 1834 ] ], [ [ 183, 199 ], [ 1783, 1799 ] ], [ [ 303, 315 ], [ 1614, 1626 ] ], [ [ 938, 945 ] ] ]
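A minimal usage sketch for the decorator above (not part of the original file): the function name and the sleep/timeout durations are illustrative, and it assumes `timeout` and `TimeoutException` are imported from the module shown in this row.

import time

@timeout(2)                      # allow the call at most 2 seconds
def slow_lookup(host):
    time.sleep(5)               # stands in for a hanging network call
    return host

try:
    slow_lookup("example.org")
except TimeoutException:
    print("lookup timed out")   # the worker thread is still alive after join(2)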
# Copyright 2016 Hewlett Packard Enterprise Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_log import log as logging

from designate import exceptions
from designate import objects
from designate import policy
from designate.scheduler.filters import base

LOG = logging.getLogger(__name__)

class PoolIDAttributeFilter(base.Filter):
    """This allows users with the correct role to specify the exact pool_id
    to schedule the supplied zone to.

    This is supplied as an attribute on the zone

    .. code-block:: python
       :emphasize-lines: 3

       {
           "attributes": {
               "pool_id": "794ccc2c-d751-44fe-b57f-8894c9f5c842"
           },
           "email": "[email protected]",
           "name": "example.com."
       }

    The pool is loaded to ensure it exists, and then a policy check is
    performed to ensure the user has the correct role.

    .. warning::

        This should only be enabled if required, as it will raise a 403
        Forbidden if a user without the correct role uses it.
    """

    name = 'pool_id_attribute'
    """Name to enable in the ``[designate:central:scheduler].filters`` option
    list
    """

    def filter(self, context, pools, zone):
        """Attempt to load and set the pool to the one provided in the
        Zone attributes.

        :param context: :class:`designate.context.DesignateContext` - Context
            Object from request
        :param pools: :class:`designate.objects.pool.PoolList` - List of pools
            to choose from
        :param zone: :class:`designate.objects.zone.Zone` - Zone to be created
        :return: :class:`designate.objects.pool.PoolList` -- A PoolList with
            containing a single pool.
        :raises: Forbidden, PoolNotFound
        """
        try:
            if zone.attributes.get('pool_id'):
                pool_id = zone.attributes.get('pool_id')
                try:
                    pool = self.storage.get_pool(context, pool_id)
                except Exception:
                    return objects.PoolList()
                policy.check('zone_create_forced_pool', context, pool)
                if pool in pools:
                    pools = objects.PoolList()
                    pools.append(pool)
                return pools
            else:
                return pools
        except exceptions.RelationNotLoaded:
            return pools
[ [ [ 638, 652 ], [ 798, 805 ] ], [ [ 676, 686 ], [ 2884, 2894 ] ], [ [ 709, 716 ], [ 2583, 2590 ], [ 2735, 2742 ] ], [ [ 739, 745 ], [ 2618, 2624 ] ], [ [ 786, 790 ], [ 856, 860 ] ], [ [ 792, 795 ] ], [ [ 834, 855 ] ] ]
from .operation import Operation
[ [ [ 24, 33 ] ] ]
# -*- coding: utf-8 -*- ########################################################################### # Copyright (c), The AiiDA team. All rights reserved. # # This file is part of the AiiDA code. # # # # The code is hosted on GitHub at https://github.com/aiidateam/aiida-core # # For further information on the license, see the LICENSE.txt file # # For further information please visit http://www.aiida.net # ########################################################################### """Utilities related to the ORM.""" __all__ = ('load_code', 'load_computer', 'load_group', 'load_node') def load_entity( entity_loader=None, identifier=None, pk=None, uuid=None, label=None, sub_classes=None, query_with_dashes=True ): # pylint: disable=too-many-arguments """ Load an entity instance by one of its identifiers: pk, uuid or label If the type of the identifier is unknown simply pass it without a keyword and the loader will attempt to automatically infer the type. :param identifier: pk (integer), uuid (string) or label (string) of a Code :param pk: pk of a Code :param uuid: uuid of a Code, or the beginning of the uuid :param label: label of a Code :param sub_classes: an optional tuple of orm classes to narrow the queryset. Each class should be a strict sub class of the ORM class of the given entity loader. :param bool query_with_dashes: allow to query for a uuid with dashes :returns: the Code instance :raise ValueError: if none or more than one of the identifiers are supplied :raise TypeError: if the provided identifier has the wrong type :raise aiida.common.NotExistent: if no matching Code is found :raise aiida.common.MultipleObjectsError: if more than one Code was found """ from aiida.orm.utils.loaders import OrmEntityLoader, IdentifierType if entity_loader is None or not issubclass(entity_loader, OrmEntityLoader): raise TypeError(f'entity_loader should be a sub class of {type(OrmEntityLoader)}') inputs_provided = [value is not None for value in (identifier, pk, uuid, label)].count(True) if inputs_provided == 0: raise ValueError("one of the parameters 'identifier', pk', 'uuid' or 'label' has to be specified") elif inputs_provided > 1: raise ValueError("only one of parameters 'identifier', pk', 'uuid' or 'label' has to be specified") if pk is not None: if not isinstance(pk, int): raise TypeError('a pk has to be an integer') identifier = pk identifier_type = IdentifierType.ID elif uuid is not None: if not isinstance(uuid, str): raise TypeError('uuid has to be a string type') identifier = uuid identifier_type = IdentifierType.UUID elif label is not None: if not isinstance(label, str): raise TypeError('label has to be a string type') identifier = label identifier_type = IdentifierType.LABEL else: identifier = str(identifier) identifier_type = None return entity_loader.load_entity( identifier, identifier_type, sub_classes=sub_classes, query_with_dashes=query_with_dashes ) def load_code(identifier=None, pk=None, uuid=None, label=None, sub_classes=None, query_with_dashes=True): """ Load a Code instance by one of its identifiers: pk, uuid or label If the type of the identifier is unknown simply pass it without a keyword and the loader will attempt to automatically infer the type. :param identifier: pk (integer), uuid (string) or label (string) of a Code :param pk: pk of a Code :param uuid: uuid of a Code, or the beginning of the uuid :param label: label of a Code :param sub_classes: an optional tuple of orm classes to narrow the queryset. 
Each class should be a strict sub class of the ORM class of the given entity loader. :param bool query_with_dashes: allow to query for a uuid with dashes :return: the Code instance :raise ValueError: if none or more than one of the identifiers are supplied :raise TypeError: if the provided identifier has the wrong type :raise aiida.common.NotExistent: if no matching Code is found :raise aiida.common.MultipleObjectsError: if more than one Code was found """ from aiida.orm.utils.loaders import CodeEntityLoader return load_entity( CodeEntityLoader, identifier=identifier, pk=pk, uuid=uuid, label=label, sub_classes=sub_classes, query_with_dashes=query_with_dashes ) def load_computer(identifier=None, pk=None, uuid=None, label=None, sub_classes=None, query_with_dashes=True): """ Load a Computer instance by one of its identifiers: pk, uuid or label If the type of the identifier is unknown simply pass it without a keyword and the loader will attempt to automatically infer the type. :param identifier: pk (integer), uuid (string) or label (string) of a Computer :param pk: pk of a Computer :param uuid: uuid of a Computer, or the beginning of the uuid :param label: label of a Computer :param sub_classes: an optional tuple of orm classes to narrow the queryset. Each class should be a strict sub class of the ORM class of the given entity loader. :param bool query_with_dashes: allow to query for a uuid with dashes :return: the Computer instance :raise ValueError: if none or more than one of the identifiers are supplied :raise TypeError: if the provided identifier has the wrong type :raise aiida.common.NotExistent: if no matching Computer is found :raise aiida.common.MultipleObjectsError: if more than one Computer was found """ from aiida.orm.utils.loaders import ComputerEntityLoader return load_entity( ComputerEntityLoader, identifier=identifier, pk=pk, uuid=uuid, label=label, sub_classes=sub_classes, query_with_dashes=query_with_dashes ) def load_group(identifier=None, pk=None, uuid=None, label=None, sub_classes=None, query_with_dashes=True): """ Load a Group instance by one of its identifiers: pk, uuid or label If the type of the identifier is unknown simply pass it without a keyword and the loader will attempt to automatically infer the type. :param identifier: pk (integer), uuid (string) or label (string) of a Group :param pk: pk of a Group :param uuid: uuid of a Group, or the beginning of the uuid :param label: label of a Group :param sub_classes: an optional tuple of orm classes to narrow the queryset. Each class should be a strict sub class of the ORM class of the given entity loader. :param bool query_with_dashes: allow to query for a uuid with dashes :return: the Group instance :raise ValueError: if none or more than one of the identifiers are supplied :raise TypeError: if the provided identifier has the wrong type :raise aiida.common.NotExistent: if no matching Group is found :raise aiida.common.MultipleObjectsError: if more than one Group was found """ from aiida.orm.utils.loaders import GroupEntityLoader return load_entity( GroupEntityLoader, identifier=identifier, pk=pk, uuid=uuid, label=label, sub_classes=sub_classes, query_with_dashes=query_with_dashes ) def load_node(identifier=None, pk=None, uuid=None, label=None, sub_classes=None, query_with_dashes=True): """ Load a node by one of its identifiers: pk or uuid. 
If the type of the identifier is unknown simply pass it without a keyword and the loader will attempt to infer the type :param identifier: pk (integer) or uuid (string) :param pk: pk of a node :param uuid: uuid of a node, or the beginning of the uuid :param label: label of a Node :param sub_classes: an optional tuple of orm classes to narrow the queryset. Each class should be a strict sub class of the ORM class of the given entity loader. :param bool query_with_dashes: allow to query for a uuid with dashes :returns: the node instance :raise ValueError: if none or more than one of the identifiers are supplied :raise TypeError: if the provided identifier has the wrong type :raise aiida.common.NotExistent: if no matching Node is found :raise aiida.common.MultipleObjectsError: if more than one Node was found """ from aiida.orm.utils.loaders import NodeEntityLoader return load_entity( NodeEntityLoader, identifier=identifier, pk=pk, uuid=uuid, label=label, sub_classes=sub_classes, query_with_dashes=query_with_dashes )
[ [ [ 669, 676 ] ], [ [ 743, 754 ], [ 4532, 4543 ], [ 5960, 5971 ], [ 7362, 7373 ], [ 8691, 8702 ] ], [ [ 3358, 3367 ] ], [ [ 4746, 4759 ] ], [ [ 6178, 6188 ] ], [ [ 7577, 7586 ] ] ]
# -*- coding: utf-8 -*-
##########################################################################
# NSAp - Copyright (C) CEA, 2019
# Distributed under the terms of the CeCILL-B license, as published by
# the CEA-CNRS-INRIA. Refer to the LICENSE file or to
# http://www.cecill.info/licences/Licence_CeCILL-B_V1-en.html
# for details.
##########################################################################

"""
Module that provides tools to compute class activation map.
"""

# Imports
import logging
import skimage
import numpy as np
import torch
from torch.autograd import Variable
import torch.nn.functional as func

# Global parameters
logger = logging.getLogger("pynet")


class FeatureExtractor(object):
    """ Class for extracting activations and registering gradients from
    targetted intermediate layers.
    """
    def __init__(self, model, target_layers):
        self.model = model
        self.target_layers = target_layers
        self.gradients = []

    def save_gradient(self, grad):
        self.gradients.append(grad)

    def __call__(self, x):
        outputs = []
        self.gradients = []
        for name, module in self.model._modules.items():
            x = module(x)
            if name in self.target_layers:
                x.register_hook(self.save_gradient)
                outputs += [x]
        return outputs, x


class ModelOutputs(object):
    """ Class for making a forward pass, and getting:
    1- the network output.
    2- activations from intermeddiate targetted layers.
    3- gradients from intermeddiate targetted layers.
    """
    def __init__(self, model, target_layers):
        self.model = model
        self.feature_extractor = FeatureExtractor(
            self.model.features, target_layers)

    def get_activations_gradient(self):
        return self.feature_extractor.gradients

    def get_activations(self, x):
        return self.feature_extractor(x)

    def __call__(self, x):
        if hasattr(self.model, "pre"):
            x = self.model.pre(x)
        target_activations, output = self.feature_extractor(x)
        if hasattr(self.model, "pool"):
            output = self.model.pool(output)
        output = output.view(output.size(0), -1)
        output = self.model.classifier(output)
        return target_activations, output


class GradCam(object):
    """ Class for computing class activation map.
    """
    def __init__(self, model, target_layers, labels, top=1):
        self.model = model
        self.labels = labels
        self.top = top
        self.model.eval()
        self.extractor = ModelOutputs(self.model, target_layers)

    def forward(self, input):
        return self.model(input)

    def __call__(self, input):
        features, output = self.extractor(input)
        pred_prob = func.softmax(output, dim=1).data.squeeze()
        probs, indices = pred_prob.sort(0, True)
        probs = probs.data.numpy()
        indices = indices.data.numpy()
        heatmaps = {}
        for cnt, (prob, index) in enumerate(zip(probs, indices)):
            if cnt == self.top:
                break
            label = self.labels[str(index)][1]
            line = "{0:.3f} -> {1}".format(prob, label)
            logger.info(line)
            one_hot = np.zeros((1, output.size()[-1]), dtype=np.float32)
            one_hot[0][index] = 1
            one_hot = Variable(torch.from_numpy(one_hot), requires_grad=True)
            one_hot = torch.sum(one_hot * output)
            self.model.features.zero_grad()
            self.model.classifier.zero_grad()
            one_hot.backward(retain_graph=True)
            gradients = self.extractor.get_activations_gradient()[-1]
            gradients = gradients.cpu().data.numpy()
            pooled_gradients = np.mean(gradients, axis=(0, 2, 3))
            activations = features[-1]
            activations = activations.cpu().data.numpy()
            for cnt, weight in enumerate(pooled_gradients):
                activations[:, cnt] *= weight
            heatmap = np.mean(activations, axis=1).squeeze()
            heatmap = np.maximum(heatmap, 0)
            heatmap -= np.min(heatmap)
            heatmap /= np.max(heatmap)
            heatmap_highres = skimage.transform.resize(
                heatmap, input.shape[2:])
            heatmaps[label] = (input, heatmap, heatmap_highres)
        return heatmaps
[ [ [ 498, 505 ], [ 655, 662 ] ], [ [ 513, 520 ], [ 4210, 4217 ] ], [ [ 528, 539 ], [ 3254, 3256 ], [ 3293, 3295 ], [ 3759, 3761 ], [ 4018, 4020 ], [ 4079, 4081 ], [ 4125, 4127 ], [ 4164, 4166 ] ], [ [ 547, 552 ], [ 3370, 3375 ], [ 3439, 3444 ] ], [ [ 580, 588 ], [ 3361, 3369 ] ], [ [ 596, 623 ], [ 2791, 2795 ] ], [ [ 646, 652 ], [ 3214, 3220 ] ], [ [ 690, 706 ], [ 1694, 1710 ] ], [ [ 1367, 1379 ], [ 2586, 2598 ] ], [ [ 2320, 2327 ] ] ]
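A usage sketch for the Grad-CAM classes above (not part of the original module): it assumes a torchvision VGG-style network, whose `features`/`classifier` attributes are what `ModelOutputs` expects, plus a dummy label table; the layer index "20" and the random input are only for illustration.

import torch
from torchvision import models  # assumed to be installed alongside torch

model = models.vgg11()  # any net exposing .features and .classifier
labels = {str(i): (i, "class_%d" % i) for i in range(1000)}  # index -> (id, name)

cam = GradCam(model, target_layers=["20"], labels=labels, top=1)
heatmaps = cam(torch.randn(1, 3, 224, 224))
for label, (inp, heatmap, heatmap_highres) in heatmaps.items():
    print(label, heatmap.shape, heatmap_highres.shape)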
# -*- coding: utf-8 -*- from PySide2 import QtCore, QtGui, QtWidgets import json import core_functions as cf import numpy as np from UI_labeled_slider import LabeledSlider class Ui_AssignGroup(object): def setupUi(self, AssignGroups): # Note: this is not how it should be done but currently I don't know # how to do it differently. This is only needed to be able to emit # signals to the main window AssignGroups.setObjectName("AssignGroups") AssignGroups.setWindowTitle("Group Assignement Dialog") AssignGroups.resize(509, 317) AssignGroups.setStyleSheet( "QWidget {\n" " background-color: rgb(44, 49, 60);\n" " color: rgb(255, 255, 255);\n" ' font: 63 10pt "Segoe UI";\n' "}\n" "QPushButton {\n" " border: 2px solid rgb(52, 59, 72);\n" " border-radius: 5px;\n" " background-color: rgb(52, 59, 72);\n" "}\n" "QPushButton:hover {\n" " background-color: rgb(57, 65, 80);\n" " border: 2px solid rgb(61, 70, 86);\n" "}\n" "QPushButton:pressed {\n" " background-color: rgb(35, 40, 49);\n" " border: 2px solid rgb(43, 50, 61);\n" "}\n" "QPushButton:checked {\n" " background-color: rgb(35, 40, 49);\n" " border: 2px solid rgb(85, 170, 255);\n" "}" "QLineEdit {\n" " border: 2px solid rgb(61, 70, 86);\n" " border-radius: 5px;\n" " background-color: rgb(52, 59, 72);\n" "}\n" "QSpinBox {\n" " border: 2px solid rgb(61, 70, 86);\n" " border-radius: 5px;\n" " background-color: rgb(52, 59, 72);\n" "}\n" "QDoubleSpinBox {\n" " border: 2px solid rgb(61, 70, 86);\n" " border-radius: 5px;\n" " background-color: rgb(52, 59, 72);\n" "}\n" ) self.verticalLayout = QtWidgets.QVBoxLayout(AssignGroups) self.verticalLayout.setContentsMargins(25, 10, 25, 10) self.verticalLayout.setObjectName("verticalLayout") # # Device settings # self.device_settings_header_label = QtWidgets.QLabel(AssignGroups) # self.device_settings_header_label.setMinimumSize(QtCore.QSize(0, 20)) # self.device_settings_header_label.setStyleSheet( # 'font: 75 bold 10pt "Segoe UI";' # ) # self.device_settings_header_label.setObjectName("device_settings_header_label") # self.verticalLayout.addWidget(self.device_settings_header_label) # self.header_line_1 = QtWidgets.QFrame() # self.header_line_1.setFrameShape(QtWidgets.QFrame.HLine) # self.header_line_1.setFrameShadow(QtWidgets.QFrame.Sunken) # self.verticalLayout.addWidget(self.header_line_1) # self.header_line_1.setStyleSheet( # "QFrame {\n" " border: 2px solid rgb(52, 59, 72);\n" "}\n" # ) # self.manualRowCountGridLayout = 1 # Define dialog in which parameters should be entered # dialog = QtWidgets.QDialog() # dialog.setWindowTitle("Group Assignement Dialog") # Select the scan that shall be evaluated if not self.include_all_scans: self.select_scan_number_label = QtWidgets.QLabel() self.select_scan_number_label.setObjectName("select_scan_number_label") self.verticalLayout.addWidget(self.select_scan_number_label) self.select_scan_number_ComboBox = QtWidgets.QComboBox() self.select_scan_number_ComboBox.setObjectName( "select_scan_number_ComboBox" ) for i in range(self.parameters["no_of_scans"]): self.select_scan_number_ComboBox.addItem(str(int(i + 1))) self.select_scan_number_ComboBox.setCurrentIndex(0) self.verticalLayout.addWidget(self.select_scan_number_ComboBox) # Select the number of groups to define self.no_groups_label = QtWidgets.QLabel() self.verticalLayout.addWidget(self.no_groups_label) self.no_groups_LabeledSlider = LabeledSlider( 1, int(np.size(np.unique(self.parameters["device_number"]))), interval=1, orientation=QtCore.Qt.Horizontal, ) self.verticalLayout.addWidget(self.no_groups_LabeledSlider) 
self.available_devices_label = QtWidgets.QLabel() self.verticalLayout.addWidget(self.available_devices_label) # if np.size(self.paths) == 1: # verticalLayout.addWidget(self.no_groups_LabeledSlider) # Define the group assignement fields self.group_definition_gridLayout = QtWidgets.QGridLayout() self.group_definition_gridLayout.setSpacing(10) # Group names and its container self.group_name_label = QtWidgets.QLabel() self.group_definition_gridLayout.addWidget(self.group_name_label, 1, 0, 1, 1) self.group_name_LineEdit_container = np.empty(0, dtype="object") self.group_name_LineEdit_container = np.append( self.group_name_LineEdit_container, QtWidgets.QLineEdit() ) self.group_definition_gridLayout.addWidget( self.group_name_LineEdit_container[0], 2, 0 ) # Enter device numbers and its container self.device_assignment_label = QtWidgets.QLabel() self.group_definition_gridLayout.addWidget( self.device_assignment_label, 1, 1, 1, 1 ) self.device_assignment_LineEdit_container = np.empty(0, dtype="object") self.device_assignment_LineEdit_container = np.append( self.device_assignment_LineEdit_container, QtWidgets.QLineEdit() ) self.group_definition_gridLayout.addWidget( self.device_assignment_LineEdit_container[0], 2, 1 ) # Assign a spectrum file to the group if not self.autodetect_spectrum: self.spectrum_file_label = QtWidgets.QLabel() self.group_definition_gridLayout.addWidget( self.spectrum_file_label, 1, 2, 1, 1 ) self.group_spectrum_PushButton_container = np.empty(0, dtype="object") self.group_spectrum_PushButton_container = np.append( self.group_spectrum_PushButton_container, QtWidgets.QPushButton("") ) self.group_spectrum_PushButton_container[0].setStyleSheet( "background-color: red" ) self.group_definition_gridLayout.addWidget( self.group_spectrum_PushButton_container[0], 2, 2 ) # Definition of a plotting color for the group self.group_color_label = QtWidgets.QLabel() self.group_definition_gridLayout.addWidget(self.group_color_label, 1, 3, 1, 1) self.group_colors_PushButton_container = np.empty(0, dtype="object") self.group_colors_PushButton_container = np.append( self.group_colors_PushButton_container, QtWidgets.QPushButton("") ) self.group_colors_PushButton_container[0].setStyleSheet( "background-color: " + str(self.group_color[0]) ) self.group_definition_gridLayout.addWidget( self.group_colors_PushButton_container[0], 2, 3 ) # Define the bottom pushbuttons that allows to close and save the dialog self.leave_horizontalLayout = QtWidgets.QHBoxLayout() self.close_pushButton = QtWidgets.QPushButton("Close") self.save_pushButton = QtWidgets.QPushButton("Save") self.leave_horizontalLayout.addWidget(self.close_pushButton) self.leave_horizontalLayout.addWidget(self.save_pushButton) self.verticalLayout.addLayout(self.group_definition_gridLayout) self.verticalLayout.addLayout(self.leave_horizontalLayout) self.setLayout(self.verticalLayout) self.retranslateUi(AssignGroups) QtCore.QMetaObject.connectSlotsByName(AssignGroups) def retranslateUi(self, AssignGroups): _translate = QtCore.QCoreApplication.translate AssignGroups.setWindowTitle(_translate("AssignGroups", "Assign Groups")) if not self.include_all_scans: self.select_scan_number_label.setText( _translate("AssignGroups", "Select Scan") ) self.no_groups_label.setText( _translate("AssignGroups", "Select Number of Groups") ) self.available_devices_label.setText( _translate( "AssignGroups", "Available Devices for Assignment " + str(self.parameters["device_number"]), ) ) self.group_name_label.setText(_translate("AssignGroups", "Group Name")) 
self.device_assignment_label.setText( _translate("AssignGroups", "Assign Devices (seperated by ,)") ) self.group_color_label.setText(_translate("AssignGroups", "Color")) if not self.autodetect_spectrum: self.spectrum_file_label.setText(_translate("AssignGroups", "Spectrum"))
[ [ [ 44, 50 ], [ 4654, 4660 ], [ 8339, 8345 ], [ 8456, 8462 ] ], [ [ 52, 57 ] ], [ [ 59, 68 ], [ 2308, 2317 ], [ 3664, 3673 ], [ 3888, 3897 ], [ 4386, 4395 ], [ 4795, 4804 ], [ 5077, 5086 ], [ 5230, 5239 ], [ 5513, 5522 ], [ 5752, 5761 ], [ 6085, 6094 ], [ 6369, 6378 ], [ 6719, 6728 ], [ 7109, 7118 ], [ 7404, 7413 ], [ 7817, 7826 ], [ 7873, 7882 ], [ 7936, 7945 ] ], [ [ 77, 81 ] ], [ [ 89, 109 ] ], [ [ 117, 128 ], [ 4551, 4553 ], [ 4559, 4561 ], [ 5381, 5383 ], [ 5454, 5456 ], [ 5939, 5941 ], [ 6019, 6021 ], [ 6567, 6569 ], [ 6650, 6652 ], [ 7264, 7266 ], [ 7341, 7343 ] ], [ [ 160, 173 ], [ 4505, 4518 ] ], [ [ 182, 196 ] ] ]
# Dictionary
class Dict_word_jumbler(object):

    def __init__(self):
        self.dict = self.build_dict()

    def build_dict(self):
        """Build a dictionary to hold all of the words/letters"""
        dic = {}
        f = open("/usr/share/dict/words", "r")
        word_list = f.readlines()
        for word in word_list:
            word = word.strip().lower()
            words = ''.join(sorted(word))
            dic[words] = word
        return dic

    def unscramble(self, words):
        """Build a function to unscramble the letters"""
        for word in words:
            word = word.strip().lower()
            word_sorted = ''.join(sorted(word))
            if word_sorted in self.dict:
                unscrambled = self.dict[word_sorted]
                print(unscrambled)
            else:
                return None


if __name__ == '__main__':
    # Cartoon prompt for final jumble:
    # "Farley rolled on the barn floor because of his __-______."
    words = ['tefon', 'sokik', 'niumem', 'siconu']
    jumble = Dict_word_jumbler()
    jumble.unscramble(words)

    # # "A bad way for a lawyer to learn the criminal justice system: _____ and _____."
    # words = ['laisa', 'laurr', 'bureek', 'prouot']
    # jumble = Dict_word_jumbler()
    # jumble.unscramble(words)

    # # Cartoon prompt for final jumble: "What a dog house is: A ____ ___."
    # words = ['TARFD', 'JOBUM', 'TENJUK', 'LETHEM']
    # jumble = Dict_word_jumbler()
    # jumble.unscramble(words)
[ [ [ 19, 36 ], [ 1040, 1057 ] ], [ [ 980, 985 ], [ 1082, 1087 ] ], [ [ 1031, 1037 ], [ 1064, 1070 ] ] ]
import pickle

dict1 = {'Python ':90,'Java ':95,'C++ ':85}
f = open('bin)file.dat','wb')
pickle.dump(dict1,f)
f.close()
[ [ [ 7, 13 ], [ 89, 95 ] ], [ [ 15, 20 ], [ 101, 106 ] ], [ [ 59, 60 ], [ 107, 108 ], [ 110, 111 ] ] ]
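The snippet above only writes the dictionary; a complementary read-back (not in the original) could look like this, reusing the same file name:

import pickle

with open('bin)file.dat', 'rb') as f:
    restored = pickle.load(f)
print(restored)  # {'Python ': 90, 'Java ': 95, 'C++ ': 85}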
""" Testing ResGraphNet """ import datetime import numpy as np import pandas as pd import torch import os import os.path as osp import matplotlib.pyplot as plt import sys sys.path.append("..") import func.cal as cal device = "cuda:0" if torch.cuda.is_available() else "cpu" # device = "cpu" l_x = 60 # Data sequence length l_y = 1 # Label sequence length lr = 0.0001 # Learning rate weight_decay = 5e-4 epochs = 4000 hidden_dim = 64 gnn_style = "ResGraphNet" save_fig = True # Whether to save picture save_txt = False # Whether to save txt save_np = True # Whether to save np file save_model = True # Whether to save network model ratio_train = 0.5 # Proportion of training datasets fig_size = (16, 12) ts_name_all = ["cli_dash", "HadCRUT5", "temp_month", "temp_year", "elect", "traffic", "sales"] ts_name_folder = "HadCRUT5" # Name of the folder where the data resides ts_name = "HadCRUT5_global" # Name of the selected time series iv = 1 # sampling interval, used for plotting curves way = "mean" # The style of plot curves of real data and predict results x_address = osp.join("../datasets", ts_name_folder, ts_name + ".npy") x = np.load(x_address) num = x.shape[0] # The length of time series result_address = osp.join("../result", ts_name, "ResGraphNet") if not(osp.exists(result_address)): os.makedirs(result_address) num_train = int(ratio_train * num) data_train, data_test = x[:num_train], x[num_train:num] # get training dataset and test dataset len_interp = l_y + 6 data_test_ = np.array(data_test[:-l_y].tolist() + data_test[-len_interp-l_y:-l_y].tolist() + data_test[-l_y:].tolist()) # Using Graph Neural network, prepare data information x_train, y_train = cal.create_inout_sequences(data_train, l_x, l_y, style="arr") x_test, y_test = cal.create_inout_sequences(data_test_, l_x, l_y, style="arr") x_train = torch.from_numpy(x_train).float().to(device) x_test = torch.from_numpy(x_test).float().to(device) y_train = torch.from_numpy(y_train).float().to(device) y_test = torch.from_numpy(y_test).float().to(device) num_nodes = x_train.shape[0] + x_test.shape[0] num_train = x_train.shape[0] x = torch.cat((x_train, x_test), dim=0) y = torch.cat((y_train, y_test), dim=0) adm = cal.path_graph(num_nodes) # adm = cal.ts_un(num_nodes, 6) edge_index, edge_weight = cal.tran_adm_to_edge_index(adm) train_index = torch.arange(num_train, dtype=torch.long) test_index = torch.arange(num_train, num_nodes, dtype=torch.long) train_mask = cal.index_to_mask(train_index, num_nodes).to(device) test_mask = cal.index_to_mask(test_index, num_nodes).to(device) # Using ResGraphNet, predicting time series (The Proposed Network Model) model = cal.GNNTime(l_x, hidden_dim, l_y, edge_weight, gnn_style, num_nodes).to(device) criterion = torch.nn.MSELoss().to(device) optimizer = torch.optim.Adam(model.parameters(), lr=lr, weight_decay=weight_decay) edge_index = edge_index.to(device) start_time = datetime.datetime.now() print("Running, {}".format(gnn_style)) for epoch in range(epochs): model.train() optimizer.zero_grad() output = model(x, edge_index) output_train, y_train = output[train_mask], y[train_mask] train_loss = criterion(output_train[:, -1], y_train[:, -1]) train_loss.backward() optimizer.step() model.eval() y_test_1 = y[test_mask][:-len_interp-l_y, :] y_test_2 = y[test_mask][-l_y:, :] y_test = torch.cat((y_test_1, y_test_2), dim=0) output_test = output[test_mask][:-len_interp, :] test_loss = criterion(output_test[:, -1], y_test[:, -1]) train_true = y_train.detach().cpu().numpy()[:, -1] train_predict = 
output_train.detach().cpu().numpy()[:, -1] test_true = y_test.detach().cpu().numpy()[:, -1] test_predict = output_test.detach().cpu().numpy()[:, -1] r2_train = cal.get_r2_score(train_predict, train_true, axis=1) r2_test = cal.get_r2_score(test_predict, test_true, axis=1) if (epoch + 1) % 100 == 0: print("Epoch: {:05d} Loss_Train: {:.5f} Loss_Test: {:.5f} R2_Train: {:.7f} R2_Test: {:.7f}". format(epoch + 1, train_loss.item(), test_loss.item(), r2_train, r2_test)) # predict and plot future time series plot_predict = test_predict[-l_y:] plot_true = test_true[-l_y:] mse_plot = np.mean(np.square(plot_predict - plot_true)) print("mse_plot: {}".format(mse_plot)) cal.plot_spiral(plot_predict) # predict results in the coming year if save_fig: plt.savefig(osp.join(result_address, "future_predict.png")) cal.plot_spiral(plot_true) # true data in the coming year if save_fig: plt.savefig(osp.join(result_address, "future_true.png")) # calculate running time end_time = datetime.datetime.now() run_time = end_time - start_time # The running time of program # save model and numpy.file if save_model: torch.save(model, osp.join(result_address, "{}.pkl".format(gnn_style))) if save_np: np.save(osp.join(result_address, "train_true.npy"), train_true) np.save(osp.join(result_address, "test_true.npy"), test_true) np.save(osp.join(result_address, "train_predict_{}.npy".format(gnn_style)), train_predict) np.save(osp.join(result_address, "test_predict_{}.npy".format(gnn_style)), test_predict) # plot the error and results e_gnn = test_true - test_predict cal.plot_distribute(e_gnn, 40, 4, x_name="e") if save_fig: plt.savefig(osp.join(result_address, ts_name + "_" + gnn_style + "_error_distribution.png")) cal.plot_result(train_true, test_true, train_predict, test_predict, iv, way, fig_size) if save_fig: plt.savefig(osp.join(result_address, ts_name + "_" + gnn_style + ".png")) # print indicators rmse_train = cal.get_rmse(train_predict, train_true) rmse_test = cal.get_rmse(test_predict, test_true) r2_train = cal.get_r2_score(train_predict, train_true, axis=1) r2_test = cal.get_r2_score(test_predict, test_true, axis=1) print("{}: RMSE_Train={:.5f} RMSE_Test={:.5f} R2_Train={:.7f} R2_Test={:.7f}". format(gnn_style, rmse_train, rmse_test, r2_train, r2_test)) # The output results of each model are appended to the file if save_txt: info_txt_address = osp.join(result_address, "ResGraphNet_result.txt") # txt file address for saving parameter information info_df_address = osp.join(result_address, "ResGraphNet_result.csv") # csv file address for saving parameter information f = open(info_txt_address, 'a') if osp.getsize(info_txt_address) == 0: # add the name of each feature in the first line of the text f.write("gnn_style r2_test r2_train run_time l_x l_y hidden_dim lr epochs\n") f.write(str(gnn_style) + " ") f.write(str(r2_test) + " ") f.write(str(r2_train) + " ") f.write(str(run_time) + " ") f.write(str(l_x) + " ") f.write(str(l_y) + " ") f.write(str(hidden_dim) + " ") f.write(str(lr) + " ") f.write(str(epochs) + " ") f.write("\n") # Prepare for next running f.close() # close file info = np.loadtxt(info_txt_address, dtype=str) columns = info[0, :].tolist() values = info[1:, :] info_df = pd.DataFrame(values, columns=columns) info_df.to_csv(info_df_address) print() plt.show() print()
[ [ [ 35, 43 ], [ 3114, 3122 ], [ 4852, 4860 ] ], [ [ 51, 62 ], [ 1323, 1325 ], [ 1705, 1707 ], [ 4432, 4434 ], [ 4440, 4442 ], [ 5088, 5090 ], [ 5156, 5158 ], [ 5222, 5224 ], [ 5317, 5319 ], [ 7177, 7179 ] ], [ [ 70, 82 ], [ 7290, 7292 ] ], [ [ 90, 95 ], [ 240, 245 ], [ 2039, 2044 ], [ 2093, 2098 ], [ 2147, 2152 ], [ 2201, 2206 ], [ 2326, 2331 ], [ 2366, 2371 ], [ 2540, 2545 ], [ 2570, 2575 ], [ 2595, 2600 ], [ 2636, 2641 ], [ 2952, 2957 ], [ 2994, 2999 ], [ 3574, 3579 ], [ 5000, 5005 ] ], [ [ 103, 105 ], [ 1506, 1508 ] ], [ [ 113, 127 ], [ 1261, 1264 ], [ 1420, 1423 ], [ 1473, 1476 ], [ 4622, 4625 ], [ 4770, 4773 ], [ 5018, 5021 ], [ 5096, 5099 ], [ 5164, 5167 ], [ 5230, 5233 ], [ 5325, 5328 ], [ 5544, 5547 ], [ 5742, 5745 ], [ 6296, 6299 ], [ 6422, 6425 ], [ 6569, 6572 ] ], [ [ 135, 159 ], [ 4610, 4613 ], [ 4758, 4761 ], [ 5532, 5535 ], [ 5730, 5733 ], [ 7374, 7377 ] ], [ [ 168, 171 ], [ 172, 175 ] ], [ [ 201, 216 ], [ 1887, 1890 ], [ 1966, 1969 ], [ 2409, 2412 ], [ 2493, 2496 ], [ 2661, 2664 ], [ 2726, 2729 ], [ 2860, 2863 ], [ 3976, 3979 ], [ 4042, 4045 ], [ 4516, 4519 ], [ 4670, 4673 ], [ 5469, 5472 ], [ 5626, 5629 ], [ 5837, 5840 ], [ 5889, 5892 ], [ 5938, 5941 ], [ 6000, 6003 ] ], [ [ 219, 225 ], [ 2076, 2082 ], [ 2129, 2135 ], [ 2184, 2190 ], [ 2237, 2243 ], [ 2706, 2712 ], [ 2770, 2776 ], [ 2932, 2938 ], [ 2974, 2980 ], [ 3092, 3098 ] ], [ [ 294, 297 ], [ 1926, 1929 ], [ 2005, 2008 ], [ 2872, 2875 ], [ 6903, 6906 ] ], [ [ 344, 347 ], [ 1684, 1687 ], [ 1726, 1729 ], [ 1764, 1767 ], [ 1769, 1772 ], [ 1796, 1799 ], [ 1931, 1934 ], [ 2010, 2013 ], [ 2889, 2892 ], [ 3515, 3518 ], [ 3552, 3555 ], [ 4386, 4389 ], [ 4415, 4418 ], [ 6931, 6934 ] ], [ [ 395, 397 ], [ 3034, 3036 ], [ 6994, 6996 ] ], [ [ 440, 452 ], [ 3051, 3063 ] ], [ [ 460, 466 ], [ 3196, 3202 ], [ 7021, 7027 ] ], [ [ 474, 484 ], [ 2877, 2887 ], [ 6959, 6969 ] ], [ [ 490, 499 ], [ 2907, 2916 ], [ 3165, 3174 ], [ 5059, 5068 ], [ 5285, 5294 ], [ 5379, 5388 ], [ 5585, 5594 ], [ 5783, 5792 ], [ 6145, 6154 ], [ 6771, 6780 ] ], [ [ 516, 524 ], [ 4596, 4604 ], [ 4744, 4752 ], [ 5518, 5526 ], [ 5716, 5724 ] ], [ [ 575, 583 ], [ 6263, 6271 ] ], [ [ 631, 638 ], [ 5075, 5082 ] ], [ [ 689, 699 ], [ 4984, 4994 ] ], [ [ 753, 764 ], [ 1551, 1562 ] ], [ [ 819, 827 ], [ 5703, 5711 ] ], [ [ 839, 850 ] ], [ [ 934, 948 ], [ 1285, 1299 ] ], [ [ 1009, 1016 ], [ 1301, 1308 ], [ 1442, 1449 ], [ 5569, 5576 ], [ 5767, 5774 ] ], [ [ 1078, 1080 ], [ 5694, 5696 ] ], [ [ 1156, 1159 ], [ 5698, 5701 ] ], [ [ 1249, 1258 ], [ 1331, 1340 ] ], [ [ 1319, 1320 ], [ 1348, 1349 ], [ 1594, 1595 ], [ 1609, 1610 ] ], [ [ 1342, 1345 ], [ 1565, 1568 ], [ 1621, 1624 ] ], [ [ 1403, 1417 ], [ 1484, 1498 ], [ 1518, 1532 ], [ 4631, 4645 ], [ 4779, 4793 ], [ 5027, 5041 ], [ 5105, 5119 ], [ 5173, 5187 ], [ 5239, 5253 ], [ 5334, 5348 ], [ 5553, 5567 ], [ 5751, 5765 ], [ 6305, 6319 ], [ 6431, 6445 ] ], [ [ 1535, 1544 ], [ 1597, 1606 ], [ 1611, 1620 ] ], [ [ 1570, 1580 ], [ 1914, 1924 ] ], [ [ 1582, 1591 ], [ 1714, 1723 ], [ 1742, 1751 ], [ 1785, 1794 ] ], [ [ 1671, 1681 ], [ 1753, 1763 ], [ 3504, 3514 ], [ 3651, 3661 ] ], [ [ 1692, 1702 ], [ 1993, 2003 ] ], [ [ 1868, 1875 ], [ 2056, 2063 ] ], [ [ 1877, 1884 ], [ 2164, 2171 ] ], [ [ 1949, 1955 ], [ 2110, 2116 ] ], [ [ 1957, 1963 ], [ 2218, 2224 ] ], [ [ 2029, 2036 ], [ 2257, 2264 ], [ 2304, 2311 ], [ 2337, 2344 ] ], [ [ 2084, 2090 ], [ 2276, 2282 ], [ 2346, 2352 ] ], [ [ 2137, 2144 ], [ 2377, 2384 ] ], [ [ 2192, 2198 ], [ 2386, 2392 ] ], [ [ 2245, 2254 ], [ 2424, 2433 ], [ 2619, 2628 ], [ 2692, 2701 ], [ 2756, 2765 ], [ 2918, 
2927 ] ], [ [ 2292, 2301 ], [ 2553, 2562 ], [ 2608, 2617 ] ], [ [ 2322, 2323 ], [ 3268, 3269 ] ], [ [ 2362, 2363 ], [ 3331, 3332 ], [ 3489, 3490 ], [ 3538, 3539 ] ], [ [ 2403, 2406 ], [ 2520, 2523 ] ], [ [ 2467, 2477 ], [ 3078, 3088 ] ], [ [ 2479, 2490 ], [ 2894, 2905 ] ], [ [ 2526, 2537 ], [ 2679, 2690 ] ], [ [ 2582, 2592 ], [ 2744, 2754 ] ], [ [ 2648, 2658 ], [ 3318, 3328 ], [ 3333, 3343 ] ], [ [ 2714, 2723 ], [ 3491, 3500 ], [ 3540, 3549 ], [ 3638, 3647 ] ], [ [ 2852, 2857 ], [ 3011, 3016 ], [ 3209, 3214 ], [ 3262, 3267 ], [ 3461, 3466 ], [ 5011, 5016 ] ], [ [ 2940, 2949 ], [ 3362, 3371 ], [ 3682, 3691 ] ], [ [ 2982, 2991 ], [ 3227, 3236 ], [ 3439, 3448 ] ], [ [ 3065, 3075 ], [ 3271, 3281 ] ], [ [ 3101, 3111 ], [ 4898, 4908 ] ], [ [ 3181, 3186 ], [ 4101, 4106 ], [ 4250, 4255 ] ], [ [ 3253, 3259 ], [ 3311, 3317 ], [ 3631, 3637 ] ], [ [ 3287, 3299 ], [ 3372, 3384 ], [ 3803, 3815 ] ], [ [ 3301, 3308 ], [ 3393, 3400 ], [ 3745, 3752 ] ], [ [ 3349, 3359 ], [ 3413, 3423 ], [ 4261, 4271 ] ], [ [ 3478, 3486 ], [ 3585, 3593 ] ], [ [ 3527, 3535 ], [ 3595, 3603 ] ], [ [ 3565, 3571 ], [ 3712, 3718 ], [ 3862, 3868 ] ], [ [ 3617, 3628 ], [ 3692, 3703 ], [ 3918, 3929 ] ], [ [ 3670, 3679 ], [ 4280, 4289 ] ], [ [ 3732, 3742 ], [ 4008, 4018 ], [ 5140, 5150 ], [ 5642, 5652 ], [ 5865, 5875 ], [ 5970, 5980 ] ], [ [ 3787, 3800 ], [ 3993, 4006 ], [ 5298, 5311 ], [ 5665, 5678 ], [ 5850, 5863 ], [ 5955, 5968 ] ], [ [ 3850, 3859 ], [ 4073, 4082 ], [ 4404, 4413 ], [ 5207, 5216 ], [ 5444, 5453 ], [ 5654, 5663 ], [ 5916, 5925 ], [ 6031, 6040 ] ], [ [ 3903, 3915 ], [ 4059, 4071 ], [ 4372, 4384 ], [ 5392, 5404 ], [ 5456, 5468 ], [ 5680, 5692 ], [ 5902, 5914 ], [ 6017, 6029 ] ], [ [ 3965, 3973 ], [ 4298, 4306 ] ], [ [ 4032, 4039 ], [ 4308, 4315 ] ], [ [ 4357, 4369 ], [ 4450, 4462 ], [ 4532, 4544 ] ], [ [ 4392, 4401 ], [ 4465, 4474 ], [ 4686, 4695 ] ], [ [ 4421, 4429 ], [ 4505, 4513 ] ], [ [ 4841, 4849 ], [ 4887, 4895 ] ], [ [ 4876, 4884 ], [ 6870, 6878 ] ], [ [ 5436, 5441 ], [ 5489, 5494 ] ], [ [ 5824, 5834 ], [ 6156, 6166 ] ], [ [ 5877, 5886 ], [ 6168, 6177 ] ], [ [ 5927, 5935 ], [ 6179, 6187 ], [ 6837, 6845 ] ], [ [ 5990, 5997 ], [ 6189, 6196 ], [ 6805, 6812 ] ], [ [ 6277, 6293 ], [ 6539, 6555 ], [ 6581, 6597 ], [ 7188, 7204 ] ], [ [ 6404, 6419 ], [ 7347, 7362 ] ], [ [ 6530, 6531 ], [ 6677, 6678 ], [ 6759, 6760 ], [ 6793, 6794 ], [ 6825, 6826 ], [ 6858, 6859 ], [ 6891, 6892 ], [ 6919, 6920 ], [ 6947, 6948 ], [ 6982, 6983 ], [ 7009, 7010 ], [ 7041, 7042 ], [ 7112, 7113 ] ], [ [ 7170, 7174 ], [ 7231, 7235 ], [ 7264, 7268 ] ], [ [ 7221, 7228 ], [ 7319, 7326 ] ], [ [ 7255, 7261 ], [ 7303, 7309 ] ], [ [ 7280, 7287 ], [ 7332, 7339 ] ] ]
import numpy as np def FNS(scores): domination = np.all(scores[:, None, :] <= scores[None, :, :], axis=2) # domination[i, j] = "i dominuje j" domination &= np.any(scores[:, None, :] < scores[None, :, :], axis=2) Nx = domination.sum(0) Pf = [] ranks = np.zeros(scores.shape[0]) r = 0 Q = np.nonzero(Nx == 0)[0] while Q.size > 0: Nx[Q] = -1 Pf.append(Q) ranks[Q] = r r += 1 for i in Q: Nx[domination[i, :]] -= 1 Q = np.nonzero(Nx == 0)[0] return Pf, ranks def crowding_distance(scores): indices = np.argsort(scores, 0) sorted_scores = np.take_along_axis(scores, indices, 0) cd = np.zeros(scores.shape[0]) for k in range(scores.shape[1]): if sorted_scores[-1, k] != sorted_scores[0, k]: cd[indices[[0, -1], k]] = np.inf cd[indices[1:-1, k]] += (sorted_scores[2:, k] - sorted_scores[:-2, k]) / ( sorted_scores[-1, k] - sorted_scores[0, k]) return cd def random_population(d, n, x_min, x_max): return np.hstack([np.random.uniform(x_min, x_max, (n, d))]) def tournament_selection(ranks, dists, n): candidates = np.random.choice(n, (n, 2), replace=True) mask = np.where( ranks[candidates[:, 0]] == ranks[candidates[:, 1]], dists[candidates[:, 0]] > dists[candidates[:, 1]], ranks[candidates[:, 0]] < ranks[candidates[:, 1]] ) result = candidates[:, 1] result[mask] = candidates[mask, 0] return result def crossover(x, p, eta): # simulated binary crossover n, d = x.shape l = n // 2 mask = np.random.random((l, d)) <= p m = np.sum(mask) mi = np.random.random(m) beta = np.where( mi < 0.5, np.power(2 * mi, 1. / (eta + 1.)), np.power(1. / (2. * (1 - mi)), 1. / (eta + 1.)) ) c1 = x[:l, :].copy() c2 = x[l:, :].copy() c1[mask] = 0.5 * (1 + beta) * x[:l, :][mask] + 0.5 * (1 - beta) * x[l:, :][mask] c2[mask] = 0.5 * (1 + beta) * x[:l, :][mask] + 0.5 * (1 - beta) * x[l:, :][mask] return np.vstack([c1, c2]) def mutation(x, x_min, x_max, p, eta): # polynomial mutation n, d = x.shape mask = np.random.random((n, d)) <= p if isinstance(x_min, np.ndarray): x_min = np.repeat(x_min[None, :], n, axis=0) x_min = x_min[mask] if isinstance(x_max, np.ndarray): x_max = np.repeat(x_max[None, :], n, axis=0) x_max = x_max[mask] m = np.sum(mask) mi = np.random.random(m) beta = np.where( mi < 0.5, np.power(2 * mi, 1. / (eta + 1.)) - 1., 1. - np.power(2. * (1 - mi), 1. 
/ (eta + 1.)) ) y = x.copy() y[mask] = np.where( mi < 0.5, x[mask] + beta * (x[mask] - x_min), x[mask] + beta * (x_max - x[mask]) ) return y def elitist_selection(fronts, dists, to_take): taken = [] for front in fronts: if len(front) <= to_take: taken += list(front) if len(front) == to_take: break to_take -= len(front) else: indices = np.argsort(-dists[front])[:to_take] taken += list(front[indices]) break return taken def constraint_violation(constraints): n, d = constraints.shape sort_indices = np.argsort(constraints, 0) violations = np.zeros(n) for i in range(d): values, counts = np.unique(constraints[:, i], return_counts=True) # unikalne wartości są zwracane posortowane counts = np.cumsum(counts) counts = list(counts) if values[0] != 0: counts = [0] + counts for rank, (j, k) in enumerate(zip([0] + counts, counts + [len(counts)])): violations[sort_indices[j:k, i]] += rank return violations def evaluation(objective, n_constraints, population): obj_results = objective(population) constraint_values = obj_results[:, -n_constraints:] violation_measure = constraint_violation(constraint_values) scores = np.concatenate([obj_results[:, :-n_constraints], violation_measure[:, None]], 1) return scores def split_and_select(population, scores, n_f, n_inf): dists = crowding_distance(scores) mask_f = scores[:, -1] == 0 population_f = population[mask_f, :] scores_f = scores[mask_f, :] dists_f = dists[mask_f] population_inf = population[~mask_f, :] scores_inf = scores[~mask_f, :] dists_inf = dists[~mask_f] s_f = population_f.shape[0] s_inf = population_inf.shape[0] n = n_f + n_inf if s_f < n_f: to_take_f = s_f to_take_inf = n - s_f elif s_inf < n_inf: to_take_inf = s_inf to_take_f = n - s_inf else: to_take_f = n_f to_take_inf = n_inf fronts_f, ranks_f = FNS(scores_f) taken_f = elitist_selection(fronts_f, dists_f, to_take_f) fronts_inf, ranks_inf = FNS(scores_inf) taken_inf = elitist_selection(fronts_inf, dists_inf, to_take_inf) return population_f[taken_f, :], population_inf[taken_inf, :], scores_f[taken_f, :], scores_inf[taken_inf, :] def IDEA(objective, n_constraints, x_min, x_max, d, n, *args, **kwargs): population = random_population(d, n, x_min, x_max) return sub_IDEA(population, objective, n_constraints, x_min, x_max, n, *args, **kwargs) def dynamic_IDEA(objective, n_constraints, T, x_min, x_max, d, n, alpha_inf, *args, num_iterations_init, num_iterations, n_immigrants=0, **kwargs): population = random_population(d, n, x_min, x_max) print("=" * 80) print("t=0") print("=" * 80) t = 0 def round_objective(round_population): return objective(t, round_population) p, s = sub_IDEA(population, round_objective, n_constraints, x_min, x_max, n, alpha_inf, *args, num_iterations=num_iterations_init, **kwargs) population_history = [p] score_history = [s] n_to_keep = n - n_immigrants n_inf = int(n_to_keep * alpha_inf) n_f = n_to_keep - n_inf for t in range(1, T): print("=" * 80) print(f"t={t}") print("=" * 80) population = p[-1, :, :] scores = s[-1, :, :] if n_immigrants > 0: population_f, population_inf, scores_f, scores_inf = split_and_select(population, scores, n_f, n_inf) immigrants = random_population(d, n_immigrants, x_min, x_max) population = np.vstack([population_f, population_inf, immigrants]) assert population.shape[0] == n p, s = sub_IDEA(population, round_objective, n_constraints, x_min, x_max, n, alpha_inf, *args, num_iterations=num_iterations, **kwargs) population_history.append(p) score_history.append(s) return population_history, score_history def sub_IDEA(population, objective, n_constraints, 
x_min, x_max, n, alpha_inf, eta_c, eta_m, p_c, p_m, num_iterations, log_interval=10): n_inf = int(n * alpha_inf) n_f = n - n_inf populations = [] scores = evaluation(objective, n_constraints, population) scores_hist = [] fronts, ranks = FNS(scores) dists = crowding_distance(scores) def log_message(): count_f = population_f.shape[0] count_inf = population_inf.shape[0] print( f"Iteration {iter_}, " + f"#feasible: {count_f}, best: {scores_f[:, :-1].min(0) if count_f > 0 else '-'}, " + f"#infeasible: {count_inf}, best: {scores_inf.min(0) if count_inf > 0 else '-'}" ) for iter_ in range(num_iterations): parent_indices = tournament_selection(ranks, dists, n) offspring = crossover(population[parent_indices, :], p_c, eta_c) offspring = np.clip(offspring, x_min, x_max) offspring = mutation(offspring, x_min, x_max, p_m, eta_m) offspring_scores = evaluation(objective, n_constraints, offspring) population = np.vstack([population, offspring]) scores = np.vstack([scores, offspring_scores]) population_f, population_inf, scores_f, scores_inf = split_and_select(population, scores, n_f, n_inf) population = np.vstack([population_f, population_inf]) scores = np.vstack([scores_f, scores_inf]) fronts, ranks = FNS(scores) dists = crowding_distance(scores) populations.append(population.copy()) scores_hist.append(scores.copy()) if iter_ % log_interval == 0: log_message() log_message() return np.stack(populations, 0), np.stack(scores_hist, 0)
[ [ [ 7, 18 ], [ 55, 57 ], [ 167, 169 ], [ 275, 277 ], [ 319, 321 ], [ 510, 512 ], [ 602, 604 ], [ 644, 646 ], [ 692, 694 ], [ 849, 851 ], [ 1081, 1083 ], [ 1092, 1094 ], [ 1196, 1198 ], [ 1249, 1251 ], [ 1632, 1634 ], [ 1670, 1672 ], [ 1692, 1694 ], [ 1723, 1725 ], [ 1759, 1761 ], [ 1802, 1804 ], [ 2087, 2089 ], [ 2201, 2203 ], [ 2256, 2258 ], [ 2285, 2287 ], [ 2375, 2377 ], [ 2404, 2406 ], [ 2477, 2479 ], [ 2499, 2501 ], [ 2530, 2532 ], [ 2566, 2568 ], [ 2619, 2621 ], [ 2697, 2699 ], [ 3117, 3119 ], [ 3319, 3321 ], [ 3363, 3365 ], [ 3423, 3425 ], [ 3534, 3536 ], [ 4029, 4031 ], [ 6431, 6433 ], [ 7749, 7751 ], [ 7945, 7947 ], [ 7997, 7999 ], [ 8168, 8170 ], [ 8227, 8229 ], [ 8522, 8524 ], [ 8548, 8550 ] ], [ [ 25, 28 ], [ 4797, 4800 ], [ 4902, 4905 ], [ 7141, 7144 ], [ 8285, 8288 ] ], [ [ 561, 578 ], [ 4196, 4213 ], [ 7165, 7182 ], [ 8313, 8330 ] ], [ [ 1031, 1048 ], [ 5195, 5212 ], [ 5509, 5526 ], [ 6357, 6374 ] ], [ [ 1140, 1160 ], [ 7618, 7638 ] ], [ [ 1535, 1544 ], [ 7676, 7685 ] ], [ [ 2113, 2121 ], [ 7802, 7810 ] ], [ [ 2837, 2854 ], [ 4825, 4842 ], [ 4934, 4951 ] ], [ [ 3236, 3256 ], [ 3976, 3996 ] ], [ [ 3806, 3816 ], [ 7050, 7060 ], [ 7875, 7885 ] ], [ [ 4134, 4150 ], [ 6282, 6298 ], [ 8097, 8113 ] ], [ [ 5109, 5113 ] ], [ [ 5331, 5343 ] ], [ [ 6819, 6827 ], [ 5244, 5252 ], [ 5718, 5726 ], [ 6545, 6553 ] ] ]
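A small self-contained check of the non-dominated sorting and crowding-distance helpers defined in the row above (not part of the original file); the four two-objective points are made up.

import numpy as np

scores = np.array([[1.0, 2.0],
                   [2.0, 1.0],
                   [1.5, 1.5],
                   [2.0, 3.0]])  # the last point is dominated by the other three
fronts, ranks = FNS(scores)
print([front.tolist() for front in fronts])  # [[0, 1, 2], [3]]
print(ranks)                                  # [0. 0. 0. 1.]
print(crowding_distance(scores))              # boundary points get inf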
""" Misc functions. """ import ipaddress import datetime import hashlib import json import netaddr import netifaces import os import re import requests import scapy.all as sc import subprocess import sys import threading import time import traceback import uuid import server_config IPv4_REGEX = re.compile(r'[0-9]{0,3}\.[0-9]{0,3}\.[0-9]{0,3}\.[0-9]{0,3}') sc.conf.verb = 0 # If non empty, then only devices with the following MAC addresses with be # inspected. Do not populate this list in production. For internal testing. TEST_OUI_LIST = [ # 'd83134', # Roku # '74f61c', # Danny's Pixel phone ] # Make sure Inspector's directory exits home_dir = os.path.join(os.path.expanduser('~'), 'princeton-iot-inspector') if not os.path.isdir(home_dir): os.mkdir(home_dir) def is_ipv4_addr(value): return IPv4_REGEX.match(value) def get_user_config(): """Returns the user_config dict.""" user_config_file = os.path.join( os.path.expanduser('~'), 'princeton-iot-inspector', 'iot_inspector_config.json' ) try: with open(user_config_file) as fp: return json.load(fp) except Exception: pass while True: user_key = requests.get(server_config.NEW_USER_URL).text.strip() # Make sure we're not getting server's error messages if len(user_key) == 32: break time.sleep(1) user_key = user_key.replace('-', '') secret_salt = str(uuid.uuid4()) with open(user_config_file, 'w') as fp: config_dict = { 'user_key': user_key, 'secret_salt': secret_salt } json.dump(config_dict, fp) return config_dict class TimeoutError(Exception): pass _lock = threading.Lock() def log(*args): log_str = '[%s] ' % datetime.datetime.today() log_str += ' '.join([str(v) for v in args]) log_file_path = os.path.join( os.path.expanduser('~'), 'princeton-iot-inspector', 'iot_inspector_logs.txt' ) print(log_str) with open(log_file_path, 'a') as fp: fp.write(log_str + '\n') def get_gateway_ip(timeout=10): """Returns the IP address of the gateway.""" return get_default_route(timeout)[0] def get_host_ip(timeout=10): """Returns the host's local IP (where IoT Inspector client runs).""" return get_default_route(timeout)[2] def _get_routes(): while True: sc.conf.route.resync() routes = sc.conf.route.routes if routes: return routes time.sleep(1) def get_default_route(): """Returns (gateway_ip, iface, host_ip).""" while True: routes = _get_routes() # Look for network = 0.0.0.0, netmask = 0.0.0.0 for default_route in routes: if default_route[0] == 0 and default_route[1] == 0: #return default_route[2:5] return ('192.168.5.1', 'wlan0', '192.168.5.7') log('get_default_route: retrying') time.sleep(1) def get_network_ip_range_windows(): default_iface = get_default_route() iface_filter = default_iface[1] print(default_iface) ip_set = set() iface_ip = iface_filter.ip iface_guid = iface_filter.guid for k, v in netifaces.ifaddresses(iface_guid).items(): if v[0]['addr'] == iface_ip: netmask = v[0]['netmask'] break network = netaddr.IPAddress(iface_ip) cidr = netaddr.IPAddress(netmask).netmask_bits() subnet = netaddr.IPNetwork('{}/{}'.format(network, cidr)) return ip_set def get_network_ip_range(): return set(['192.168.5.1', '192.168.5.6', '192.168.5.14', '192.168.5.15', '192.168.5.19']) def gget_network_ip_range(): """ Gets network IP range for the default interface specified by scapy.conf.iface """ ip_set = set() default_route = get_default_route() iface_str = '' if sys.platform.startswith('win'): iface_info = sc.conf.iface iface_str = iface_info.guid else: iface_str = sc.conf.iface netmask = None for k, v in netifaces.ifaddresses(iface_str).items(): if v[0]['addr'] == 
default_route[2]: netmask = v[0]['netmask'] break # Netmask is None when user runs VPN. if netmask is None: return set() gateway_ip = netaddr.IPAddress(default_route[0]) cidr = netaddr.IPAddress(netmask).netmask_bits() subnet = netaddr.IPNetwork('{}/{}'.format(gateway_ip, cidr)) for ip in subnet: ip_set.add(str(ip)) print('ip_set', ip_set) 1/0 return ip_set def get_my_mac(): """Returns the MAC addr of the default route interface.""" mac_set = get_my_mac_set(iface_filter=get_default_route()[1]) return mac_set.pop() def get_my_mac_set(iface_filter=None): """Returns a set of MAC addresses of the current host.""" out_set = set() if sys.platform.startswith("win"): from scapy.arch.windows import NetworkInterface if type(iface_filter) == NetworkInterface: out_set.add(iface_filter.mac) for iface in sc.get_if_list(): if iface_filter is not None and iface != iface_filter: continue try: mac = sc.get_if_hwaddr(iface) except Exception as e: continue else: out_set.add(mac) return out_set class _SafeRunError(object): """Used privately to denote error state in safe_run().""" def __init__(self): pass def restart_upon_crash(func, args=[], kwargs={}): """Restarts func upon unexpected exception and logs stack trace.""" while True: result = safe_run(func, args, kwargs) if isinstance(result, _SafeRunError): time.sleep(1) continue return result def safe_run(func, args=[], kwargs={}): """Returns _SafeRunError() upon failure and logs stack trace.""" try: return func(*args, **kwargs) except Exception as e: err_msg = '=' * 80 + '\n' err_msg += 'Time: %s\n' % datetime.datetime.today() err_msg += 'Function: %s %s %s\n' % (func, args, kwargs) err_msg += 'Exception: %s\n' % e err_msg += str(traceback.format_exc()) + '\n\n\n' with _lock: sys.stderr.write(err_msg + '\n') log(err_msg) return _SafeRunError() def get_device_id(device_mac, host_state): device_mac = str(device_mac).lower().replace(':', '') s = device_mac + str(host_state.secret_salt) return 's' + hashlib.sha256(s.encode('utf-8')).hexdigest()[0:10] def smart_max(v1, v2): """ Returns max value even if one value is None. Python cannot compare None and int, so build a wrapper around it. """ if v1 is None: return v2 if v2 is None: return v1 return max(v1, v2) def smart_min(v1, v2): """ Returns min value even if one of the value is None. By default min(None, x) == None per Python default behavior. """ if v1 is None: return v2 if v2 is None: return v1 return min(v1, v2) def get_min_max_tuple(min_max_tuple, value): """ Returns a new min_max_tuple with value considered. For example: min_max_tuple = (2, 3) print get_min_max_tuple(min_max_tuple, 4) We get back (2, 4). """ min_v, max_v = min_max_tuple min_v = smart_min(min_v, value) max_v = smart_max(max_v, value) return (min_v, max_v) def get_oui(mac): return mac.replace(':', '').lower()[0:6] def get_os(): """Returns 'mac', 'linux', or 'windows'. Raises RuntimeError otherwise.""" os_platform = sys.platform if os_platform.startswith('darwin'): return 'mac' if os_platform.startswith('linux'): return 'linux' if os_platform.startswith('win'): return 'windows' raise RuntimeError('Unsupported operating system.') def open_browser_on_windows(url): try: subprocess.call(['start', '', url], shell=True) except Exception: pass
[ [ [ 32, 41 ] ], [ [ 50, 58 ], [ 1814, 1822 ], [ 6084, 6092 ] ], [ [ 66, 73 ], [ 6568, 6575 ] ], [ [ 81, 85 ], [ 1137, 1141 ], [ 1651, 1655 ] ], [ [ 93, 100 ], [ 3414, 3421 ], [ 3453, 3460 ], [ 3508, 3515 ], [ 4357, 4364 ], [ 4404, 4411 ], [ 4459, 4466 ] ], [ [ 108, 117 ], [ 3261, 3270 ], [ 4108, 4117 ] ], [ [ 125, 127 ], [ 669, 671 ], [ 682, 684 ], [ 741, 743 ], [ 770, 772 ], [ 941, 943 ], [ 963, 965 ], [ 1909, 1911 ], [ 1931, 1933 ] ], [ [ 135, 137 ], [ 301, 303 ] ], [ [ 145, 153 ], [ 1223, 1231 ] ], [ [ 161, 176 ], [ 364, 366 ], [ 2440, 2442 ], [ 2480, 2482 ], [ 3978, 3980 ], [ 4058, 4060 ], [ 5121, 5123 ], [ 5254, 5256 ] ], [ [ 184, 194 ], [ 8031, 8041 ] ], [ [ 202, 205 ], [ 3925, 3928 ], [ 4922, 4925 ], [ 6307, 6310 ], [ 7716, 7719 ] ], [ [ 213, 222 ], [ 1754, 1763 ] ], [ [ 230, 234 ], [ 1399, 1403 ], [ 2555, 2559 ], [ 3008, 3012 ], [ 5771, 5775 ] ], [ [ 242, 251 ], [ 6239, 6248 ] ], [ [ 259, 263 ], [ 1477, 1481 ] ], [ [ 272, 285 ], [ 1236, 1249 ] ], [ [ 288, 298 ], [ 828, 838 ] ], [ [ 533, 546 ] ], [ [ 658, 666 ], [ 755, 763 ], [ 779, 787 ] ], [ [ 795, 807 ] ], [ [ 858, 873 ] ], [ [ 1710, 1722 ] ], [ [ 1746, 1751 ], [ 6288, 6293 ] ], [ [ 1777, 1780 ], [ 2965, 2968 ], [ 6352, 6355 ] ], [ [ 2128, 2142 ] ], [ [ 2253, 2264 ] ], [ [ 2399, 2410 ], [ 2679, 2690 ] ], [ [ 2575, 2592 ], [ 2217, 2234 ], [ 2363, 2380 ], [ 3079, 3096 ], [ 3878, 3895 ], [ 4742, 4759 ] ], [ [ 3027, 3055 ] ], [ [ 3583, 3603 ] ], [ [ 3704, 3725 ] ], [ [ 4622, 4632 ] ], [ [ 4797, 4811 ], [ 4714, 4728 ] ], [ [ 5401, 5414 ], [ 5743, 5756 ], [ 6381, 6394 ] ], [ [ 5530, 5548 ] ], [ [ 5835, 5843 ], [ 5683, 5691 ] ], [ [ 6403, 6416 ] ], [ [ 6625, 6634 ], [ 7485, 7494 ] ], [ [ 6901, 6910 ], [ 7449, 7458 ] ], [ [ 7165, 7182 ] ], [ [ 7542, 7549 ] ], [ [ 7608, 7614 ] ], [ [ 7983, 8006 ] ] ]
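A tiny illustration (not from the original) of the None-safe min/max helpers above, tracking a running value range when some readings are missing:

readings = [None, 7, 3, None, 10]
range_tuple = (None, None)
for value in readings:
    range_tuple = get_min_max_tuple(range_tuple, value)
print(range_tuple)  # (3, 10)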
# Project: py-trans # Author: Itz-fork import aiohttp from .language_codes import _get_full_lang_name, _get_lang_code from .errors import check_internet_connection, UnknownErrorOccurred class Async_PyTranslator: """ Async PyTranslator Class Note: Before Trying to Translate Create an instance of this with provider (Default provider is google) Providers: google - Google Translate libre - LibreTranslate Engine translate.com - translate.com Translate my_memory - MyMemory Translate translate_dict - Translate Dict Argument(s): provider - Provider of Translator. (Must be a supported provider) Example(s): async_pytranslator = Async_PyTranslator(provider="google") """ def __init__(self, provider="google"): # Checking internet connection check_internet_connection() self.providers = ["google", "libre", "translate.com", "my_memory", "translate_dict"] if provider in self.providers: self.provider = provider else: self.provider = "google" # Headers self.lheader = {"Origin": "https://libretranslate.com", "Host": "libretranslate.com", "Referer": "https://libretranslate.com/"} # aiohttp session for translation purpose self.t_session = aiohttp.ClientSession() # aiohttp session for detecting source lang (This represents the laziness of me) self.d_session = aiohttp.ClientSession() async def translate(self, text, dest_lang="en"): """ Translator Function Argument(s): text - Source Text (Text that need to be translated) dest_lang - Destination Language Example(s): await async_pytranslator.translate(text="Hi, How are you?", dest_lang="si") """ if self.provider == "google": return await self.google_translate(text, dest_lang) elif self.provider == "libre": return await self.libre_translate(text, dest_lang) elif self.provider == "translate.com": return await self.translate_com(text, dest_lang) elif self.provider == "my_memory": return await self.my_memory(text, dest_lang) elif self.provider == "translate_dict": return await self.translate_dict(text, dest_lang) else: return # Google Translate async def google_translate(self, text, dest_lang): r_url = f"https://clients5.google.com/translate_a/t?client=dict-chrome-ex&sl=auto&tl={dest_lang}&q={text}" try: async with self.t_session as tr_ses: async with tr_ses.get(r_url) as get_req: request_resp = await get_req.json() translation = "" for tr in request_resp["sentences"]: try: translation += tr["trans"] except KeyError: pass except BaseException as e: raise UnknownErrorOccurred(e) origin_text = text origin_lang = await self.get_lang_name(request_resp['src']) dest_lang_f = await self.get_lang_name(dest_lang) tr_dict = {"status": "success", "engine": "Google Translate", "translation": translation, "dest_lang": dest_lang_f, "orgin_text": origin_text, "origin_lang": origin_lang} # Closing unwanted language detection aiohttp session await self.d_session.close() return tr_dict except Exception as e: return {"status": "failed", "error": e} # LibreTranslate async def _detect_lang(self, text, full_name=False): r_url = "https://libretranslate.com/detect" ld_data = {"q": str(text)} try: async with self.d_session as tr_ses: async with tr_ses.post(r_url, data=ld_data) as get_req: request_resp = await get_req.json() language_code = request_resp[0]["language"] except: # If can't detect the language let's think it's just english (RIP moment) language_code = "en" if full_name is False: return language_code else: return await self.get_lang_name(language_code) async def libre_translate(self, text, dest_lang): r_url = "https://libretranslate.com/translate" try: source_lang = await 
self._detect_lang(text=text, full_name=False) l_data = {"q": str(text), "source": source_lang, "target": dest_lang} async with self.t_session as tr_ses: async with tr_ses.post(r_url, data=l_data) as get_req: request_resp = await get_req.json() translation = request_resp["translatedText"] origin_lang = await self.get_lang_name(source_lang) dest_lang_f = await self.get_lang_name(dest_lang) tr_dict = {"status": "success", "engine": "LibreTranslate", "translation": translation, "dest_lang": dest_lang_f, "orgin_text": str(text), "origin_lang": origin_lang} return tr_dict except Exception as e: return {"status": "failed", "error": e} # Translate.com async def translate_com(self, text, dest_lang): r_url = "https://www.translate.com/translator/ajax_translate" try: source_lang = await self._detect_lang(text=text, full_name=False) tr_data = {"text_to_translate": str(text), "source_lang": source_lang, "translated_lang": dest_lang, "use_cache_only": "false"} async with self.t_session as tr_ses: async with tr_ses.post(url=r_url, data=tr_data) as get_req: request_resp = await get_req.json(content_type='text/html') translation = request_resp["translated_text"] origin_lang = await self.get_lang_name(text) dest_lang_f = await self.get_lang_name(dest_lang) tr_dict = {"status": "success", "engine": "Translate.com", "translation": translation, "dest_lang": dest_lang_f, "orgin_text": origin_lang, "origin_lang": origin_lang} return tr_dict except Exception as e: return {"status": "failed", "error": e} # My Memory async def my_memory(self, text, dest_lang): r_url = "https://api.mymemory.translated.net/get" try: source_lang = await self._detect_lang(text=text, full_name=False) m_params = {"q": text, "langpair": f"{source_lang}|{dest_lang}"} async with self.t_session as tr_ses: async with tr_ses.get(r_url, params=m_params) as get_req: request_resp = await get_req.json() translation = request_resp["matches"][0]["translation"] origin_lang = await self.get_lang_name(source_lang) dest_lang_f = await self.get_lang_name(dest_lang) tr_dict = {"status": "success", "engine": "MyMemory", "translation": translation, "dest_lang": dest_lang_f, "orgin_text": str(text), "origin_lang": origin_lang} return tr_dict except Exception as e: return {"status": "failed", "error": e} # Translate Dict async def translate_dict(self, text, dest_lang): r_url = f"https://t3.translatedict.com/1.php?p1=auto&p2={dest_lang}&p3={text}" try: async with self.t_session as tr_ses: async with tr_ses.get(r_url) as get_req: request_resp = await get_req.text() origin_lang = await self._detect_lang(text=text, full_name=True) dest_lang_f = await self.get_lang_name(dest_lang) tr_dict = {"status": "success", "engine": "Translate Dict", "translation": request_resp, "dest_lang": dest_lang_f, "orgin_text": str(text), "origin_lang": origin_lang} return tr_dict except Exception as e: return {"status": "failed", "error": e} # Get Language Names async def get_lang_name(self, text): if len(text) == 2: return _get_full_lang_name(text) else: if len(text) <= 3: return "Not a full language name" else: return _get_lang_code(text)
[ [ [ 46, 53 ], [ 1342, 1349 ], [ 1480, 1487 ] ], [ [ 83, 102 ], [ 8440, 8459 ] ], [ [ 104, 118 ], [ 8602, 8616 ] ], [ [ 139, 164 ], [ 865, 890 ] ], [ [ 166, 186 ], [ 3124, 3144 ] ], [ [ 195, 213 ] ] ]
# -*- coding: utf-8 -*- """API for working with saved queries for assets.""" import warnings from typing import Generator, List, Optional, Union from ...constants.api import MAX_PAGE_SIZE from ...exceptions import NotFoundError, ResponseError, ApiWarning # from ...features import Features from ...parsers.tables import tablize_sqs from ...tools import check_gui_page_size, listify from .. import json_api from ..api_endpoints import ApiEndpoints from ..mixins import ChildMixins # XXX need update saved query class SavedQuery(ChildMixins): """API object for working with saved queries for the parent asset type. Examples: Create a ``client`` using :obj:`axonius_api_client.connect.Connect` and assume ``apiobj`` is either ``client.devices`` or ``client.users`` >>> apiobj = client.devices # or client.users * Get a saved query by name: :meth:`get_by_name` * Get a saved query by UUID: :meth:`get_by_uuid` * Get a saved query by tags: :meth:`get_by_tags` * Get all saved query tags: :meth:`get_tags` * Get all saved queries: :meth:`get` * Add a saved query: :meth:`add` * Delete a saved query by name: :meth:`delete_by_name` * Delete a saved query by UUID or SQ object: :meth:`delete` See Also: * Device assets :obj:`axonius_api_client.api.assets.devices.Devices` * User assets :obj:`axonius_api_client.api.assets.users.Users` """ def get_by_name(self, value: str) -> dict: """Get a saved query by name. Examples: Get a saved query by name >>> sq = apiobj.saved_query.get_by_name(name="test") >>> sq['tags'] ['Unmanaged Devices'] >>> sq['description'][:80] 'Devices that have been seen by at least one agent or at least one endpoint manag' >>> sq['view']['fields'] [ 'adapters', 'specific_data.data.name', 'specific_data.data.hostname', 'specific_data.data.last_seen', 'specific_data.data.network_interfaces.manufacturer', 'specific_data.data.network_interfaces.mac', 'specific_data.data.network_interfaces.ips', 'specific_data.data.os.type', 'labels' ] >>> sq['view']['query']['filter'][:80] '(specific_data.data.adapter_properties == "Agent") or (specific_data.data.adapte' Args: value: name of saved query """ data = self.get() found = [x for x in data if x["name"] == value] if found: return found[0] err = f"Saved Query with name of {value!r} not found" raise NotFoundError(tablize_sqs(data=data, err=err)) def get_by_uuid(self, value: str) -> dict: """Get a saved query by uuid. Examples: Get a saved query by uuid >>> sq = apiobj.saved_query.get_by_uuid(value="5f76721ce4557d5cba93f59e") Args: value: uuid of saved query """ data = self.get() found = [x for x in data if x["uuid"] == value] if found: return found[0] err = f"Saved Query with UUID of {value!r} not found" raise NotFoundError(tablize_sqs(data=data, err=err)) def get_by_tags(self, value: Union[str, List[str]], **kwargs) -> List[dict]: """Get saved queries by tags. 
Examples: Get all saved queries with tagged with 'AD' >>> sqs = apiobj.saved_query.get_by_tags('AD') >>> len(sqs) 2 Get all saved queries with tagged with 'AD' or 'AWS' >>> sqs = apiobj.saved_query.get_by_tags(['AD', 'AWS']) >>> len(sqs) 5 Args: value: list of tags **kwargs: passed to :meth:`get` Raises: :exc:`NotFoundError`: if no saved queries found tagged with supplied tags """ value = listify(value) rows = self.get(**kwargs) matches = [] known = set() for row in rows: for tag in row.get("tags", []): known.add(tag) if tag in value and row not in matches: matches.append(row) if not matches: valid = "\n " + "\n ".join(sorted(list(known))) msg = f"No saved query found with tags {value!r}, valid tags:{valid}" raise NotFoundError(msg) return matches def get_tags(self, **kwargs) -> List[str]: """Get all tags for saved queries. Examples: Get all known tags for all saved queries >>> tags = apiobj.saved_query.get_tags() >>> len(tags) 19 Args: **kwargs: passed to :meth:`get` """ rows = self.get(**kwargs) tags = [y for x in rows for y in x.get("tags", [])] return sorted(list(set(tags))) def get(self, generator: bool = False) -> Union[Generator[dict, None, None], List[dict]]: """Get all saved queries. Examples: Get all saved queries >>> sqs = apiobj.saved_query.get() >>> len(sqs) 39 Args: generator: return an iterator """ gen = self.get_generator() return gen if generator else list(gen) def get_generator(self) -> Generator[dict, None, None]: """Get Saved Queries using a generator.""" offset = 0 while True: rows = self._get(offset=offset) offset += len(rows) if not rows: break for row in rows: yield row.to_dict() def add( self, name: str, query: Optional[str] = None, tags: Optional[List[str]] = None, description: Optional[str] = None, expressions: Optional[List[str]] = None, fields: Optional[Union[List[str], str]] = None, fields_manual: Optional[Union[List[str], str]] = None, fields_regex: Optional[Union[List[str], str]] = None, fields_fuzzy: Optional[Union[List[str], str]] = None, fields_default: bool = True, fields_root: Optional[str] = None, sort_field: Optional[str] = None, sort_descending: bool = True, column_filters: Optional[dict] = None, gui_page_size: Optional[int] = None, private: bool = False, always_cached: bool = False, **kwargs, ) -> dict: """Create a saved query. Examples: Create a saved query using a :obj:`axonius_api_client.api.wizards.wizard.Wizard` >>> parsed = apiobj.wizard_text.parse(content="simple hostname contains blah") >>> query = parsed["query"] >>> expressions = parsed["expressions"] >>> sq = apiobj.saved_query.add( ... name="test", ... query=query, ... expressions=expressions, ... description="meep meep", ... tags=["nyuck1", "nyuck2", "nyuck3"], ... ) Notes: Saved Queries created without expressions will not be editable using the query wizard in the GUI. Use :obj:`axonius_api_client.api.wizards.wizard.Wizard` to produce a query and it's accordant expressions for the GUI query wizard. 
Args: name: name of saved query description: description tags: list of tags expressions: expressions built by :obj:`axonius_api_client.api.wizards.wizard.Wizard` query: query built by GUI or the CLI query wizard fields: fields to return for each asset (will be validated) fields_manual: fields to return for each asset (will NOT be validated) fields_regex: regex of fields to return for each asset fields_fuzzy: string to fuzzy match of fields to return for each asset fields_default: include the default fields defined in the parent asset object fields_root: include all fields of an adapter that are not complex sub-fields sort_field: sort the returned assets on a given field sort_descending: reverse the sort of the returned assets column_filters: column filters keyed as field_name:value gui_page_size: show N rows per page in GUI private: make this saved query private to current user """ query_expr: Optional[str] = kwargs.get("query_expr", None) or query gui_page_size = check_gui_page_size(size=gui_page_size) fields = self.parent.fields.validate( fields=fields, fields_manual=fields_manual, fields_regex=fields_regex, fields_default=fields_default, fields_root=fields_root, fields_fuzzy=fields_fuzzy, ) if sort_field: sort_field = self.parent.fields.get_field_name(value=sort_field) data_column_filters = {} if column_filters: for col_field, col_value in column_filters.items(): col_field = self.parent.fields.get_field_name(value=col_field) data_column_filters[col_field] = col_value dmeta = {} # TBD dmeta["enforcementFilter"] = None # TBD dmeta["uniqueAdapters"] = False # TBD data_query = {} data_query["filter"] = query or "" if query_expr: data_query["onlyExpressionsFilter"] = query_expr data_query["expressions"] = expressions or [] data_query["search"] = None # TBD data_query["meta"] = dmeta # TBD data_sort = {} data_sort["desc"] = sort_descending data_sort["field"] = sort_field or "" data_view = {} data_view["query"] = data_query data_view["sort"] = data_sort data_view["fields"] = fields data_view["pageSize"] = gui_page_size # 4.5 SEMI_BREAKING_CHANGE: now a list of dict # data_view["colFilters"] = listify(data_column_filters or {}) if data_column_filters: msg = f"Column filters structure has changed and is currently not supported by the API client." warnings.warn(message=msg, category=ApiWarning) # 4.5 SEMI_BREAKING_CHANGE: now a list of dict # data_view["colExcludedAdapters"] = listify({}) # TBD # data = {} # data["name"] = name # data["query_type"] = "saved" # data["description"] = description # data["view"] = data_view # data["tags"] = tags or [] # data["private"] = private added = self._add( name=name, description=description, view=data_view, private=private, always_cached=always_cached, tags=tags, ) return self.get_by_uuid(value=added.id) def delete_by_name(self, value: str, **kwargs) -> dict: """Delete a saved query by name. Examples: Delete the saved query by name >>> deleted = apiobj.saved_query.delete_by_name(name="test") Args: value: name of saved query to delete **kwargs: passed to :meth:`get_by_name` """ row = self.get_by_name(value=value, **kwargs) self._delete(uuid=row["uuid"]) return row def delete(self, rows: Union[str, List[str], List[dict]]) -> List[str]: """Delete saved queries. 
Args: rows: list of UUIDs or rows previously fetched saved queries to delete """ rows = listify(rows) deleted = [] for row in rows: uuid = row["uuid"] if isinstance(row, dict) else row self._delete(uuid=uuid) deleted.append(uuid) return deleted def _add( self, name: str, view: dict, description: Optional[str] = "", always_cached: bool = False, private: bool = False, tags: Optional[List[str]] = None, ) -> str: """Direct API method to create a saved query. Args: data: saved query metadata """ api_endpoint = ApiEndpoints.saved_queries.create request_obj = api_endpoint.load_request( name=name, view=view, description=description, always_cached=always_cached, private=private, tags=tags or [], ) return api_endpoint.perform_request( http=self.auth.http, request_obj=request_obj, asset_type=self.parent.ASSET_TYPE ) def _delete(self, uuid: str) -> json_api.generic.Metadata: """Direct API method to delete saved queries. Args: ids: list of uuid's to delete """ # NEW_IN: 05/31/21 cortex/develop try: api_endpoint = ApiEndpoints.saved_queries.delete request_obj = api_endpoint.load_request() return api_endpoint.perform_request( http=self.auth.http, request_obj=request_obj, asset_type=self.parent.ASSET_TYPE, uuid=uuid, ) except ResponseError as exc: if exc.is_incorrect_type: api_endpoint = ApiEndpoints.saved_queries.delete_4_3 request_obj = api_endpoint.load_request() return api_endpoint.perform_request( http=self.auth.http, request_obj=request_obj, asset_type=self.parent.ASSET_TYPE, uuid=uuid, ) raise def _get( self, limit: int = MAX_PAGE_SIZE, offset: int = 0 ) -> List[json_api.saved_queries.SavedQuery]: """Direct API method to get all users. Args: limit: limit to N rows per page offset: start at row N """ api_endpoint = ApiEndpoints.saved_queries.get request_obj = api_endpoint.load_request(page={"limit": limit, "offset": offset}) return api_endpoint.perform_request( http=self.auth.http, request_obj=request_obj, asset_type=self.parent.ASSET_TYPE )
[ [ [ 84, 92 ], [ 10387, 10395 ] ], [ [ 113, 122 ], [ 5094, 5103 ], [ 5494, 5503 ] ], [ [ 124, 128 ], [ 3441, 3445 ], [ 3416, 3420 ], [ 4616, 4620 ], [ 5123, 5127 ], [ 5911, 5915 ], [ 6003, 6007 ], [ 6053, 6057 ], [ 6116, 6120 ], [ 6178, 6182 ], [ 6240, 6244 ], [ 11606, 11610 ], [ 11579, 11583 ], [ 11590, 11594 ], [ 12192, 12196 ], [ 13911, 13915 ] ], [ [ 130, 138 ], [ 5866, 5874 ], [ 5902, 5910 ], [ 5951, 5959 ], [ 5994, 6002 ], [ 6038, 6046 ], [ 6101, 6109 ], [ 6163, 6171 ], [ 6225, 6233 ], [ 6323, 6331 ], [ 6365, 6373 ], [ 6449, 6457 ], [ 6495, 6503 ], [ 8629, 8637 ], [ 12081, 12089 ], [ 12183, 12191 ] ], [ [ 140, 145 ], [ 3405, 3410 ], [ 5088, 5093 ], [ 6047, 6052 ], [ 6110, 6115 ], [ 6172, 6177 ], [ 6234, 6239 ], [ 11568, 11573 ] ], [ [ 176, 189 ], [ 13871, 13884 ] ], [ [ 216, 229 ], [ 2776, 2789 ], [ 3324, 3337 ], [ 4537, 4550 ] ], [ [ 231, 244 ], [ 13381, 13394 ] ], [ [ 246, 256 ], [ 10423, 10433 ] ], [ [ 322, 333 ], [ 2790, 2801 ], [ 3338, 3349 ] ], [ [ 355, 374 ], [ 8709, 8728 ] ], [ [ 376, 383 ], [ 4061, 4068 ], [ 11775, 11782 ] ], [ [ 399, 407 ], [ 12827, 12835 ], [ 13916, 13924 ] ], [ [ 436, 448 ], [ 12368, 12380 ], [ 13059, 13071 ], [ 13472, 13484 ], [ 14128, 14140 ] ], [ [ 470, 481 ], [ 531, 542 ] ], [ [ 520, 530 ] ] ]
""" Miscellaneous package utilities. .. include:: ../include/links.rst """ from itertools import chain, combinations from IPython import embed import numpy def all_subclasses(cls): """ Collect all the subclasses of the provided class. The search follows the inheritance to the highest-level class. Intermediate base classes are included in the returned set, but not the base class itself. Thanks to: https://stackoverflow.com/questions/3862310/how-to-find-all-the-subclasses-of-a-class-given-its-name Args: cls (object): The base class Returns: :obj:`set`: The unique set of derived classes, including any intermediate base classes in the inheritance thread. """ return set(cls.__subclasses__()).union( [s for c in cls.__subclasses__() for s in all_subclasses(c)]) def string_table(tbl, delimeter='print', has_header=True): """ Provided the array of data, format it with equally spaced columns and add a header (first row) and contents delimeter. Args: tbl (`numpy.ndarray`_): Array of string representations of the data to print. delimeter (:obj:`str`, optional): If the first row in the table containts the column headers (see ``has_header``), this sets the delimeter between first table row and the column data. Use ``'print'`` for a simple line of hyphens, anything else results in an ``rst`` style table formatting. has_header (:obj:`bool`, optional): The first row in ``tbl`` contains the column headers. Returns: :obj:`str`: Single long string with the data table. """ nrows, ncols = tbl.shape col_width = [numpy.amax([len(dij) for dij in dj]) for dj in tbl.T] _nrows = nrows start = 1 if delimeter != 'print': _nrows += 2 start += 1 if has_header: _nrows += 1 start += 1 row_string = ['']*_nrows for i in range(start,nrows+start-1): row_string[i] = ' '.join([tbl[1+i-start,j].ljust(col_width[j]) for j in range(ncols)]) if delimeter == 'print': # Heading row row_string[0] = ' '.join([tbl[0,j].ljust(col_width[j]) for j in range(ncols)]) # Delimiter if has_header: row_string[1] = '-'*len(row_string[0]) return '\n'.join(row_string)+'\n' # For an rst table row_string[0] = ' '.join([ '='*col_width[j] for j in range(ncols)]) row_string[1] = ' '.join([tbl[0,j].ljust(col_width[j]) for j in range(ncols)]) if has_header: row_string[2] = row_string[0] row_string[-1] = row_string[0] return '\n'.join(row_string)+'\n' def powerset(iterable, reverse=False): """" Construct an iterable that steps through all combinations of the provided iterable. This is pulled from the recipes provided by the itertools documentation. Examples: Get all unique combinations of the list [1,2,3]: >>> list(powerset([1,2,3])) [() (1,) (2,) (3,) (1,2) (1,3) (2,3) (1,2,3)] Args: iterable (iterable): An iterable object reverse (:obj:`bool`, optional): Reverse the order (only roughly) of the iterable by placing the longer sequences first. Returns: `itertools.chain`: Iterable object that returns the sequence of combinations. """ rng = range(len(iterable)+1)[::-1] if reverse else range(len(iterable)+1) return chain.from_iterable(combinations(iterable, r) for r in rng) def polygon_winding_number(polygon, point): """ Determine the winding number of a 2D polygon about a point. The code does **not** check if the polygon is simple (no interesecting line segments). Algorithm taken from Numerical Recipes Section 21.4. Args: polygon (`numpy.ndarray`_): An Nx2 array containing the x,y coordinates of a polygon. The points should be ordered either counter-clockwise or clockwise. point (`numpy.ndarray`_): One or more points for the winding number calculation. 
Must be either a 2-element array for a single (x,y) pair, or an Nx2 array with N (x,y) points. Returns: :obj:`int`, `numpy.ndarray`_: The winding number of each point with respect to the provided polygon. Points inside the polygon have winding numbers of 1 or -1; see :func:`point_inside_polygon`. Raises: ValueError: Raised if ``polygon`` is not 2D, if ``polygon`` does not have two columns, or if the last axis of ``point`` does not have 2 and only 2 elements. """ # Check input shape is for 2D only if len(polygon.shape) != 2: raise ValueError('Polygon must be an Nx2 array.') if polygon.shape[1] != 2: raise ValueError('Polygon must be in two dimensions.') _point = numpy.atleast_2d(point) if _point.shape[1] != 2: raise ValueError('Point must contain two elements.') # Get the winding number nvert = polygon.shape[0] npnt = _point.shape[0] dl = numpy.roll(polygon, 1, axis=0)[None,:,:] - _point[:,None,:] dr = polygon[None,:,:] - point[:,None,:] dx = dl[...,0]*dr[...,1] - dl[...,1]*dr[...,0] indx_l = dl[...,1] > 0 indx_r = dr[...,1] > 0 wind = numpy.zeros((npnt, nvert), dtype=int) wind[indx_l & numpy.logical_not(indx_r) & (dx < 0)] = -1 wind[numpy.logical_not(indx_l) & indx_r & (dx > 0)] = 1 return numpy.sum(wind, axis=1)[0] if point.ndim == 1 else numpy.sum(wind, axis=1) def point_inside_polygon(polygon, point): """ Determine if one or more points is inside the provided polygon. Primarily a wrapper for :func:`polygon_winding_number`, that returns True for each point that is inside the polygon. Args: polygon (`numpy.ndarray`_): An Nx2 array containing the x,y coordinates of a polygon. The points should be ordered either counter-clockwise or clockwise. point (`numpy.ndarray`_): One or more points for the winding number calculation. Must be either a 2-element array for a single (x,y) pair, or an Nx2 array with N (x,y) points. Returns: :obj:`bool`, `numpy.ndarray`: Boolean indicating whether or not each point is within the polygon. """ return numpy.absolute(polygon_winding_number(polygon, point)) == 1
[ [ [ 99, 104 ], [ 3550, 3555 ] ], [ [ 106, 118 ], [ 3570, 3582 ] ], [ [ 140, 145 ] ], [ [ 155, 160 ], [ 1754, 1759 ], [ 5000, 5005 ], [ 5210, 5215 ], [ 5433, 5438 ], [ 5489, 5494 ], [ 5541, 5546 ], [ 5604, 5609 ], [ 5655, 5660 ], [ 6501, 6506 ] ], [ [ 167, 181 ], [ 845, 859 ] ], [ [ 871, 883 ] ], [ [ 2728, 2736 ] ], [ [ 3616, 3638 ], [ 6516, 6538 ] ], [ [ 5685, 5705 ] ] ]
import math
import tensorflow as tf
import os
import struct
import pdb
import numpy as np

from datasets import dataset_factory
from nets import nets_factory
import nets.resnet_v2 as resnet_v2
from preprocessing import preprocessing_factory

slim = tf.contrib.slim


def merge_predictions(predictions_fn):
    '''
    Merge predictions/logit scores for products that are the same.
    '''
    out_f = open(predictions_fn + '_merged', 'wb')
    f = open(predictions_fn, 'r')
    line = f.readline().strip().split()
    curr_id = line[0]
    curr_scores = np.power(np.array([float(x) for x in line[1:]]), 3)
    num_elems = 1
    line = f.readline().strip().split()

    while line != []:
        id = line[0]
        # raise elements to the third power, and then take the cubic root
        scores = np.power(np.array([float(x) for x in line[1:]]), 3)
        if id == curr_id:
            num_elems += 1
            curr_scores += scores
        else:
            curr_scores = np.cbrt(curr_scores / float(num_elems))
            for score in curr_scores:
                out_f.write(struct.pack('>f', score))
            curr_scores = scores
            num_elems = 1
            curr_id = id
        line = f.readline().strip().split()

    curr_scores = np.cbrt(curr_scores / float(num_elems))
    for score in curr_scores:
        out_f.write(struct.pack('>f', score))

    out_f.close()
    f.close()


if __name__ == '__main__':

    checkpoint_dir = '/home/shunan/Code/Data/cdiscount/training'
    dataset_dir = '/home/shunan/Code/Data/cdiscount/tf_records'
    num_classes = 5270
    image_size = 180
    batch_size = 100
    set_name = 'validation'
    data_sizes = {'train': 12195682, 'validation': 175611, 'test': 3095080}

    out_fn = os.path.join(dataset_dir, '{}_predictions.txt'.format(set_name))
    checkpoint_file = tf.train.latest_checkpoint(checkpoint_dir)

    # loading the dataset
    dataset = dataset_factory.get_dataset('cdiscount', set_name, dataset_dir)

    # dataset provider to load data from the dataset.
    provider = slim.dataset_data_provider.DatasetDataProvider(
        dataset, shuffle=False, common_queue_capacity=2*batch_size, common_queue_min=batch_size)
    [image, label, product_id] = provider.get(['image', 'label', 'product_id'])

    # Pre-processing step.
    image_preprocessing_fn = preprocessing_factory.get_preprocessing('simple', is_training=False)
    image = image_preprocessing_fn(image, image_size, image_size)

    images, labels, product_ids = tf.train.batch(
        [image, label, product_id], batch_size=batch_size, num_threads=1, capacity=5 * batch_size)

    # Get the model
    # network_fn = nets_factory.get_network_fn('resnet_v2_152', num_classes=num_classes, is_training=False)
    with slim.arg_scope(resnet_v2.resnet_arg_scope(weight_decay=0.)):
        logits, end_points = resnet_v2.resnet_v2_152(images, num_classes=num_classes, is_training=False)

    # Obtain the trainable variables and a saver
    variables_to_restore = slim.get_variables_to_restore()
    saver = tf.train.Saver(variables_to_restore)

    output_f = open(out_fn, 'w')

    with tf.Session() as sess:
        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(coord=coord)
        sess.run(tf.global_variables_initializer())
        saver.restore(sess, checkpoint_file)

        num_iters = int(math.ceil(data_sizes[set_name] / float(batch_size)))
        num_last_batch = batch_size - ((num_iters * batch_size) - data_sizes[set_name])

        for i in range(num_iters):
            output, ids = sess.run([logits, product_ids])
            if i == num_iters - 1:
                output = output[:num_last_batch, :]
                ids = ids[:num_last_batch]
            for j in range(output.shape[0]):
                vec_str = [str(x) for x in output[j, :]]
                output_f.write(str(ids[j]) + ' ' + ' '.join(vec_str) + '\n')

    output_f.close()
[ [ [ 7, 11 ], [ 3408, 3412 ] ], [ [ 19, 35 ], [ 247, 249 ], [ 1837, 1839 ], [ 2506, 2508 ], [ 3085, 3087 ], [ 3165, 3167 ], [ 3203, 3205 ], [ 3244, 3246 ], [ 3304, 3306 ] ], [ [ 43, 45 ], [ 1749, 1751 ] ], [ [ 53, 59 ], [ 1083, 1089 ], [ 1349, 1355 ] ], [ [ 67, 70 ] ], [ [ 78, 89 ], [ 552, 554 ], [ 561, 563 ], [ 797, 799 ], [ 806, 808 ], [ 977, 979 ], [ 1259, 1261 ] ], [ [ 111, 126 ], [ 1921, 1936 ] ], [ [ 144, 156 ] ], [ [ 164, 191 ], [ 2814, 2823 ], [ 2889, 2898 ] ], [ [ 218, 239 ], [ 2336, 2357 ] ], [ [ 240, 244 ], [ 2055, 2059 ], [ 2799, 2803 ], [ 3041, 3045 ] ], [ [ 268, 285 ] ], [ [ 1442, 1456 ], [ 1864, 1878 ] ], [ [ 1507, 1518 ], [ 1762, 1773 ], [ 1972, 1983 ] ], [ [ 1571, 1582 ], [ 2933, 2944 ] ], [ [ 1594, 1604 ], [ 2447, 2457 ], [ 2459, 2469 ] ], [ [ 1615, 1625 ], [ 2150, 2160 ], [ 2187, 2197 ], [ 2560, 2570 ], [ 2649, 2659 ], [ 3447, 3457 ], [ 3486, 3496 ], [ 3513, 3523 ] ], [ [ 1636, 1644 ], [ 1803, 1811 ], [ 1962, 1970 ], [ 3429, 3437 ], [ 3538, 3546 ] ], [ [ 1664, 1674 ], [ 3418, 3428 ], [ 3527, 3537 ] ], [ [ 1740, 1746 ], [ 3142, 3148 ] ], [ [ 1819, 1834 ], [ 3367, 3382 ] ], [ [ 1911, 1918 ], [ 2102, 2109 ] ], [ [ 2044, 2052 ], [ 2232, 2240 ] ], [ [ 2204, 2209 ], [ 2440, 2445 ] ], [ [ 2211, 2216 ], [ 2529, 2534 ] ], [ [ 2218, 2228 ], [ 2536, 2546 ] ], [ [ 2311, 2333 ], [ 2417, 2439 ] ], [ [ 2409, 2414 ], [ 2522, 2527 ] ], [ [ 2476, 2482 ], [ 2913, 2919 ] ], [ [ 2484, 2490 ] ], [ [ 2492, 2503 ], [ 3629, 3640 ] ], [ [ 2868, 2874 ], [ 3621, 3627 ] ], [ [ 2876, 2886 ] ], [ [ 3018, 3038 ], [ 3100, 3120 ] ], [ [ 3077, 3082 ], [ 3347, 3352 ] ], [ [ 3126, 3134 ], [ 3893, 3901 ], [ 3963, 3971 ] ], [ [ 3181, 3185 ], [ 3295, 3299 ], [ 3361, 3365 ], [ 3611, 3615 ] ], [ [ 3195, 3200 ], [ 3279, 3284 ] ], [ [ 3234, 3241 ] ], [ [ 3392, 3401 ], [ 3501, 3510 ], [ 3573, 3582 ], [ 3664, 3673 ] ], [ [ 3469, 3483 ], [ 3712, 3726 ], [ 3758, 3772 ] ], [ [ 3562, 3563 ], [ 3659, 3660 ] ], [ [ 3597, 3603 ], [ 3704, 3710 ], [ 3802, 3808 ], [ 3863, 3869 ] ], [ [ 3605, 3608 ], [ 3753, 3756 ], [ 3912, 3915 ] ], [ [ 3695, 3701 ], [ 3802, 3808 ], [ 3863, 3869 ] ], [ [ 3747, 3750 ], [ 3912, 3915 ] ], [ [ 3791, 3792 ], [ 3870, 3871 ], [ 3916, 3917 ] ], [ [ 3836, 3843 ], [ 3937, 3944 ] ] ]
# -*- coding: utf-8 -*- from logging import getLogger from time import time, strftime from BTrees.IIBTree import IITreeSet from Products.CMFCore.utils import getToolByName from Products.Five.browser import BrowserView from plone.uuid.interfaces import IUUID, IUUIDAware from zope.interface import implementer from zope.component import queryUtility, queryAdapter from collective.solr.indexer import DefaultAdder from collective.solr.flare import PloneFlare from collective.solr.interfaces import ISolrConnectionManager from collective.solr.interfaces import ISolrMaintenanceView from collective.solr.interfaces import ISolrAddHandler from collective.solr.interfaces import ICheckIndexable from collective.solr.indexer import SolrIndexProcessor from collective.solr.indexer import boost_values from collective.solr.parser import parse_date_as_datetime from collective.solr.parser import SolrResponse from collective.solr.parser import unmarshallers from collective.solr.utils import findObjects from collective.solr.utils import prepareData logger = getLogger("collective.solr.maintenance") MAX_ROWS = 1000000000 try: from time import process_time except ImportError: # Python < 3.8 from time import clock as process_time def timer(func=time): """set up a generator returning the elapsed time since the last call""" def gen(last=func()): while True: elapsed = func() - last last = func() yield "%.3fs" % elapsed return gen() def checkpointIterator(function, interval=100): """the iterator will call the given function for every nth invocation""" counter = 0 while True: counter += 1 if counter % interval == 0: function() yield None def notimeout(func): """decorator to prevent long-running solr tasks from timing out""" def wrapper(*args, **kw): """wrapper with random docstring so ttw access still works""" manager = queryUtility(ISolrConnectionManager) manager.setTimeout(None, lock=True) try: return func(*args, **kw) finally: manager.setTimeout(None, lock=False) return wrapper @implementer(ISolrMaintenanceView) class SolrMaintenanceView(BrowserView): """helper view for indexing all portal content in Solr""" def mklog(self, use_std_log=False): """helper to prepend a time stamp to the output""" write = self.request.RESPONSE.write def log(msg, timestamp=True): if timestamp: msg = strftime("%Y/%m/%d-%H:%M:%S ") + msg write(msg.encode("utf-8")) if use_std_log: logger.info(msg) return log def optimize(self): """optimize solr indexes""" manager = queryUtility(ISolrConnectionManager) conn = manager.getConnection() conn.setTimeout(None) conn.commit(optimize=True) return "solr indexes optimized." def clear(self): """clear all data from solr, i.e. delete all indexed objects""" manager = queryUtility(ISolrConnectionManager) uniqueKey = manager.getSchema().uniqueKey conn = manager.getConnection() conn.setTimeout(None) conn.deleteByQuery("%s:[* TO *]" % uniqueKey) conn.commit() return "solr index cleared." 
def reindex( self, batch=1000, skip=0, limit=0, ignore_portal_types=None, only_portal_types=None, idxs=[], ignore_exceptions=False, ): """find all contentish objects (meaning all objects derived from one of the catalog mixin classes) and (re)indexes them""" if ignore_portal_types and only_portal_types: raise ValueError( "It is not possible to combine " "ignore_portal_types with only_portal_types" ) atomic = idxs != [] manager = queryUtility(ISolrConnectionManager) proc = SolrIndexProcessor(manager) conn = manager.getConnection() zodb_conn = self.context._p_jar log = self.mklog() log("reindexing solr catalog...\n") if skip: log("skipping indexing of %d object(s)...\n" % skip) if limit: log("limiting indexing to %d object(s)...\n" % limit) real = timer() # real time lap = timer() # real lap time (for intermediate commits) cpu = timer(process_time) # cpu time processed = 0 schema = manager.getSchema() key = schema.uniqueKey updates = {} # list to hold data to be updated def flush(): return conn.commit(soft=True) flush = notimeout(flush) def checkPoint(): for my_boost_values, data in updates.values(): adder = data.pop("_solr_adder") try: adder(conn, boost_values=my_boost_values, **data) except Exception as e: logger.warning("Error %s @ %s", e, data["path_string"]) if not ignore_exceptions: raise updates.clear() msg = ( "intermediate commit (%d items processed, " "last batch in %s)...\n" % (processed, next(lap)) ) log(msg) logger.info(msg) flush() zodb_conn.cacheGC() cpi = checkpointIterator(checkPoint, batch) count = 0 if atomic: log("indexing only {0} \n".format(idxs)) for path, obj in findObjects(self.context): if ICheckIndexable(obj)(): count += 1 if count <= skip: continue if ignore_portal_types: if obj.portal_type in ignore_portal_types: continue if only_portal_types: if obj.portal_type not in only_portal_types: continue attributes = None if atomic: attributes = idxs # For atomic updates to work the uniqueKey must be present # in *every* update operation. if attributes and key not in attributes: attributes.append(key) data, missing = proc.getData(obj, attributes=attributes) prepareData(data) if not missing or atomic: value = data.get(key, None) if value is not None: log("indexing %r\n" % obj) pt = data.get("portal_type", "default") adder = queryAdapter(obj, ISolrAddHandler, name=pt) if adder is None: adder = DefaultAdder(obj) data["_solr_adder"] = adder updates[value] = (boost_values(obj, data), data) processed += 1 next(cpi) else: log("missing data, skipping indexing of %r.\n" % obj) if limit and count >= (skip + limit): break checkPoint() conn.commit() log("solr index rebuilt.\n") msg = "processed %d items in %s (%s cpu time)." msg = msg % (processed, next(real), next(cpu)) log(msg) logger.info(msg) def sync(self, batch=1000, preImportDeleteQuery="*:*"): """Sync the Solr index with the portal catalog. Records contained in the catalog but not in Solr will be indexed and records not contained in the catalog will be removed. 
""" manager = queryUtility(ISolrConnectionManager) proc = SolrIndexProcessor(manager) conn = manager.getConnection() key = queryUtility(ISolrConnectionManager).getSchema().uniqueKey zodb_conn = self.context._p_jar catalog = getToolByName(self.context, "portal_catalog") getIndex = catalog._catalog.getIndex modified_index = getIndex("modified") uid_index = getIndex(key) log = self.mklog() real = timer() # real time lap = timer() # real lap time (for intermediate commits) cpu = timer(process_time) # cpu time # get Solr status response = conn.search( q=preImportDeleteQuery, rows=MAX_ROWS, fl="%s modified" % key ) # avoid creating DateTime instances simple_unmarshallers = unmarshallers.copy() simple_unmarshallers["date"] = parse_date_as_datetime flares = SolrResponse(response, simple_unmarshallers) response.close() solr_results = {} solr_uids = set() def _utc_convert(value): t_tup = value.utctimetuple() return ( ((t_tup[0] * 12 + t_tup[1]) * 31 + t_tup[2]) * 24 + t_tup[3] ) * 60 + t_tup[4] for flare in flares: uid = flare[key] solr_uids.add(uid) solr_results[uid] = _utc_convert(flare["modified"]) # get catalog status cat_results = {} cat_uids = set() for uid, rid in uid_index._index.items(): cat_uids.add(uid) cat_results[uid] = rid # differences index = cat_uids.difference(solr_uids) solr_uids.difference_update(cat_uids) unindex = solr_uids processed = 0 flush = notimeout(lambda: conn.flush()) def checkPoint(): msg = ( "intermediate commit (%d items processed, " "last batch in %s)...\n" % (processed, next(lap)) ) log(msg) logger.info(msg) flush() zodb_conn.cacheGC() cpi = checkpointIterator(checkPoint, batch) # Look up objects uid_rid_get = cat_results.get rid_path_get = catalog._catalog.paths.get catalog_traverse = catalog.unrestrictedTraverse def lookup( uid, rid=None, uid_rid_get=uid_rid_get, rid_path_get=rid_path_get, catalog_traverse=catalog_traverse, ): if rid is None: rid = uid_rid_get(uid) if not rid: return None if not isinstance(rid, int): rid = tuple(rid)[0] path = rid_path_get(rid) if not path: return None try: obj = catalog_traverse(path) except AttributeError: return None return obj log('processing %d "unindex" operations next...\n' % len(unindex)) op = notimeout(lambda uid: conn.delete(id=uid)) for uid in unindex: obj = lookup(uid) if obj is None: op(uid) processed += 1 next(cpi) else: log("not unindexing existing object %r.\n" % uid) log('processing %d "index" operations next...\n' % len(index)) op = notimeout(lambda obj: proc.index(obj)) for uid in index: obj = lookup(uid) if ICheckIndexable(obj)(): op(obj) processed += 1 next(cpi) else: log("not indexing unindexable object %r.\n" % uid) if obj is not None: obj._p_deactivate() log('processing "reindex" operations next...\n') op = notimeout(lambda obj: proc.reindex(obj)) cat_mod_get = modified_index._unindex.get solr_mod_get = solr_results.get done = unindex.union(index) for uid, rid in cat_results.items(): if uid in done: continue if isinstance(rid, IITreeSet): rid = list(rid.keys())[0] if cat_mod_get(rid) != solr_mod_get(uid): obj = lookup(uid, rid=rid) if ICheckIndexable(obj)(): op(obj) processed += 1 next(cpi) else: log("not reindexing unindexable object %r.\n" % uid) if obj is not None: obj._p_deactivate() conn.commit() log("solr index synced.\n") msg = "processed %d object(s) in %s (%s cpu time)." 
msg = msg % (processed, next(real), next(cpu)) log(msg) logger.info(msg) def cleanup(self, batch=1000): """remove entries from solr that don't have a corresponding Zope object or have a different UID than the real object""" manager = queryUtility(ISolrConnectionManager) proc = SolrIndexProcessor(manager) conn = manager.getConnection() log = self.mklog(use_std_log=True) log("cleaning up solr index...\n") key = manager.getSchema().uniqueKey start = 0 resp = SolrResponse(conn.search(q="*:*", rows=batch, start=start)) res = resp.results() log("%s items in solr catalog\n" % resp.response.numFound) deleted = 0 reindexed = 0 while len(res) > 0: for flare in res: try: ob = PloneFlare(flare).getObject() except Exception as err: log( "Error getting object, removing: %s (%s)\n" % (flare["path_string"], err) ) conn.delete(flare[key]) deleted += 1 continue if ob is None: log("Object not found, removing: %s\n" % (flare["path_string"])) conn.delete(flare[key]) deleted += 1 continue if not IUUIDAware.providedBy(ob): no_skipping_msg = ( "Object %s of type %s does not " + "support uuids, skipping.\n" ) log( no_skipping_msg % ("/".join(ob.getPhysicalPath()), ob.meta_type) ) continue uuid = IUUID(ob) if uuid != flare[key]: log( "indexed under wrong UID, removing: %s\n" % flare["path_string"] ) conn.delete(flare[key]) deleted += 1 realob_res = SolrResponse( conn.search(q="%s:%s" % (key, uuid)) ).results() if len(realob_res) == 0: log("no sane entry for last object, reindexing\n") data, missing = proc.getData(ob) prepareData(data) if not missing: boost = boost_values(ob, data) conn.add(boost_values=boost, **data) reindexed += 1 else: log(" missing data, cannot index.\n") log("handled batch of %d items, committing\n" % len(res)) conn.commit() start += batch resp = SolrResponse(conn.search(q="*:*", rows=batch, start=start)) res = resp.results() finished_msg = ( "solr cleanup finished, %s item(s) removed, " + "%s item(s) reindexed\n" ) msg = finished_msg % (deleted, reindexed) log(msg) logger.info(msg)
[ [ [ 44, 53 ], [ 1052, 1061 ] ], [ [ 71, 75 ], [ 1255, 1259 ] ], [ [ 77, 85 ], [ 2559, 2567 ] ], [ [ 114, 123 ], [ 11940, 11949 ] ], [ [ 159, 172 ], [ 8050, 8063 ] ], [ [ 207, 218 ], [ 2252, 2263 ] ], [ [ 253, 258 ], [ 14344, 14349 ] ], [ [ 260, 270 ], [ 13979, 13989 ] ], [ [ 298, 309 ], [ 2192, 2203 ] ], [ [ 337, 349 ], [ 2795, 2807 ], [ 3089, 3101 ], [ 3961, 3973 ], [ 7800, 7812 ], [ 7933, 7945 ], [ 12803, 12815 ], [ 1972, 1984 ] ], [ [ 351, 363 ], [ 6766, 6778 ] ], [ [ 401, 413 ], [ 6888, 6900 ] ], [ [ 448, 458 ], [ 13388, 13398 ] ], [ [ 498, 520 ], [ 2808, 2830 ], [ 3102, 3124 ], [ 3974, 3996 ], [ 7813, 7835 ], [ 7946, 7968 ], [ 12816, 12838 ], [ 1985, 2007 ] ], [ [ 560, 580 ], [ 2204, 2224 ] ], [ [ 620, 635 ], [ 6784, 6799 ] ], [ [ 675, 690 ], [ 5667, 5682 ], [ 11316, 11331 ], [ 12110, 12125 ] ], [ [ 727, 745 ], [ 4013, 4031 ], [ 7852, 7870 ], [ 12855, 12873 ] ], [ [ 782, 794 ], [ 7000, 7012 ], [ 15041, 15053 ] ], [ [ 830, 852 ], [ 8673, 8695 ] ], [ [ 888, 900 ], [ 8713, 8725 ], [ 13086, 13098 ], [ 14639, 14651 ], [ 15411, 15423 ] ], [ [ 936, 949 ], [ 8613, 8626 ] ], [ [ 984, 995 ], [ 5625, 5636 ] ], [ [ 1030, 1041 ], [ 6467, 6478 ], [ 14947, 14958 ] ], [ [ 1043, 1049 ], [ 7497, 7503 ], [ 12596, 12602 ], [ 15699, 15705 ], [ 2679, 2685 ], [ 5033, 5039 ], [ 5386, 5392 ], [ 9819, 9825 ] ], [ [ 1093, 1101 ], [ 8495, 8503 ] ], [ [ 1143, 1155 ], [ 4479, 4491 ], [ 8370, 8382 ] ], [ [ 1216, 1237 ], [ 4479, 4491 ], [ 8370, 8382 ] ], [ [ 1244, 1249 ], [ 4372, 4377 ], [ 4407, 4412 ], [ 4473, 4478 ], [ 8263, 8268 ], [ 8298, 8303 ], [ 8364, 8369 ] ], [ [ 1507, 1525 ], [ 5470, 5488 ], [ 9903, 9921 ] ], [ [ 1765, 1774 ], [ 4732, 4741 ], [ 9567, 9576 ], [ 10828, 10837 ], [ 11206, 11215 ], [ 11644, 11653 ] ], [ [ 2232, 2251 ] ] ]
""" One of the central problems in statistics is to make estimations — and quantify how good these estimations are — of the distribution of an entire population given only a small (random) sample. A classic example is to estimate the average height of all the people in a country when measuring the height of a randomly selected sample of people. These kinds of problems are particularly interesting when the true population distribution, by which we usually mean the mean of the whole population, cannot feasibly be measured. In this case, we must rely on our knowledge of statistics and a (usually much smaller) randomly selected sample to estimate the true population mean and standard deviation, and also quantify how good our estimations are. It is the latter that is the source of confusion, misunderstanding, and misrepresentation of statistics in the wider world. This module illustrates how to estimate the population mean and give a confidence interval fo these estimates. """ import math import pandas as pd from scipy import stats sample_data = pd.Series([ 172.3, 171.3, 164.7, 162.9, 172.5, 176.3, 174.8, 171.9, 176.8, 167.8, 164.5, 179.7, 157.8, 170.6, 189.9, 185. , 172.7, 165.5, 174.5, 171.5]) sample_mean = sample_data.mean() sample_std = sample_data.std() print(f"Mean: {sample_mean}, st. dev: {sample_std}") # Mean: 172.15, st. dev: 7.473778724383846 N = sample_data.count() std_err = sample_std/math.sqrt(N) cv_95, cv_99 = stats.t.ppf([0.975, 0.995], df=N-1) pm_95 = cv_95 * std_err pm_99 = cv_99 * std_err conf_interval_95 = [sample_mean - pm_95, sample_mean + pm_95] conf_interval_99 = [sample_mean - pm_99, sample_mean + pm_99] print(f"95% confidence: {conf_interval_95}") print(f"99% confidence: {conf_interval_99}") # 95% confidence: [168.65216388659374, 175.64783611340627] # 99% confidence: [167.36884119608774, 176.93115880391227]
[ [ [ 995, 999 ], [ 1433, 1437 ] ], [ [ 1007, 1019 ], [ 1060, 1062 ] ], [ [ 1039, 1044 ], [ 1462, 1467 ] ], [ [ 1046, 1057 ], [ 1240, 1251 ], [ 1272, 1283 ], [ 1392, 1403 ] ], [ [ 1226, 1237 ], [ 1306, 1317 ], [ 1567, 1578 ], [ 1588, 1599 ], [ 1630, 1641 ], [ 1651, 1662 ] ], [ [ 1259, 1269 ], [ 1330, 1340 ], [ 1422, 1432 ] ], [ [ 1388, 1389 ], [ 1443, 1444 ], [ 1493, 1494 ] ], [ [ 1412, 1419 ], [ 1515, 1522 ], [ 1539, 1546 ] ], [ [ 1447, 1452 ], [ 1507, 1512 ] ], [ [ 1454, 1459 ], [ 1531, 1536 ] ], [ [ 1499, 1504 ], [ 1581, 1586 ], [ 1602, 1607 ] ], [ [ 1523, 1528 ], [ 1644, 1649 ], [ 1665, 1670 ] ], [ [ 1547, 1563 ], [ 1698, 1714 ] ], [ [ 1610, 1626 ], [ 1743, 1759 ] ] ]
#!/usr/bin/env python
# -*- coding: utf-8 -*-

from collections import Iterable


def flatten(input_arr, output_arr = None):
    if output_arr is None:
        output_arr = []
    for t in input_arr:
        if isinstance(t, Iterable):
            flatten(t, output_arr)
        else:
            output_arr.append(t)
    return output_arr


def flatten_iter(iterable):
    for t in iterable:
        if isinstance(t, Iterable):
            yield from flatten_iter(t)
        else:
            yield t
[ [ [ 70, 78 ], [ 224, 232 ], [ 416, 424 ] ], [ [ 85, 92 ], [ 247, 254 ] ], [ [ 344, 356 ], [ 450, 462 ] ] ]
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for rmsprop.""" import tensorflow.compat.v2 as tf import copy import itertools import math from absl.testing import parameterized import numpy as np from tensorflow.python.framework import test_util from keras import combinations from keras import testing_utils from keras.optimizer_v2 import learning_rate_schedule from keras.optimizer_v2 import rmsprop _DATA_TYPES = [ tf.half, tf.float32, tf.float64, tf.complex64, tf.complex128 ] _TEST_PARAM_VALUES = [ # learning_rate, rho, momentum, epsilon, centered [0.05, 0.9, 0.0, 1e-3, True], [0.05, 0.9, 0.0, 1e-3, False], [0.1, 0.9, 0.0, 1e-3, True], [0.01, 0.9, 0.0, 1e-5, True], [0.01, 0.9, 0.9, 1e-5, True], ] _TESTPARAMS = [ [data_type] + values for data_type, values in itertools.product(_DATA_TYPES, _TEST_PARAM_VALUES) ] class RMSpropOptimizerTest(tf.test.TestCase, parameterized.TestCase): def _rmsprop_update_numpy(self, var, g, mg, rms, mom, lr, rho, momentum, epsilon, centered): rms_t = rms * rho + (1 - rho) * g * g if centered: mg_t = mg * rho + (1 - rho) * g denom_t = rms_t - mg_t * mg_t else: mg_t = mg denom_t = rms_t if momentum > 0.: mom_t = momentum * mom + lr * g / (np.sqrt(denom_t + epsilon)) var_t = var - mom_t else: mom_t = mom var_t = var - lr * g / (np.sqrt(denom_t) + epsilon) return var_t, mg_t, rms_t, mom_t def _sparse_rmsprop_update_numpy(self, var, gindexs, gvalues, mg, rms, mom, lr, rho, momentum, epsilon, centered): mg_t = copy.deepcopy(mg) rms_t = copy.deepcopy(rms) mom_t = copy.deepcopy(mom) var_t = copy.deepcopy(var) for i in range(len(gindexs)): gindex = gindexs[i] gvalue = gvalues[i] rms_t[gindex] = rms[gindex] * rho + (1 - rho) * gvalue * gvalue if centered: mg_t[gindex] = mg_t[gindex] * rho + (1 - rho) * gvalue denom_t = rms_t[gindex] - mg_t[gindex] * mg_t[gindex] else: denom_t = rms_t[gindex] if momentum > 0.: mom_t[gindex] = momentum * mom[gindex] + lr * gvalue / np.sqrt(denom_t + epsilon) var_t[gindex] = var[gindex] - mom_t[gindex] else: mom_t[gindex] = mom[gindex] var_t[gindex] = var[gindex] - lr * gvalue / (np.sqrt(denom_t) + epsilon) return var_t, mg_t, rms_t, mom_t def testDense(self): # TODO(tanzheny, omalleyt): Fix test in eager mode. for (dtype, learning_rate, rho, momentum, epsilon, centered) in _TESTPARAMS: with tf.compat.v1.get_default_graph().as_default(), testing_utils.use_gpu(): # Initialize variables for numpy implementation. 
var0_np = np.array([1.0, 2.0], dtype=dtype.as_numpy_dtype) grads0_np = np.array([0.1, 0.2], dtype=dtype.as_numpy_dtype) var1_np = np.array([3.0, 4.0], dtype=dtype.as_numpy_dtype) grads1_np = np.array([0.01, 0.2], dtype=dtype.as_numpy_dtype) var0 = tf.Variable(var0_np, dtype=dtype) var1 = tf.Variable(var1_np, dtype=dtype) grads0 = tf.constant(grads0_np, dtype=dtype) grads1 = tf.constant(grads1_np, dtype=dtype) opt = rmsprop.RMSprop( learning_rate=learning_rate, rho=rho, momentum=momentum, epsilon=epsilon, centered=centered) update = opt.apply_gradients(zip([grads0, grads1], [var0, var1])) self.evaluate(tf.compat.v1.global_variables_initializer()) if centered: mg0 = opt.get_slot(var0, "mg") mg1 = opt.get_slot(var1, "mg") else: mg0 = None mg1 = None if momentum > 0.: mom0 = opt.get_slot(var0, "momentum") mom1 = opt.get_slot(var1, "momentum") else: mom0 = None mom1 = None rms0 = opt.get_slot(var0, "rms") self.assertIsNotNone(rms0) rms1 = opt.get_slot(var1, "rms") self.assertIsNotNone(rms1) mg0_np = np.array([0.0, 0.0], dtype=dtype.as_numpy_dtype) mg1_np = np.array([0.0, 0.0], dtype=dtype.as_numpy_dtype) rms0_np = np.array([0.0, 0.0], dtype=dtype.as_numpy_dtype) rms1_np = np.array([0.0, 0.0], dtype=dtype.as_numpy_dtype) mom0_np = np.array([0.0, 0.0], dtype=dtype.as_numpy_dtype) mom1_np = np.array([0.0, 0.0], dtype=dtype.as_numpy_dtype) # Fetch params to validate initial values self.assertAllClose([1.0, 2.0], self.evaluate(var0)) self.assertAllClose([3.0, 4.0], self.evaluate(var1)) # Run 3 steps of RMSprop for _ in range(1, 4): self.evaluate(update) var0_np, mg0_np, rms0_np, mom0_np = self._rmsprop_update_numpy( var0_np, grads0_np, mg0_np, rms0_np, mom0_np, learning_rate, rho, momentum, epsilon, centered) var1_np, mg1_np, rms1_np, mom1_np = self._rmsprop_update_numpy( var1_np, grads1_np, mg1_np, rms1_np, mom1_np, learning_rate, rho, momentum, epsilon, centered) # Validate updated params if centered: self.assertAllCloseAccordingToType(mg0_np, self.evaluate(mg0)) self.assertAllCloseAccordingToType(mg1_np, self.evaluate(mg1)) if momentum > 0.: self.assertAllCloseAccordingToType(mom0_np, self.evaluate(mom0)) self.assertAllCloseAccordingToType(mom1_np, self.evaluate(mom1)) self.assertAllCloseAccordingToType(rms0_np, self.evaluate(rms0)) self.assertAllCloseAccordingToType(rms1_np, self.evaluate(rms1)) self.assertAllCloseAccordingToType(var0_np, self.evaluate(var0)) self.assertAllCloseAccordingToType(var1_np, self.evaluate(var1)) def testDenseWithLearningRateDecay(self): # TODO(tanzheny, omalleyt): Fix test in eager mode. 
with tf.Graph().as_default(): var0_np = np.array([1.0, 2.0]) grads0_np = np.array([0.1, 0.2]) var1_np = np.array([3.0, 4.0]) grads1_np = np.array([0.01, 0.2]) var0 = tf.Variable(var0_np) var1 = tf.Variable(var1_np) grads0 = tf.constant(grads0_np) grads1 = tf.constant(grads1_np) learning_rate = 0.01 rho = 0.9 momentum = 0.0 epsilon = 1e-7 centered = False decay = 0.5 opt = rmsprop.RMSprop( learning_rate=learning_rate, rho=rho, momentum=momentum, epsilon=epsilon, centered=centered, decay=decay) update = opt.apply_gradients(zip([grads0, grads1], [var0, var1])) self.evaluate(tf.compat.v1.global_variables_initializer()) rms0 = opt.get_slot(var0, "rms") self.assertIsNotNone(rms0) rms1 = opt.get_slot(var1, "rms") self.assertIsNotNone(rms1) if momentum > 0.: mom0 = opt.get_slot(var0, "momentum") mom1 = opt.get_slot(var1, "momentum") else: mom0 = None mom1 = None mg0_np = np.array([0.0, 0.0]) mg1_np = np.array([0.0, 0.0]) rms0_np = np.array([0.0, 0.0]) rms1_np = np.array([0.0, 0.0]) mom0_np = np.array([0.0, 0.0]) mom1_np = np.array([0.0, 0.0]) # Fetch params to validate initial values self.assertAllClose([1.0, 2.0], self.evaluate(var0)) self.assertAllClose([3.0, 4.0], self.evaluate(var1)) # Run 4 steps of RMSprop for t in range(2): self.evaluate(update) lr = learning_rate / (1 + decay * t) var0_np, mg0_np, rms0_np, mom0_np = self._rmsprop_update_numpy( var0_np, grads0_np, mg0_np, rms0_np, mom0_np, lr, rho, momentum, epsilon, centered) var1_np, mg1_np, rms1_np, mom1_np = self._rmsprop_update_numpy( var1_np, grads1_np, mg1_np, rms1_np, mom1_np, lr, rho, momentum, epsilon, centered) # Validate updated params self.assertAllCloseAccordingToType(rms0_np, self.evaluate(rms0)) self.assertAllCloseAccordingToType(rms1_np, self.evaluate(rms1)) if momentum > 0.: self.assertAllCloseAccordingToType(mom0_np, self.evaluate(mom0)) self.assertAllCloseAccordingToType(mom1_np, self.evaluate(mom1)) self.assertAllCloseAccordingToType(var0_np, self.evaluate(var0)) self.assertAllCloseAccordingToType(var1_np, self.evaluate(var1)) def testDenseWithLearningRateInverseTimeDecay(self): # TODO(tanzheny, omalleyt): Fix test in eager mode. 
with tf.Graph().as_default(): var0_np = np.array([1.0, 2.0]) grads0_np = np.array([0.1, 0.2]) var1_np = np.array([3.0, 4.0]) grads1_np = np.array([0.01, 0.2]) var0 = tf.Variable(var0_np) var1 = tf.Variable(var1_np) grads0 = tf.constant(grads0_np) grads1 = tf.constant(grads1_np) learning_rate = 0.01 rho = 0.9 momentum = 0.0 epsilon = 1e-7 centered = False decay = 0.5 lr_schedule = learning_rate_schedule.InverseTimeDecay( learning_rate, decay_steps=1.0, decay_rate=decay) opt = rmsprop.RMSprop( learning_rate=lr_schedule, rho=rho, momentum=momentum, epsilon=epsilon, centered=centered) update = opt.apply_gradients(zip([grads0, grads1], [var0, var1])) self.evaluate(tf.compat.v1.global_variables_initializer()) rms0 = opt.get_slot(var0, "rms") self.assertIsNotNone(rms0) rms1 = opt.get_slot(var1, "rms") self.assertIsNotNone(rms1) if momentum > 0.: mom0 = opt.get_slot(var0, "momentum") mom1 = opt.get_slot(var1, "momentum") else: mom0 = None mom1 = None mg0_np = np.array([0.0, 0.0]) mg1_np = np.array([0.0, 0.0]) rms0_np = np.array([0.0, 0.0]) rms1_np = np.array([0.0, 0.0]) mom0_np = np.array([0.0, 0.0]) mom1_np = np.array([0.0, 0.0]) # Fetch params to validate initial values self.assertAllClose([1.0, 2.0], self.evaluate(var0)) self.assertAllClose([3.0, 4.0], self.evaluate(var1)) # Run 4 steps of RMSprop for t in range(2): self.evaluate(update) lr = learning_rate / (1 + decay * t) var0_np, mg0_np, rms0_np, mom0_np = self._rmsprop_update_numpy( var0_np, grads0_np, mg0_np, rms0_np, mom0_np, lr, rho, momentum, epsilon, centered) var1_np, mg1_np, rms1_np, mom1_np = self._rmsprop_update_numpy( var1_np, grads1_np, mg1_np, rms1_np, mom1_np, lr, rho, momentum, epsilon, centered) # Validate updated params self.assertAllCloseAccordingToType(rms0_np, self.evaluate(rms0)) self.assertAllCloseAccordingToType(rms1_np, self.evaluate(rms1)) if momentum > 0.: self.assertAllCloseAccordingToType(mom0_np, self.evaluate(mom0)) self.assertAllCloseAccordingToType(mom1_np, self.evaluate(mom1)) self.assertAllCloseAccordingToType(var0_np, self.evaluate(var0)) self.assertAllCloseAccordingToType(var1_np, self.evaluate(var1)) def testMinimizeSparseResourceVariable(self): # TODO(tanzheny, omalleyt): Fix test in eager mode. with tf.Graph().as_default(): for dtype in _DATA_TYPES: var0 = tf.Variable([[1.0, 2.0]], dtype=dtype) x = tf.constant([[4.0], [5.0]], dtype=dtype) def loss(): pred = tf.matmul(tf.compat.v1.nn.embedding_lookup([var0], [0]), x) # pylint: disable=cell-var-from-loop return pred * pred sgd_op = rmsprop.RMSprop( learning_rate=1.0, rho=0.0, momentum=0.0, epsilon=0.0, centered=False).minimize( loss, var_list=[var0]) self.evaluate(tf.compat.v1.global_variables_initializer()) # Fetch params to validate initial values self.assertAllCloseAccordingToType([[1.0, 2.0]], self.evaluate(var0)) # Run 1 step of sgd self.evaluate(sgd_op) # Validate updated params self.assertAllCloseAccordingToType([[0., 1.]], self.evaluate(var0), atol=0.01) def testMinimizeSparseResourceVariableCentered(self): # TODO(tanzheny, omalleyt): Fix test in eager mode. 
with tf.Graph().as_default(): for dtype in _DATA_TYPES: var0 = tf.Variable([[1.0, 2.0]], dtype=dtype) x = tf.constant([[4.0], [5.0]], dtype=dtype) def loss(): pred = tf.matmul(tf.compat.v1.nn.embedding_lookup([var0], [0]), x) # pylint: disable=cell-var-from-loop return pred * pred # loss = lambda: pred * pred # pylint: disable=cell-var-from-loop sgd_op = rmsprop.RMSprop( learning_rate=1.0, rho=0.0, momentum=0.0, epsilon=1.0, centered=True).minimize( loss, var_list=[var0]) self.evaluate(tf.compat.v1.global_variables_initializer()) # Fetch params to validate initial values self.assertAllCloseAccordingToType([[1.0, 2.0]], self.evaluate(var0)) # Run 1 step of sgd self.evaluate(sgd_op) # Validate updated params self.assertAllCloseAccordingToType([[-111, -138]], self.evaluate(var0), atol=0.01) def testSparse(self): # TODO(tanzheny, omalleyt): Fix test in eager mode. for (dtype, learning_rate, rho, momentum, epsilon, centered) in _TESTPARAMS: with tf.compat.v1.get_default_graph().as_default(), testing_utils.use_gpu(): # Initialize variables for numpy implementation. var0_np = np.array([1.0, 2.0], dtype=dtype.as_numpy_dtype) grads0_np = np.array([0.1], dtype=dtype.as_numpy_dtype) var1_np = np.array([3.0, 4.0], dtype=dtype.as_numpy_dtype) grads1_np = np.array([0.01], dtype=dtype.as_numpy_dtype) var0 = tf.Variable(var0_np) var1 = tf.Variable(var1_np) grads0_np_indices = np.array([0], dtype=np.int32) grads0 = tf.IndexedSlices( tf.constant(grads0_np), tf.constant(grads0_np_indices), tf.constant([1])) grads1_np_indices = np.array([1], dtype=np.int32) grads1 = tf.IndexedSlices( tf.constant(grads1_np), tf.constant(grads1_np_indices), tf.constant([1])) opt = rmsprop.RMSprop( learning_rate=learning_rate, rho=rho, momentum=momentum, epsilon=epsilon, centered=centered) update = opt.apply_gradients(zip([grads0, grads1], [var0, var1])) self.evaluate(tf.compat.v1.global_variables_initializer()) if centered: mg0 = opt.get_slot(var0, "mg") self.assertEqual(mg0 is not None, centered) mg1 = opt.get_slot(var1, "mg") self.assertEqual(mg1 is not None, centered) else: mg0 = None mg1 = None rms0 = opt.get_slot(var0, "rms") self.assertIsNotNone(rms0) rms1 = opt.get_slot(var1, "rms") self.assertIsNotNone(rms1) if momentum > 0.: mom0 = opt.get_slot(var0, "momentum") mom1 = opt.get_slot(var1, "momentum") else: mom0 = None mom1 = None mg0_np = np.array([0.0, 0.0], dtype=dtype.as_numpy_dtype) mg1_np = np.array([0.0, 0.0], dtype=dtype.as_numpy_dtype) rms0_np = np.array([0.0, 0.0], dtype=dtype.as_numpy_dtype) rms1_np = np.array([0.0, 0.0], dtype=dtype.as_numpy_dtype) mom0_np = np.array([0.0, 0.0], dtype=dtype.as_numpy_dtype) mom1_np = np.array([0.0, 0.0], dtype=dtype.as_numpy_dtype) # Fetch params to validate initial values self.assertAllClose([1.0, 2.0], self.evaluate(var0)) self.assertAllClose([3.0, 4.0], self.evaluate(var1)) # Run 3 steps of RMSprop for _ in range(1, 4): self.evaluate(update) var0_np, mg0_np, rms0_np, mom0_np = self._sparse_rmsprop_update_numpy( var0_np, grads0_np_indices, grads0_np, mg0_np, rms0_np, mom0_np, learning_rate, rho, momentum, epsilon, centered) var1_np, mg1_np, rms1_np, mom1_np = self._sparse_rmsprop_update_numpy( var1_np, grads1_np_indices, grads1_np, mg1_np, rms1_np, mom1_np, learning_rate, rho, momentum, epsilon, centered) # Validate updated params if centered: self.assertAllCloseAccordingToType(mg0_np, self.evaluate(mg0)) self.assertAllCloseAccordingToType(mg1_np, self.evaluate(mg1)) self.assertAllCloseAccordingToType(rms0_np, self.evaluate(rms0)) self.assertAllCloseAccordingToType(rms1_np, 
self.evaluate(rms1)) if momentum > 0.: self.assertAllCloseAccordingToType(mom0_np, self.evaluate(mom0)) self.assertAllCloseAccordingToType(mom1_np, self.evaluate(mom1)) self.assertAllCloseAccordingToType(var0_np, self.evaluate(var0)) self.assertAllCloseAccordingToType(var1_np, self.evaluate(var1)) @combinations.generate(combinations.combine(mode=["eager"])) def testCallableParams(self): for dtype in _DATA_TYPES: var0 = tf.Variable([1.0, 2.0], dtype=dtype) var1 = tf.Variable([3.0, 4.0], dtype=dtype) grads0 = tf.constant([0.1, 0.1], dtype=dtype) grads1 = tf.constant([0.01, 0.01], dtype=dtype) learning_rate = lambda: 2.0 rho = lambda: 0.9 momentum = lambda: 0.0 epsilon = 1.0 opt = rmsprop.RMSprop(learning_rate, rho, momentum, epsilon) # Fetch params to validate initial values self.assertAllClose([1.0, 2.0], self.evaluate(var0)) self.assertAllClose([3.0, 4.0], self.evaluate(var1)) # Step 1: the rms accumulators where 1. So we should see a normal # update: v -= grad * learning_rate opt.apply_gradients(zip([grads0, grads1], [var0, var1])) # Check the parameters. self.assertAllCloseAccordingToType( np.array([ 1.0 - (0.1 * 2.0 / math.sqrt(0.001 + 1.0)), 2.0 - (0.1 * 2.0 / math.sqrt(0.001 + 1.0)) ]), self.evaluate(var0)) self.assertAllCloseAccordingToType( np.array([ 3.0 - (0.01 * 2.0 / math.sqrt(0.00001 + 1.0)), 4.0 - (0.01 * 2.0 / math.sqrt(0.00001 + 1.0)) ]), self.evaluate(var1)) # Step 2: the root mean square accumulators contain the previous update. opt.apply_gradients(zip([grads0, grads1], [var0, var1])) # Check the parameters. self.assertAllCloseAccordingToType( np.array([ 1.0 - (0.1 * 2.0 / math.sqrt(0.001 + 1.0)) - (0.1 * 2.0 / math.sqrt(0.001 * 0.9 + 0.001 + 1.0)), 2.0 - (0.1 * 2.0 / math.sqrt(0.001 + 1.0)) - (0.1 * 2.0 / math.sqrt(0.001 * 0.9 + 0.001 + 1.0)) ]), self.evaluate(var0)) self.assertAllCloseAccordingToType( np.array([ 3.0 - (0.01 * 2.0 / math.sqrt(0.00001 + 1.0)) - (0.01 * 2.0 / math.sqrt(0.00001 * 0.9 + 1e-5 + 1.0)), 4.0 - (0.01 * 2.0 / math.sqrt(0.00001 + 1.0)) - (0.01 * 2.0 / math.sqrt(0.00001 * 0.9 + 1e-5 + 1.0)) ]), self.evaluate(var1)) def testConstructRMSpropWithLR(self): opt = rmsprop.RMSprop(lr=1.0) opt_2 = rmsprop.RMSprop(learning_rate=0.1, lr=1.0) opt_3 = rmsprop.RMSprop(learning_rate=0.1) self.assertIsInstance(opt.lr, tf.Variable) self.assertIsInstance(opt_2.lr, tf.Variable) self.assertIsInstance(opt_3.lr, tf.Variable) self.evaluate(tf.compat.v1.global_variables_initializer()) self.assertAllClose(self.evaluate(opt.lr), (1.0)) self.assertAllClose(self.evaluate(opt_2.lr), (1.0)) self.assertAllClose(self.evaluate(opt_3.lr), (0.1)) @combinations.generate(combinations.combine(mode=["eager"])) def testSlotsUniqueEager(self): v1 = tf.Variable(1.) v2 = tf.Variable(1.) opt = rmsprop.RMSprop(1., momentum=0., centered=False) opt.minimize(lambda: v1 + v2, var_list=[v1, v2]) # There should be iteration, and one unique slot variable for v1 and v2. self.assertLen(set({id(v) for v in opt.variables()}), 3) self.assertEqual( self.evaluate(opt.variables()[0]), self.evaluate(opt.iterations)) opt = rmsprop.RMSprop(learning_rate=1., momentum=0.2, centered=False) opt.minimize(lambda: v1 + v2, var_list=[v1, v2]) # There should be iteration, and two unique slot variables for v1 and v2. 
self.assertLen(set({id(v) for v in opt.variables()}), 5) self.assertEqual( self.evaluate(opt.variables()[0]), self.evaluate(opt.iterations)) opt = rmsprop.RMSprop(learning_rate=1., momentum=0.2, centered=True) opt.minimize(lambda: v1 + v2, var_list=[v1, v2]) # There should be iteration, and three unique slot variables for v1 and v2 self.assertLen(set({id(v) for v in opt.variables()}), 7) self.assertEqual( self.evaluate(opt.variables()[0]), self.evaluate(opt.iterations)) @combinations.generate(combinations.combine(mode=["eager"])) def testMomentumProperValue(self): with self.assertRaisesRegex(ValueError, r"`momentum` must be between \[0, 1\]. " r"Received: momentum=2.5 \(of type <class " r"\'float\'>\)."): rmsprop.RMSprop(1., momentum=2.5, centered=False) @combinations.generate(combinations.combine(mode=["graph", "eager"])) class SlotColocationTest(tf.test.TestCase, parameterized.TestCase): @parameterized.parameters([True, False]) @test_util.run_gpu_only def testRunMinimizeOnGPUForCPUVariables(self, use_resource): with tf.device("/device:CPU:0"): if use_resource: var0 = tf.Variable([1.0, 2.0], dtype=tf.float32) var1 = tf.Variable([3.0, 4.0], dtype=tf.float32) else: var0 = tf.Variable([1.0, 2.0], dtype=tf.float32) var1 = tf.Variable([3.0, 4.0], dtype=tf.float32) def loss(): return 5 * var0 + 3 * var1 opt = rmsprop.RMSprop( learning_rate=1.0, decay=0.9, momentum=0.5, epsilon=1.0) # Fetch params to validate initial values self.evaluate(tf.compat.v1.global_variables_initializer()) self.assertAllClose([1.0, 2.0], self.evaluate(var0)) self.assertAllClose([3.0, 4.0], self.evaluate(var1)) # Run 1 step through optimizer on GPU. # Slot variables are created the first time optimizer is used on some # variable. This tests that slot variables will be colocated with the base # variable. with tf.device("/device:GPU:0"): # Note that for eager execution, minimize expects a function instead of a # Tensor. opt_op = opt.minimize(loss, [var0, var1]) self.evaluate(tf.compat.v1.global_variables_initializer()) self.evaluate(opt_op) # Validate updated params, All variables should have decreased. self.assertTrue(all(v < 0.0 for v in self.evaluate(var0)), msg="updated variables: %s" % self.evaluate(var0)) self.assertTrue(all(v < 2.0 for v in self.evaluate(var1)), msg="updated variables: %s" % self.evaluate(var1)) if __name__ == "__main__": tf.test.main()
[ [ [ 722, 748 ], [ 1077, 1079 ], [ 1086, 1088 ], [ 1098, 1100 ], [ 1110, 1112 ], [ 1128, 1130 ], [ 1547, 1549 ], [ 22262, 22264 ], [ 23959, 23961 ], [ 3329, 3331 ], [ 3747, 3749 ], [ 3796, 3798 ], [ 3847, 3849 ], [ 3900, 3902 ], [ 4217, 4219 ], [ 6623, 6625 ], [ 6815, 6817 ], [ 6849, 6851 ], [ 6885, 6887 ], [ 6923, 6925 ], [ 7360, 7362 ], [ 9223, 9225 ], [ 9415, 9417 ], [ 9449, 9451 ], [ 9485, 9487 ], [ 9523, 9525 ], [ 10056, 10058 ], [ 11912, 11914 ], [ 11984, 11986 ], [ 12035, 12037 ], [ 12442, 12444 ], [ 13002, 13004 ], [ 13074, 13076 ], [ 13125, 13127 ], [ 13606, 13608 ], [ 14221, 14223 ], [ 14629, 14631 ], [ 14665, 14667 ], [ 14761, 14763 ], [ 14791, 14793 ], [ 14827, 14829 ], [ 14859, 14861 ], [ 14952, 14954 ], [ 14982, 14984 ], [ 15018, 15020 ], [ 15050, 15052 ], [ 15348, 15350 ], [ 17941, 17943 ], [ 17991, 17993 ], [ 18043, 18045 ], [ 18095, 18097 ], [ 20202, 20204 ], [ 20251, 20253 ], [ 20300, 20302 ], [ 20332, 20334 ], [ 20650, 20652 ], [ 20675, 20677 ], [ 22447, 22449 ], [ 22513, 22515 ], [ 22543, 22545 ], [ 22570, 22572 ], [ 22600, 22602 ], [ 22639, 22641 ], [ 22669, 22671 ], [ 22696, 22698 ], [ 22726, 22728 ], [ 22946, 22948 ], [ 23327, 23329 ], [ 23519, 23521 ], [ 12114, 12116 ], [ 12124, 12126 ], [ 13204, 13206 ], [ 13214, 13216 ] ], [ [ 757, 761 ], [ 2299, 2303 ], [ 2329, 2333 ], [ 2360, 2364 ], [ 2391, 2395 ] ], [ [ 769, 778 ], [ 1465, 1474 ] ], [ [ 786, 790 ], [ 18779, 18783 ], [ 18837, 18841 ], [ 18993, 18997 ], [ 19054, 19058 ], [ 19383, 19387 ], [ 19436, 19440 ], [ 19508, 19512 ], [ 19561, 19565 ], [ 19731, 19735 ], [ 19787, 19791 ], [ 19861, 19865 ], [ 19917, 19921 ] ], [ [ 817, 830 ], [ 1565, 1578 ], [ 22280, 22293 ], [ 22309, 22322 ] ], [ [ 838, 849 ], [ 1958, 1960 ], [ 2070, 2072 ], [ 2841, 2843 ], [ 3092, 3094 ], [ 3476, 3478 ], [ 3545, 3547 ], [ 3612, 3614 ], [ 3681, 3683 ], [ 4774, 4776 ], [ 4840, 4842 ], [ 4907, 4909 ], [ 4974, 4976 ], [ 5041, 5043 ], [ 5108, 5110 ], [ 6664, 6666 ], [ 6703, 6705 ], [ 6740, 6742 ], [ 6779, 6781 ], [ 7734, 7736 ], [ 7770, 7772 ], [ 7807, 7809 ], [ 7844, 7846 ], [ 7881, 7883 ], [ 7918, 7920 ], [ 9264, 9266 ], [ 9303, 9305 ], [ 9340, 9342 ], [ 9379, 9381 ], [ 10430, 10432 ], [ 10466, 10468 ], [ 10503, 10505 ], [ 10540, 10542 ], [ 10577, 10579 ], [ 10614, 10616 ], [ 14368, 14370 ], [ 14437, 14439 ], [ 14499, 14501 ], [ 14568, 14570 ], [ 14714, 14716 ], [ 14734, 14736 ], [ 14905, 14907 ], [ 14925, 14927 ], [ 16011, 16013 ], [ 16077, 16079 ], [ 16144, 16146 ], [ 16211, 16213 ], [ 16278, 16280 ], [ 16345, 16347 ], [ 18735, 18737 ], [ 18948, 18950 ], [ 19339, 19341 ], [ 19686, 19688 ] ], [ [ 890, 899 ], [ 22352, 22361 ] ], [ [ 918, 930 ], [ 17806, 17818 ], [ 17828, 17840 ], [ 20547, 20559 ], [ 20569, 20581 ], [ 21768, 21780 ], [ 21790, 21802 ], [ 22168, 22180 ], [ 22190, 22202 ] ], [ [ 949, 962 ], [ 3376, 3389 ], [ 14268, 14281 ] ], [ [ 994, 1016 ], [ 9692, 9714 ] ], [ [ 1048, 1055 ], [ 3950, 3957 ], [ 7084, 7091 ], [ 9805, 9812 ], [ 12259, 12266 ], [ 13424, 13431 ], [ 15082, 15089 ], [ 18254, 18261 ], [ 20042, 20049 ], [ 20078, 20085 ], [ 20133, 20140 ], [ 20702, 20709 ], [ 21049, 21056 ], [ 21412, 21419 ], [ 22115, 22122 ], [ 22799, 22806 ] ], [ [ 1057, 1068 ], [ 1483, 1494 ], [ 11956, 11967 ], [ 13046, 13057 ], [ 17915, 17926 ] ], [ [ 1145, 1163 ], [ 1496, 1514 ] ], [ [ 1395, 1406 ], [ 3305, 3316 ], [ 14197, 14208 ] ], [ [ 1526, 1546 ] ], [ [ 22243, 22261 ] ] ]
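# The RMSprop tests above compare TensorFlow's optimizer against
# self._rmsprop_update_numpy, a reference helper defined earlier in the same test
# file and not shown in this excerpt. The function below is only a hedged sketch of
# the standard dense RMSprop step such a helper computes; the exact epsilon
# placement in the momentum-free branch and the branch handling are assumptions,
# not the real helper.
import numpy as np

def rmsprop_update_numpy_sketch(var, g, mg, rms, mom, lr, rho, momentum, epsilon,
                                centered):
    """One dense RMSprop step on NumPy arrays; returns (var, mg, rms, mom)."""
    rms_t = rho * rms + (1. - rho) * g * g           # running average of g^2
    if centered:
        mg_t = rho * mg + (1. - rho) * g             # running average of g
        denom = rms_t - mg_t * mg_t                  # centered (variance) estimate
    else:
        mg_t = mg
        denom = rms_t
    if momentum > 0.:
        mom_t = momentum * mom + lr * g / np.sqrt(denom + epsilon)
        var_t = var - mom_t
    else:
        mom_t = mom
        var_t = var - lr * g / (np.sqrt(denom) + epsilon)
    return var_t, mg_t, rms_t, mom_t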
""" ArcBall.py -- Math utilities, vector, matrix types and ArcBall quaternion rotation class >>> unit_test_ArcBall_module () unit testing ArcBall Quat for first drag [ 0.08438914 -0.08534209 -0.06240178 0.99080837] First transform [[ 0.97764552 -0.1380603 0.15858325 0. ] [ 0.10925253 0.97796899 0.17787792 0. ] [-0.17964739 -0.15657592 0.97119039 0. ] [ 0. 0. 0. 1. ]] LastRot at end of first drag [[ 0.97764552 -0.1380603 0.15858325] [ 0.10925253 0.97796899 0.17787792] [-0.17964739 -0.15657592 0.97119039]] Quat for second drag [ 0.00710336 0.31832787 0.02679029 0.94757545] Second transform [[ 0.88022292 -0.08322023 -0.46720669 0. ] [ 0.14910145 0.98314685 0.10578787 0. ] [ 0.45052907 -0.16277808 0.8777966 0. ] [ 0. 0. 0. 1.00000001]] """ try: import numpy as Numeric def sumDot( a,b ): return Numeric.dot (a, b) except ImportError: try: import Numeric def sumDot( a,b ): return sum (Numeric.dot (a, b) ) except ImportError: print ("This demo requires the numpy or Numeric extension, sorry") import sys sys.exit() import copy from math import sqrt # //assuming IEEE-754(GLfloat), which i believe has max precision of 7 bits Epsilon = 1.0e-5 class ArcBallT: def __init__ (self, NewWidth, NewHeight): self.m_StVec = Vector3fT () self.m_EnVec = Vector3fT () self.m_AdjustWidth = 1.0 self.m_AdjustHeight = 1.0 self.setBounds (NewWidth, NewHeight) def __str__ (self): str_rep = "" str_rep += "StVec = " + str (self.m_StVec) str_rep += "\nEnVec = " + str (self.m_EnVec) str_rep += "\n scale coords %f %f" % (self.m_AdjustWidth, self.m_AdjustHeight) return str_rep def setBounds (self, NewWidth, NewHeight): # //Set new bounds assert (NewWidth > 1.0 and NewHeight > 1.0), "Invalid width or height for bounds." # //Set adjustment factor for width/height self.m_AdjustWidth = 1.0 / ((NewWidth - 1.0) * 0.5) self.m_AdjustHeight = 1.0 / ((NewHeight - 1.0) * 0.5) def _mapToSphere (self, NewPt): # Given a new window coordinate, will modify NewVec in place X = 0 Y = 1 Z = 2 NewVec = Vector3fT () # //Copy paramter into temp point TempPt = copy.copy (NewPt) #print ('NewPt', NewPt, TempPt) # //Adjust point coords and scale down to range of [-1 ... 1] TempPt [X] = (NewPt [X] * self.m_AdjustWidth) - 1.0 TempPt [Y] = 1.0 - (NewPt [Y] * self.m_AdjustHeight) # //Compute the square of the length of the vector to the point from the center length = sumDot( TempPt, TempPt) # //If the point is mapped outside of the sphere... 
(length > radius squared) if (length > 1.0): # //Compute a normalizing factor (radius / sqrt(length)) norm = 1.0 / sqrt (length); # //Return the "normalized" vector, a point on the sphere NewVec [X] = TempPt [X] * norm; NewVec [Y] = TempPt [Y] * norm; NewVec [Z] = 0.0; else: # //Else it's on the inside # //Return a vector to a point mapped inside the sphere sqrt(radius squared - length) NewVec [X] = TempPt [X] NewVec [Y] = TempPt [Y] NewVec [Z] = sqrt (1.0 - length) return NewVec def click (self, NewPt): # //Mouse down (Point2fT self.m_StVec = self._mapToSphere (NewPt) return def drag (self, NewPt): # //Mouse drag, calculate rotation (Point2fT Quat4fT) """ drag (Point2fT mouse_coord) -> new_quaternion_rotation_vec """ X = 0 Y = 1 Z = 2 W = 3 self.m_EnVec = self._mapToSphere (NewPt) # //Compute the vector perpendicular to the begin and end vectors # Perp = Vector3fT () Perp = Vector3fCross(self.m_StVec, self.m_EnVec); NewRot = Quat4fT () # //Compute the length of the perpendicular vector if (Vector3fLength(Perp) > Epsilon): # //if its non-zero # //We're ok, so return the perpendicular vector as the transform after all NewRot[X] = Perp[X]; NewRot[Y] = Perp[Y]; NewRot[Z] = Perp[Z]; # //In the quaternion values, w is cosine (theta / 2), where theta is rotation angle NewRot[W] = Vector3fDot(self.m_StVec, self.m_EnVec); else: # //if its zero # //The begin and end vectors coincide, so return a quaternion of zero matrix (no rotation) NewRot.X = NewRot.Y = NewRot.Z = NewRot.W = 0.0; return NewRot # ##################### Math utility ########################################## def Matrix4fT (): return Numeric.identity (4, 'f') def Matrix3fT (): return Numeric.identity (3, 'f') def Quat4fT (): return Numeric.zeros (4, 'f') def Vector3fT (): return Numeric.zeros (3, 'f') def Point2fT (x = 0.0, y = 0.0): pt = Numeric.zeros (2, 'f') pt [0] = x pt [1] = y return pt def Vector3fDot(u, v): # Dot product of two 3f vectors dotprod = Numeric.dot (u,v) return dotprod def Vector3fCross(u, v): # Cross product of two 3f vectors X = 0 Y = 1 Z = 2 cross = Numeric.zeros (3, 'f') cross [X] = (u[Y] * v[Z]) - (u[Z] * v[Y]) cross [Y] = (u[Z] * v[X]) - (u[X] * v[Z]) cross [Z] = (u[X] * v[Y]) - (u[Y] * v[X]) return cross def Vector3fLength (u): mag_squared = sumDot(u,u) mag = sqrt (mag_squared) return mag def Matrix3fSetIdentity (): return Numeric.identity (3, 'f') def Matrix3fMulMatrix3f (matrix_a, matrix_b): return sumDot( matrix_a, matrix_b ) def Matrix4fSVD (NewObj): X = 0 Y = 1 Z = 2 s = sqrt ( ( (NewObj [X][X] * NewObj [X][X]) + (NewObj [X][Y] * NewObj [X][Y]) + (NewObj [X][Z] * NewObj [X][Z]) + (NewObj [Y][X] * NewObj [Y][X]) + (NewObj [Y][Y] * NewObj [Y][Y]) + (NewObj [Y][Z] * NewObj [Y][Z]) + (NewObj [Z][X] * NewObj [Z][X]) + (NewObj [Z][Y] * NewObj [Z][Y]) + (NewObj [Z][Z] * NewObj [Z][Z]) ) / 3.0 ) return s def Matrix4fSetRotationScaleFromMatrix3f(NewObj, three_by_three_matrix): # Modifies NewObj in-place by replacing its upper 3x3 portion from the # passed in 3x3 matrix. # NewObj = Matrix4fT () NewObj [0:3,0:3] = three_by_three_matrix return NewObj # /** # * Sets the rotational component (upper 3x3) of this matrix to the matrix # * values in the T precision Matrix3d argument; the other elements of # * this matrix are unchanged; a singular value decomposition is performed # * on this object's upper 3x3 matrix to factor out the scale, then this # * object's upper 3x3 matrix components are replaced by the passed rotation # * components, and then the scale is reapplied to the rotational # * components. 
# * @param three_by_three_matrix T precision 3x3 matrix # */ def Matrix4fSetRotationFromMatrix3f (NewObj, three_by_three_matrix): scale = Matrix4fSVD (NewObj) NewObj = Matrix4fSetRotationScaleFromMatrix3f(NewObj, three_by_three_matrix); scaled_NewObj = NewObj * scale # Matrix4fMulRotationScale(NewObj, scale); return scaled_NewObj def Matrix3fSetRotationFromQuat4f (q1): # Converts the H quaternion q1 into a new equivalent 3x3 rotation matrix. X = 0 Y = 1 Z = 2 W = 3 NewObj = Matrix3fT () n = sumDot(q1, q1) s = 0.0 if (n > 0.0): s = 2.0 / n xs = q1 [X] * s; ys = q1 [Y] * s; zs = q1 [Z] * s wx = q1 [W] * xs; wy = q1 [W] * ys; wz = q1 [W] * zs xx = q1 [X] * xs; xy = q1 [X] * ys; xz = q1 [X] * zs yy = q1 [Y] * ys; yz = q1 [Y] * zs; zz = q1 [Z] * zs # This math all comes about by way of algebra, complex math, and trig identities. # See Lengyel pages 88-92 NewObj [X][X] = 1.0 - (yy + zz); NewObj [Y][X] = xy - wz; NewObj [Z][X] = xz + wy; NewObj [X][Y] = xy + wz; NewObj [Y][Y] = 1.0 - (xx + zz); NewObj [Z][Y] = yz - wx; NewObj [X][Z] = xz - wy; NewObj [Y][Z] = yz + wx; NewObj [Z][Z] = 1.0 - (xx + yy) return NewObj def unit_test_ArcBall_module (): # Unit testing of the ArcBall calss and the real math behind it. # Simulates a click and drag followed by another click and drag. print ("unit testing ArcBall") Transform = Matrix4fT () LastRot = Matrix3fT () ThisRot = Matrix3fT () ArcBall = ArcBallT (640, 480) # print "The ArcBall with NO click" # print ArcBall # First click LastRot = copy.copy (ThisRot) mouse_pt = Point2fT (500,250) ArcBall.click (mouse_pt) # print "The ArcBall with first click" # print ArcBall # First drag mouse_pt = Point2fT (475, 275) ThisQuat = ArcBall.drag (mouse_pt) # print "The ArcBall after first drag" # print ArcBall # print # print print ("Quat for first drag") print (ThisQuat) ThisRot = Matrix3fSetRotationFromQuat4f (ThisQuat) # Linear Algebra matrix multiplication A = old, B = New : C = A * B ThisRot = Matrix3fMulMatrix3f (LastRot, ThisRot) Transform = Matrix4fSetRotationFromMatrix3f (Transform, ThisRot) print ("First transform") print (Transform) # Done with first drag # second click LastRot = copy.copy (ThisRot) print ("LastRot at end of first drag") print (LastRot) mouse_pt = Point2fT (350,260) ArcBall.click (mouse_pt) # second drag mouse_pt = Point2fT (450, 260) ThisQuat = ArcBall.drag (mouse_pt) # print "The ArcBall" # print ArcBall print ("Quat for second drag") print (ThisQuat) ThisRot = Matrix3fSetRotationFromQuat4f (ThisQuat) ThisRot = Matrix3fMulMatrix3f (LastRot, ThisRot) # print ThisRot Transform = Matrix4fSetRotationFromMatrix3f (Transform, ThisRot) print ("Second transform") print (Transform) # Done with second drag LastRot = copy.copy (ThisRot) def _test (): # This will run doctest's unit testing capability. # see http://www.python.org/doc/current/lib/module-doctest.html # # doctest introspects the ArcBall module for all docstrings # that look like interactive python sessions and invokes # the same commands then and there as unit tests to compare # the output generated. Very nice for unit testing and # documentation. import doctest, ArcBall return doctest.testmod (ArcBall) if __name__ == "__main__": # Invoke our function that runs python's doctest unit testing tool. _test () # unit_test ()
[ [ [ 896, 912 ], [ 942, 949 ], [ 4414, 4421 ], [ 4467, 4474 ], [ 4518, 4525 ], [ 4568, 4575 ], [ 4631, 4638 ], [ 4757, 4764 ], [ 4882, 4889 ], [ 5176, 5183 ] ], [ [ 918, 924 ], [ 2565, 2571 ], [ 5088, 5094 ], [ 5257, 5263 ], [ 6910, 6916 ] ], [ [ 997, 1004 ], [ 1041, 1048 ], [ 4414, 4421 ], [ 4467, 4474 ], [ 4518, 4525 ], [ 4568, 4575 ], [ 4631, 4638 ], [ 4757, 4764 ], [ 4882, 4889 ], [ 5176, 5183 ] ], [ [ 1011, 1017 ], [ 2565, 2571 ], [ 5088, 5094 ], [ 5257, 5263 ], [ 6910, 6916 ] ], [ [ 1161, 1164 ], [ 1167, 1170 ] ], [ [ 1185, 1189 ], [ 2247, 2251 ], [ 7973, 7977 ], [ 8649, 8653 ], [ 9224, 9228 ] ], [ [ 1207, 1211 ], [ 2769, 2773 ], [ 3134, 3138 ], [ 5107, 5111 ], [ 5342, 5346 ] ], [ [ 1289, 1296 ], [ 3756, 3763 ] ], [ [ 1314, 1322 ], [ 7873, 7881 ] ], [ [ 4392, 4401 ], [ 7800, 7809 ] ], [ [ 4445, 4454 ], [ 6892, 6901 ], [ 7824, 7833 ], [ 7848, 7857 ] ], [ [ 4498, 4505 ], [ 3663, 3670 ] ], [ [ 4546, 4555 ], [ 1384, 1393 ], [ 1414, 1423 ], [ 2187, 2196 ] ], [ [ 4596, 4604 ], [ 8005, 8013 ], [ 8133, 8141 ], [ 8738, 8746 ], [ 8810, 8818 ] ], [ [ 4694, 4705 ], [ 4044, 4055 ] ], [ [ 4796, 4809 ], [ 3608, 3621 ] ], [ [ 5053, 5067 ], [ 3733, 3747 ] ], [ [ 5144, 5163 ] ], [ [ 5207, 5226 ], [ 8445, 8464 ], [ 9019, 9038 ] ], [ [ 5294, 5305 ], [ 6535, 6546 ] ], [ [ 5687, 5723 ], [ 6567, 6603 ] ], [ [ 6461, 6492 ], [ 8497, 8528 ], [ 9088, 9119 ] ], [ [ 6741, 6770 ], [ 8324, 8353 ], [ 8967, 8996 ] ], [ [ 7594, 7618 ] ], [ [ 9249, 9254 ], [ 9790, 9795 ] ] ]
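# A minimal usage sketch for the ArcBall module above, mirroring what its own
# unit_test_ArcBall_module() does. The window size and mouse coordinates are
# made-up values, and the import assumes the file is saved as ArcBall.py.
from ArcBall import (ArcBallT, Matrix3fT, Matrix4fT, Point2fT,
                     Matrix3fSetRotationFromQuat4f, Matrix3fMulMatrix3f,
                     Matrix4fSetRotationFromMatrix3f)

ball = ArcBallT(640, 480)                         # bounds = window width/height
LastRot = Matrix3fT()                          # identity 3x3
Transform = Matrix4fT()                        # identity 4x4

ball.click(Point2fT(500, 250))                # mouse button pressed
quat = ball.drag(Point2fT(475, 275))         # mouse moved while button held
ThisRot = Matrix3fSetRotationFromQuat4f(quat)  # quaternion -> 3x3 rotation
ThisRot = Matrix3fMulMatrix3f(LastRot, ThisRot)          # compose with previous rotation
Transform = Matrix4fSetRotationFromMatrix3f(Transform, ThisRot)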
import os import boto3 import fsspec import pytest from moto import mock_s3 from datasets.filesystems import ( COMPRESSION_FILESYSTEMS, HfFileSystem, S3FileSystem, extract_path_from_uri, is_remote_filesystem, ) from .utils import require_lz4, require_zstandard @pytest.fixture(scope="function") def aws_credentials(): """Mocked AWS Credentials for moto.""" os.environ["AWS_ACCESS_KEY_ID"] = "fake_access_key" os.environ["AWS_SECRET_ACCESS_KEY"] = "fake_secret_key" os.environ["AWS_SECURITY_TOKEN"] = "fake_secrurity_token" os.environ["AWS_SESSION_TOKEN"] = "fake_session_token" @pytest.fixture(scope="function") def s3(aws_credentials): with mock_s3(): yield boto3.client("s3", region_name="us-east-1") def test_extract_path_from_uri(s3): mock_bucket = "moto-mock-s3-bucket" # We need to create the bucket since this is all in Moto's 'virtual' AWS account s3.create_bucket(Bucket=mock_bucket) dataset_path = f"s3://{mock_bucket}" dataset_path = extract_path_from_uri(dataset_path) assert dataset_path.startswith("s3://") is False dataset_path = "./local/path" new_dataset_path = extract_path_from_uri(dataset_path) assert dataset_path == new_dataset_path def test_is_remote_filesystem(): fs = S3FileSystem(key="fake_access_key", secret="fake_secret") is_remote = is_remote_filesystem(fs) assert is_remote is True fs = fsspec.filesystem("file") is_remote = is_remote_filesystem(fs) assert is_remote is False @require_zstandard @require_lz4 @pytest.mark.parametrize("compression_fs_class", COMPRESSION_FILESYSTEMS) def test_compression_filesystems(compression_fs_class, gz_file, bz2_file, lz4_file, zstd_file, xz_file, text_file): input_paths = {"gzip": gz_file, "xz": xz_file, "zstd": zstd_file, "bz2": bz2_file, "lz4": lz4_file} input_path = str(input_paths[compression_fs_class.protocol]) fs = fsspec.filesystem(compression_fs_class.protocol, fo=input_path) assert isinstance(fs, compression_fs_class) expected_filename = os.path.basename(input_path) expected_filename = expected_filename[: expected_filename.rindex(".")] assert fs.ls("/") == [expected_filename] with fs.open(expected_filename, "r", encoding="utf-8") as f, open(text_file, encoding="utf-8") as expected_file: assert f.read() == expected_file.read() def test_hf_filesystem(hf_token, hf_api, hf_private_dataset_repo_txt_data, text_file): repo_info = hf_api.dataset_info(hf_private_dataset_repo_txt_data, token=hf_token) hffs = HfFileSystem(repo_info=repo_info, token=hf_token) assert sorted(hffs.glob("*")) == [".gitattributes", "data.txt"] with open(text_file) as f: assert hffs.open("data.txt", "r").read() == f.read()
[ [ [ 7, 9 ], [ 390, 392 ], [ 446, 448 ], [ 506, 508 ], [ 568, 570 ], [ 2076, 2078 ] ], [ [ 18, 23 ], [ 718, 723 ] ], [ [ 31, 37 ], [ 1440, 1446 ], [ 1940, 1946 ] ], [ [ 45, 51 ], [ 287, 293 ], [ 626, 632 ], [ 1573, 1579 ] ], [ [ 69, 76 ], [ 693, 700 ] ], [ [ 117, 140 ], [ 1621, 1644 ] ], [ [ 146, 158 ], [ 2576, 2588 ] ], [ [ 164, 176 ], [ 1301, 1313 ] ], [ [ 182, 203 ], [ 1029, 1050 ], [ 1176, 1197 ] ], [ [ 209, 229 ], [ 1376, 1396 ], [ 1483, 1503 ] ], [ [ 253, 264 ], [ 1560, 1571 ] ], [ [ 266, 283 ], [ 1541, 1558 ] ], [ [ 324, 339 ] ], [ [ 663, 665 ] ], [ [ 768, 794 ] ], [ [ 1262, 1287 ] ], [ [ 1650, 1678 ] ], [ [ 2396, 2414 ] ] ]
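# A small, hedged illustration of the helpers exercised by the tests above. It only
# restates the properties the tests assert; the paths are made-up examples and no
# S3 access is needed.
import fsspec
from datasets.filesystems import extract_path_from_uri, is_remote_filesystem

remote = extract_path_from_uri("s3://some-bucket/some/dir")
assert not remote.startswith("s3://")              # scheme is stripped from remote URIs

local = extract_path_from_uri("./local/path")
assert local == "./local/path"                     # local paths are returned unchanged

assert is_remote_filesystem(fsspec.filesystem("file")) is False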
# Copyright 2014 Facebook, Inc. # You are hereby granted a non-exclusive, worldwide, royalty-free license to # use, copy, modify, and distribute this software in source code or binary # form for use in connection with the web services and APIs provided by # Facebook. # As with any software that integrates with the Facebook platform, your use # of this software is subject to the Facebook Developer Principles and # Policies [http://developers.facebook.com/policy/]. This copyright notice # shall be included in all copies or substantial portions of the software. # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL # THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING # FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER # DEALINGS IN THE SOFTWARE. from facebook_business.adobjects.abstractobject import AbstractObject from facebook_business.adobjects.abstractcrudobject import AbstractCrudObject from facebook_business.adobjects.objectparser import ObjectParser from facebook_business.api import FacebookRequest from facebook_business.typechecker import TypeChecker """ This class is auto-generated. For any issues or feature requests related to this class, please let us know on github and we'll fix in our codegen framework. We'll not be able to accept pull request for this class. """ class MessengerDestinationPageWelcomeMessage( AbstractCrudObject, ): def __init__(self, fbid=None, parent_id=None, api=None): self._isMessengerDestinationPageWelcomeMessage = True super(MessengerDestinationPageWelcomeMessage, self).__init__(fbid, parent_id, api) class Field(AbstractObject.Field): id = 'id' page_welcome_message_body = 'page_welcome_message_body' page_welcome_message_type = 'page_welcome_message_type' template_name = 'template_name' time_created = 'time_created' time_last_used = 'time_last_used' def api_get(self, fields=None, params=None, batch=None, success=None, failure=None, pending=False): from facebook_business.utils import api_utils if batch is None and (success is not None or failure is not None): api_utils.warning('`success` and `failure` callback only work for batch call.') param_types = { } enums = { } request = FacebookRequest( node_id=self['id'], method='GET', endpoint='/', api=self._api, param_checker=TypeChecker(param_types, enums), target_class=MessengerDestinationPageWelcomeMessage, api_type='NODE', response_parser=ObjectParser(reuse_object=self), ) request.add_params(params) request.add_fields(fields) if batch is not None: request.add_to_batch(batch, success=success, failure=failure) return request elif pending: return request else: self.assure_call() return request.execute() _field_types = { 'id': 'string', 'page_welcome_message_body': 'string', 'page_welcome_message_type': 'string', 'template_name': 'string', 'time_created': 'datetime', 'time_last_used': 'datetime', } @classmethod def _get_field_enum_info(cls): field_enum_info = {} return field_enum_info
[ [ [ 1099, 1113 ], [ 1892, 1906 ] ], [ [ 1173, 1191 ], [ 1637, 1655 ] ], [ [ 1245, 1257 ], [ 2894, 2906 ] ], [ [ 1292, 1307 ], [ 2585, 2600 ] ], [ [ 1350, 1361 ], [ 2739, 2750 ] ], [ [ 1593, 1631 ], [ 1798, 1836 ], [ 2797, 2835 ] ] ]
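# Hypothetical usage of the auto-generated node class above. The access token and
# node id are placeholders, the module path assumes the SDK's usual lowercase naming,
# and FacebookAdsApi.init() comes from the same SDK; api_get() is the method defined
# on the class itself.
from facebook_business.api import FacebookAdsApi
from facebook_business.adobjects.messengerdestinationpagewelcomemessage import (
    MessengerDestinationPageWelcomeMessage,
)

FacebookAdsApi.init(access_token="<ACCESS_TOKEN>")

welcome_message = MessengerDestinationPageWelcomeMessage(fbid="<WELCOME_MESSAGE_ID>")
welcome_message = welcome_message.api_get(fields=[
    MessengerDestinationPageWelcomeMessage.Field.page_welcome_message_body,
    MessengerDestinationPageWelcomeMessage.Field.time_created,
])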
from base_app.serializers import CustomUserSerializer
from rest_framework import serializers
from task_app.models import TaskFile


class TaskFileCreateSerializer(serializers.ModelSerializer):
    '''Serializer for creating task files'''
    author = CustomUserSerializer(read_only=True)

    class Meta:
        model = TaskFile
        fields = '__all__'
        read_only_fields = ['author', 'task']


class TaskFileDetailsSerializer(serializers.ModelSerializer):
    '''Serializer for a specified task file

    This serializer provides detailed information about a task file.'''
    file = serializers.FileField(read_only=True, allow_empty_file=True)
    author = CustomUserSerializer(read_only=True)

    class Meta:
        model = TaskFile
        exclude = ['task']
        read_only_fields = ['file', 'author']


class TaskFileUpdateSerializer(serializers.ModelSerializer):
    '''Serializer for updating a specified task file.

    With this serializer, a task file can be updated only by its author.
    '''
    file = serializers.FileField(allow_empty_file=True)
    author = CustomUserSerializer(read_only=True)

    class Meta:
        model = TaskFile
        fields = '__all__'
        read_only_fields = ['task', 'author']
[ [ [ 33, 53 ], [ 252, 272 ], [ 668, 688 ], [ 1093, 1113 ] ], [ [ 81, 92 ], [ 163, 174 ], [ 438, 449 ], [ 594, 605 ], [ 853, 864 ], [ 1035, 1046 ] ], [ [ 121, 129 ], [ 322, 330 ], [ 738, 746 ], [ 1163, 1171 ] ], [ [ 138, 162 ] ], [ [ 412, 437 ] ], [ [ 828, 852 ] ] ]
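# Hypothetical view-side usage of the serializers defined above. `request` and
# `task` are assumed to come from a DRF view or viewset; because `author` and
# `task` are read-only on TaskFileCreateSerializer, they are supplied through
# save() rather than through the request payload.
def create_task_file(request, task):
    serializer = TaskFileCreateSerializer(data=request.data)
    serializer.is_valid(raise_exception=True)
    return serializer.save(author=request.user, task=task)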