import tensorflow as tf


def create_batch_dict(batch_size, tensors_dict):
    if batch_size == 1:
        batch = {k: tf.expand_dims(t, axis=0)
                 for k, t in list(tensors_dict.items())}
        summary = None
    else:
        keys = list(tensors_dict.keys())
        values = list(tensors_dict.values())
        values = tf.train.batch(values, batch_size, num_threads=8,
                                capacity=5 * batch_size)
        batch = dict(list(zip(keys, values)))
        summary = tf.get_collection(tf.GraphKeys.SUMMARIES)[-1]
        assert "fraction_of_" in summary.name
    for t in list(batch.values()):
        t.set_shape([batch_size] + [None] * (t.get_shape().ndims - 1))
    return batch, summary
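
# --- Illustrative usage sketch -----------------------------------------------
# A minimal sketch of how create_batch_dict might be driven in a TF1-style
# graph. The per-example tensors below are hypothetical stand-ins for whatever
# a real input pipeline (e.g. a TFRecord reader) would produce.
if __name__ == "__main__":
    example = {
        "image": tf.random_uniform([64, 64, 3]),   # hypothetical per-example tensor
        "label": tf.constant(1, dtype=tf.int64),
    }
    batch, summary = create_batch_dict(batch_size=32, tensors_dict=example)

    with tf.Session() as sess:
        coord = tf.train.Coordinator()
        # tf.train.batch registers queue runners; start them before reading.
        threads = tf.train.start_queue_runners(sess=sess, coord=coord)
        images = sess.run(batch["image"])          # batched shape: (32, 64, 64, 3)
        coord.request_stop()
        coord.join(threads)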
#
# Copyright (c) 2013-2018 Quarkslab.
# This file is part of IRMA project.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License in the top-level directory
# of this distribution and at:
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# No part of the project, including this file, may be copied,
# modified, propagated, or distributed except according to the
# terms contained in the LICENSE file.

"""Defines common Object Oriented Patterns.

One should re-use these instead of defining their own.
"""


# ==========================
#  Singleton Design Pattern
# ==========================

class SingletonMetaClass(type):
    """Metaclass for singleton design pattern.

    .. warning::

        This metaclass should not be used directly. To declare a class using
        the singleton pattern, one should use the :class:`Singleton` class
        instead.
    """

    _instances = {}

    def __call__(mcs, *args, **kwargs):
        if mcs not in mcs._instances:
            mcs._instances[mcs] = \
                super(SingletonMetaClass, mcs).__call__(*args, **kwargs)
        return mcs._instances[mcs]


# Metaclass compatible with python 2 and 3. Inherit from this for singletons
Singleton = SingletonMetaClass('Singleton', (object,), {})

"""Base class for singleton

This class implements the singleton design pattern. One can inherit from this
base class to make a class implement the singleton design pattern.

.. code-block:: python

    # a class implementing a singleton
    class aParametricSingleton(Singleton):

        # do some stuff here
        pass

    # let us verify that it is really a singleton
    print(id(aParametricSingleton()))
    print(id(aParametricSingleton()))
"""


# =====================================
#  Parametric Singleton Design Pattern
# =====================================

class ParametricSingletonMetaClass(type):
    """Metaclass for parametric singleton design pattern

    .. warning::

        This metaclass should not be used directly. To declare a class using
        the singleton pattern, one should use the :class:`ParametricSingleton`
        class instead and precise the parameter used for the dict using a
        class method named ``depends_on``.
    """

    _instances = {}

    def __call__(mcs, *args, **kwargs):
        # check for "depends_on" attribute
        if "depends_on" not in kwargs and not hasattr(mcs, "depends_on"):
            raise TypeError("argument or attribute 'depends_on' not defined")

        # check for unbound methods
        if "depends_on" in kwargs and \
                (not kwargs["depends_on"] or
                 not callable(kwargs["depends_on"])):
            raise TypeError("function in parameter 'depends_on' is not bound")
        elif hasattr(mcs, "depends_on") and \
                (not getattr(mcs, "depends_on") or
                 not callable(getattr(mcs, "depends_on"))):
            raise TypeError("function in attribute 'depends_on' is not bound")

        # call depends_on to get the key
        if "depends_on" in kwargs:
            key = kwargs["depends_on"](mcs, args, kwargs)
            del kwargs["depends_on"]
        else:
            key = getattr(mcs, "depends_on")(mcs, args, kwargs)

        # check for instance
        if mcs not in mcs._instances:
            mcs._instances[mcs] = {}
        if key not in mcs._instances[mcs]:
            mcs._instances[mcs][key] = \
                super(ParametricSingletonMetaClass, mcs).__call__(*args,
                                                                  **kwargs)
        return mcs._instances[mcs][key]

    def update_key(mcs, old_key, new_key):
        mcs._instances[mcs][new_key] = mcs._instances[mcs].pop(old_key)

    def remove_key(mcs, key):
        if key in mcs._instances.get(mcs, {}):
            del mcs._instances[mcs][key]


# Metaclass compatible with python 2 and 3.
# Inherit from this for parametric singletons
ParametricSingleton = ParametricSingletonMetaClass('ParametricSingleton',
                                                   (object,), {})

"""Base class for parametric singletons

This class implements the parametric singleton design pattern. One can inherit
from this base class to make a class implement a parametric singleton pattern.
Pass either an argument ``depends_on`` in the constructor or define a class
method called ``depends_on`` that specifies how to compute the parameter value
used for the hash table storing the instances:

* example with a **static method**:

.. code-block:: python

    class aParametricSingleton(ParametricSingleton):

        @staticmethod
        def depends_on(*args, **kwargs):
            return "my key"

* example with a **``lambda`` wrapped with a static method**:

.. code-block:: python

    class aParametricSingleton(ParametricSingleton):

        depends_on = staticmethod(lambda *args, **kwargs: "my key")
"""


class PluginMetaClass(type):
    """Metaclass for auto-registering plugin pattern

    .. warning::

        This metaclass should not be used directly. To declare a class using
        the plugin pattern, one should use the :class:`Plugin` class instead.
    """

    # ===================
    #  class constructor
    # ===================

    def __init__(mcs, name, bases, attrs):
        # small hack to skip Plugin base class when initializing
        if not len(attrs):
            return
        # Begin to register all classes that derive from Plugin base class
        if not hasattr(mcs, '_plugins'):
            # This branch only executes when processing the mount point
            # itself. So, since this is a new plugin type, not an
            # implementation, this class shouldn't be registered as a plugin.
            # Instead, it sets up a list where plugins can be registered
            # later.
            mcs._plugins = []
        else:
            # This must be a plugin implementation, which should be
            # registered. Simply appending it to the list is all that's
            # needed to keep track of it later.
            mcs._plugins.append(mcs)

    # =================
    #  Plugin metadata
    # =================

    _plugin_name = None
    _plugin_version = None
    _plugin_description = None
    _plugin_dependencies = None

    # =====================
    #  Setters and getters
    # =====================

    @property
    def plugin_name(mcs):
        return mcs._plugin_name

    @property
    def plugin_version(mcs):
        return mcs._plugin_version

    @property
    def plugin_description(mcs):
        return mcs._plugin_description

    @property
    def plugin_dependencies(mcs):
        return mcs._plugin_dependencies

    @property
    def plugins(mcs):
        return mcs._plugins

    # =================
    #  Utility methods
    # =================

    def get_plugins(mcs, *args, **kwargs):
        """return instances of plugins"""
        return [plugin(*args, **kwargs) for plugin in mcs._plugins]

    def get_plugin(mcs, name, *args, **kwargs):
        """return instance of a named plugin"""
        plugin = [x for x in mcs._plugins if x.plugin_name == name]
        return plugin[0] if plugin else None


# Metaclass compatible with python 2 and 3. Inherit from this for Plugins
Plugin = PluginMetaClass('Plugin', (object,), {})
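
# --- Illustrative usage sketch -----------------------------------------------
# How the auto-registering Plugin base class is meant to be used: the first
# subclass with a non-empty body becomes the "mount point" that owns the
# registry, and its own subclasses are recorded automatically at definition
# time. The plugin classes below are hypothetical examples, not part of the
# IRMA code base.
if __name__ == "__main__":
    class AntivirusPlugin(Plugin):
        # mount point: owns the _plugins registry, is not registered itself
        _plugin_name = "AntivirusBase"

    class ClamAVPlugin(AntivirusPlugin):
        _plugin_name = "ClamAV"
        _plugin_version = "1.0"

    class YaraPlugin(AntivirusPlugin):
        _plugin_name = "Yara"
        _plugin_version = "1.0"

    # Metaclass properties are readable directly on the classes.
    print([p.plugin_name for p in AntivirusPlugin.plugins])   # ['ClamAV', 'Yara']
    print(AntivirusPlugin.get_plugin("Yara"))                 # the YaraPlugin class

    # Singleton: both calls return the very same object.
    class Config(Singleton):
        pass

    print(id(Config()) == id(Config()))                       # True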
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements.  See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership.  The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License.  You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied.  See the License for the
# specific language governing permissions and limitations
# under the License.

# General Impala query tests

import pytest
import re
from copy import deepcopy
from tests.common.impala_test_suite import ImpalaTestSuite
from tests.common.skip import SkipIfEC
from tests.common.test_dimensions import (
    create_uncompressed_text_dimension, extend_exec_option_dimension,
    create_beeswax_hs2_dimension, hs2_parquet_constraint)
from tests.common.test_vector import ImpalaTestVector


class TestQueries(ImpalaTestSuite):
    @classmethod
    def add_test_dimensions(cls):
        super(TestQueries, cls).add_test_dimensions()
        if cls.exploration_strategy() == 'core':
            cls.ImpalaTestMatrix.add_constraint(
                lambda v: v.get_value('table_format').file_format == 'parquet')
        # Run these queries through both beeswax and HS2 to get coverage of
        # both protocols. Don't run all combinations of table format and
        # protocol - the dimensions should be orthogonal.
        cls.ImpalaTestMatrix.add_dimension(create_beeswax_hs2_dimension())
        cls.ImpalaTestMatrix.add_constraint(hs2_parquet_constraint)
        # Adding a test dimension here to test the small query opt in exhaustive.
        if cls.exploration_strategy() == 'exhaustive':
            extend_exec_option_dimension(
                cls, "exec_single_node_rows_threshold", "100")

    @classmethod
    def get_workload(cls):
        return 'functional-query'

    def test_analytic_fns(self, vector):
        # TODO: Enable some of these tests for Avro if possible
        # Don't attempt to evaluate timestamp expressions with Avro tables,
        # which don't support a timestamp type
        table_format = vector.get_value('table_format')
        if table_format.file_format == 'avro':
            pytest.xfail("%s doesn't support TIMESTAMP" % (table_format.file_format))
        if table_format.file_format == 'hbase':
            pytest.xfail("A lot of queries check for NULLs, which hbase does not recognize")
        self.run_test_case('QueryTest/analytic-fns', vector)

    def test_limit(self, vector):
        if vector.get_value('table_format').file_format == 'hbase':
            pytest.xfail("IMPALA-283 - select count(*) produces inconsistent results")
        if vector.get_value('table_format').file_format == 'kudu':
            pytest.xfail("Limit queries without order by clauses are non-deterministic")
        self.run_test_case('QueryTest/limit', vector)

    def test_top_n(self, vector):
        if vector.get_value('table_format').file_format == 'hbase':
            pytest.xfail(reason="IMPALA-283 - select count(*) produces inconsistent results")
        # QueryTest/top-n is also run in test_sort with disable_outermost_topn = 1
        self.run_test_case('QueryTest/top-n', vector)

    def test_union(self, vector):
        self.run_test_case('QueryTest/union', vector)
        # IMPALA-3586: The passthrough and materialized children are
        # interleaved. The batch size is small to test the transition between
        # materialized and passthrough children.
        query_string = ("select count(c) from ( "
                        "select bigint_col + 1 as c from functional.alltypes limit 15 "
                        "union all "
                        "select bigint_col as c from functional.alltypes limit 15 "
                        "union all "
                        "select bigint_col + 1 as c from functional.alltypes limit 15 "
                        "union all "
                        "(select bigint_col as c from functional.alltypes limit 15)) t")
        vector.get_value('exec_option')['batch_size'] = 10
        result = self.execute_query(query_string, vector.get_value('exec_option'))
        assert result.data[0] == '60'

    def test_sort(self, vector):
        if vector.get_value('table_format').file_format == 'hbase':
            pytest.xfail(reason="IMPALA-283 - select count(*) produces inconsistent results")
        vector.get_value('exec_option')['disable_outermost_topn'] = 1
        self.run_test_case('QueryTest/sort', vector)
        # We can get the sort tests for free from the top-n file
        self.run_test_case('QueryTest/top-n', vector)

    def test_inline_view(self, vector):
        if vector.get_value('table_format').file_format == 'hbase':
            pytest.xfail("jointbl does not have columns with unique values, "
                         "hbase collapses them")
        self.run_test_case('QueryTest/inline-view', vector)

    def test_inline_view_limit(self, vector):
        self.run_test_case('QueryTest/inline-view-limit', vector)

    def test_subquery(self, vector):
        self.run_test_case('QueryTest/subquery', vector)

    def test_subquery_single_node(self, vector):
        new_vector = deepcopy(vector)
        new_vector.get_value('exec_option')['num_nodes'] = 1
        self.run_test_case('QueryTest/subquery-single-node', new_vector)

    def test_alias(self, vector):
        self.run_test_case('QueryTest/alias', vector)

    def test_subquery_in_constant_lhs(self, vector):
        self.run_test_case('QueryTest/subquery-in-constant-lhs', vector)

    def test_empty(self, vector):
        self.run_test_case('QueryTest/empty', vector)

    def test_views(self, vector):
        if vector.get_value('table_format').file_format == "hbase":
            pytest.xfail("TODO: Enable views tests for hbase")
        self.run_test_case('QueryTest/views', vector)

    def test_with_clause(self, vector):
        if vector.get_value('table_format').file_format == "hbase":
            pytest.xfail("TODO: Enable with clause tests for hbase")
        self.run_test_case('QueryTest/with-clause', vector)

    def test_misc(self, vector):
        table_format = vector.get_value('table_format')
        if table_format.file_format in ['hbase', 'rc', 'parquet', 'kudu']:
            msg = ("Failing on rc/snap/block despite resolution of IMP-624,IMP-503. "
                   "Failing on kudu and parquet because tables do not exist")
            pytest.xfail(msg)
        self.run_test_case('QueryTest/misc', vector)

    def test_null_data(self, vector):
        if vector.get_value('table_format').file_format == 'hbase':
            pytest.xfail("null data does not appear to work in hbase")
        self.run_test_case('QueryTest/null_data', vector)


# Tests in this class are only run against text/none either because that's the
# only format that is supported, or the tests don't exercise the file format.
class TestQueriesTextTables(ImpalaTestSuite):
    @classmethod
    def add_test_dimensions(cls):
        super(TestQueriesTextTables, cls).add_test_dimensions()
        cls.ImpalaTestMatrix.add_dimension(
            create_uncompressed_text_dimension(cls.get_workload()))

    @classmethod
    def get_workload(cls):
        return 'functional-query'

    def test_overflow(self, vector):
        self.run_test_case('QueryTest/overflow', vector)

    def test_strict_mode(self, vector):
        vector.get_value('exec_option')['strict_mode'] = 1
        vector.get_value('exec_option')['abort_on_error'] = 0
        self.run_test_case('QueryTest/strict-mode', vector)

        vector.get_value('exec_option')['abort_on_error'] = 1
        self.run_test_case('QueryTest/strict-mode-abort', vector)

    def test_data_source_tables(self, vector):
        self.run_test_case('QueryTest/data-source-tables', vector)

    def test_distinct_estimate(self, vector):
        # These results will vary slightly depending on how the values get
        # split up, so only run with 1 node and on text.
        vector.get_value('exec_option')['num_nodes'] = 1
        self.run_test_case('QueryTest/distinct-estimate', vector)

    @SkipIfEC.oom
    def test_random(self, vector):
        # These results will vary slightly depending on how the values get
        # split up, so only run with 1 node and on text.
        vector.get_value('exec_option')['num_nodes'] = 1
        self.run_test_case('QueryTest/random', vector)

    @SkipIfEC.oom
    def test_values(self, vector):
        self.run_test_case('QueryTest/values', vector)


# Tests in this class are only run against Parquet because the tests don't
# exercise the file format.
class TestQueriesParquetTables(ImpalaTestSuite):
    @classmethod
    def add_test_dimensions(cls):
        super(TestQueriesParquetTables, cls).add_test_dimensions()
        cls.ImpalaTestMatrix.add_constraint(
            lambda v: v.get_value('table_format').file_format == 'parquet')

    @classmethod
    def get_workload(cls):
        return 'functional-query'

    @pytest.mark.execute_serially
    def test_very_large_strings(self, vector):
        """Regression test for IMPALA-1619. Doesn't need to be run on all file
           formats. Executes serially to avoid large random spikes in mem usage."""
        self.run_test_case('QueryTest/large_strings', vector)

    def test_single_node_large_sorts(self, vector):
        if self.exploration_strategy() != 'exhaustive':
            pytest.skip("only run large sorts on exhaustive")
        vector.get_value('exec_option')['disable_outermost_topn'] = 1
        vector.get_value('exec_option')['num_nodes'] = 1
        self.run_test_case('QueryTest/single-node-large-sorts', vector)


# Tests for queries in HDFS-specific tables, e.g. AllTypesAggMultiFilesNoPart.
class TestHdfsQueries(ImpalaTestSuite):
    @classmethod
    def add_test_dimensions(cls):
        super(TestHdfsQueries, cls).add_test_dimensions()
        # Kudu doesn't support AllTypesAggMultiFilesNoPart (KUDU-1271, KUDU-1570).
        cls.ImpalaTestMatrix.add_constraint(
            lambda v: v.get_value('table_format').file_format != 'kudu')
        # Adding a test dimension here to test the small query opt in exhaustive.
        if cls.exploration_strategy() == 'exhaustive':
            extend_exec_option_dimension(
                cls, "exec_single_node_rows_threshold", "100")

    @classmethod
    def get_workload(cls):
        return 'functional-query'

    @SkipIfEC.oom
    def test_hdfs_scan_node(self, vector):
        self.run_test_case('QueryTest/hdfs-scan-node', vector)

    def test_file_partitions(self, vector):
        self.run_test_case('QueryTest/hdfs-partitions', vector)


class TestTopNReclaimQuery(ImpalaTestSuite):
    """Test class to validate that TopN periodically reclaims tuple pool memory
       and runs with a lower memory footprint."""

    QUERY = "select * from tpch.lineitem order by l_orderkey desc limit 10;"

    # Mem limit empirically selected so that the query fails if tuple pool
    # reclamation is not implemented for TopN
    MEM_LIMIT = "60m"

    @classmethod
    def get_workload(self):
        return 'tpch'

    @classmethod
    def add_test_dimensions(cls):
        super(TestTopNReclaimQuery, cls).add_test_dimensions()
        # The tpch tests take a long time to execute so restrict the
        # combinations they execute over.
        cls.ImpalaTestMatrix.add_dimension(
            create_uncompressed_text_dimension(cls.get_workload()))

    def test_top_n_reclaim(self, vector):
        exec_options = vector.get_value('exec_option')
        exec_options['mem_limit'] = self.MEM_LIMIT
        exec_options['num_scanner_threads'] = 1
        result = self.execute_query(self.QUERY, exec_options)
        runtime_profile = str(result.runtime_profile)
        num_of_times_tuple_pool_reclaimed = re.findall(
            'TuplePoolReclamations: ([0-9]*)', runtime_profile)
        # Confirm newly added counter is visible
        assert len(num_of_times_tuple_pool_reclaimed) > 0
        # Tuple pool is expected to be reclaimed for this query
        for n in num_of_times_tuple_pool_reclaimed:
            assert int(n) > 0
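
# --- Illustrative sketch ------------------------------------------------------
# test_top_n_reclaim above scrapes the TuplePoolReclamations counter out of the
# runtime profile text with re.findall. The profile excerpt below is fabricated
# purely to show the shape of that extraction; real profiles contain many more
# counters and nodes.
if __name__ == "__main__":
    fake_profile = """
        SORT_NODE (id=1):
           TuplePoolReclamations: 3
        SORT_NODE (id=2):
           TuplePoolReclamations: 5
    """
    counts = re.findall('TuplePoolReclamations: ([0-9]*)', fake_profile)
    assert counts == ['3', '5']
    # Every matched counter should be non-zero for a query that reclaims memory.
    assert all(int(n) > 0 for n in counts)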
# Copyright (C) 2013 Google Inc. All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following disclaimer # in the documentation and/or other materials provided with the # distribution. # * Neither the name of Google Inc. nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # pylint: disable=relative-import """Functions for type handling and type conversion (Blink/C++ <-> V8/JS). Extends IdlType and IdlUnionType with V8-specific properties, methods, and class methods. Spec: http://www.w3.org/TR/WebIDL/#es-type-mapping Design doc: http://www.chromium.org/developers/design-documents/idl-compiler """<import_stmt>posixpath<import_from_stmt>idl_types IdlAnnotatedType<import_from_stmt>idl_types IdlArrayOrSequenceType<import_from_stmt>idl_types IdlNullableType<import_from_stmt>idl_types IdlRecordType<import_from_stmt>idl_types IdlType<import_from_stmt>idl_types IdlTypeBase<import_from_stmt>idl_types IdlUnionType<import_from_stmt>utilities to_snake_case<import_stmt>v8_attributes# for IdlType.constructor_type_name <import_from_stmt>v8_globals includes<import_from_stmt>v8_utilities binding_header_filename extended_attribute_value_contains<line_sep>################################################################################ # V8-specific handling of IDL types ################################################################################ NON_WRAPPER_TYPES=frozenset(['EventHandler' 'NodeFilter' 'OnBeforeUnloadEventHandler' 'OnErrorEventHandler' ])<line_sep>TYPED_ARRAY_TYPES=frozenset(['Float32Array' 'Float64Array' 'Int8Array' 'Int16Array' 'Int32Array' 'Uint8Array' 'Uint8ClampedArray' 'Uint16Array' 'Uint32Array' 'BigInt64Array' 'BigUint64Array' ])<line_sep>ARRAY_BUFFER_VIEW_AND_TYPED_ARRAY_TYPES=TYPED_ARRAY_TYPES.union(frozenset(['ArrayBufferView']))<line_sep>ARRAY_BUFFER_AND_VIEW_TYPES=TYPED_ARRAY_TYPES.union(frozenset(['ArrayBuffer' 'ArrayBufferView' 'DataView' 'SharedArrayBuffer' ]))<line_sep># We have an unfortunate hack that treats types whose name ends with # 'Constructor' as aliases to IDL interface object. This list is used to disable # the hack. 
_CALLBACK_CONSTRUCTORS=frozenset(('AnimatorConstructor' 'BlinkAudioWorkletProcessorConstructor' 'CustomElementConstructor' 'NoArgumentConstructor' ))<line_sep>IdlType.is_array_buffer_or_view=property(<lambda>self:self.base_type<in>ARRAY_BUFFER_AND_VIEW_TYPES)<line_sep>IdlType.is_array_buffer_view_or_typed_array=property(<lambda>self:self.base_type<in>ARRAY_BUFFER_VIEW_AND_TYPED_ARRAY_TYPES)<line_sep>IdlType.is_typed_array=property(<lambda>self:self.base_type<in>TYPED_ARRAY_TYPES)<line_sep>IdlType.is_wrapper_type=property(<lambda>self:(self.is_interface_type<and><not>self.is_callback_interface<and>self.base_type<not><in>NON_WRAPPER_TYPES))<line_sep>################################################################################ # C++ types ################################################################################ CPP_TYPE_SAME_AS_IDL_TYPE=set(['double' 'float' ])<line_sep>CPP_INTEGER_CONVERSION_RULES={'byte':'int8_t' 'octet':'uint8_t' 'short':'int16_t' 'unsigned short':'uint16_t' 'long':'int32_t' 'unsigned long':'uint32_t' 'long long':'int64_t' 'unsigned long long':'uint64_t' }<line_sep>CPP_SPECIAL_CONVERSION_RULES={'EventHandler':'EventListener*' 'OnBeforeUnloadEventHandler':'EventListener*' 'OnErrorEventHandler':'EventListener*' 'Promise':'ScriptPromise' 'ScriptValue':'ScriptValue' # FIXME: Eliminate custom bindings for XPathNSResolver http://crbug.com/345529 'XPathNSResolver':'XPathNSResolver*' 'boolean':'bool' 'object':'ScriptValue' 'unrestricted double':'double' 'unrestricted float':'float' }<def_stmt>string_resource_mode idl_type<block_start>"""Returns a V8StringResourceMode value corresponding to the IDL type. Args: idl_type: A string IdlType. """<if_stmt>idl_type.is_nullable<block_start><return>'kTreatNullAndUndefinedAsNullString'<block_end><if_stmt>idl_type.is_annotated_type<block_start>treat_null_as=idl_type.extended_attributes.get('TreatNullAs')<if_stmt>treat_null_as<eq>'EmptyString'<block_start><return>'kTreatNullAsEmptyString'<block_end><elif_stmt>treat_null_as<block_start><raise>ValueError('Unknown value for [TreatNullAs]: %s'%treat_null_as)<block_end><block_end><return>''<block_end><def_stmt>cpp_type idl_type extended_attributes=<none> raw_type=<false> used_as_rvalue_type=<false> used_as_variadic_argument=<false> used_in_cpp_sequence=<false><block_start>"""Returns C++ type corresponding to IDL type. |idl_type| argument is of type IdlType, while return value is a string Args: idl_type: IdlType raw_type: bool, True if idl_type's raw/primitive C++ type should be returned. used_as_rvalue_type: bool, True if the C++ type is used as an argument or the return type of a method. used_as_variadic_argument: bool, True if the C++ type is used as a variadic argument of a method. used_in_cpp_sequence: bool, True if the C++ type is used as an element of a container. Containers can be an array, a sequence, a dictionary or a record. """<line_sep>extended_attributes=extended_attributes<or>{}<line_sep>idl_type=idl_type.preprocessed_type<line_sep># Nullable types <def_stmt>needs_optional_wrapper <block_start><if_stmt><not>idl_type.is_nullable<or><not>used_in_cpp_sequence<block_start><return><false><block_end># NativeValueTraits<T>::NullValue should exist in order to provide the # implicit null value, if needed. <return><not>idl_type.inner_type.cpp_type_has_null_value<block_end><if_stmt>needs_optional_wrapper()<block_start>inner_type=idl_type.inner_type<if_stmt>inner_type.is_dictionary<or>inner_type.is_sequence<or>inner_type.is_record_type# TODO(jbroman, bashi): Implement this if needed. 
# This is non-trivial to support because HeapVector refuses to hold # base::Optional<>, and IDLDictionaryBase (and subclasses) have no # integrated null state that can be distinguished from a present but # empty dictionary. It's unclear whether this will ever come up in # real spec WebIDL. <block_start><raise>NotImplementedError('Sequences of nullable dictionary, sequence or record types are not yet supported.')<block_end><return>'base::Optional<%s>'%inner_type.cpp_type_args(extended_attributes raw_type used_as_rvalue_type used_as_variadic_argument used_in_cpp_sequence)<block_end># Array or sequence types <if_stmt>used_as_variadic_argument<block_start>native_array_element_type=idl_type<block_end><else_stmt><block_start>native_array_element_type=idl_type.native_array_element_type<block_end><if_stmt>native_array_element_type<block_start>vector_type=cpp_ptr_type('Vector' 'HeapVector' native_array_element_type.is_traceable)<line_sep>vector_template_type=cpp_template_type(vector_type native_array_element_type.cpp_type_args(used_in_cpp_sequence=<true>))<if_stmt>used_as_rvalue_type<block_start><return>'const %s&'%vector_template_type<block_end><return>vector_template_type<block_end># Record types. <if_stmt>idl_type.is_record_type<block_start>vector_type=cpp_ptr_type('Vector' 'HeapVector' idl_type.value_type.is_traceable)<line_sep>value_type=idl_type.value_type.cpp_type_args(used_in_cpp_sequence=<true>)<line_sep>vector_template_type=cpp_template_type(vector_type 'std::pair<String, %s>'%value_type)<if_stmt>used_as_rvalue_type<block_start><return>'const %s&'%vector_template_type<block_end><return>vector_template_type<block_end># Simple types base_idl_type=idl_type.base_type<if_stmt>base_idl_type<in>CPP_TYPE_SAME_AS_IDL_TYPE<block_start><return>base_idl_type<block_end><if_stmt>base_idl_type<in>CPP_INTEGER_CONVERSION_RULES<block_start><return>CPP_INTEGER_CONVERSION_RULES[base_idl_type]<block_end><if_stmt>base_idl_type<in>CPP_SPECIAL_CONVERSION_RULES<block_start><return>CPP_SPECIAL_CONVERSION_RULES[base_idl_type]<block_end><if_stmt>idl_type.is_string_type<block_start><if_stmt>idl_type.has_string_context<block_start><return>'String'<block_end><if_stmt><not>raw_type<block_start><return>'const String&'<if>used_as_rvalue_type<else>'String'<block_end><return>'V8StringResource<%s>'%string_resource_mode(idl_type)<block_end><if_stmt>base_idl_type<eq>'ArrayBufferView'<and>'FlexibleArrayBufferView'<in>extended_attributes<block_start><return>'FlexibleArrayBufferView'<block_end><if_stmt>base_idl_type<in>TYPED_ARRAY_TYPES<and>'FlexibleArrayBufferView'<in>extended_attributes<block_start><return>'Flexible'+base_idl_type<block_end><if_stmt>base_idl_type<in>ARRAY_BUFFER_VIEW_AND_TYPED_ARRAY_TYPES<or>base_idl_type<eq>'DataView'<block_start><if_stmt>'AllowShared'<in>extended_attributes<block_start><return>cpp_template_type('MaybeShared' idl_type.implemented_as)<block_end><else_stmt><block_start><return>cpp_template_type('NotShared' idl_type.implemented_as)<block_end><block_end><if_stmt>idl_type.is_interface_type<or>idl_type.is_dictionary<block_start>implemented_as_class=idl_type.implemented_as<if_stmt>raw_type<or><not>used_in_cpp_sequence<block_start><return>implemented_as_class+'*'<block_end><if_stmt><not>used_in_cpp_sequence<block_start><return>implemented_as_class+'*'<block_end><if_stmt>used_as_rvalue_type<and>idl_type.is_garbage_collected<block_start><return>'const %s*'%implemented_as_class<block_end><return>cpp_template_type('Member' implemented_as_class)<block_end><if_stmt>idl_type.is_union_type# Avoid "AOrNullOrB" 
for cpp type of (A? or B) because we generate # V8AOrBOrNull to handle nulle for (A? or B), (A or B?) and (A or B)? <block_start><def_stmt>member_cpp_name idl_type<block_start><if_stmt>idl_type.is_nullable<block_start><return>idl_type.inner_type.name<block_end><return>idl_type.name<block_end>idl_type_name='Or'.join(member_cpp_name(member)<for>member idl_type.member_types)<line_sep><return>'const %s&'%idl_type_name<if>used_as_rvalue_type<else>idl_type_name<block_end><if_stmt>idl_type.is_callback_function<block_start>v8_type_name='V8'+base_idl_type<if_stmt>idl_type.is_custom_callback_function<block_start><return>v8_type_name<block_end><if_stmt><not>used_in_cpp_sequence<block_start><return>v8_type_name+'*'<block_end><return>cpp_template_type('Member' v8_type_name)<block_end><if_stmt>base_idl_type<eq>'void'<block_start><return>base_idl_type<block_end># Default, assume native type is a pointer with same type name as idl type <return>base_idl_type+'*'<block_end><def_stmt>cpp_type_initializer idl_type<block_start>"""Returns a string containing a C++ initialization statement for the corresponding type. |idl_type| argument is of type IdlType. """<line_sep>base_idl_type=idl_type.base_type<if_stmt>idl_type.native_array_element_type<block_start><return>''<block_end><if_stmt>idl_type.is_explicit_nullable<block_start><return>''<block_end><if_stmt>idl_type.is_numeric_type<block_start><return>' = 0'<block_end><if_stmt>base_idl_type<eq>'boolean'<block_start><return>' = false'<block_end><if_stmt>(base_idl_type<in>NON_WRAPPER_TYPES<or>base_idl_type<in>CPP_SPECIAL_CONVERSION_RULES<or>base_idl_type<eq>'any'<or>idl_type.is_string_type<or>idl_type.is_enum)<block_start><return>''<block_end><return>' = nullptr'<block_end># Allow access as idl_type.cpp_type if no arguments IdlTypeBase.cpp_type=property(cpp_type)<line_sep>IdlTypeBase.cpp_type_initializer=property(cpp_type_initializer)<line_sep>IdlTypeBase.cpp_type_args=cpp_type<line_sep>IdlUnionType.cpp_type_initializer=''<line_sep>IdlArrayOrSequenceType.native_array_element_type=property(<lambda>self:self.element_type)<def_stmt>cpp_template_type template inner_type<block_start>"""Returns C++ template specialized to type."""<line_sep>format_string='{template}<{inner_type}>'<line_sep><return>format_string.format(template=template inner_type=inner_type)<block_end><def_stmt>cpp_ptr_type old_type new_type is_gc_type<block_start><if_stmt>is_gc_type<block_start><return>new_type<block_end><return>old_type<block_end><def_stmt>v8_type interface_name<block_start><return>'V8'+interface_name<block_end># [ImplementedAs] # This handles [ImplementedAs] on interface types, not [ImplementedAs] in the # interface being generated. e.g., given: # Foo.idl: interface Foo {attribute Bar bar}; # Bar.idl: [ImplementedAs=Zork] interface Bar {}; # when generating bindings for Foo, the [ImplementedAs] on Bar is needed. # This data is external to Foo.idl, and hence computed as global information in # compute_interfaces_info.py to avoid having to parse IDLs of all used interfaces. 
IdlType.implemented_as_interfaces={}<def_stmt>implemented_as idl_type<block_start>base_idl_type=idl_type.base_type<if_stmt>base_idl_type<in>IdlType.implemented_as_interfaces<block_start><return>IdlType.implemented_as_interfaces[base_idl_type]<block_end><elif_stmt>idl_type.is_callback_function<or>idl_type.is_callback_interface<block_start><return>'V8%s'%base_idl_type<block_end><return>base_idl_type<block_end>IdlType.implemented_as=property(implemented_as)<line_sep>IdlType.set_implemented_as_interfaces=classmethod(<lambda>cls new_implemented_as_interfaces:cls.implemented_as_interfaces.update(new_implemented_as_interfaces))<line_sep># [GarbageCollected] IdlType.garbage_collected_types=set()<line_sep>IdlType.is_garbage_collected=property(<lambda>self:self.base_type<in>IdlType.garbage_collected_types)<line_sep>IdlType.set_garbage_collected_types=classmethod(<lambda>cls new_garbage_collected_types:cls.garbage_collected_types.update(new_garbage_collected_types))<def_stmt>is_gc_type idl_type<block_start><return>idl_type.is_garbage_collected<or>idl_type.is_union_type<block_end>IdlTypeBase.is_gc_type=property(is_gc_type)<def_stmt>is_traceable idl_type<block_start><return>(idl_type.is_garbage_collected<or>idl_type.is_callback_function<or>idl_type.cpp_type<in>('ScriptValue' 'ScriptPromise'))<block_end>IdlTypeBase.is_traceable=property(is_traceable)<line_sep>IdlUnionType.is_traceable=property(<lambda>self:<true>)<line_sep>IdlArrayOrSequenceType.is_traceable=property(<lambda>self:self.element_type.is_traceable)<line_sep>IdlRecordType.is_traceable=property(<lambda>self:self.value_type.is_traceable)<line_sep>IdlNullableType.is_traceable=property(<lambda>self:self.inner_type.is_traceable)<line_sep>################################################################################ # Includes ################################################################################ INCLUDES_FOR_TYPE={'object':set(['bindings/core/v8/script_value.h' 'bindings/core/v8/idl_types.h' 'bindings/core/v8/native_value_traits_impl.h']) 'ArrayBufferView':set(['bindings/core/v8/v8_array_buffer_view.h' 'core/typed_arrays/array_buffer_view_helpers.h' 'core/typed_arrays/flexible_array_buffer_view.h']) 'EventHandler':set(['bindings/core/v8/js_event_handler.h']) 'HTMLCollection':set(['bindings/core/v8/v8_html_collection.h' 'core/dom/class_collection.h' 'core/dom/tag_collection.h' 'core/html/html_collection.h' 'core/html/html_table_rows_collection.h' 'core/html/forms/html_data_list_options_collection.h' 'core/html/forms/html_form_controls_collection.h']) 'NodeList':set(['bindings/core/v8/v8_node_list.h' 'core/dom/name_node_list.h' 'core/dom/node_list.h' 'core/dom/static_node_list.h' 'core/html/forms/labels_node_list.h']) 'Promise':set(['bindings/core/v8/script_promise.h']) 'ScriptValue':set(['bindings/core/v8/script_value.h']) }<def_stmt>includes_for_type idl_type extended_attributes=<none><block_start>idl_type=idl_type.preprocessed_type<line_sep>extended_attributes=extended_attributes<or>{}<line_sep># Simple types base_idl_type=idl_type.base_type<if_stmt>base_idl_type<in>INCLUDES_FOR_TYPE<block_start><return>INCLUDES_FOR_TYPE[base_idl_type]<block_end><if_stmt>base_idl_type<in>TYPED_ARRAY_TYPES<block_start><return>INCLUDES_FOR_TYPE['ArrayBufferView'].union(set(['bindings/%s/v8/%s'%(component_dir[base_idl_type] binding_header_filename(base_idl_type))]))<block_end><if_stmt>idl_type.is_basic_type<block_start><return>set(['bindings/core/v8/idl_types.h' 
'bindings/core/v8/native_value_traits_impl.h'])<block_end><if_stmt>base_idl_type.endswith('ConstructorConstructor')# FIXME: rename to NamedConstructor # FIXME: replace with a [NamedConstructorAttribute] extended attribute # Ending with 'ConstructorConstructor' indicates a named constructor, # and these do not have header files, as they are part of the generated # bindings for the interface <block_start><return>set()<block_end><if_stmt>(base_idl_type.endswith('Constructor')<and>base_idl_type<not><in>_CALLBACK_CONSTRUCTORS)# FIXME: replace with a [ConstructorAttribute] extended attribute <block_start>base_idl_type=idl_type.constructor_type_name<block_end><if_stmt>idl_type.is_custom_callback_function<block_start><return>set()<block_end><if_stmt>idl_type.is_callback_function<block_start>component=IdlType.callback_functions[base_idl_type]['component_dir']<line_sep><return>set(['bindings/%s/v8/%s'%(component binding_header_filename(base_idl_type))])<block_end><if_stmt>base_idl_type<not><in>component_dir<block_start><return>set()<block_end><return>set(['bindings/%s/v8/%s'%(component_dir[base_idl_type] binding_header_filename(base_idl_type))])<block_end>IdlType.includes_for_type=includes_for_type<def_stmt>includes_for_union_type idl_type extended_attributes=<none><block_start><return>set.union(*[member_type.includes_for_type(extended_attributes)<for>member_type idl_type.member_types])<block_end>IdlUnionType.includes_for_type=includes_for_union_type<def_stmt>includes_for_array_or_sequence_type idl_type extended_attributes=<none><block_start><return>set.union(set(['bindings/core/v8/idl_types.h' 'bindings/core/v8/native_value_traits_impl.h']) idl_type.element_type.includes_for_type(extended_attributes))<block_end>IdlArrayOrSequenceType.includes_for_type=includes_for_array_or_sequence_type<def_stmt>includes_for_record_type idl_type extended_attributes=<none><block_start><return>set.union(idl_type.key_type.includes_for_type(extended_attributes) idl_type.value_type.includes_for_type(extended_attributes))<block_end>IdlRecordType.includes_for_type=includes_for_record_type<def_stmt>add_includes_for_type idl_type extended_attributes=<none><block_start>includes.update(idl_type.includes_for_type(extended_attributes))<block_end>IdlTypeBase.add_includes_for_type=add_includes_for_type<def_stmt>includes_for_interface interface_name<block_start><return>IdlType(interface_name).includes_for_type()<block_end><def_stmt>add_includes_for_interface interface_name<block_start>includes.update(includes_for_interface(interface_name))<block_end><def_stmt>impl_includes_for_type idl_type interfaces_info<block_start>includes_for_type=set()<line_sep>idl_type=idl_type.preprocessed_type<line_sep>native_array_element_type=idl_type.native_array_element_type<if_stmt>native_array_element_type<block_start>includes_for_type.update(impl_includes_for_type(native_array_element_type interfaces_info))<line_sep>includes_for_type.add('platform/wtf/vector.h')<block_end>base_idl_type=idl_type.base_type<if_stmt>idl_type.is_string_type<block_start>includes_for_type.add('platform/wtf/text/wtf_string.h')<block_end><if_stmt>idl_type.is_record_type<block_start>includes_for_type.update(impl_includes_for_type(idl_type.key_type interfaces_info))<line_sep>includes_for_type.update(impl_includes_for_type(idl_type.value_type interfaces_info))<block_end><if_stmt>idl_type.is_callback_function<block_start>component=IdlType.callback_functions[base_idl_type]['component_dir']<line_sep><return>set(['bindings/%s/v8/%s'%(component 
binding_header_filename(base_idl_type))])<block_end><if_stmt>base_idl_type<in>interfaces_info<block_start>interface_info=interfaces_info[base_idl_type]<line_sep>includes_for_type.add(interface_info['include_path'])<block_end><if_stmt>base_idl_type<in>INCLUDES_FOR_TYPE<block_start>includes_for_type.update(INCLUDES_FOR_TYPE[base_idl_type])<block_end><if_stmt>idl_type.is_array_buffer_view_or_typed_array<block_start><return>set(['core/typed_arrays/dom_typed_array.h' 'core/typed_arrays/array_buffer_view_helpers.h'])<block_end><return>includes_for_type<block_end><def_stmt>impl_includes_for_type_union idl_type interfaces_info<block_start>includes_for_type=set()<for_stmt>member_type idl_type.member_types<block_start>includes_for_type.update(member_type.impl_includes_for_type(interfaces_info))<block_end><return>includes_for_type<block_end>IdlTypeBase.impl_includes_for_type=impl_includes_for_type<line_sep>IdlUnionType.impl_includes_for_type=impl_includes_for_type_union<def_stmt>impl_forward_declaration_name idl_type<block_start>element_type=idl_type.native_array_element_type<if_stmt>element_type<block_start><return>element_type.impl_forward_declaration_name<block_end><if_stmt>((idl_type.is_wrapper_type<and><not>idl_type.is_array_buffer_view_or_typed_array)<or>idl_type.is_dictionary)<block_start><return>idl_type.implemented_as<block_end><return><none><block_end>IdlTypeBase.impl_forward_declaration_name=property(impl_forward_declaration_name)<line_sep>component_dir={}<def_stmt>set_component_dirs new_component_dirs<block_start>component_dir.update(new_component_dirs)<block_end>################################################################################ # V8 -> C++ ################################################################################ # TODO(rakuco): Get rid of this definition altogether and move to NativeValueTraits<T>::nativeValue(). # That requires not requiring ExceptionState where it is not used, and we must be careful not # to introduce any performance regressions. V8_VALUE_TO_CPP_VALUE={# Basic 'DOMString':'{v8_value}' # Interface types 'FlexibleArrayBufferView':'ToFlexibleArrayBufferView({isolate}, {v8_value}, {variable_name})' 'Promise':'ScriptPromise::Cast(ScriptState::Current({isolate}), {v8_value})' 'ScriptValue':'ScriptValue({isolate}, {v8_value})' 'Window':'ToDOMWindow({isolate}, {v8_value})' 'XPathNSResolver':'ToXPathNSResolver(ScriptState::Current({isolate}), {v8_value})' }<def_stmt>v8_conversion_needs_exception_state idl_type<block_start><return>(idl_type.is_numeric_type<or>idl_type.is_enum<or>idl_type.is_dictionary<or>idl_type.is_array_buffer_view_or_typed_array<or>idl_type.has_string_context<or>idl_type.name<in>('Boolean' 'ByteString' 'Object' 'USVString'))<block_end>IdlType.v8_conversion_needs_exception_state=property(v8_conversion_needs_exception_state)<line_sep>IdlAnnotatedType.v8_conversion_needs_exception_state=property(v8_conversion_needs_exception_state)<line_sep>IdlArrayOrSequenceType.v8_conversion_needs_exception_state=<true><line_sep>IdlRecordType.v8_conversion_needs_exception_state=<true><line_sep>IdlUnionType.v8_conversion_needs_exception_state=<true><line_sep>TRIVIAL_CONVERSIONS=frozenset(['any' 'boolean' 'NodeFilter' 'XPathNSResolver' 'Promise'])<def_stmt>v8_conversion_is_trivial idl_type# The conversion is a simple expression that returns the converted value and # cannot raise an exception. 
<block_start><return>(idl_type.base_type<in>TRIVIAL_CONVERSIONS<or>idl_type.is_wrapper_type)<block_end>IdlType.v8_conversion_is_trivial=property(v8_conversion_is_trivial)<def_stmt>native_value_traits_type_name idl_type extended_attributes in_sequence_or_record=<false><block_start>idl_type=idl_type.preprocessed_type<if_stmt>idl_type.is_string_type# Strings are handled separately because null and/or undefined are # processed by V8StringResource due to the [TreatNullAs] extended # attribute and nullable string types. <block_start>name='IDL%s'%idl_type.name<block_end><elif_stmt>idl_type.is_nullable<block_start>inner_type=idl_type.inner_type<line_sep>inner_type_nvt_type=native_value_traits_type_name(inner_type extended_attributes)<line_sep># The IDL compiler has special cases to handle some nullable types in operation # parameters, dictionary fields, etc. <if_stmt>in_sequence_or_record<or>inner_type.name<eq>'Object'<block_start>name='IDLNullable<%s>'%inner_type_nvt_type<block_end><else_stmt><block_start>name=inner_type_nvt_type<block_end><block_end><elif_stmt>idl_type.native_array_element_type<block_start>name='IDLSequence<%s>'%native_value_traits_type_name(idl_type.native_array_element_type extended_attributes <true>)<block_end><elif_stmt>idl_type.is_record_type<block_start>name='IDLRecord<%s, %s>'%(native_value_traits_type_name(idl_type.key_type extended_attributes) native_value_traits_type_name(idl_type.value_type extended_attributes <true>))<block_end><elif_stmt>idl_type.is_basic_type<or>idl_type.name<in>['Object' 'Promise']<block_start>name='IDL%s'%idl_type.name<block_end><elif_stmt>idl_type.implemented_as<is><not><none><block_start>name=idl_type.implemented_as<block_end><else_stmt><block_start>name=idl_type.name<block_end><return>name<block_end><def_stmt>v8_value_to_cpp_value idl_type extended_attributes v8_value variable_name isolate for_constructor_callback<block_start><if_stmt>idl_type.name<eq>'void'<block_start><return>''<block_end># Simple types idl_type=idl_type.preprocessed_type<line_sep>base_idl_type=idl_type.as_union_type.name<if>idl_type.is_union_type<else>idl_type.base_type<if_stmt>'FlexibleArrayBufferView'<in>extended_attributes<block_start><if_stmt>base_idl_type<not><in>ARRAY_BUFFER_VIEW_AND_TYPED_ARRAY_TYPES<block_start><raise>ValueError('Unrecognized base type for extended attribute "FlexibleArrayBufferView": %s'%(idl_type.base_type))<block_end><if_stmt>'AllowShared'<not><in>extended_attributes<block_start><raise>ValueError('"FlexibleArrayBufferView" extended attribute requires "AllowShared" on %s'%(idl_type.base_type))<block_end>base_idl_type='FlexibleArrayBufferView'<block_end><if_stmt>'AllowShared'<in>extended_attributes<and><not>idl_type.is_array_buffer_view_or_typed_array<block_start><raise>ValueError('Unrecognized base type for extended attribute "AllowShared": %s'%(idl_type.base_type))<block_end><if_stmt>idl_type.is_integer_type<block_start>arguments=', '.join([v8_value 'exception_state'])<block_end><elif_stmt>idl_type.v8_conversion_needs_exception_state<block_start>arguments=', '.join([v8_value 'exception_state'])<block_end><else_stmt><block_start>arguments=v8_value<block_end><if_stmt>idl_type.has_string_context<block_start>execution_context='bindings::ExecutionContextFromV8Wrappable(impl)'<if_stmt>for_constructor_callback<block_start>execution_context='CurrentExecutionContext(info.GetIsolate())'<block_end>cpp_expression_format='NativeValueTraits<IDL%s>::NativeValue(%s, %s, exception_state, %s)'%(idl_type.name isolate v8_value 
execution_context)<block_end><elif_stmt>base_idl_type<in>V8_VALUE_TO_CPP_VALUE<block_start>cpp_expression_format=V8_VALUE_TO_CPP_VALUE[base_idl_type]<block_end><elif_stmt>idl_type.name<eq>'ArrayBuffer'<block_start>cpp_expression_format=('{v8_value}->Is{idl_type}() ? '<concat>'V8{idl_type}::ToImpl(v8::Local<v8::{idl_type}>::Cast({v8_value})) : 0')<block_end><elif_stmt>idl_type.is_array_buffer_view_or_typed_array<or>base_idl_type<eq>'DataView'<block_start>this_cpp_type=idl_type.cpp_type_args(extended_attributes=extended_attributes)<if_stmt>'AllowShared'<in>extended_attributes<block_start>cpp_expression_format=('ToMaybeShared<%s>({isolate}, {v8_value}, exception_state)'%this_cpp_type)<block_end><else_stmt><block_start>cpp_expression_format=('ToNotShared<%s>({isolate}, {v8_value}, exception_state)'%this_cpp_type)<block_end><block_end><elif_stmt>idl_type.is_union_type<block_start>nullable='UnionTypeConversionMode::kNullable'<if>idl_type.includes_nullable_type<else>'UnionTypeConversionMode::kNotNullable'<line_sep># We need to consider the moving of the null through the union in order # to generate the correct V8* class name. this_cpp_type=idl_type.cpp_type_args(extended_attributes=extended_attributes)<line_sep>cpp_expression_format='%s::ToImpl({isolate}, {v8_value}, {variable_name}, %s, exception_state)'%(v8_type(this_cpp_type) nullable)<block_end><elif_stmt>idl_type.use_output_parameter_for_result<block_start>cpp_expression_format='V8{idl_type}::ToImpl({isolate}, {v8_value}, {variable_name}, exception_state)'<block_end><elif_stmt>idl_type.is_callback_function<block_start>cpp_expression_format='V8{idl_type}::Create({v8_value}.As<v8::Function>())'<block_end><elif_stmt>idl_type.v8_conversion_needs_exception_state# Effectively, this if branch means everything with v8_conversion_needs_exception_state == True # except for unions and dictionary interfaces. <block_start>base_idl_type=native_value_traits_type_name(idl_type extended_attributes)<line_sep>cpp_expression_format=('NativeValueTraits<{idl_type}>::NativeValue({isolate}, {arguments})')<block_end><else_stmt><block_start>cpp_expression_format=('V8{idl_type}::ToImplWithTypeCheck({isolate}, {v8_value})')<block_end><return>cpp_expression_format.format(arguments=arguments idl_type=base_idl_type v8_value=v8_value variable_name=variable_name isolate=isolate)<block_end># FIXME: this function should be refactored, as this takes too many flags. <def_stmt>v8_value_to_local_cpp_value idl_type extended_attributes v8_value variable_name declare_variable=<true> isolate='info.GetIsolate()' bailout_return_value=<none> use_exception_state=<false> code_generation_target=<none> for_constructor_callback=<false><block_start>"""Returns an expression that converts a V8 value to a C++ value and stores it as a local value."""<line_sep>this_cpp_type=idl_type.cpp_type_args(extended_attributes=extended_attributes raw_type=<true>)<line_sep>idl_type=idl_type.preprocessed_type<line_sep>cpp_value=v8_value_to_cpp_value(idl_type extended_attributes v8_value variable_name isolate for_constructor_callback=for_constructor_callback)<line_sep># Optional expression that returns a value to be assigned to the local variable. assign_expression=<none><line_sep># Optional void expression executed unconditionally. set_expression=<none><line_sep># Optional expression that returns true if the conversion fails. check_expression=<none><line_sep># Optional expression used as the return value when returning. Only # meaningful if 'check_expression' is not None. 
return_expression=bailout_return_value<if_stmt>'FlexibleArrayBufferView'<in>extended_attributes<block_start><if_stmt>idl_type.base_type<not><in>ARRAY_BUFFER_VIEW_AND_TYPED_ARRAY_TYPES<block_start><raise>ValueError('Unrecognized base type for extended attribute "FlexibleArrayBufferView": %s'%(idl_type.base_type))<block_end>set_expression=cpp_value<block_end><elif_stmt>idl_type.is_string_type<or>idl_type.v8_conversion_needs_exception_state# Types for which conversion can fail and that need error handling. <block_start>check_expression='exception_state.HadException()'<if_stmt>idl_type.is_union_type<block_start>set_expression=cpp_value<block_end><else_stmt><block_start>assign_expression=cpp_value<line_sep># Note: 'not idl_type.v8_conversion_needs_exception_state' implies # 'idl_type.is_string_type', but there are types for which both are # true (ByteString and USVString), so using idl_type.is_string_type # as the condition here would be wrong. <if_stmt><not>idl_type.v8_conversion_needs_exception_state<block_start><if_stmt>use_exception_state<block_start>check_expression='!%s.Prepare(exception_state)'%variable_name<block_end><else_stmt><block_start>check_expression='!%s.Prepare()'%variable_name<block_end><block_end><block_end><block_end><elif_stmt><not>idl_type.v8_conversion_is_trivial<and><not>idl_type.is_callback_function<block_start><return>{'error_message':'no V8 -> C++ conversion for IDL type: %s'%idl_type.name}<block_end><else_stmt><block_start>assign_expression=cpp_value<block_end># Types that don't need error handling, and simply assign a value to the # local variable. <if_stmt>(idl_type.is_explicit_nullable<and>code_generation_target<eq>'attribute_set')<block_start>this_cpp_type=cpp_template_type('base::Optional' this_cpp_type)<line_sep>expr='{cpp_type}({expr})'.format(cpp_type=this_cpp_type expr=assign_expression)<line_sep>assign_expression=("is_null "<concat>"? base::nullopt "<concat>": {expr}".format(expr=expr))<block_end><return>{'assign_expression':assign_expression 'check_expression':check_expression 'cpp_type':this_cpp_type 'cpp_name':variable_name 'declare_variable':declare_variable 'return_expression':return_expression 'set_expression':set_expression }<block_end>IdlTypeBase.v8_value_to_local_cpp_value=v8_value_to_local_cpp_value<def_stmt>use_output_parameter_for_result idl_type<block_start>"""True when methods/getters which return the given idl_type should take the output argument. 
"""<line_sep><return>idl_type.is_union_type<block_end>IdlTypeBase.use_output_parameter_for_result=property(use_output_parameter_for_result)<line_sep>################################################################################ # C++ -> V8 ################################################################################ <def_stmt>preprocess_idl_type idl_type<block_start><if_stmt>idl_type.is_nullable<block_start><return>IdlNullableType(idl_type.inner_type.preprocessed_type)<block_end><if_stmt>idl_type.is_enum# Enumerations are internally DOMStrings <block_start><return>IdlType('DOMString')<block_end><if_stmt>idl_type.base_type<eq>'any'<or>idl_type.is_custom_callback_function<block_start><return>IdlType('ScriptValue')<block_end><if_stmt>idl_type.is_callback_function<block_start><return>idl_type<block_end><return>idl_type<block_end>IdlTypeBase.preprocessed_type=property(preprocess_idl_type)<def_stmt>preprocess_idl_type_and_value idl_type cpp_value extended_attributes<block_start>"""Returns IDL type and value, with preliminary type conversions applied."""<line_sep>idl_type=idl_type.preprocessed_type<if_stmt>idl_type.name<eq>'Promise'<block_start>idl_type=IdlType('ScriptValue')<block_end><if_stmt>idl_type.base_type<in>['long long' 'unsigned long long']# long long and unsigned long long are not representable in ECMAScript; # we represent them as doubles. <block_start>is_nullable=idl_type.is_nullable<line_sep>idl_type=IdlType('double')<if_stmt>is_nullable<block_start>idl_type=IdlNullableType(idl_type)<block_end>cpp_value='static_cast<double>(%s)'%cpp_value<block_end># HTML5 says that unsigned reflected attributes should be in the range # [0, 2^31). When a value isn't in this range, a default value (or 0) # should be returned instead. extended_attributes=extended_attributes<or>{}<if_stmt>('Reflect'<in>extended_attributes<and>idl_type.base_type<in>['unsigned long' 'unsigned short'])<block_start>cpp_value=cpp_value.replace('GetUnsignedIntegralAttribute' 'GetIntegralAttribute')<line_sep>cpp_value='std::max(0, static_cast<int>(%s))'%cpp_value<block_end><return>idl_type cpp_value<block_end><def_stmt>v8_conversion_type idl_type extended_attributes<block_start>"""Returns V8 conversion type, adding any additional includes. The V8 conversion type is used to select the C++ -> V8 conversion function or V8SetReturnValue* function; it can be an idl_type, a cpp_type, or a separate name for the type of conversion (e.g., 'DOMWrapper'). """<line_sep>extended_attributes=extended_attributes<or>{}<line_sep># Nullable dictionaries need to be handled differently than either # non-nullable dictionaries or unions. <if_stmt>idl_type.is_dictionary<and>idl_type.is_nullable<block_start><return>'NullableDictionary'<block_end><if_stmt>idl_type.is_dictionary<or>idl_type.is_union_type<block_start><return>'DictionaryOrUnion'<block_end># Array or sequence types native_array_element_type=idl_type.native_array_element_type<if_stmt>native_array_element_type<block_start><return>'FrozenArray'<if>idl_type.is_frozen_array<else>'sequence'<block_end># Record types. 
<if_stmt>idl_type.is_record_type<block_start><return>'Record'<block_end># Simple types base_idl_type=idl_type.base_type<line_sep># Basic types, without additional includes <if_stmt>base_idl_type<in>CPP_INTEGER_CONVERSION_RULES<block_start><return>CPP_INTEGER_CONVERSION_RULES[base_idl_type]<block_end><if_stmt>idl_type.is_string_type<block_start><if_stmt>idl_type.is_nullable<block_start><return>'StringOrNull'<block_end><return>base_idl_type<block_end><if_stmt>idl_type.is_basic_type<block_start><return>base_idl_type<block_end><if_stmt>base_idl_type<in>['object' 'ScriptValue']<block_start><return>'ScriptValue'<block_end># Data type with potential additional includes <if_stmt>base_idl_type<in>V8_SET_RETURN_VALUE# Special V8SetReturnValue treatment <block_start><return>base_idl_type<block_end># Pointer type <return>'DOMWrapper'<block_end>IdlTypeBase.v8_conversion_type=v8_conversion_type<line_sep>V8_SET_RETURN_VALUE={'boolean':'V8SetReturnValueBool(info, {cpp_value})' 'DOMString':'V8SetReturnValueString(info, {cpp_value}, info.GetIsolate())' 'ByteString':'V8SetReturnValueString(info, {cpp_value}, info.GetIsolate())' 'USVString':'V8SetReturnValueString(info, {cpp_value}, info.GetIsolate())' 'StringOrNull':'V8SetReturnValueStringOrNull(info, {cpp_value}, info.GetIsolate())' 'void':'' # All the int types below are converted to (u)int32_t in the V8SetReturnValue{Int,Unsigned}() calls. # The 64-bit int types have already been converted to double when V8_SET_RETURN_VALUE is used, so they are not # listed here. 'int8_t':'V8SetReturnValueInt(info, {cpp_value})' 'int16_t':'V8SetReturnValueInt(info, {cpp_value})' 'int32_t':'V8SetReturnValueInt(info, {cpp_value})' 'uint8_t':'V8SetReturnValueUnsigned(info, {cpp_value})' 'uint16_t':'V8SetReturnValueUnsigned(info, {cpp_value})' 'uint32_t':'V8SetReturnValueUnsigned(info, {cpp_value})' # No special V8SetReturnValue* function (set value directly) 'float':'V8SetReturnValue(info, {cpp_value})' 'unrestricted float':'V8SetReturnValue(info, {cpp_value})' 'double':'V8SetReturnValue(info, {cpp_value})' 'unrestricted double':'V8SetReturnValue(info, {cpp_value})' # No special V8SetReturnValue* function, but instead convert value to V8 # and then use general V8SetReturnValue. 'sequence':'V8SetReturnValue(info, {cpp_value})' 'FrozenArray':'V8SetReturnValue(info, {cpp_value})' 'EventHandler':'V8SetReturnValue(info, {cpp_value})' 'NodeFilter':'V8SetReturnValue(info, {cpp_value})' 'OnBeforeUnloadEventHandler':'V8SetReturnValue(info, {cpp_value})' 'OnErrorEventHandler':'V8SetReturnValue(info, {cpp_value})' 'ScriptValue':'V8SetReturnValue(info, {cpp_value})' # Records. 'Record':'V8SetReturnValue(info, ToV8({cpp_value}, info.Holder(), info.GetIsolate()))' # DOMWrapper 'DOMWrapperForMainWorld':'V8SetReturnValueForMainWorld(info, {cpp_value})' 'DOMWrapperFast':'V8SetReturnValueFast(info, {cpp_value}, {script_wrappable})' 'DOMWrapperDefault':'V8SetReturnValue(info, {cpp_value})' # If [CheckSecurity=ReturnValue] is specified, the returned object must be # wrapped in its own realm, which can be different from the realm of the # receiver object. # # [CheckSecurity=ReturnValue] is used only for contentDocument and # getSVGDocument attributes of HTML{IFrame,Frame,Object,Embed}Element, # and Window.frameElement. Except for Window.frameElement, all interfaces # support contentWindow(), so we create a new wrapper in the realm of # contentWindow(). Note that DOMWindow* has its own realm and there is no # need to pass |creationContext| in for ToV8(DOMWindow*). 
# Window.frameElement is implemented with [Custom]. 'DOMWrapperAcrossContext':('V8SetReturnValue(info, ToV8({cpp_value}, '+'ToV8(impl->contentWindow(), v8::Local<v8::Object>(), '+'info.GetIsolate()).As<v8::Object>(), info.GetIsolate()))') # Note that static attributes and operations do not check whether |this| is # an instance of the interface nor |this|'s creation context is the same as # the current context. So we must always use the current context as the # creation context of the DOM wrapper for the return value. 'DOMWrapperStatic':'V8SetReturnValue(info, {cpp_value}, info.GetIsolate()->GetCurrentContext()->Global())' # Nullable dictionaries 'NullableDictionary':'V8SetReturnValue(info, result)' 'NullableDictionaryStatic':'V8SetReturnValue(info, result, info.GetIsolate()->GetCurrentContext()->Global())' # Union types or dictionaries 'DictionaryOrUnion':'V8SetReturnValue(info, result)' 'DictionaryOrUnionStatic':'V8SetReturnValue(info, result, info.GetIsolate()->GetCurrentContext()->Global())' }<def_stmt>v8_set_return_value idl_type cpp_value extended_attributes=<none> script_wrappable='' for_main_world=<false> is_static=<false><block_start>"""Returns a statement that converts a C++ value to a V8 value and sets it as a return value. """<def_stmt>dom_wrapper_conversion_type <block_start><if_stmt>('CheckSecurity'<in>extended_attributes<and>extended_attribute_value_contains(extended_attributes['CheckSecurity'] 'ReturnValue'))<block_start><return>'DOMWrapperAcrossContext'<block_end><if_stmt>is_static<block_start><return>'DOMWrapperStatic'<block_end><if_stmt><not>script_wrappable<block_start><return>'DOMWrapperDefault'<block_end><if_stmt>for_main_world<block_start><return>'DOMWrapperForMainWorld'<block_end><return>'DOMWrapperFast'<block_end>idl_type,cpp_value=preprocess_idl_type_and_value(idl_type cpp_value extended_attributes)<line_sep>this_v8_conversion_type=idl_type.v8_conversion_type(extended_attributes)<line_sep># SetReturn-specific overrides <if_stmt>this_v8_conversion_type<in>('EventHandler' 'NodeFilter' 'OnBeforeUnloadEventHandler' 'OnErrorEventHandler' 'ScriptValue' 'sequence' 'FrozenArray')# Convert value to V8 and then use general V8SetReturnValue <block_start>cpp_value=idl_type.cpp_value_to_v8_value(cpp_value extended_attributes=extended_attributes)<block_end><if_stmt>this_v8_conversion_type<eq>'DOMWrapper'<block_start>this_v8_conversion_type=dom_wrapper_conversion_type()<block_end><if_stmt>is_static<and>this_v8_conversion_type<in>('NullableDictionary' 'DictionaryOrUnion')<block_start>this_v8_conversion_type<augadd>'Static'<block_end>format_string=V8_SET_RETURN_VALUE[this_v8_conversion_type]<line_sep>statement=format_string.format(cpp_value=cpp_value script_wrappable=script_wrappable)<line_sep><return>statement<block_end>IdlTypeBase.v8_set_return_value=v8_set_return_value<line_sep>CPP_VALUE_TO_V8_VALUE={# Built-in types 'DOMString':'V8String({isolate}, {cpp_value})' 'ByteString':'V8String({isolate}, {cpp_value})' 'USVString':'V8String({isolate}, {cpp_value})' 'boolean':'v8::Boolean::New({isolate}, {cpp_value})' # All the int types below are converted to (u)int32_t in the v8::Integer::New*() calls. # The 64-bit int types have already been converted to double when CPP_VALUE_TO_V8_VALUE is used, so they are not # listed here. 
'int8_t':'v8::Integer::New({isolate}, {cpp_value})' 'int16_t':'v8::Integer::New({isolate}, {cpp_value})' 'int32_t':'v8::Integer::New({isolate}, {cpp_value})' 'uint8_t':'v8::Integer::NewFromUnsigned({isolate}, {cpp_value})' 'uint16_t':'v8::Integer::NewFromUnsigned({isolate}, {cpp_value})' 'uint32_t':'v8::Integer::NewFromUnsigned({isolate}, {cpp_value})' 'float':'v8::Number::New({isolate}, {cpp_value})' 'unrestricted float':'v8::Number::New({isolate}, {cpp_value})' 'double':'v8::Number::New({isolate}, {cpp_value})' 'unrestricted double':'v8::Number::New({isolate}, {cpp_value})' 'StringOrNull':('({cpp_value}.IsNull() ? '+'v8::Null({isolate}).As<v8::Value>() : '+'V8String({isolate}, {cpp_value}).As<v8::Value>())') # Special cases 'EventHandler':'JSEventHandler::AsV8Value({isolate}, impl, {cpp_value})' 'NodeFilter':'ToV8({cpp_value}, {creation_context}, {isolate})' 'OnBeforeUnloadEventHandler':'JSEventHandler::AsV8Value({isolate}, impl, {cpp_value})' 'OnErrorEventHandler':'JSEventHandler::AsV8Value({isolate}, impl, {cpp_value})' 'Record':'ToV8({cpp_value}, {creation_context}, {isolate})' 'ScriptValue':'{cpp_value}.V8Value()' # General 'sequence':'ToV8({cpp_value}, {creation_context}, {isolate})' 'FrozenArray':'FreezeV8Object(ToV8({cpp_value}, {creation_context}, {isolate}), {isolate})' 'DOMWrapper':'ToV8({cpp_value}, {creation_context}, {isolate})' # Passing nullable dictionaries isn't a pattern currently used # anywhere in the web platform, and more work would be needed in # the code generator to distinguish between passing null, and # passing an object which happened to not contain any of the # dictionary's defined attributes. For now, don't define # NullableDictionary here, which will cause an exception to be # thrown during code generation if an argument to a method is a # nullable dictionary type. 
# # Union types or dictionaries 'DictionaryOrUnion':'ToV8({cpp_value}, {creation_context}, {isolate})' }<def_stmt>cpp_value_to_v8_value idl_type cpp_value isolate='info.GetIsolate()' creation_context='info.Holder()' extended_attributes=<none><block_start>"""Returns an expression that converts a C++ value to a V8 value."""<line_sep># the isolate parameter is needed for callback interfaces idl_type,cpp_value=preprocess_idl_type_and_value(idl_type cpp_value extended_attributes)<line_sep>this_v8_conversion_type=idl_type.v8_conversion_type(extended_attributes)<line_sep>format_string=CPP_VALUE_TO_V8_VALUE[this_v8_conversion_type]<line_sep>statement=format_string.format(cpp_value=cpp_value isolate=isolate creation_context=creation_context)<line_sep><return>statement<block_end>IdlTypeBase.cpp_value_to_v8_value=cpp_value_to_v8_value<def_stmt>literal_cpp_value idl_type idl_literal<block_start>"""Converts an expression that is a valid C++ literal for this type."""<line_sep># FIXME: add validation that idl_type and idl_literal are compatible <if_stmt>idl_type.base_type<in>('any' 'object')<and>idl_literal.is_null<block_start><return>'ScriptValue::CreateNull(script_state->GetIsolate())'<block_end>literal_value=str(idl_literal)<if_stmt>idl_type.base_type<in>('octet' 'unsigned short' 'unsigned long')<block_start><return>literal_value+'u'<block_end><if_stmt>idl_type.is_dictionary<and>literal_value<eq>'{}'<block_start><return>'MakeGarbageCollected<{}>()'.format(idl_type.base_type)<block_end><return>literal_value<block_end><def_stmt>union_literal_cpp_value idl_type idl_literal<block_start><if_stmt>idl_literal.is_null<block_start><return>idl_type.name+'()'<block_end><elif_stmt>idl_literal.idl_type<eq>'DOMString'<block_start>member_type=idl_type.string_member_type<block_end><elif_stmt>idl_literal.idl_type<in>('integer' 'float')<block_start>member_type=idl_type.numeric_member_type<block_end><elif_stmt>idl_literal.idl_type<eq>'boolean'<block_start>member_type=idl_type.boolean_member_type<block_end><elif_stmt>idl_literal.idl_type<eq>'sequence'<block_start>member_type=idl_type.sequence_member_type<block_end><elif_stmt>idl_literal.idl_type<eq>'dictionary'<block_start>member_type=idl_type.dictionary_member_type<block_end><else_stmt><block_start><raise>ValueError('Unsupported literal type: '+idl_literal.idl_type)<block_end><return>'%s::From%s(%s)'%(idl_type.cpp_type_args() member_type.name member_type.literal_cpp_value(idl_literal))<block_end><def_stmt>array_or_sequence_literal_cpp_value idl_type idl_literal# Only support empty sequences. 
<block_start><if_stmt>idl_literal.value<eq>'[]'<block_start><return>cpp_type(idl_type)+'()'<block_end><raise>ValueError('Unsupported literal type: '+idl_literal.idl_type)<block_end>IdlType.literal_cpp_value=literal_cpp_value<line_sep>IdlUnionType.literal_cpp_value=union_literal_cpp_value<line_sep>IdlArrayOrSequenceType.literal_cpp_value=array_or_sequence_literal_cpp_value<line_sep>_IDL_TYPE_TO_NATIVE_VALUE_TRAITS_TAG_MAP={'DOMString':'IDLString' 'USVString':'IDLUSVString' 'DOMStringOrNull':'IDLStringOrNull' 'USVStringOrNull':'IDLUSVStringOrNull' 'any':'ScriptValue' 'boolean':'IDLBoolean' 'long':'IDLLong' 'sequence<DOMString>':'IDLSequence<IDLString>' 'unsigned short':'IDLUnsignedShort' 'void':<none> }<def_stmt>idl_type_to_native_value_traits_tag idl_type<block_start>idl_type_str=str(idl_type)<if_stmt>idl_type.is_nullable<block_start>idl_type_str<augadd>"OrNull"<block_end><if_stmt>idl_type_str<in>_IDL_TYPE_TO_NATIVE_VALUE_TRAITS_TAG_MAP<block_start><return>_IDL_TYPE_TO_NATIVE_VALUE_TRAITS_TAG_MAP[idl_type_str]<block_end><else_stmt><block_start><raise>Exception("Type `%s' is not supported."%idl_type_str)<block_end><block_end>################################################################################ # Utility properties for nullable types ################################################################################ <def_stmt>cpp_type_has_null_value idl_type# - String types (String/AtomicString) represent null as a null string, # i.e. one for which String::IsNull() returns true. # - Enum types, as they are implemented as Strings. # - Interface types and Dictionary types represent null as a null pointer. # - Union types, as their container classes can represent a null value. # - 'Object' and 'any' type. We use ScriptValue for object type. <block_start><return>(idl_type.is_string_type<or>idl_type.is_enum<or>idl_type.is_interface_type<or>idl_type.is_callback_interface<or>idl_type.is_callback_function<or>idl_type.is_custom_callback_function<or>idl_type.is_dictionary<or>idl_type.is_union_type<or>idl_type.base_type<eq>'object'<or>idl_type.base_type<eq>'any')<block_end>IdlTypeBase.cpp_type_has_null_value=property(cpp_type_has_null_value)<def_stmt>is_implicit_nullable idl_type# Nullable type where the corresponding C++ type supports a null value. <block_start><return>idl_type.is_nullable<and>idl_type.cpp_type_has_null_value<block_end><def_stmt>is_explicit_nullable idl_type# Nullable type that isn't implicit nullable (see above). For such types, # we use base::Optional<T> or similar explicit ways to represent a null value. <block_start><return>idl_type.is_nullable<and><not>idl_type.is_implicit_nullable<block_end>IdlTypeBase.is_implicit_nullable=property(is_implicit_nullable)<line_sep>IdlUnionType.is_implicit_nullable=<false><line_sep>IdlTypeBase.is_explicit_nullable=property(is_explicit_nullable)<def_stmt>includes_nullable_type_union idl_type# http://heycam.github.io/webidl/#dfn-includes-a-nullable-type <block_start><return>idl_type.number_of_nullable_member_types<eq>1<block_end>IdlTypeBase.includes_nullable_type=<false><line_sep>IdlNullableType.includes_nullable_type=<true><line_sep>IdlUnionType.includes_nullable_type=property(includes_nullable_type_union)<line_sep>
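# Illustrative sketch of how the helpers patched onto IdlTypeBase above might be invoked by the bindings templates. # The strings in the trailing comments are assumptions, not guarantees: the exact output depends on IdlType properties defined elsewhere in this module. <if_stmt>__name__<eq>"__main__"<block_start>print(IdlType('boolean').v8_set_return_value('result'))# expected, roughly: 'V8SetReturnValueBool(info, result)' <line_sep>print(IdlType('DOMString').cpp_value_to_v8_value('impl->name()'))# expected, roughly: 'V8String(info.GetIsolate(), impl->name())' <block_end>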
# Copyright 2018 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. <import_stmt>os<import_stmt>os.path<import_stmt>sys<line_sep>PYJSON5_DIR=os.path.join(os.path.dirname(__file__) '..' '..' '..' '..' 'pyjson5' 'src')<line_sep>sys.path.insert(0 PYJSON5_DIR)<import_stmt>json5# pylint: disable=import-error <class_stmt>ARIAReader(object)<block_start><def_stmt>__init__ self json5_file_path<block_start>self._input_files=[json5_file_path]<with_stmt>open(os.path.abspath(json5_file_path))<as>json5_file<block_start>self._data=json5.loads(json5_file.read())<block_end><block_end><def_stmt>attributes_list self<block_start><return>{'data':[item[u'name']<for>item self._data['attributes']]}<block_end><block_end>
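# Usage sketch: 'aria_properties.json5' below is a hypothetical path; ARIAReader only needs a JSON5 file whose 'attributes' entries carry a 'name' field. <if_stmt>__name__<eq>"__main__"<block_start>reader=ARIAReader('aria_properties.json5')<line_sep>print(reader.attributes_list())<block_end>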
<import_stmt>itertools<import_stmt>random<import_stmt>json<import_stmt>os<import_from_stmt>interfaces.SentenceOperation SentenceOperation<import_from_stmt>tasks.TaskTypes TaskType<line_sep>""" Base Class for implementing the different input transformations a generation should be robust against. """<def_stmt>generate_sentence sentence spell_errors prob_of_typo seed<block_start>output=[]<for_stmt>word sentence.split()<block_start>random.seed(seed)<if_stmt>word.lower()<in>list(spell_errors.keys())<and>random.choice(range(0 100))<le>prob_of_typo<block_start>output.append(random.choice(spell_errors[word.lower()]))<block_end><else_stmt><block_start>output.append(word)<block_end><block_end>output=" ".join(output)<line_sep><return>output<block_end><def_stmt>generate_sentences text prob=0.1 seed=0 max_outputs=1<block_start>spell_errors=os.path.join('transformations' 'replace_spelling' 'spell_errors.json')<with_stmt>open(spell_errors 'r')<as>fp<block_start>spell_errors=json.load(fp)<block_end>prob_of_typo=int(prob<times>100)<line_sep>perturbed_texts=[]<for_stmt>idx range(max_outputs)<block_start>new_text=generate_sentence(text spell_errors prob_of_typo seed+idx)<line_sep>perturbed_texts.append(new_text)<block_end><return>perturbed_texts<block_end><class_stmt>SpellingTransformation(SentenceOperation)<block_start>tasks=[TaskType.TEXT_CLASSIFICATION TaskType.TEXT_TO_TEXT_GENERATION TaskType.TEXT_TAGGING ]<line_sep>languages=["en"]<def_stmt>__init__ self seed=0 max_outputs=3<block_start>super().__init__(seed max_outputs=max_outputs)<block_end><def_stmt>generate self sentence:str<block_start>perturbed_texts=generate_sentences(text=sentence prob=0.20 seed=self.seed max_outputs=self.max_outputs )<line_sep><return>perturbed_texts<block_end><block_end>
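# Usage sketch: assumes the module is run from the repository root so that transformations/replace_spelling/spell_errors.json resolves. <if_stmt>__name__<eq>"__main__"<block_start>transformation=SpellingTransformation(seed=0 max_outputs=2)<line_sep>print(transformation.generate("The quick brown fox jumps over the lazy dog"))<block_end>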
# Copyright 2020 The TensorFlow Quantum Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Test for ParameterShift specific C++ ops."""<import_stmt>numpy<as>np<import_stmt>tensorflow<as>tf<import_stmt>sympy<import_stmt>cirq<import_from_stmt>tensorflow_quantum.core.ops tfq_ps_util_ops<import_from_stmt>tensorflow_quantum.python util<def_stmt>_complex_test_circuit <block_start>t=sympy.Symbol('t')<line_sep>r=sympy.Symbol('r')<line_sep>qubits=cirq.GridQubit.rect(1 6)<line_sep>circuit_batch=[cirq.Circuit(cirq.Moment([cirq.H(q)<for>q qubits]) cirq.Moment([cirq.X(qubits[4]) cirq.PhasedXPowGate(phase_exponent=np.random.random()<times>t).on(qubits[5]) cirq.ISwapPowGate(exponent=np.random.random()<times>t).on(qubits[0] qubits[1]) cirq.FSimGate(theta=np.random.random()<times>t phi=np.random.random()<times>r).on(qubits[2] qubits[3])]) cirq.Moment([cirq.H(q)<for>q qubits])) cirq.Circuit(cirq.FSimGate(theta=np.random.random()<times>t phi=np.random.random()<times>r).on(*qubits[:2]) cirq.FSimGate(theta=np.random.random()<times>r phi=np.random.random()<times>t).on(qubits[1] qubits[0])) cirq.Circuit(cirq.Moment([cirq.ISwapPowGate(exponent=np.random.random()<times>t).on(*qubits[:2]) cirq.PhasedXPowGate(phase_exponent=np.random.random()<times>r).on(qubits[2]) cirq.ISwapPowGate(exponent=np.random.random()<times>r).on(*qubits[3:5])]))]<line_sep><return>circuit_batch<block_end><class_stmt>PSDecomposeTest(tf.test.TestCase)<block_start>"""Tests on tfq_ps_decompose"""<def_stmt>test_iswap_gate_test self<block_start>"""Test 1 ISwapPowGate decomposition."""<line_sep>t=sympy.Symbol('t')<line_sep>qubits=cirq.GridQubit.rect(1 2)<line_sep>circuit=cirq.Circuit(cirq.ISwapPowGate(exponent=np.random.random()<times>t).on(*qubits))<line_sep>inputs=util.convert_to_tensor([circuit])<line_sep>outputs=tfq_ps_util_ops.tfq_ps_decompose(inputs)<line_sep>decomposed_programs=util.from_tensor(outputs)<line_sep>rand_resolver={'t':np.random.random()}<line_sep>self.assertAllClose(cirq.unitary(cirq.resolve_parameters(circuit rand_resolver)) cirq.unitary(cirq.resolve_parameters(decomposed_programs[0] rand_resolver)) atol=1e-5)<block_end><def_stmt>test_phased_x_pow_gate_test self<block_start>"""Test 1 PhasedXPowGate decomposition."""<line_sep>t=sympy.Symbol('t')<line_sep>r=sympy.Symbol('r')<line_sep>q=cirq.GridQubit(0 0)<line_sep>circuit=cirq.Circuit(cirq.PhasedXPowGate(phase_exponent=np.random.random()<times>r exponent=np.random.random()<times>t).on(q))<line_sep>inputs=util.convert_to_tensor([circuit])<line_sep>outputs=tfq_ps_util_ops.tfq_ps_decompose(inputs)<line_sep>decomposed_programs=util.from_tensor(outputs)<line_sep>rand_resolver={'t':np.random.random() 'r':np.random.random()}<line_sep>self.assertAllClose(cirq.unitary(cirq.resolve_parameters(circuit rand_resolver)) cirq.unitary(cirq.resolve_parameters(decomposed_programs[0] rand_resolver)) atol=1e-5)<block_end><def_stmt>test_fsim_gate_test self<block_start>"""Test 1 FSimPowGate 
decomposition."""<line_sep>t=sympy.Symbol('t')<line_sep>r=sympy.Symbol('r')<line_sep>qubits=cirq.GridQubit.rect(1 2)<line_sep>circuit=cirq.Circuit(cirq.FSimGate(theta=np.random.random()<times>r phi=np.random.random()<times>t).on(*qubits))<line_sep>inputs=util.convert_to_tensor([circuit])<line_sep>outputs=tfq_ps_util_ops.tfq_ps_decompose(inputs)<line_sep>decomposed_programs=util.from_tensor(outputs)<line_sep>rand_resolver={'t':np.random.random() 'r':np.random.random()}<line_sep>self.assertAllClose(cirq.unitary(cirq.resolve_parameters(circuit rand_resolver)) cirq.unitary(cirq.resolve_parameters(decomposed_programs[0] rand_resolver)) atol=1e-5)<block_end><def_stmt>test_decompose_with_complex_circuit self<block_start>"""Test decompose with complex circuit."""<line_sep>names=['CLAE' 'HRYV' 'IRKB' 'LKRV' 'PJOU' 'CJKX' 'NASW']<line_sep># Test circuit has a Moment with 1) FSimGate & PhasedXPowGate, # 2) PhasedXPowGate & ISwapPowGate and 3) FSimGate & ISwapPowGate. # Be careful, they are not decomposed if not parameterized. circuit_batch=[cirq.Circuit([cirq.Moment([cirq.FSimGate(theta=0.10338130973488413<times>sympy.Symbol('CLAE') phi=0.10338130973488413<times>sympy.Symbol('IRKB')).on(cirq.GridQubit(0 2) cirq.GridQubit(0 3)) cirq.PhasedXPowGate(phase_exponent=1.0 exponent=0.86426029696045281<times>sympy.Symbol('HRYV')).on(cirq.GridQubit(0 1)) ]) cirq.Moment([cirq.Y.on(cirq.GridQubit(0 3)) cirq.Z.on(cirq.GridQubit(0 0)) cirq.FSimGate(theta=1 phi=1).on(cirq.GridQubit(0 1) cirq.GridQubit(0 2)) ]) cirq.Moment([(cirq.CNOT<power>(0.92874230274398684<times>sympy.Symbol('IRKB'))).on(cirq.GridQubit(0 1) cirq.GridQubit(0 2)) ]) cirq.Moment([cirq.PhasedXPowGate(phase_exponent=sympy.Symbol('PJOU') exponent=0.2081415255258906<times>sympy.Symbol('LKRV')).on(cirq.GridQubit(0 2)) (cirq.ISWAP<power>(0.32860954996781722<times>sympy.Symbol('PJOU'))).on(cirq.GridQubit(0 1) cirq.GridQubit(0 3)) ]) cirq.Moment([cirq.PhasedXPowGate(phase_exponent=sympy.Symbol('CJKX')).on(cirq.GridQubit(0 1)) cirq.ZZ.on(cirq.GridQubit(0 0) cirq.GridQubit(0 3)) (cirq.X<power>(0.6826594585474709<times>sympy.Symbol('HRYV'))).on(cirq.GridQubit(0 2)) ]) cirq.Moment([(cirq.ZZ<power>(0.18781276022427218<times>sympy.Symbol('PJOU'))).on(cirq.GridQubit(0 0) cirq.GridQubit(0 3)) ]) cirq.Moment([cirq.Y.on(cirq.GridQubit(0 0)) ]) cirq.Moment([cirq.FSimGate(theta=0.13793763138552417<times>sympy.Symbol('CJKX') phi=0.13793763138552417<times>sympy.Symbol('PJOU')).on(cirq.GridQubit(0 2) cirq.GridQubit(0 3)) (cirq.ISWAP<power>(0.028165738453673095<times>sympy.Symbol('NASW'))).on(cirq.GridQubit(0 0) cirq.GridQubit(0 1)) ]) cirq.Moment([cirq.FSimGate(theta=0.74356520426349459<times>sympy.Symbol('CJKX') phi=0.74356520426349459<times>sympy.Symbol('NASW')).on(cirq.GridQubit(0 3) cirq.GridQubit(0 0)) ]) cirq.Moment([cirq.CNOT.on(cirq.GridQubit(0 0) cirq.GridQubit(0 2)) cirq.SWAP.on(cirq.GridQubit(0 3) cirq.GridQubit(0 1)) ]) cirq.Moment([cirq.H.on(cirq.GridQubit(0 3)) cirq.H.on(cirq.GridQubit(0 2)) cirq.CNOT.on(cirq.GridQubit(0 1) cirq.GridQubit(0 0)) ]) cirq.Moment([cirq.CNOT.on(cirq.GridQubit(0 0) cirq.GridQubit(0 1)) cirq.YY.on(cirq.GridQubit(0 2) cirq.GridQubit(0 3)) ]) cirq.Moment([cirq.CZ.on(cirq.GridQubit(0 1) cirq.GridQubit(0 0)) cirq.CNOT.on(cirq.GridQubit(0 2) cirq.GridQubit(0 3)) ]) cirq.Moment([cirq.FSimGate(theta=1 phi=1).on(cirq.GridQubit(0 0) cirq.GridQubit(0 2)) cirq.CNOT.on(cirq.GridQubit(0 3) cirq.GridQubit(0 1)) ]) cirq.Moment([cirq.FSimGate(theta=1 phi=1).on(cirq.GridQubit(0 0) cirq.GridQubit(0 3)) cirq.SWAP.on(cirq.GridQubit(0 2) 
cirq.GridQubit(0 1)) ]) cirq.Moment([cirq.Y.on(cirq.GridQubit(0 0)) cirq.PhasedXPowGate(phase_exponent=1.0).on(cirq.GridQubit(0 2)) cirq.FSimGate(theta=1 phi=1).on(cirq.GridQubit(0 1) cirq.GridQubit(0 3)) ]) ])]<line_sep># Decompose programs. inputs=util.convert_to_tensor(circuit_batch)<line_sep>outputs=tfq_ps_util_ops.tfq_ps_decompose(inputs)<line_sep>decomposed_programs=util.from_tensor(outputs)<line_sep>self.assertEqual(len(decomposed_programs) len(circuit_batch))<line_sep># Original programs has parameterized ISP, PXP, FSIM, but this result # has no such gates at all. All parameterized gates have at most two # eigenvalues. There are still ISwap and PhasedX(1.0) because they are # not parameterized, which doesn't affect ParameterShift differentiation # at all. <for_stmt>program decomposed_programs<block_start><for_stmt>moment program<block_start><for_stmt>gate_op moment# Consider parameterized gates only <block_start><if_stmt>cirq.is_parameterized(gate_op.gate)# Check I. The gate should have _eigen_components. <block_start>self.assertTrue(hasattr(gate_op.gate '_eigen_components'))<line_sep># Check II. The gate should have two eigen values. self.assertEqual(len(gate_op.gate._eigen_components()) 2 gate_op.gate)<block_end><block_end><block_end><block_end># Now all programs don't have ISWAP & PhasedXPowGate because ISWAP has # 3 eigenvalues and PhasedXPowGate doesn't have _eigen_components. # Check if two programs are identical. rand_resolver={name:np.random.random()<for>name names}<line_sep>self.assertAllClose(cirq.unitary(cirq.resolve_parameters(circuit_batch[0] rand_resolver)) cirq.unitary(cirq.resolve_parameters(decomposed_programs[0] rand_resolver)) atol=1e-5)<block_end><def_stmt>test_moment_preservation self<block_start>"""Test Moment-structure preservation."""<line_sep>t=sympy.Symbol('t')<line_sep>r=sympy.Symbol('r')<line_sep>qubits=cirq.LineQubit.range(6)<line_sep>circuit_batch=[cirq.Circuit(cirq.Moment([cirq.H(q)<for>q qubits]) cirq.Moment([cirq.X(qubits[4]) cirq.PhasedXPowGate(phase_exponent=np.random.random()<times>t).on(qubits[5]) cirq.ISwapPowGate(exponent=np.random.random()<times>t).on(qubits[0] qubits[1]) cirq.FSimGate(theta=np.random.random()<times>t phi=np.random.random()<times>r).on(qubits[2] qubits[3])]) cirq.Moment([cirq.H(q)<for>q qubits]))]<line_sep>inputs=util.convert_to_tensor(circuit_batch)<line_sep>outputs=tfq_ps_util_ops.tfq_ps_decompose(inputs)<line_sep>decomposed_programs=util.from_tensor(outputs)<line_sep># Now all programs don't have ISWAP & PhasedXPowGate because ISWAP has # 3 eigenvalues and PhasedXPowGate doesn't have _eigen_components. # Check if two programs are identical. rand_resolver={'t':np.random.random() 'r':np.random.random()}<line_sep>self.assertAllClose(cirq.unitary(cirq.resolve_parameters(circuit_batch[0] rand_resolver)) cirq.unitary(cirq.resolve_parameters(decomposed_programs[0] rand_resolver)) atol=1e-5)<line_sep># Check if the Moments are conserved. max_decomposed_length=3<line_sep>n_non_decomposed_moments=2<line_sep>self.assertEqual(len(decomposed_programs[0]) n_non_decomposed_moments+max_decomposed_length)<line_sep># Total length of Moments = 5 # The non-decomposed moments should be the same. self.assertEqual(decomposed_programs[0][0] circuit_batch[0][0])<line_sep>self.assertEqual(decomposed_programs[0][-1] circuit_batch[0][-1])<line_sep># Check paralellized decompose gates in Moment[1]~[3]. # The target ops are replaced by the first decomposition gates. It means # the first Moment has exactly the same number of gate ops. 
self.assertEqual(len(decomposed_programs[0][1]) len(circuit_batch[0][1]))<line_sep># From the second Moments, the Moments only have decomposition gates. # In this example, two ISwapPowGate & one PhasedXPowGate are located. # Since PhasedXPowGate, ISwapPowGate, FSimGate has 3, 2, 3 result gates # Moment[2] have 3 gate ops and Moment[3] have 2 gate ops. self.assertEqual(len(decomposed_programs[0][2]) 3)<line_sep>self.assertEqual(len(decomposed_programs[0][3]) 2)<block_end><def_stmt>test_more_complex_moment_preservation self<block_start>"""Test Moment-structure preservation."""<line_sep>circuit_batch=_complex_test_circuit()<line_sep>inputs=util.convert_to_tensor(circuit_batch)<line_sep>outputs=tfq_ps_util_ops.tfq_ps_decompose(inputs)<line_sep>decomposed_programs=util.from_tensor(outputs)<line_sep># Now all programs don't have ISWAP & PhasedXPowGate because ISWAP has # 3 eigenvalues and PhasedXPowGate doesn't have _eigen_components. # Check if two programs are identical. rand_resolver={'t':np.random.random() 'r':np.random.random()}<for_stmt>i range(3)<block_start>self.assertAllClose(cirq.unitary(cirq.resolve_parameters(circuit_batch[i] rand_resolver)) cirq.unitary(cirq.resolve_parameters(decomposed_programs[i] rand_resolver)) atol=1e-5)<block_end># Check if the Moments are conserved. # Circuit 1. max_decomposed_length=3<line_sep>n_non_decomposed_moments=2<line_sep>self.assertEqual(len(decomposed_programs[0]) n_non_decomposed_moments+max_decomposed_length)<line_sep># Total length of Moments = 5 # The non-decomposed moments should be the same. self.assertEqual(decomposed_programs[0][0] circuit_batch[0][0])<line_sep>self.assertEqual(decomposed_programs[0][-1] circuit_batch[0][-1])<line_sep># Check paralellized decompose gates in Moment[1]~[3]. # The target ops are replaced by the first decomposition gates. It means # the first Moment has exactly the same number of gate ops. self.assertEqual(len(decomposed_programs[0][1]) len(circuit_batch[0][1]))<line_sep># From the second Moments, the Moments only have decomposition gates. # In this example, two ISwapPowGate & one PhasedXPowGate are located. # Since PhasedXPowGate, ISwapPowGate, FSimGate has 3, 2, 3 result gates # Moment[2] have 3 gate ops and Moment[3] have 2 gate ops. self.assertEqual(len(decomposed_programs[0][2]) 3)<line_sep>self.assertEqual(len(decomposed_programs[0][3]) 2)<line_sep># Circuit 2. two FSimGates. self.assertEqual(len(decomposed_programs[1]) 2<times>max_decomposed_length)<line_sep># Circuit 3. one PXP between two ISwapPowGates. 
self.assertEqual(len(decomposed_programs[2]) max_decomposed_length)<block_end><block_end><class_stmt>PSSymbolReplaceTest(tf.test.TestCase)<block_start>"""Tests tfq_ps_symbol_replace."""<def_stmt>test_simple_case self<block_start>"""Test trivial case."""<line_sep>bit=cirq.GridQubit(0 0)<line_sep>circuit=cirq.Circuit(cirq.X(bit)<power>sympy.Symbol('alpha') cirq.Y(bit)<power>sympy.Symbol('alpha') cirq.Z(bit)<power>sympy.Symbol('alpha') )<line_sep>inputs=util.convert_to_tensor([circuit])<line_sep>symbols=tf.convert_to_tensor(['alpha'])<line_sep>new=tf.convert_to_tensor(['new'])<line_sep>res=tfq_ps_util_ops.tfq_ps_symbol_replace(inputs symbols new)<line_sep>output=util.from_tensor(res)<line_sep>correct_00=cirq.Circuit(cirq.X(bit)<power>sympy.Symbol('new') cirq.Y(bit)<power>sympy.Symbol('alpha') cirq.Z(bit)<power>sympy.Symbol('alpha') )<line_sep>correct_01=cirq.Circuit(cirq.X(bit)<power>sympy.Symbol('alpha') cirq.Y(bit)<power>sympy.Symbol('new') cirq.Z(bit)<power>sympy.Symbol('alpha') )<line_sep>correct_02=cirq.Circuit(cirq.X(bit)<power>sympy.Symbol('alpha') cirq.Y(bit)<power>sympy.Symbol('alpha') cirq.Z(bit)<power>sympy.Symbol('new') )<line_sep>self.assertEqual(correct_00 output[0][0][0])<line_sep>self.assertEqual(correct_01 output[0][0][1])<line_sep>self.assertEqual(correct_02 output[0][0][2])<block_end><def_stmt>test_error self<block_start>"""Ensure that errors happen with bad inputs."""<line_sep>bit=cirq.GridQubit(0 0)<line_sep>circuit=cirq.Circuit(cirq.X(bit)<power>(sympy.Symbol('alpha')<times>2))<line_sep>inputs=util.convert_to_tensor([[circuit]])<line_sep>symbols=tf.convert_to_tensor(['test'])<line_sep>replacements=tf.convert_to_tensor(['nothing'])<with_stmt>self.assertRaisesRegex(Exception expected_regex='rank 1. Got rank 2.')<block_start>tfq_ps_util_ops.tfq_ps_symbol_replace(inputs symbols replacements)<block_end>inputs=tf.convert_to_tensor(['junk'])<with_stmt>self.assertRaisesRegex(Exception expected_regex='Unparseable proto:')<block_start>tfq_ps_util_ops.tfq_ps_symbol_replace(inputs symbols replacements)<block_end>inputs=util.convert_to_tensor([circuit])<line_sep>symbols=tf.convert_to_tensor([['test']])<line_sep>replacements=tf.convert_to_tensor(['nothing'])<with_stmt>self.assertRaisesRegex(Exception expected_regex='rank 1. Got rank 2.')<block_start>tfq_ps_util_ops.tfq_ps_symbol_replace(inputs symbols replacements)<block_end>symbols=tf.convert_to_tensor(['test'])<line_sep>replacements=tf.convert_to_tensor([['nothing']])<with_stmt>self.assertRaisesRegex(Exception expected_regex='rank 1. 
Got rank 2.')<block_start>tfq_ps_util_ops.tfq_ps_symbol_replace(inputs symbols replacements)<block_end>symbols=tf.convert_to_tensor(['test'])<line_sep>replacements=tf.convert_to_tensor(['nothing' 'too long'])<with_stmt>self.assertRaisesRegex(Exception expected_regex='symbols.shape is not equal to replacement_symbols.shape')<block_start>tfq_ps_util_ops.tfq_ps_symbol_replace(inputs symbols replacements)<block_end><block_end><def_stmt>test_weight_coefficient self<block_start>"""Test that scalar multiples of trivial case work."""<line_sep>bit=cirq.GridQubit(0 0)<line_sep>circuit=cirq.Circuit(cirq.X(bit)<power>(sympy.Symbol('alpha')<times>2.4) cirq.Y(bit)<power>(sympy.Symbol('alpha')<times>3.4) cirq.Z(bit)<power>(sympy.Symbol('alpha')<times>4.4) )<line_sep>inputs=util.convert_to_tensor([circuit])<line_sep>symbols=tf.convert_to_tensor(['alpha'])<line_sep>new=tf.convert_to_tensor(['new'])<line_sep>res=tfq_ps_util_ops.tfq_ps_symbol_replace(inputs symbols new)<line_sep>output=util.from_tensor(res)<line_sep>correct_00=cirq.Circuit(cirq.X(bit)<power>(sympy.Symbol('new')<times>2.4) cirq.Y(bit)<power>(sympy.Symbol('alpha')<times>3.4) cirq.Z(bit)<power>(sympy.Symbol('alpha')<times>4.4) )<line_sep>correct_01=cirq.Circuit(cirq.X(bit)<power>(sympy.Symbol('alpha')<times>2.4) cirq.Y(bit)<power>(sympy.Symbol('new')<times>3.4) cirq.Z(bit)<power>(sympy.Symbol('alpha')<times>4.4) )<line_sep>correct_02=cirq.Circuit(cirq.X(bit)<power>(sympy.Symbol('alpha')<times>2.4) cirq.Y(bit)<power>(sympy.Symbol('alpha')<times>3.4) cirq.Z(bit)<power>(sympy.Symbol('new')<times>4.4) )<for_stmt>i,c enumerate([correct_00 correct_01 correct_02])<block_start>u1=cirq.unitary(cirq.resolve_parameters(c param_resolver={'alpha':1.23 'new':4.56}))<line_sep>u2=cirq.unitary(cirq.resolve_parameters(output[0][0][i] param_resolver={'alpha':1.23 'new':4.56}))<line_sep>self.assertTrue(cirq.approx_eq(u1 u2 atol=1e-5))<block_end><block_end><def_stmt>test_simple_pad self<block_start>"""Test simple padding."""<line_sep>bit=cirq.LineQubit(1)<line_sep>circuit=cirq.Circuit(cirq.X(bit)<power>sympy.Symbol('alpha') cirq.Y(bit)<power>sympy.Symbol('alpha') cirq.Z(bit)<power>sympy.Symbol('alpha') )<line_sep>circuit2=cirq.Circuit(cirq.X(bit)<power>sympy.Symbol('beta') cirq.Y(bit)<power>sympy.Symbol('beta') cirq.Z(bit)<power>sympy.Symbol('beta') )<line_sep>circuit3=cirq.Circuit(cirq.X(bit)<power>sympy.Symbol('alpha') cirq.Y(bit)<power>sympy.Symbol('alpha') cirq.Z(bit)<power>sympy.Symbol('alpha') )<line_sep>inputs=util.convert_to_tensor([circuit circuit2 circuit3])<line_sep>symbols=tf.convert_to_tensor(['alpha' 'beta' 'gamma'])<line_sep>new=tf.convert_to_tensor(['new' 'old' 'nothing'])<line_sep>res=tfq_ps_util_ops.tfq_ps_symbol_replace(inputs symbols new)<line_sep>output=util.from_tensor(res)<line_sep>correct_00=cirq.Circuit(cirq.X(bit)<power>sympy.Symbol('new') cirq.Y(bit)<power>sympy.Symbol('alpha') cirq.Z(bit)<power>sympy.Symbol('alpha') )<line_sep>correct_01=cirq.Circuit(cirq.X(bit)<power>sympy.Symbol('alpha') cirq.Y(bit)<power>sympy.Symbol('new') cirq.Z(bit)<power>sympy.Symbol('alpha') )<line_sep>correct_02=cirq.Circuit(cirq.X(bit)<power>sympy.Symbol('alpha') cirq.Y(bit)<power>sympy.Symbol('alpha') cirq.Z(bit)<power>sympy.Symbol('new') )<line_sep>self.assertEqual(correct_00 output[0][0][0])<line_sep>self.assertEqual(correct_01 output[0][0][1])<line_sep>self.assertEqual(correct_02 output[0][0][2])<line_sep>self.assertEqual(correct_00 output[2][0][0])<line_sep>self.assertEqual(correct_01 output[2][0][1])<line_sep>self.assertEqual(correct_02 
output[2][0][2])<line_sep>correct_10=cirq.Circuit(cirq.X(bit)<power>sympy.Symbol('old') cirq.Y(bit)<power>sympy.Symbol('beta') cirq.Z(bit)<power>sympy.Symbol('beta') )<line_sep>correct_11=cirq.Circuit(cirq.X(bit)<power>sympy.Symbol('beta') cirq.Y(bit)<power>sympy.Symbol('old') cirq.Z(bit)<power>sympy.Symbol('beta') )<line_sep>correct_12=cirq.Circuit(cirq.X(bit)<power>sympy.Symbol('beta') cirq.Y(bit)<power>sympy.Symbol('beta') cirq.Z(bit)<power>sympy.Symbol('old') )<line_sep>self.assertEqual(correct_10 output[1][1][0])<line_sep>self.assertEqual(correct_11 output[1][1][1])<line_sep>self.assertEqual(correct_12 output[1][1][2])<line_sep>correct_20=cirq.Circuit()<line_sep>correct_21=cirq.Circuit()<line_sep>correct_22=cirq.Circuit()<line_sep>self.assertEqual(correct_20 output[2][2][0])<line_sep>self.assertEqual(correct_21 output[2][2][1])<line_sep>self.assertEqual(correct_22 output[2][2][2])<line_sep>correct=cirq.Circuit()<for_stmt>i range(3)<block_start><for_stmt>j range(3)<block_start><for_stmt>k range(3)<block_start><if_stmt>i<ne>j<and>(<not>(i<eq>2<and>j<eq>0))<block_start>self.assertEqual(correct output[i][j][k])<block_end><block_end><block_end><block_end><block_end><def_stmt>test_complex_pad self<block_start>"""Test trickier padding."""<line_sep>bit=cirq.GridQubit(0 0)<line_sep>bit2=cirq.GridQubit(0 1)<line_sep>circuit=cirq.Circuit(cirq.X(bit)<power>sympy.Symbol('alpha') cirq.Y(bit)<power>sympy.Symbol('alpha') cirq.Z(bit)<power>sympy.Symbol('alpha') cirq.XX(bit bit2)<power>sympy.Symbol('alpha'))<line_sep>circuit2=cirq.Circuit(cirq.X(bit)<power>sympy.Symbol('beta') cirq.Y(bit)<power>sympy.Symbol('beta') cirq.Z(bit)<power>sympy.Symbol('beta') cirq.XX(bit bit2)<power>sympy.Symbol('alpha'))<line_sep>circuit3=cirq.Circuit(cirq.X(bit)<power>sympy.Symbol('alpha') cirq.Y(bit)<power>sympy.Symbol('alpha') cirq.Z(bit)<power>sympy.Symbol('alpha') cirq.XX(bit bit2)<power>sympy.Symbol('alpha'))<line_sep>inputs=util.convert_to_tensor([circuit circuit2 circuit3])<line_sep>symbols=tf.convert_to_tensor(['alpha' 'beta' 'gamma'])<line_sep>new=tf.convert_to_tensor(['new' 'old' 'nothing'])<line_sep>res=tfq_ps_util_ops.tfq_ps_symbol_replace(inputs symbols new)<line_sep>output=util.from_tensor(res)<line_sep>correct_000=cirq.Circuit(cirq.X(bit)<power>sympy.Symbol('new') cirq.Y(bit)<power>sympy.Symbol('alpha') cirq.Z(bit)<power>sympy.Symbol('alpha') cirq.XX(bit bit2)<power>sympy.Symbol('alpha'))<line_sep>correct_001=cirq.Circuit(cirq.X(bit)<power>sympy.Symbol('alpha') cirq.Y(bit)<power>sympy.Symbol('new') cirq.Z(bit)<power>sympy.Symbol('alpha') cirq.XX(bit bit2)<power>sympy.Symbol('alpha'))<line_sep>correct_002=cirq.Circuit(cirq.X(bit)<power>sympy.Symbol('alpha') cirq.Y(bit)<power>sympy.Symbol('alpha') cirq.Z(bit)<power>sympy.Symbol('new') cirq.XX(bit bit2)<power>sympy.Symbol('alpha'))<line_sep>correct_003=cirq.Circuit(cirq.X(bit)<power>sympy.Symbol('alpha') cirq.Y(bit)<power>sympy.Symbol('alpha') cirq.Z(bit)<power>sympy.Symbol('alpha') cirq.XX(bit bit2)<power>sympy.Symbol('new'))<line_sep>self.assertEqual(correct_000 output[0][0][0])<line_sep>self.assertEqual(correct_001 output[0][0][1])<line_sep>self.assertEqual(correct_002 output[0][0][2])<line_sep>self.assertEqual(correct_003 output[0][0][3])<line_sep>self.assertEqual(correct_000 output[2][0][0])<line_sep>self.assertEqual(correct_001 output[2][0][1])<line_sep>self.assertEqual(correct_002 output[2][0][2])<line_sep>self.assertEqual(correct_003 output[2][0][3])<line_sep>correct_110=cirq.Circuit(cirq.X(bit)<power>sympy.Symbol('old') 
cirq.Y(bit)<power>sympy.Symbol('beta') cirq.Z(bit)<power>sympy.Symbol('beta') cirq.XX(bit bit2)<power>sympy.Symbol('alpha'))<line_sep>correct_111=cirq.Circuit(cirq.X(bit)<power>sympy.Symbol('beta') cirq.Y(bit)<power>sympy.Symbol('old') cirq.Z(bit)<power>sympy.Symbol('beta') cirq.XX(bit bit2)<power>sympy.Symbol('alpha'))<line_sep>correct_112=cirq.Circuit(cirq.X(bit)<power>sympy.Symbol('beta') cirq.Y(bit)<power>sympy.Symbol('beta') cirq.Z(bit)<power>sympy.Symbol('old') cirq.XX(bit bit2)<power>sympy.Symbol('alpha'))<line_sep>correct_113=cirq.Circuit()<line_sep>self.assertEqual(correct_110 output[1][1][0])<line_sep>self.assertEqual(correct_111 output[1][1][1])<line_sep>self.assertEqual(correct_112 output[1][1][2])<line_sep>self.assertEqual(correct_113 output[1][1][3])<line_sep>correct_100=cirq.Circuit(cirq.X(bit)<power>sympy.Symbol('beta') cirq.Y(bit)<power>sympy.Symbol('beta') cirq.Z(bit)<power>sympy.Symbol('beta') cirq.XX(bit bit2)<power>sympy.Symbol('new'))<line_sep>correct_101=cirq.Circuit()<line_sep>correct_102=cirq.Circuit()<line_sep>correct_103=cirq.Circuit()<line_sep>self.assertEqual(correct_100 output[1][0][0])<line_sep>self.assertEqual(correct_101 output[1][0][1])<line_sep>self.assertEqual(correct_102 output[1][0][2])<line_sep>self.assertEqual(correct_103 output[1][0][3])<line_sep>correct_220=cirq.Circuit()<line_sep>correct_221=cirq.Circuit()<line_sep>correct_222=cirq.Circuit()<line_sep>correct_223=cirq.Circuit()<line_sep>self.assertEqual(correct_220 output[2][2][0])<line_sep>self.assertEqual(correct_221 output[2][2][1])<line_sep>self.assertEqual(correct_222 output[2][2][2])<line_sep>self.assertEqual(correct_223 output[2][2][3])<line_sep>correct=cirq.Circuit()<for_stmt>i range(3)<block_start><for_stmt>j range(3)<block_start><for_stmt>k range(3)<block_start><if_stmt>i<ne>j<and>(<not>(i<eq>2<and>j<eq>0))<and>(<not>(i<eq>1<and>j<eq>0))<block_start>self.assertEqual(correct output[i][j][k])<block_end><block_end><block_end><block_end><block_end><block_end><class_stmt>PSWeightsFromSymbolTest(tf.test.TestCase)<block_start>"""Tests tfq_ps_weights_from_symbols."""<def_stmt>test_simple self<block_start>"""Ensure that weight extraction works."""<line_sep>bit=cirq.GridQubit(0 0)<line_sep>circuit=cirq.Circuit(cirq.X(bit)<power>(sympy.Symbol('alpha')<times>2))<line_sep>inputs=util.convert_to_tensor([circuit])<line_sep>symbols=tf.convert_to_tensor(['alpha'])<line_sep>res=tfq_ps_util_ops.tfq_ps_weights_from_symbols(inputs symbols)<line_sep>self.assertAllClose(res np.array([[[2.0]]]))<block_end><def_stmt>test_empty self<block_start>"""Test empty circuit. and symbol free circuit. 
does nothing."""<line_sep>bit=cirq.GridQubit(0 0)<line_sep>circuit=cirq.Circuit(cirq.X(bit))<line_sep>circuit2=cirq.Circuit()<line_sep>inputs=util.convert_to_tensor([circuit circuit2])<line_sep>symbols=tf.convert_to_tensor(['alpha'])<line_sep>res=tfq_ps_util_ops.tfq_ps_weights_from_symbols(inputs symbols)<line_sep>self.assertAllClose(res np.array([[[]] [[]]]))<block_end><def_stmt>test_rotation_gates self<block_start>"""Test that rotation gates work."""<line_sep>bit=cirq.GridQubit(0 0)<line_sep>circuit=cirq.Circuit(cirq.rx(sympy.Symbol('alpha')<times>5.0)(bit))<line_sep>inputs=util.convert_to_tensor([circuit])<line_sep>symbols=tf.convert_to_tensor(['alpha'])<line_sep>res=tfq_ps_util_ops.tfq_ps_weights_from_symbols(inputs symbols)<line_sep>self.assertAllClose(res np.array([[[5.0/np.pi]]]))<block_end><def_stmt>test_error self<block_start>"""Ensure if a symbol can't be found the op errors."""<line_sep>bit=cirq.GridQubit(0 0)<line_sep>circuit=cirq.Circuit(cirq.X(bit)<power>(sympy.Symbol('delta')<times>2))<line_sep>inputs=util.convert_to_tensor([circuit])<line_sep>symbols=tf.convert_to_tensor(['alpha' 'delta'])<line_sep>tfq_ps_util_ops.tfq_ps_weights_from_symbols(inputs symbols)<line_sep>symbols=tf.convert_to_tensor(['alpha'])<with_stmt>self.assertRaisesRegex(Exception expected_regex='sympy.Symbol')<block_start>tfq_ps_util_ops.tfq_ps_weights_from_symbols(inputs symbols)<block_end>symbols=tf.convert_to_tensor([['delta']])<with_stmt>self.assertRaisesRegex(Exception expected_regex='rank 1. Got rank 2.')<block_start>tfq_ps_util_ops.tfq_ps_weights_from_symbols(inputs symbols)<block_end>inputs=tf.convert_to_tensor(['junk'])<line_sep>symbols=tf.convert_to_tensor(['delta'])<with_stmt>self.assertRaisesRegex(Exception expected_regex='Unparseable proto:')<block_start>tfq_ps_util_ops.tfq_ps_weights_from_symbols(inputs symbols)<block_end>inputs=util.convert_to_tensor([[circuit]])<with_stmt>self.assertRaisesRegex(Exception expected_regex='rank 1. 
Got rank 2.')<block_start>tfq_ps_util_ops.tfq_ps_weights_from_symbols(inputs symbols)<block_end><block_end><def_stmt>test_many_values self<block_start>"""Ensure that padding with few symbols and many values works."""<line_sep>bit=cirq.LineQubit(1)<line_sep>circuits=[cirq.Circuit(cirq.X(bit)<power>(sympy.Symbol('alpha')<times>2.0) cirq.Y(bit)<power>(sympy.Symbol('alpha')<times>3.0) cirq.Z(bit)<power>(sympy.Symbol('alpha')) cirq.X(bit)<power>(sympy.Symbol('alpha')<times>4.0)) cirq.Circuit(cirq.X(bit)<power>(sympy.Symbol('alpha')<times>9.0)) cirq.Circuit(cirq.X(bit)<power>sympy.Symbol('beta'))]<line_sep>inputs=util.convert_to_tensor(circuits)<line_sep>symbols=tf.convert_to_tensor(['alpha' 'beta'])<line_sep>res=tfq_ps_util_ops.tfq_ps_weights_from_symbols(inputs symbols)<line_sep>self.assertAllClose(res np.array([[[2.0 3.0 1.0 4.0] [0.0 0.0 0.0 0.0]] [[9.0 0.0 0.0 0.0] [0.0 0.0 0.0 0.0]] [[0.0 0.0 0.0 0.0] [1.0 0.0 0.0 0.0]]]))<block_end><def_stmt>test_many_symbols self<block_start>"""Ensure that padding with few values and many symbols works."""<line_sep>bit=cirq.GridQubit(0 0)<line_sep>circuits=[cirq.Circuit(cirq.X(bit)<power>(sympy.Symbol('alpha')<times>2.0)) cirq.Circuit(cirq.X(bit)<power>(sympy.Symbol('beta')<times>6)) cirq.Circuit(cirq.X(bit)<power>(sympy.Symbol('alpha')<times>5.0)) cirq.Circuit(cirq.X(bit)<power>(sympy.Symbol('gamma')<times>8)) cirq.Circuit(cirq.X(bit)<power>(sympy.Symbol('delta')<times>9))]<line_sep>inputs=util.convert_to_tensor(circuits)<line_sep>symbols=tf.convert_to_tensor(['alpha' 'beta' 'gamma' 'delta'])<line_sep>res=tfq_ps_util_ops.tfq_ps_weights_from_symbols(inputs symbols)<line_sep>self.assertAllClose(res np.array([[[2.0] [0.0] [0.0] [0.0]] [[0.0] [6.0] [0.0] [0.0]] [[5.0] [0.0] [0.0] [0.0]] [[0.0] [0.0] [8.0] [0.0]] [[0.0] [0.0] [0.0] [9.0]]]))<block_end><def_stmt>test_out_of_order self<block_start>"""Test that discovery order of symbols in circuits doesn't matter."""<line_sep>bit=cirq.GridQubit(0 0)<line_sep>circuit=cirq.Circuit(cirq.X(bit)<power>(sympy.Symbol('alpha')<times>2) cirq.Y(bit)<power>(sympy.Symbol('beta')<times>3))<line_sep>inputs=util.convert_to_tensor([circuit])<line_sep>symbols=tf.convert_to_tensor(['alpha' 'beta'])<line_sep>res=tfq_ps_util_ops.tfq_ps_weights_from_symbols(inputs symbols)<line_sep>self.assertAllClose(res np.array([[[2.0] [3.0]]]))<line_sep>symbols=tf.convert_to_tensor(['beta' 'alpha'])<line_sep>res=tfq_ps_util_ops.tfq_ps_weights_from_symbols(inputs symbols)<line_sep>self.assertAllClose(res np.array([[[3.0] [2.0]]]))<block_end><def_stmt>test_padding self<block_start>"""Ensure that the padding is correct in a complex example."""<line_sep>bit=cirq.GridQubit(0 0)<line_sep>circuits=[cirq.Circuit(cirq.X(bit)<power>(sympy.Symbol('alpha')<times>2.0) cirq.Y(bit)<power>(sympy.Symbol('alpha')<times>3.0) cirq.Z(bit)<power>(sympy.Symbol('beta')<times>4.0) ) cirq.Circuit(cirq.X(bit)<power>(sympy.Symbol('alpha')<times>2.0) cirq.Y(bit)<power>(sympy.Symbol('beta')<times>3.0) cirq.Z(bit)<power>(sympy.Symbol('beta')<times>4.0) ) cirq.Circuit(cirq.X(bit)<power>(sympy.Symbol('alpha')<times>2.0) cirq.Y(bit)<power>(sympy.Symbol('beta')<times>3.0) cirq.Z(bit)<power>(sympy.Symbol('gamma')<times>4.0) )]<line_sep>inputs=util.convert_to_tensor(circuits)<line_sep>symbols=tf.convert_to_tensor(['alpha' 'beta' 'gamma'])<line_sep>res=tfq_ps_util_ops.tfq_ps_weights_from_symbols(inputs symbols)<line_sep>self.assertAllClose(res np.array([[[2.0 3.0] [4.0 0.0] [0.0 0.0]] [[2.0 0.0] [3.0 4.0] [0.0 0.0]] [[2.0 0.0] [3.0 0.0] [4.0 
0.0]]]))<block_end><def_stmt>test_padding_with_non_parameterized_gates self<block_start>"""Ensure that the padding is correct in a complex example."""<line_sep>bit=cirq.GridQubit(0 0)<line_sep>circuits=[cirq.Circuit(cirq.X(bit)<power>(sympy.Symbol('alpha')<times>2.0) cirq.Y(bit)<power>3.0 cirq.Z(bit)<power>(sympy.Symbol('beta')<times>4.0) ) cirq.Circuit(cirq.X(bit)<power>(sympy.Symbol('alpha')<times>2.0) cirq.Y(bit)<power>(sympy.Symbol('beta')<times>3.0) cirq.Z(bit)<power>4.0 ) cirq.Circuit(cirq.X(bit)<power>2.0 cirq.Y(bit)<power>(sympy.Symbol('beta')<times>3.0) cirq.Z(bit)<power>(sympy.Symbol('gamma')<times>4.0) )]<line_sep>inputs=util.convert_to_tensor(circuits)<line_sep>symbols=tf.convert_to_tensor(['alpha' 'beta' 'gamma'])<line_sep>res=tfq_ps_util_ops.tfq_ps_weights_from_symbols(inputs symbols)<line_sep>self.assertAllClose(res np.array([[[2.0] [4.0] [0.0]] [[2.0] [3.0] [0.0]] [[0.0] [3.0] [4.0]]]))<block_end><def_stmt>test_ignorance self<block_start>"""Test ignorance of ISP, PXP, FSIM gates."""<line_sep>circuit_batch=_complex_test_circuit()<line_sep>inputs=util.convert_to_tensor(circuit_batch)<line_sep>symbols=tf.convert_to_tensor(['r' 't'])<line_sep>res=tfq_ps_util_ops.tfq_ps_weights_from_symbols(inputs symbols)<line_sep># Because there are no weights to be gathered, the last dimension = 0 self.assertAllClose(tf.shape(res) [len(circuit_batch) 2 0])<block_end><block_end><if_stmt>__name__<eq>"__main__"<block_start>tf.test.main()<block_end>
<import_from_stmt>holoviews.element RGB Tiles Points Bounds<import_from_stmt>holoviews.element.tiles StamenTerrain _ATTRIBUTIONS<import_from_stmt>.test_plot TestPlotlyPlot plotly_renderer<import_stmt>numpy<as>np<class_stmt>TestMapboxTilesPlot(TestPlotlyPlot)<block_start><def_stmt>setUp self<block_start>super().setUp()<line_sep># Precompute coordinates self.xs=[3000000 2000000 1000000]<line_sep>self.ys=[-3000000 -2000000 -1000000]<line_sep>self.x_range=(-5000000 4000000)<line_sep>self.x_center=sum(self.x_range)/2.0<line_sep>self.y_range=(-3000000 2000000)<line_sep>self.y_center=sum(self.y_range)/2.0<line_sep>self.lon_range,self.lat_range=Tiles.easting_northing_to_lon_lat(self.x_range self.y_range)<line_sep>self.lon_centers,self.lat_centers=Tiles.easting_northing_to_lon_lat([self.x_center] [self.y_center])<line_sep>self.lon_center,self.lat_center=self.lon_centers[0] self.lat_centers[0]<line_sep>self.lons,self.lats=Tiles.easting_northing_to_lon_lat(self.xs self.ys)<block_end><def_stmt>test_mapbox_tiles_defaults self<block_start>tiles=Tiles("").redim.range(x=self.x_range y=self.y_range)<line_sep>fig_dict=plotly_renderer.get_plot_state(tiles)<line_sep># Check dummy trace self.assertEqual(len(fig_dict["data"]) 1)<line_sep>dummy_trace=fig_dict["data"][0]<line_sep>self.assertEqual(dummy_trace["type"] "scattermapbox")<line_sep>self.assertEqual(dummy_trace["lon"] [])<line_sep>self.assertEqual(dummy_trace["lat"] [])<line_sep>self.assertEqual(dummy_trace["showlegend"] <false>)<line_sep># Check mapbox subplot subplot=fig_dict["layout"]["mapbox"]<line_sep>self.assertEqual(subplot["style"] "white-bg")<line_sep>self.assertEqual(subplot['center'] {'lat':self.lat_center 'lon':self.lon_center})<line_sep># Check that xaxis and yaxis entries are not created self.assertNotIn("xaxis" fig_dict["layout"])<line_sep>self.assertNotIn("yaxis" fig_dict["layout"])<line_sep># Check no layers are introduced when an empty tile server string is # passed layers=fig_dict["layout"]["mapbox"].get("layers" [])<line_sep>self.assertEqual(len(layers) 0)<block_end><def_stmt>test_styled_mapbox_tiles self<block_start>tiles=Tiles().opts(mapboxstyle="dark" accesstoken="token-str").redim.range(x=self.x_range y=self.y_range)<line_sep>fig_dict=plotly_renderer.get_plot_state(tiles)<line_sep># Check mapbox subplot subplot=fig_dict["layout"]["mapbox"]<line_sep>self.assertEqual(subplot["style"] "dark")<line_sep>self.assertEqual(subplot["accesstoken"] "token-str")<line_sep>self.assertEqual(subplot['center'] {'lat':self.lat_center 'lon':self.lon_center})<block_end><def_stmt>test_raster_layer self<block_start>tiles=StamenTerrain().redim.range(x=self.x_range y=self.y_range).opts(alpha=0.7 min_zoom=3 max_zoom=7)<line_sep>fig_dict=plotly_renderer.get_plot_state(tiles)<line_sep># Check dummy trace self.assertEqual(len(fig_dict["data"]) 1)<line_sep>dummy_trace=fig_dict["data"][0]<line_sep>self.assertEqual(dummy_trace["type"] "scattermapbox")<line_sep>self.assertEqual(dummy_trace["lon"] [])<line_sep>self.assertEqual(dummy_trace["lat"] [])<line_sep>self.assertEqual(dummy_trace["showlegend"] <false>)<line_sep># Check mapbox subplot subplot=fig_dict["layout"]["mapbox"]<line_sep>self.assertEqual(subplot["style"] "white-bg")<line_sep>self.assertEqual(subplot['center'] {'lat':self.lat_center 'lon':self.lon_center})<line_sep># Check for raster layer layers=fig_dict["layout"]["mapbox"].get("layers" [])<line_sep>self.assertEqual(len(layers) 1)<line_sep>layer=layers[0]<line_sep>self.assertEqual(layer["source"][0].lower() 
tiles.data.lower())<line_sep>self.assertEqual(layer["opacity"] 0.7)<line_sep>self.assertEqual(layer["sourcetype"] "raster")<line_sep>self.assertEqual(layer["minzoom"] 3)<line_sep>self.assertEqual(layer["maxzoom"] 7)<line_sep>self.assertEqual(layer["sourceattribution"] _ATTRIBUTIONS[('stamen' 'net/t')])<block_end><def_stmt>test_overlay self# Base layer is mapbox vector layer <block_start>tiles=Tiles("").opts(mapboxstyle="dark" accesstoken="token-str")<line_sep># Raster tile layer stamen_raster=StamenTerrain().opts(alpha=0.7)<line_sep># RGB layer rgb_data=np.random.rand(10 10 3)<line_sep>rgb=RGB(rgb_data bounds=(self.x_range[0] self.y_range[0] self.x_range[1] self.y_range[1])).opts(opacity=0.5)<line_sep># Points layer points=Points([(0 0) (self.x_range[1] self.y_range[1])]).opts(show_legend=<true>)<line_sep># Bounds bounds=Bounds((self.x_range[0] self.y_range[0] 0 0))<line_sep># Overlay overlay=(tiles<times>stamen_raster<times>rgb<times>points<times>bounds).redim.range(x=self.x_range y=self.y_range)<line_sep># Render to plotly figure dictionary fig_dict=plotly_renderer.get_plot_state(overlay)<line_sep># Check number of traces and layers traces=fig_dict["data"]<line_sep>subplot=fig_dict["layout"]["mapbox"]<line_sep>layers=subplot["layers"]<line_sep>self.assertEqual(len(traces) 5)<line_sep>self.assertEqual(len(layers) 2)<line_sep># Check vector layer dummy_trace=traces[0]<line_sep>self.assertEqual(dummy_trace["type"] "scattermapbox")<line_sep>self.assertEqual(dummy_trace["lon"] [])<line_sep>self.assertEqual(dummy_trace["lat"] [])<line_sep>self.assertFalse(dummy_trace["showlegend"])<line_sep>self.assertEqual(subplot["style"] "dark")<line_sep>self.assertEqual(subplot["accesstoken"] "token-str")<line_sep>self.assertEqual(subplot['center'] {'lat':self.lat_center 'lon':self.lon_center})<line_sep># Check raster layer dummy_trace=traces[1]<line_sep>raster_layer=layers[0]<line_sep>self.assertEqual(dummy_trace["type"] "scattermapbox")<line_sep>self.assertEqual(dummy_trace["lon"] [])<line_sep>self.assertEqual(dummy_trace["lat"] [])<line_sep>self.assertFalse(dummy_trace["showlegend"])<line_sep># Check raster_layer self.assertEqual(raster_layer["below"] "traces")<line_sep>self.assertEqual(raster_layer["opacity"] 0.7)<line_sep>self.assertEqual(raster_layer["sourcetype"] "raster")<line_sep>self.assertEqual(raster_layer["source"][0].lower() stamen_raster.data.lower())<line_sep># Check RGB layer dummy_trace=traces[2]<line_sep>rgb_layer=layers[1]<line_sep>self.assertEqual(dummy_trace["type"] "scattermapbox")<line_sep>self.assertEqual(dummy_trace["lon"] [<none>])<line_sep>self.assertEqual(dummy_trace["lat"] [<none>])<line_sep>self.assertFalse(dummy_trace["showlegend"])<line_sep># Check rgb_layer self.assertEqual(rgb_layer["below"] "traces")<line_sep>self.assertEqual(rgb_layer["opacity"] 0.5)<line_sep>self.assertEqual(rgb_layer["sourcetype"] "image")<line_sep>self.assertTrue(rgb_layer["source"].startswith("data:image/png;base64,iVBOR"))<line_sep>self.assertEqual(rgb_layer["coordinates"] [[self.lon_range[0] self.lat_range[1]] [self.lon_range[1] self.lat_range[1]] [self.lon_range[1] self.lat_range[0]] [self.lon_range[0] self.lat_range[0]]])<line_sep># Check Points layer points_trace=traces[3]<line_sep>self.assertEqual(points_trace["type"] "scattermapbox")<line_sep>self.assertEqual(points_trace["lon"] np.array([0 self.lon_range[1]]))<line_sep>self.assertEqual(points_trace["lat"] np.array([0 self.lat_range[1]]))<line_sep>self.assertEqual(points_trace["mode"] 
"markers")<line_sep>self.assertTrue(points_trace.get("showlegend" <true>))<line_sep># Check Bounds layer bounds_trace=traces[4]<line_sep>self.assertEqual(bounds_trace["type"] "scattermapbox")<line_sep>self.assertEqual(bounds_trace["lon"] np.array([self.lon_range[0] self.lon_range[0] 0 0 self.lon_range[0]]))<line_sep>self.assertEqual(bounds_trace["lat"] np.array([self.lat_range[0] 0 0 self.lat_range[0] self.lat_range[0]]))<line_sep>self.assertEqual(bounds_trace["mode"] "lines")<line_sep>self.assertTrue(points_trace["showlegend"] <false>)<line_sep># No xaxis/yaxis self.assertNotIn("xaxis" fig_dict["layout"])<line_sep>self.assertNotIn("yaxis" fig_dict["layout"])<block_end><block_end>
<import_from_stmt>collections namedtuple OrderedDict<import_stmt>gym<import_stmt>logging<import_stmt>re<import_from_stmt>typing Callable Dict List Optional Tuple Type<import_from_stmt>ray.util.debug log_once<import_from_stmt>ray.rllib.models.tf.tf_action_dist TFActionDistribution<import_from_stmt>ray.rllib.models.modelv2 ModelV2<import_from_stmt>ray.rllib.policy.policy Policy<import_from_stmt>ray.rllib.policy.sample_batch SampleBatch<import_from_stmt>ray.rllib.policy.tf_policy TFPolicy<import_from_stmt>ray.rllib.policy.view_requirement ViewRequirement<import_from_stmt>ray.rllib.models.catalog ModelCatalog<import_from_stmt>ray.rllib.utils force_list<import_from_stmt>ray.rllib.utils.annotations override DeveloperAPI<import_from_stmt>ray.rllib.utils.debug summarize<import_from_stmt>ray.rllib.utils.deprecation deprecation_warning DEPRECATED_VALUE<import_from_stmt>ray.rllib.utils.framework try_import_tf<import_from_stmt>ray.rllib.utils.tf_ops get_placeholder<import_from_stmt>ray.rllib.utils.typing LocalOptimizer ModelGradients TensorType TrainerConfigDict<line_sep>tf1,tf,tfv=try_import_tf()<line_sep>logger=logging.getLogger(__name__)<line_sep># Variable scope in which created variables will be placed under. TOWER_SCOPE_NAME="tower"<line_sep>@DeveloperAPI<class_stmt>DynamicTFPolicy(TFPolicy)<block_start>"""A TFPolicy that auto-defines placeholders dynamically at runtime. Do not sub-class this class directly (neither should you sub-class TFPolicy), but rather use rllib.policy.tf_policy_template.build_tf_policy to generate your custom tf (graph-mode or eager) Policy classes. Initialization of this class occurs in two phases. * Phase 1: the model is created and model variables are initialized. * Phase 2: a fake batch of data is created, sent to the trajectory postprocessor, and then used to create placeholders for the loss function. The loss and stats functions are initialized with these placeholders. Initialization defines the static graph. Attributes: observation_space (gym.Space): observation space of the policy. action_space (gym.Space): action space of the policy. config (dict): config of the policy model (ModelV2): TF model instance dist_class (type): TF action distribution class """<line_sep>@DeveloperAPI<def_stmt>__init__ self obs_space:gym.spaces.Space action_space:gym.spaces.Space config:TrainerConfigDict loss_fn:Callable[[Policy ModelV2 Type[TFActionDistribution] SampleBatch] TensorType] * stats_fn:Optional[Callable[[Policy SampleBatch] Dict[str TensorType]]]=<none> grad_stats_fn:Optional[Callable[[Policy SampleBatch ModelGradients] Dict[str TensorType]]]=<none> before_loss_init:Optional[Callable[[Policy gym.spaces.Space gym.spaces.Space TrainerConfigDict] <none>]]=<none> make_model:Optional[Callable[[Policy gym.spaces.Space gym.spaces.Space TrainerConfigDict] ModelV2]]=<none> action_sampler_fn:Optional[Callable[[TensorType List[TensorType]] Tuple[TensorType TensorType]]]=<none> action_distribution_fn:Optional[Callable[[Policy ModelV2 TensorType TensorType TensorType] Tuple[TensorType type List[TensorType]]]]=<none> existing_inputs:Optional[Dict[str "tf1.placeholder"]]=<none> existing_model:Optional[ModelV2]=<none> get_batch_divisibility_req:Optional[Callable[[Policy] int]]=<none> obs_include_prev_action_reward=DEPRECATED_VALUE<block_start>"""Initializes a DynamicTFPolicy instance. Args: observation_space (gym.spaces.Space): Observation space of the policy. action_space (gym.spaces.Space): Action space of the policy. config (TrainerConfigDict): Policy-specific configuration data. 
loss_fn (Callable[[Policy, ModelV2, Type[TFActionDistribution], SampleBatch], TensorType]): Function that returns a loss tensor for the policy graph. stats_fn (Optional[Callable[[Policy, SampleBatch], Dict[str, TensorType]]]): Optional function that returns a dict of TF fetches given the policy and batch input tensors. grad_stats_fn (Optional[Callable[[Policy, SampleBatch, ModelGradients], Dict[str, TensorType]]]): Optional function that returns a dict of TF fetches given the policy, sample batch, and loss gradient tensors. before_loss_init (Optional[Callable[ [Policy, gym.spaces.Space, gym.spaces.Space, TrainerConfigDict], None]]): Optional function to run prior to loss init that takes the same arguments as __init__. make_model (Optional[Callable[[Policy, gym.spaces.Space, gym.spaces.Space, TrainerConfigDict], ModelV2]]): Optional function that returns a ModelV2 object given policy, obs_space, action_space, and policy config. All policy variables should be created in this function. If not specified, a default model will be created. action_sampler_fn (Optional[Callable[[Policy, ModelV2, Dict[ str, TensorType], TensorType, TensorType], Tuple[TensorType, TensorType]]]): A callable returning a sampled action and its log-likelihood given Policy, ModelV2, input_dict, explore, timestep, and is_training. action_distribution_fn (Optional[Callable[[Policy, ModelV2, Dict[str, TensorType], TensorType, TensorType], Tuple[TensorType, type, List[TensorType]]]]): A callable returning distribution inputs (parameters), a dist-class to generate an action distribution object from, and internal-state outputs (or an empty list if not applicable). Note: No Exploration hooks have to be called from within `action_distribution_fn`. It's should only perform a simple forward pass through some model. If None, pass inputs through `self.model()` to get distribution inputs. The callable takes as inputs: Policy, ModelV2, input_dict, explore, timestep, is_training. existing_inputs (Optional[Dict[str, tf1.placeholder]]): When copying a policy, this specifies an existing dict of placeholders to use instead of defining new ones. existing_model (Optional[ModelV2]): When copying a policy, this specifies an existing model to clone and share weights with. get_batch_divisibility_req (Optional[Callable[[Policy], int]]): Optional callable that returns the divisibility requirement for sample batches. If None, will assume a value of 1. """<if_stmt>obs_include_prev_action_reward<ne>DEPRECATED_VALUE<block_start>deprecation_warning(old="obs_include_prev_action_reward" error=<false>)<block_end>self.observation_space=obs_space<line_sep>self.action_space=action_space<line_sep>self.config=config<line_sep>self.framework="tf"<line_sep>self._loss_fn=loss_fn<line_sep>self._stats_fn=stats_fn<line_sep>self._grad_stats_fn=grad_stats_fn<line_sep>self._seq_lens=<none><line_sep>self._is_tower=existing_inputs<is><not><none><line_sep>dist_class=<none><if_stmt>action_sampler_fn<or>action_distribution_fn<block_start><if_stmt><not>make_model<block_start><raise>ValueError("`make_model` is required if `action_sampler_fn` OR "<concat>"`action_distribution_fn` is given")<block_end><block_end><else_stmt><block_start>dist_class,logit_dim=ModelCatalog.get_action_dist(action_space self.config["model"])<block_end># Setup self.model. <if_stmt>existing_model<block_start><if_stmt>isinstance(existing_model list)<block_start>self.model=existing_model[0]<line_sep># TODO: (sven) hack, but works for `target_[q_]?model`. 
<for_stmt>i range(1 len(existing_model))<block_start>setattr(self existing_model[i][0] existing_model[i][1])<block_end><block_end><block_end><elif_stmt>make_model<block_start>self.model=make_model(self obs_space action_space config)<block_end><else_stmt><block_start>self.model=ModelCatalog.get_model_v2(obs_space=obs_space action_space=action_space num_outputs=logit_dim model_config=self.config["model"] framework="tf")<block_end># Auto-update model's inference view requirements, if recurrent. self._update_model_view_requirements_from_init_state()<line_sep># Input placeholders already given -> Use these. <if_stmt>existing_inputs<block_start>self._state_inputs=[v<for>k,v existing_inputs.items()<if>k.startswith("state_in_")]<line_sep># Placeholder for RNN time-chunk valid lengths. <if_stmt>self._state_inputs<block_start>self._seq_lens=existing_inputs[SampleBatch.SEQ_LENS]<block_end><block_end># Create new input placeholders. <else_stmt><block_start>self._state_inputs=[get_placeholder(space=vr.space time_axis=<not>isinstance(vr.shift int) )<for>k,vr self.model.view_requirements.items()<if>k.startswith("state_in_")]<line_sep># Placeholder for RNN time-chunk valid lengths. <if_stmt>self._state_inputs<block_start>self._seq_lens=tf1.placeholder(dtype=tf.int32 shape=[<none>] name="seq_lens")<block_end><block_end># Use default settings. # Add NEXT_OBS, STATE_IN_0.., and others. self.view_requirements=self._get_default_view_requirements()<line_sep># Combine view_requirements for Model and Policy. self.view_requirements.update(self.model.view_requirements)<line_sep># Disable env-info placeholder. <if_stmt>SampleBatch.INFOS<in>self.view_requirements<block_start>self.view_requirements[SampleBatch.INFOS].used_for_training=<false><block_end># Setup standard placeholders. <if_stmt>self._is_tower<block_start>timestep=existing_inputs["timestep"]<line_sep>explore=<false><line_sep>self._input_dict,self._dummy_batch=self._get_input_dict_and_dummy_batch(self.view_requirements existing_inputs)<block_end><else_stmt><block_start>action_ph=ModelCatalog.get_action_placeholder(action_space)<line_sep>prev_action_ph={}<if_stmt>SampleBatch.PREV_ACTIONS<not><in>self.view_requirements<block_start>prev_action_ph={SampleBatch.PREV_ACTIONS:ModelCatalog.get_action_placeholder(action_space "prev_action")}<block_end>self._input_dict,self._dummy_batch=self._get_input_dict_and_dummy_batch(self.view_requirements dict({SampleBatch.ACTIONS:action_ph} **prev_action_ph))<line_sep># Placeholder for (sampling steps) timestep (int). timestep=tf1.placeholder_with_default(tf.zeros(() dtype=tf.int64) () name="timestep")<line_sep># Placeholder for `is_exploring` flag. explore=tf1.placeholder_with_default(<true> () name="is_exploring")<block_end># Placeholder for `is_training` flag. self._input_dict.is_training=self._get_is_training_placeholder()<line_sep># Multi-GPU towers do not need any action computing/exploration # graphs. sampled_action=<none><line_sep>sampled_action_logp=<none><line_sep>dist_inputs=<none><line_sep>self._state_out=<none><if_stmt><not>self._is_tower# Create the Exploration object to use for this Policy. <block_start>self.exploration=self._create_exploration()<line_sep># Fully customized action generation (e.g., custom policy). 
<if_stmt>action_sampler_fn<block_start>sampled_action,sampled_action_logp=action_sampler_fn(self self.model obs_batch=self._input_dict[SampleBatch.CUR_OBS] state_batches=self._state_inputs seq_lens=self._seq_lens prev_action_batch=self._input_dict.get(SampleBatch.PREV_ACTIONS) prev_reward_batch=self._input_dict.get(SampleBatch.PREV_REWARDS) explore=explore is_training=self._input_dict.is_training)<block_end># Distribution generation is customized, e.g., DQN, DDPG. <else_stmt><block_start><if_stmt>action_distribution_fn# Try new action_distribution_fn signature, supporting # state_batches and seq_lens. <block_start>in_dict=self._input_dict<try_stmt><block_start>dist_inputs,dist_class,self._state_out=action_distribution_fn(self self.model input_dict=in_dict state_batches=self._state_inputs seq_lens=self._seq_lens explore=explore timestep=timestep is_training=in_dict.is_training)<block_end># Trying the old way (to stay backward compatible). # TODO: Remove in future. <except_stmt>TypeError<as>e<block_start><if_stmt>"positional argument"<in>e.args[0]<or>"unexpected keyword argument"<in>e.args[0]<block_start>dist_inputs,dist_class,self._state_out=action_distribution_fn(self self.model obs_batch=in_dict[SampleBatch.CUR_OBS] state_batches=self._state_inputs seq_lens=self._seq_lens prev_action_batch=in_dict.get(SampleBatch.PREV_ACTIONS) prev_reward_batch=in_dict.get(SampleBatch.PREV_REWARDS) explore=explore is_training=in_dict.is_training)<block_end><else_stmt><block_start><raise>e<block_end><block_end><block_end># Default distribution generation behavior: # Pass through model. E.g., PG, PPO. <else_stmt><block_start><if_stmt>isinstance(self.model tf.keras.Model)<block_start>dist_inputs,self._state_out,self._extra_action_fetches=self.model(self._input_dict)<block_end><else_stmt><block_start>dist_inputs,self._state_out=self.model(self._input_dict self._state_inputs self._seq_lens)<block_end><block_end>action_dist=dist_class(dist_inputs self.model)<line_sep># Using exploration to get final action (e.g. via sampling). sampled_action,sampled_action_logp=self.exploration.get_exploration_action(action_distribution=action_dist timestep=timestep explore=explore)<block_end><block_end># Phase 1 init. sess=tf1.get_default_session()<or>tf1.Session(config=tf1.ConfigProto(**self.config["tf_session_args"]))<line_sep>batch_divisibility_req=get_batch_divisibility_req(self)<if>callable(get_batch_divisibility_req)<else>(get_batch_divisibility_req<or>1)<line_sep>super().__init__(observation_space=obs_space action_space=action_space config=config sess=sess obs_input=self._input_dict[SampleBatch.OBS] action_input=self._input_dict[SampleBatch.ACTIONS] sampled_action=sampled_action sampled_action_logp=sampled_action_logp dist_inputs=dist_inputs dist_class=dist_class loss=<none> # dynamically initialized on run loss_inputs=[] model=self.model state_inputs=self._state_inputs state_outputs=self._state_out prev_action_input=self._input_dict.get(SampleBatch.PREV_ACTIONS) prev_reward_input=self._input_dict.get(SampleBatch.PREV_REWARDS) seq_lens=self._seq_lens max_seq_len=config["model"]["max_seq_len"] batch_divisibility_req=batch_divisibility_req explore=explore timestep=timestep)<line_sep># Phase 2 init. <if_stmt>before_loss_init<is><not><none><block_start>before_loss_init(self obs_space action_space config)<block_end># Loss initialization and model/postprocessing test calls. 
<if_stmt><not>self._is_tower<block_start>self._initialize_loss_from_dummy_batch(auto_remove_unneeded_view_reqs=<true>)<line_sep># Create MultiGPUTowerStacks, if we have at least one actual # GPU or >1 CPUs (fake GPUs). <if_stmt>len(self.devices)<g>1<or>any("gpu"<in>d<for>d self.devices)# Per-GPU graph copies created here must share vars with the # policy. Therefore, `reuse` is set to tf1.AUTO_REUSE because # Adam nodes are created after all of the device copies are # created. <block_start><with_stmt>tf1.variable_scope("" reuse=tf1.AUTO_REUSE)<block_start>self.multi_gpu_tower_stacks=[TFMultiGPUTowerStack(policy=self)<for>i range(self.config.get("num_multi_gpu_tower_stacks" 1))]<block_end><block_end># Initialize again after loss and tower init. self.get_session().run(tf1.global_variables_initializer())<block_end><block_end>@override(TFPolicy)@DeveloperAPI<def_stmt>copy self existing_inputs:List[Tuple[str "tf1.placeholder"]]<arrow>TFPolicy<block_start>"""Creates a copy of self using existing input placeholders."""<line_sep># Note that there might be RNN state inputs at the end of the list <if_stmt>len(self._loss_input_dict)<ne>len(existing_inputs)<block_start><raise>ValueError("Tensor list mismatch" self._loss_input_dict self._state_inputs existing_inputs)<block_end><for_stmt>i,(k v) enumerate(self._loss_input_dict_no_rnn.items())<block_start><if_stmt>v.shape.as_list()<ne>existing_inputs[i].shape.as_list()<block_start><raise>ValueError("Tensor shape mismatch" i k v.shape existing_inputs[i].shape)<block_end><block_end># By convention, the loss inputs are followed by state inputs and then # the seq len tensor. rnn_inputs=[]<for_stmt>i range(len(self._state_inputs))<block_start>rnn_inputs.append(("state_in_{}".format(i) existing_inputs[len(self._loss_input_dict_no_rnn)+i]))<block_end><if_stmt>rnn_inputs<block_start>rnn_inputs.append((SampleBatch.SEQ_LENS existing_inputs[-1]))<block_end>input_dict=OrderedDict([("is_exploring" self._is_exploring) ("timestep" self._timestep)]+[(k existing_inputs[i])<for>i,k enumerate(self._loss_input_dict_no_rnn.keys())]+rnn_inputs)<line_sep>instance=self.__class__(self.observation_space self.action_space self.config existing_inputs=input_dict existing_model=[self.model # Deprecated: Target models should all reside under # `policy.target_model` now. ("target_q_model" getattr(self "target_q_model" <none>)) ("target_model" getattr(self "target_model" <none>)) ])<line_sep>instance._loss_input_dict=input_dict<line_sep>losses=instance._do_loss_init(SampleBatch(input_dict))<line_sep>loss_inputs=[(k existing_inputs[i])<for>i,k enumerate(self._loss_input_dict_no_rnn.keys())]<line_sep>TFPolicy._initialize_loss(instance losses loss_inputs)<if_stmt>instance._grad_stats_fn<block_start>instance._stats_fetches.update(instance._grad_stats_fn(instance input_dict instance._grads))<block_end><return>instance<block_end>@override(Policy)@DeveloperAPI<def_stmt>get_initial_state self<arrow>List[TensorType]<block_start><if_stmt>self.model<block_start><return>self.model.get_initial_state()<block_end><else_stmt><block_start><return>[]<block_end><block_end>@override(Policy)@DeveloperAPI<def_stmt>load_batch_into_buffer self batch:SampleBatch buffer_index:int=0 <arrow>int# Set the is_training flag of the batch. <block_start>batch.is_training=<true><line_sep># Shortcut for 1 CPU only: Store batch in # `self._loaded_single_cpu_batch`. 
<if_stmt>len(self.devices)<eq>1<and>self.devices[0]<eq>"/cpu:0"<block_start><assert_stmt>buffer_index<eq>0<line_sep>self._loaded_single_cpu_batch=batch<line_sep><return>len(batch)<block_end>input_dict=self._get_loss_inputs_dict(batch shuffle=<false>)<line_sep>data_keys=list(self._loss_input_dict_no_rnn.values())<if_stmt>self._state_inputs<block_start>state_keys=self._state_inputs+[self._seq_lens]<block_end><else_stmt><block_start>state_keys=[]<block_end>inputs=[input_dict[k]<for>k data_keys]<line_sep>state_inputs=[input_dict[k]<for>k state_keys]<line_sep><return>self.multi_gpu_tower_stacks[buffer_index].load_data(sess=self.get_session() inputs=inputs state_inputs=state_inputs )<block_end>@override(Policy)@DeveloperAPI<def_stmt>get_num_samples_loaded_into_buffer self buffer_index:int=0<arrow>int# Shortcut for 1 CPU only: Batch should already be stored in # `self._loaded_single_cpu_batch`. <block_start><if_stmt>len(self.devices)<eq>1<and>self.devices[0]<eq>"/cpu:0"<block_start><assert_stmt>buffer_index<eq>0<line_sep><return>len(self._loaded_single_cpu_batch)<if>self._loaded_single_cpu_batch<is><not><none><else>0<block_end><return>self.multi_gpu_tower_stacks[buffer_index].num_tuples_loaded<block_end>@override(Policy)@DeveloperAPI<def_stmt>learn_on_loaded_batch self offset:int=0 buffer_index:int=0# Shortcut for 1 CPU only: Batch should already be stored in # `self._loaded_single_cpu_batch`. <block_start><if_stmt>len(self.devices)<eq>1<and>self.devices[0]<eq>"/cpu:0"<block_start><assert_stmt>buffer_index<eq>0<if_stmt>self._loaded_single_cpu_batch<is><none><block_start><raise>ValueError("Must call Policy.load_batch_into_buffer() before "<concat>"Policy.learn_on_loaded_batch()!")<block_end># Get the correct slice of the already loaded batch to use, # based on offset and batch size. batch_size=self.config.get("sgd_minibatch_size" self.config["train_batch_size"])<if_stmt>batch_size<ge>len(self._loaded_single_cpu_batch)<block_start>sliced_batch=self._loaded_single_cpu_batch<block_end><else_stmt><block_start>sliced_batch=self._loaded_single_cpu_batch.slice(start=offset end=offset+batch_size)<block_end><return>self.learn_on_batch(sliced_batch)<block_end><return>self.multi_gpu_tower_stacks[buffer_index].optimize(self.get_session() offset)<block_end><def_stmt>_get_input_dict_and_dummy_batch self view_requirements existing_inputs<block_start>"""Creates input_dict and dummy_batch for loss initialization. Used for managing the Policy's input placeholders and for loss initialization. Input_dict: Str -> tf.placeholders, dummy_batch: str -> np.arrays. Args: view_requirements (ViewReqs): The view requirements dict. existing_inputs (Dict[str, tf.placeholder]): A dict of already existing placeholders. Returns: Tuple[Dict[str, tf.placeholder], Dict[str, np.ndarray]]: The input_dict/dummy_batch tuple. """<line_sep>input_dict={}<for_stmt>view_col,view_req view_requirements.items()# Point state_in to the already existing self._state_inputs. <block_start>mo=re.match("state_in_(\d+)" view_col)<if_stmt>mo<is><not><none><block_start>input_dict[view_col]=self._state_inputs[int(mo.group(1))]<block_end># State-outs (no placeholders needed). <elif_stmt>view_col.startswith("state_out_")<block_start><continue><block_end># Skip action dist inputs placeholder (do later). <elif_stmt>view_col<eq>SampleBatch.ACTION_DIST_INPUTS<block_start><continue><block_end># This is a tower, input placeholders already exist. <elif_stmt>view_col<in>existing_inputs<block_start>input_dict[view_col]=existing_inputs[view_col]<block_end># All others. 
<else_stmt><block_start>time_axis=<not>isinstance(view_req.shift int)<if_stmt>view_req.used_for_training# Create a +time-axis placeholder if the shift is not an # int (range or list of ints). <block_start>flatten=view_col<not><in>[SampleBatch.OBS SampleBatch.NEXT_OBS]<or><not>self.config["_disable_preprocessor_api"]<line_sep>input_dict[view_col]=get_placeholder(space=view_req.space name=view_col time_axis=time_axis flatten=flatten )<block_end><block_end><block_end>dummy_batch=self._get_dummy_batch_from_view_requirements(batch_size=32)<line_sep><return>SampleBatch(input_dict seq_lens=self._seq_lens) dummy_batch<block_end>@override(Policy)<def_stmt>_initialize_loss_from_dummy_batch self auto_remove_unneeded_view_reqs:bool=<true> stats_fn=<none><arrow><none># Create the optimizer/exploration optimizer here. Some initialization # steps (e.g. exploration postprocessing) may need this. <block_start><if_stmt><not>self._optimizers<block_start>self._optimizers=force_list(self.optimizer())<line_sep># Backward compatibility. self._optimizer=self._optimizers[0]<block_end># Test calls depend on variable init, so initialize model first. self.get_session().run(tf1.global_variables_initializer())<line_sep>logger.info("Testing `compute_actions` w/ dummy batch.")<line_sep>actions,state_outs,extra_fetches=self.compute_actions_from_input_dict(self._dummy_batch explore=<false> timestep=0)<for_stmt>key,value extra_fetches.items()<block_start>self._dummy_batch[key]=value<line_sep>self._input_dict[key]=get_placeholder(value=value name=key)<if_stmt>key<not><in>self.view_requirements<block_start>logger.info("Adding extra-action-fetch `{}` to "<concat>"view-reqs.".format(key))<line_sep>self.view_requirements[key]=ViewRequirement(space=gym.spaces.Box(-1.0 1.0 shape=value.shape[1:] dtype=value.dtype) used_for_compute_actions=<false> )<block_end><block_end>dummy_batch=self._dummy_batch<line_sep>logger.info("Testing `postprocess_trajectory` w/ dummy batch.")<line_sep>self.exploration.postprocess_trajectory(self dummy_batch self.get_session())<line_sep>_=self.postprocess_trajectory(dummy_batch)<line_sep># Add new columns automatically to (loss) input_dict. 
<for_stmt>key dummy_batch.added_keys<block_start><if_stmt>key<not><in>self._input_dict<block_start>self._input_dict[key]=get_placeholder(value=dummy_batch[key] name=key)<block_end><if_stmt>key<not><in>self.view_requirements<block_start>self.view_requirements[key]=ViewRequirement(space=gym.spaces.Box(-1.0 1.0 shape=dummy_batch[key].shape[1:] dtype=dummy_batch[key].dtype) used_for_compute_actions=<false> )<block_end><block_end>train_batch=SampleBatch(dict(self._input_dict **self._loss_input_dict))<if_stmt>self._state_inputs<block_start>train_batch[SampleBatch.SEQ_LENS]=self._seq_lens<line_sep>self._loss_input_dict.update({SampleBatch.SEQ_LENS:train_batch[SampleBatch.SEQ_LENS]})<block_end>self._loss_input_dict.update({k:v<for>k,v train_batch.items()})<if_stmt>log_once("loss_init")<block_start>logger.debug("Initializing loss function with dummy input:\n\n{}\n".format(summarize(train_batch)))<block_end>losses=self._do_loss_init(train_batch)<line_sep>all_accessed_keys=train_batch.accessed_keys|dummy_batch.accessed_keys|dummy_batch.added_keys|set(self.model.view_requirements.keys())<line_sep>TFPolicy._initialize_loss(self losses [(k v)<for>k,v train_batch.items()<if>k<in>all_accessed_keys]+([(SampleBatch.SEQ_LENS train_batch[SampleBatch.SEQ_LENS])]<if>SampleBatch.SEQ_LENS<in>train_batch<else>[]))<if_stmt>"is_training"<in>self._loss_input_dict<block_start><del_stmt>self._loss_input_dict["is_training"]<block_end># Call the grads stats fn. # TODO: (sven) rename to simply stats_fn to match eager and torch. <if_stmt>self._grad_stats_fn<block_start>self._stats_fetches.update(self._grad_stats_fn(self train_batch self._grads))<block_end># Add new columns automatically to view-reqs. <if_stmt>auto_remove_unneeded_view_reqs# Add those needed for postprocessing and training. <block_start>all_accessed_keys=train_batch.accessed_keys|dummy_batch.accessed_keys<line_sep># Tag those only needed for post-processing (with some exceptions). <for_stmt>key dummy_batch.accessed_keys<block_start><if_stmt>key<not><in>train_batch.accessed_keys<and>key<not><in>self.model.view_requirements<and>key<not><in>[SampleBatch.EPS_ID SampleBatch.AGENT_INDEX SampleBatch.UNROLL_ID SampleBatch.DONES SampleBatch.REWARDS SampleBatch.INFOS]<block_start><if_stmt>key<in>self.view_requirements<block_start>self.view_requirements[key].used_for_training=<false><block_end><if_stmt>key<in>self._loss_input_dict<block_start><del_stmt>self._loss_input_dict[key]<block_end><block_end><block_end># Remove those not needed at all (leave those that are needed # by Sampler to properly execute sample collection). # Also always leave DONES, REWARDS, and INFOS, no matter what. <for_stmt>key list(self.view_requirements.keys())<block_start><if_stmt>key<not><in>all_accessed_keys<and>key<not><in>[SampleBatch.EPS_ID SampleBatch.AGENT_INDEX SampleBatch.UNROLL_ID SampleBatch.DONES SampleBatch.REWARDS SampleBatch.INFOS]<and>key<not><in>self.model.view_requirements# If user deleted this key manually in postprocessing # fn, warn about it and do not remove from # view-requirements. <block_start><if_stmt>key<in>dummy_batch.deleted_keys<block_start>logger.warning("SampleBatch key '{}' was deleted manually in "<concat>"postprocessing function! RLlib will "<concat>"automatically remove non-used items from the "<concat>"data stream. 
Remove the `del` from your "<concat>"postprocessing function.".format(key))<block_end><else_stmt><block_start><del_stmt>self.view_requirements[key]<block_end><if_stmt>key<in>self._loss_input_dict<block_start><del_stmt>self._loss_input_dict[key]<block_end><block_end><block_end># Add those data_cols (again) that are missing and have # dependencies by view_cols. <for_stmt>key list(self.view_requirements.keys())<block_start>vr=self.view_requirements[key]<if_stmt>(vr.data_col<is><not><none><and>vr.data_col<not><in>self.view_requirements)<block_start>used_for_training=vr.data_col<in>train_batch.accessed_keys<line_sep>self.view_requirements[vr.data_col]=ViewRequirement(space=vr.space used_for_training=used_for_training)<block_end><block_end><block_end>self._loss_input_dict_no_rnn={k:v<for>k,v self._loss_input_dict.items()<if>(v<not><in>self._state_inputs<and>v<ne>self._seq_lens)}<block_end><def_stmt>_do_loss_init self train_batch:SampleBatch<block_start>losses=self._loss_fn(self self.model self.dist_class train_batch)<line_sep>losses=force_list(losses)<if_stmt>self._stats_fn<block_start>self._stats_fetches.update(self._stats_fn(self train_batch))<block_end># Override the update ops to be those of the model. self._update_ops=[]<if_stmt><not>isinstance(self.model tf.keras.Model)<block_start>self._update_ops=self.model.update_ops()<block_end><return>losses<block_end><block_end><class_stmt>TFMultiGPUTowerStack<block_start>"""Optimizer that runs in parallel across multiple local devices. TFMultiGPUTowerStack automatically splits up and loads training data onto specified local devices (e.g. GPUs) with `load_data()`. During a call to `optimize()`, the devices compute gradients over slices of the data in parallel. The gradients are then averaged and applied to the shared weights. The data loaded is pinned in device memory until the next call to `load_data`, so you can make multiple passes (possibly in randomized order) over the same data once loaded. This is similar to tf1.train.SyncReplicasOptimizer, but works within a single TensorFlow graph, i.e. implements in-graph replicated training: https://www.tensorflow.org/api_docs/python/tf/train/SyncReplicasOptimizer """<def_stmt>__init__ self # Deprecated. optimizer=<none> devices=<none> input_placeholders=<none> rnn_inputs=<none> max_per_device_batch_size=<none> build_graph=<none> grad_norm_clipping=<none> # Use only `policy` argument from here on. policy:TFPolicy=<none> <block_start>"""Initializes a TFMultiGPUTowerStack instance. Args: policy (TFPolicy): The TFPolicy object that this tower stack belongs to. """<line_sep># Obsoleted usage, use only `policy` arg from here on. 
<if_stmt>policy<is><none><block_start>deprecation_warning(old="TFMultiGPUTowerStack(...)" new="TFMultiGPUTowerStack(policy=[Policy])" error=<false> )<line_sep>self.policy=<none><line_sep>self.optimizers=optimizer<line_sep>self.devices=devices<line_sep>self.max_per_device_batch_size=max_per_device_batch_size<line_sep>self.policy_copy=build_graph<block_end><else_stmt><block_start>self.policy:TFPolicy=policy<line_sep>self.optimizers:List[LocalOptimizer]=self.policy._optimizers<line_sep>self.devices=self.policy.devices<line_sep>self.max_per_device_batch_size=(max_per_device_batch_size<or>policy.config.get("sgd_minibatch_size" policy.config.get("train_batch_size" 999999)))<floordiv>len(self.devices)<line_sep>input_placeholders=list(self.policy._loss_input_dict_no_rnn.values())<line_sep>rnn_inputs=[]<if_stmt>self.policy._state_inputs<block_start>rnn_inputs=self.policy._state_inputs+[self.policy._seq_lens]<block_end>grad_norm_clipping=self.policy.config.get("grad_clip")<line_sep>self.policy_copy=self.policy.copy<block_end><assert_stmt>len(self.devices)<g>1<or>"gpu"<in>self.devices[0]<line_sep>self.loss_inputs=input_placeholders+rnn_inputs<line_sep>shared_ops=tf1.get_collection(tf1.GraphKeys.UPDATE_OPS scope=tf1.get_variable_scope().name)<line_sep># Then setup the per-device loss graphs that use the shared weights self._batch_index=tf1.placeholder(tf.int32 name="batch_index")<line_sep># Dynamic batch size, which may be shrunk if there isn't enough data self._per_device_batch_size=tf1.placeholder(tf.int32 name="per_device_batch_size")<line_sep>self._loaded_per_device_batch_size=max_per_device_batch_size<line_sep># When loading RNN input, we dynamically determine the max seq len self._max_seq_len=tf1.placeholder(tf.int32 name="max_seq_len")<line_sep>self._loaded_max_seq_len=1<line_sep># Split on the CPU in case the data doesn't fit in GPU memory. <with_stmt>tf.device("/cpu:0")<block_start>data_splits=zip(*[tf.split(ph len(self.devices))<for>ph self.loss_inputs])<block_end>self._towers=[]<for_stmt>tower_i,(device device_placeholders) enumerate(zip(self.devices data_splits))<block_start>self._towers.append(self._setup_device(tower_i device device_placeholders len(input_placeholders)))<block_end><if_stmt>self.policy.config["_tf_policy_handles_more_than_one_loss"]<block_start>avgs=[]<for_stmt>i,optim enumerate(self.optimizers)<block_start>avg=average_gradients([t.grads[i]<for>t self._towers])<if_stmt>grad_norm_clipping<block_start>clipped=[]<for_stmt>grad,_ avg<block_start>clipped.append(grad)<block_end>clipped,_=tf.clip_by_global_norm(clipped grad_norm_clipping)<for_stmt>i,(grad var) enumerate(avg)<block_start>avg[i]=(clipped[i] var)<block_end><block_end>avgs.append(avg)<block_end># Gather update ops for any batch norm layers. # TODO(ekl) here we # will use all the ops found which won't work for DQN / DDPG, but # those aren't supported with multi-gpu right now anyways. 
self._update_ops=tf1.get_collection(tf1.GraphKeys.UPDATE_OPS scope=tf1.get_variable_scope().name)<for_stmt>op shared_ops<block_start>self._update_ops.remove(op)# only care about tower update ops <block_end><if_stmt>self._update_ops<block_start>logger.debug("Update ops to run on apply gradient: {}".format(self._update_ops))<block_end><with_stmt>tf1.control_dependencies(self._update_ops)<block_start>self._train_op=tf.group([o.apply_gradients(a)<for>o,a zip(self.optimizers avgs)])<block_end><block_end><else_stmt><block_start>avg=average_gradients([t.grads<for>t self._towers])<if_stmt>grad_norm_clipping<block_start>clipped=[]<for_stmt>grad,_ avg<block_start>clipped.append(grad)<block_end>clipped,_=tf.clip_by_global_norm(clipped grad_norm_clipping)<for_stmt>i,(grad var) enumerate(avg)<block_start>avg[i]=(clipped[i] var)<block_end><block_end># Gather update ops for any batch norm layers. # TODO(ekl) here we # will use all the ops found which won't work for DQN / DDPG, but # those aren't supported with multi-gpu right now anyways. self._update_ops=tf1.get_collection(tf1.GraphKeys.UPDATE_OPS scope=tf1.get_variable_scope().name)<for_stmt>op shared_ops<block_start>self._update_ops.remove(op)# only care about tower update ops <block_end><if_stmt>self._update_ops<block_start>logger.debug("Update ops to run on apply gradient: {}".format(self._update_ops))<block_end><with_stmt>tf1.control_dependencies(self._update_ops)<block_start>self._train_op=self.optimizers[0].apply_gradients(avg)<block_end><block_end><block_end><def_stmt>load_data self sess inputs state_inputs<block_start>"""Bulk loads the specified inputs into device memory. The shape of the inputs must conform to the shapes of the input placeholders this optimizer was constructed with. The data is split equally across all the devices. If the data is not evenly divisible by the batch size, excess data will be discarded. Args: sess: TensorFlow session. inputs: List of arrays matching the input placeholders, of shape [BATCH_SIZE, ...]. state_inputs: List of RNN input arrays. These arrays have size [BATCH_SIZE / MAX_SEQ_LEN, ...]. Returns: The number of tuples loaded per device. 
"""<if_stmt>log_once("load_data")<block_start>logger.info("Training on concatenated sample batches:\n\n{}\n".format(summarize({"placeholders":self.loss_inputs "inputs":inputs "state_inputs":state_inputs})))<block_end>feed_dict={}<assert_stmt>len(self.loss_inputs)<eq>len(inputs+state_inputs) (self.loss_inputs inputs state_inputs)<line_sep># Let's suppose we have the following input data, and 2 devices: # 1 2 3 4 5 6 7 <- state inputs shape # A A A B B B C C C D D D E E E F F F G G G <- inputs shape # The data is truncated and split across devices as follows: # |---| seq len = 3 # |---------------------------------| seq batch size = 6 seqs # |----------------| per device batch size = 9 tuples <if_stmt>len(state_inputs)<g>0<block_start>smallest_array=state_inputs[0]<line_sep>seq_len=len(inputs[0])<floordiv>len(state_inputs[0])<line_sep>self._loaded_max_seq_len=seq_len<block_end><else_stmt><block_start>smallest_array=inputs[0]<line_sep>self._loaded_max_seq_len=1<block_end>sequences_per_minibatch=(self.max_per_device_batch_size<floordiv>self._loaded_max_seq_len<times>len(self.devices))<if_stmt>sequences_per_minibatch<l>1<block_start>logger.warning(("Target minibatch size is {}, however the rollout sequence "<concat>"length is {}, hence the minibatch size will be raised to "<concat>"{}.").format(self.max_per_device_batch_size self._loaded_max_seq_len self._loaded_max_seq_len<times>len(self.devices)))<line_sep>sequences_per_minibatch=1<block_end><if_stmt>len(smallest_array)<l>sequences_per_minibatch# Dynamically shrink the batch size if insufficient data <block_start>sequences_per_minibatch=make_divisible_by(len(smallest_array) len(self.devices))<block_end><if_stmt>log_once("data_slicing")<block_start>logger.info(("Divided {} rollout sequences, each of length {}, among "<concat>"{} devices.").format(len(smallest_array) self._loaded_max_seq_len len(self.devices)))<block_end><if_stmt>sequences_per_minibatch<l>len(self.devices)<block_start><raise>ValueError("Must load at least 1 tuple sequence per device. Try "<concat>"increasing `sgd_minibatch_size` or reducing `max_seq_len` "<concat>"to ensure that at least one sequence fits per device.")<block_end>self._loaded_per_device_batch_size=(sequences_per_minibatch<floordiv>len(self.devices)<times>self._loaded_max_seq_len)<if_stmt>len(state_inputs)<g>0# First truncate the RNN state arrays to the sequences_per_minib. <block_start>state_inputs=[make_divisible_by(arr sequences_per_minibatch)<for>arr state_inputs]<line_sep># Then truncate the data inputs to match inputs=[arr[:len(state_inputs[0])<times>seq_len]<for>arr inputs]<assert_stmt>len(state_inputs[0])<times>seq_len<eq>len(inputs[0]) (len(state_inputs[0]) sequences_per_minibatch seq_len len(inputs[0]))<for_stmt>ph,arr zip(self.loss_inputs inputs+state_inputs)<block_start>feed_dict[ph]=arr<block_end>truncated_len=len(inputs[0])<block_end><else_stmt><block_start>truncated_len=0<for_stmt>ph,arr zip(self.loss_inputs inputs)<block_start>truncated_arr=make_divisible_by(arr sequences_per_minibatch)<line_sep>feed_dict[ph]=truncated_arr<if_stmt>truncated_len<eq>0<block_start>truncated_len=len(truncated_arr)<block_end><block_end><block_end>sess.run([t.init_op<for>t self._towers] feed_dict=feed_dict)<line_sep>self.num_tuples_loaded=truncated_len<line_sep>samples_per_device=truncated_len<floordiv>len(self.devices)<assert_stmt>samples_per_device<g>0 "No data loaded?"<assert_stmt>samples_per_device%self._loaded_per_device_batch_size<eq>0<line_sep># Return loaded samples per-device. 
<return>samples_per_device<block_end><def_stmt>optimize self sess batch_index<block_start>"""Run a single step of SGD. Runs a SGD step over a slice of the preloaded batch with size given by self._loaded_per_device_batch_size and offset given by the batch_index argument. Updates shared model weights based on the averaged per-device gradients. Args: sess: TensorFlow session. batch_index: Offset into the preloaded data. This value must be between `0` and `tuples_per_device`. The amount of data to process is at most `max_per_device_batch_size`. Returns: The outputs of extra_ops evaluated over the batch. """<line_sep>feed_dict={self._batch_index:batch_index self._per_device_batch_size:self._loaded_per_device_batch_size self._max_seq_len:self._loaded_max_seq_len }<for_stmt>tower self._towers<block_start>feed_dict.update(tower.loss_graph.extra_compute_grad_feed_dict())<block_end>fetches={"train":self._train_op}<for_stmt>tower_num,tower enumerate(self._towers)<block_start>tower_fetch=tower.loss_graph._get_grad_and_stats_fetches()<line_sep>fetches["tower_{}".format(tower_num)]=tower_fetch<block_end><return>sess.run(fetches feed_dict=feed_dict)<block_end><def_stmt>get_device_losses self<block_start><return>[t.loss_graph<for>t self._towers]<block_end><def_stmt>_setup_device self tower_i device device_input_placeholders num_data_in<block_start><assert_stmt>num_data_in<le>len(device_input_placeholders)<with_stmt>tf.device(device)<block_start><with_stmt>tf1.name_scope(TOWER_SCOPE_NAME+f"_{tower_i}")<block_start>device_input_batches=[]<line_sep>device_input_slices=[]<for_stmt>i,ph enumerate(device_input_placeholders)<block_start>current_batch=tf1.Variable(ph trainable=<false> validate_shape=<false> collections=[])<line_sep>device_input_batches.append(current_batch)<if_stmt>i<l>num_data_in<block_start>scale=self._max_seq_len<line_sep>granularity=self._max_seq_len<block_end><else_stmt><block_start>scale=self._max_seq_len<line_sep>granularity=1<block_end>current_slice=tf.slice(current_batch ([self._batch_index<floordiv>scale<times>granularity]+[0]<times>len(ph.shape[1:])) ([self._per_device_batch_size<floordiv>scale<times>granularity]+[-1]<times>len(ph.shape[1:])))<line_sep>current_slice.set_shape(ph.shape)<line_sep>device_input_slices.append(current_slice)<block_end>graph_obj=self.policy_copy(device_input_slices)<line_sep>device_grads=graph_obj.gradients(self.optimizers graph_obj._losses)<block_end><return>Tower(tf.group(*[batch.initializer<for>batch device_input_batches]) device_grads graph_obj)<block_end><block_end><block_end># Each tower is a copy of the loss graph pinned to a specific device. Tower=namedtuple("Tower" ["init_op" "grads" "loss_graph"])<def_stmt>make_divisible_by a n<block_start><if_stmt>type(a)<is>int<block_start><return>a-a%n<block_end><return>a[0:a.shape[0]-a.shape[0]%n]<block_end><def_stmt>average_gradients tower_grads<block_start>"""Averages gradients across towers. Calculate the average gradient for each shared variable across all towers. Note that this function provides a synchronization point across all towers. Args: tower_grads: List of lists of (gradient, variable) tuples. The outer list is over individual gradients. The inner list is over the gradient calculation for each tower. Returns: List of pairs of (gradient, variable) where the gradient has been averaged across all towers. TODO(ekl): We could use NCCL if this becomes a bottleneck. 
"""<line_sep>average_grads=[]<for_stmt>grad_and_vars zip(*tower_grads)# Note that each grad_and_vars looks like the following: # ((grad0_gpu0, var0_gpu0), ... , (grad0_gpuN, var0_gpuN)) <block_start>grads=[]<for_stmt>g,_ grad_and_vars<block_start><if_stmt>g<is><not><none># Add 0 dimension to the gradients to represent the tower. <block_start>expanded_g=tf.expand_dims(g 0)<line_sep># Append on a 'tower' dimension which we will average over # below. grads.append(expanded_g)<block_end><block_end><if_stmt><not>grads<block_start><continue><block_end># Average over the 'tower' dimension. grad=tf.concat(axis=0 values=grads)<line_sep>grad=tf.reduce_mean(grad 0)<line_sep># Keep in mind that the Variables are redundant because they are shared # across towers. So .. we will just return the first tower's pointer to # the Variable. v=grad_and_vars[0][1]<line_sep>grad_and_var=(grad v)<line_sep>average_grads.append(grad_and_var)<block_end><return>average_grads<block_end>
# Copyright 2014 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. <import_stmt>json<import_stmt>logging<import_stmt>StringIO<import_stmt>unittest<import_from_stmt>telemetry.internal.backends.remote trybot_browser_finder<import_from_stmt>telemetry.internal.browser browser_options<import_from_stmt>telemetry.testing simple_mock<import_from_stmt>telemetry.testing system_stub<class_stmt>TrybotBrowserFinderTest(unittest.TestCase)<block_start><def_stmt>setUp self<block_start>self.log_output=StringIO.StringIO()<line_sep>self.stream_handler=logging.StreamHandler(self.log_output)<line_sep>logging.getLogger().addHandler(self.stream_handler)<line_sep>self._real_subprocess=trybot_browser_finder.subprocess<line_sep>self._real_urllib2=trybot_browser_finder.urllib2<line_sep>self._stubs=system_stub.Override(trybot_browser_finder ['sys' 'open' 'os'])<block_end><def_stmt>tearDown self<block_start>logging.getLogger().removeHandler(self.stream_handler)<line_sep>self.log_output.close()<line_sep>trybot_browser_finder.subprocess=self._real_subprocess<line_sep>trybot_browser_finder.urllib2=self._real_urllib2<line_sep>self._stubs.Restore()<block_end><def_stmt>_ExpectProcesses self args<block_start>mock_subprocess=simple_mock.MockObject()<line_sep>mock_subprocess.SetAttribute('PIPE' simple_mock.MockObject())<for_stmt>arg args<block_start>mock_popen=simple_mock.MockObject()<line_sep>mock_popen.ExpectCall('communicate').WillReturn(arg[1][1:])<line_sep>mock_popen.ExpectCall('poll').WillReturn(arg[1][0])<line_sep>mock_subprocess.ExpectCall('Popen').WithArgs(arg[0]).WillReturn(mock_popen)<block_end>trybot_browser_finder.subprocess=mock_subprocess<block_end><def_stmt>_MockTryserverJson self bots_dict<block_start>trybot_browser_finder.urllib2=simple_mock.MockObject()<line_sep>trybot_browser_finder.urllib2.ExpectCall('urlopen').WithArgs('http://build.chromium.org/p/tryserver.chromium.perf/json').WillReturn(StringIO.StringIO(json.dumps({'builders':bots_dict})))<block_end><def_stmt>test_find_all_browser_types_list self<block_start>finder_options=browser_options.BrowserFinderOptions(browser_type='list')<line_sep>self._MockTryserverJson({'android_nexus4_perf_bisect':'stuff' 'mac_10_9_perf_bisect':'otherstuff' 'win_perf_bisect_builder':'not a trybot' })<line_sep>expected_trybots_list=['trybot-all' 'trybot-all-android' 'trybot-all-linux' 'trybot-all-mac' 'trybot-all-win' 'trybot-android-nexus4' 'trybot-mac-10-9']<line_sep>self.assertEquals(expected_trybots_list sorted(trybot_browser_finder.FindAllBrowserTypes(finder_options)))<block_end><def_stmt>test_find_all_browser_types_trybot self<block_start>finder_options=browser_options.BrowserFinderOptions(browser_type='trybot-win')<line_sep>self._MockTryserverJson({'android_nexus4_perf_bisect':'stuff' 'mac_10_9_perf_bisect':'otherstuff' 'win_perf_bisect_builder':'not a trybot' })<line_sep>expected_trybots_list=['trybot-all' 'trybot-all-android' 'trybot-all-linux' 'trybot-all-mac' 'trybot-all-win' 'trybot-android-nexus4' 'trybot-mac-10-9']<line_sep>self.assertEquals(expected_trybots_list sorted(trybot_browser_finder.FindAllBrowserTypes(finder_options)))<block_end><def_stmt>test_find_all_browser_types_non_trybot_browser self<block_start>finder_options=browser_options.BrowserFinderOptions(browser_type='release')<line_sep>trybot_browser_finder.urllib2=simple_mock.MockObject()<line_sep>self.assertEquals([] # pylint: disable=W0212 
sorted(trybot_browser_finder.FindAllBrowserTypes(finder_options)))<block_end><def_stmt>test_constructor self<block_start>finder_options=browser_options.BrowserFinderOptions()<line_sep>self._MockTryserverJson({'android_nexus4_perf_bisect':'stuff' 'mac_10_9_perf_bisect':'otherstuff' 'win_perf_bisect_builder':'not a trybot' })<line_sep>browser=trybot_browser_finder.PossibleTrybotBrowser('trybot-android-nexus4' finder_options)<line_sep>self.assertEquals('android' browser.target_os)<line_sep># pylint: disable=W0212 self.assertTrue('android'<in>browser._builder_names)<line_sep>self.assertEquals(['android_nexus4_perf_bisect'] browser._builder_names.get('android'))<block_end><def_stmt>test_constructor_trybot_all self<block_start>finder_options=browser_options.BrowserFinderOptions()<line_sep>self._MockTryserverJson({'android_nexus4_perf_bisect':'stuff' 'android_nexus5_perf_bisect':'stuff2' 'mac_10_9_perf_bisect':'otherstuff' 'mac_perf_bisect':'otherstuff1' 'win_perf_bisect':'otherstuff2' 'linux_perf_bisect':'otherstuff3' 'win_x64_perf_bisect':'otherstuff4' 'win_perf_bisect_builder':'not a trybot' })<line_sep>browser=trybot_browser_finder.PossibleTrybotBrowser('trybot-all' finder_options)<line_sep>self.assertEquals('all' browser.target_os)<line_sep># pylint: disable=W0212 self.assertEquals(['android' 'linux' 'mac' 'win' 'win-x64'] sorted(browser._builder_names))<line_sep>self.assertEquals(['android_nexus4_perf_bisect' 'android_nexus5_perf_bisect'] sorted(browser._builder_names.get('android')))<line_sep>self.assertEquals(['mac_10_9_perf_bisect' 'mac_perf_bisect'] sorted(browser._builder_names.get('mac')))<line_sep>self.assertEquals(['linux_perf_bisect'] sorted(browser._builder_names.get('linux')))<line_sep>self.assertEquals(['win_perf_bisect'] sorted(browser._builder_names.get('win')))<line_sep>self.assertEquals(['win_x64_perf_bisect'] sorted(browser._builder_names.get('win-x64')))<block_end><def_stmt>test_constructor_trybot_all_win self<block_start>finder_options=browser_options.BrowserFinderOptions()<line_sep>self._MockTryserverJson({'android_nexus4_perf_bisect':'stuff' 'android_nexus5_perf_bisect':'stuff2' 'win_8_perf_bisect':'otherstuff' 'win_perf_bisect':'otherstuff2' 'linux_perf_bisect':'otherstuff3' 'win_x64_perf_bisect':'otherstuff4' 'win_perf_bisect_builder':'not a trybot' })<line_sep>browser=trybot_browser_finder.PossibleTrybotBrowser('trybot-all-win' finder_options)<line_sep>self.assertEquals('all' browser.target_os)<line_sep># pylint: disable=W0212 self.assertEquals(['win' 'win-x64'] sorted(browser._builder_names))<line_sep>self.assertEquals(['win_8_perf_bisect' 'win_perf_bisect'] sorted(browser._builder_names.get('win')))<line_sep>self.assertEquals(['win_x64_perf_bisect'] sorted(browser._builder_names.get('win-x64')))<block_end><def_stmt>test_constructor_trybot_all_android self<block_start>finder_options=browser_options.BrowserFinderOptions()<line_sep>self._MockTryserverJson({'android_nexus4_perf_bisect':'stuff' 'android_nexus5_perf_bisect':'stuff2' 'win_8_perf_bisect':'otherstuff' 'win_perf_bisect':'otherstuff2' 'linux_perf_bisect':'otherstuff3' 'win_x64_perf_bisect':'otherstuff4' 'win_perf_bisect_builder':'not a trybot' })<line_sep>browser=trybot_browser_finder.PossibleTrybotBrowser('trybot-all-android' finder_options)<line_sep>self.assertEquals(['android_nexus4_perf_bisect' 'android_nexus5_perf_bisect'] sorted(browser._builder_names.get('android')))<block_end><def_stmt>test_constructor_trybot_all_mac 
self<block_start>finder_options=browser_options.BrowserFinderOptions()<line_sep>self._MockTryserverJson({'android_nexus4_perf_bisect':'stuff' 'win_8_perf_bisect':'otherstuff' 'mac_perf_bisect':'otherstuff2' 'win_perf_bisect_builder':'not a trybot' })<line_sep>browser=trybot_browser_finder.PossibleTrybotBrowser('trybot-all-mac' finder_options)<line_sep>self.assertEquals('all' browser.target_os)<line_sep># pylint: disable=W0212 self.assertEquals(['mac'] sorted(browser._builder_names))<line_sep>self.assertEquals(['mac_perf_bisect'] sorted(browser._builder_names.get('mac')))<block_end><def_stmt>test_constructor_trybot_all_linux self<block_start>finder_options=browser_options.BrowserFinderOptions()<line_sep>self._MockTryserverJson({'android_nexus4_perf_bisect':'stuff' 'linux_perf_bisect':'stuff1' 'win_8_perf_bisect':'otherstuff' 'mac_perf_bisect':'otherstuff2' 'win_perf_bisect_builder':'not a trybot' })<line_sep>browser=trybot_browser_finder.PossibleTrybotBrowser('trybot-all-linux' finder_options)<line_sep>self.assertEquals('all' browser.target_os)<line_sep># pylint: disable=W0212 self.assertEquals(['linux'] sorted(browser._builder_names))<line_sep>self.assertEquals(['linux_perf_bisect'] sorted(browser._builder_names.get('linux')))<block_end><def_stmt>test_no_git self<block_start>finder_options=browser_options.BrowserFinderOptions()<line_sep>self._MockTryserverJson({'android_nexus4_perf_bisect':'stuff'})<line_sep>browser=trybot_browser_finder.PossibleTrybotBrowser('trybot-android-nexus4' finder_options)<line_sep>self._ExpectProcesses(((['git' 'rev-parse' '--abbrev-ref' 'HEAD'] (128 <none> <none>)) ))<line_sep>browser.RunRemote()<line_sep>self.assertEquals('Must be in a git repository to send changes to trybots.\n' self.log_output.getvalue())<block_end><def_stmt>test_dirty_tree self<block_start>finder_options=browser_options.BrowserFinderOptions()<line_sep>self._MockTryserverJson({'android_nexus4_perf_bisect':'stuff'})<line_sep>browser=trybot_browser_finder.PossibleTrybotBrowser('trybot-android-nexus4' finder_options)<line_sep>self._ExpectProcesses(((['git' 'rev-parse' '--abbrev-ref' 'HEAD'] (0 'br' <none>)) (['git' 'update-index' '--refresh' '-q'] (0 <none> <none> )) (['git' 'diff-index' 'HEAD'] (0 'dirty tree' <none>)) ))<line_sep>browser.RunRemote()<line_sep>self.assertEquals('Cannot send a try job with a dirty tree. Commit locally first.\n' self.log_output.getvalue())<block_end><def_stmt>test_no_local_commits self<block_start>finder_options=browser_options.BrowserFinderOptions()<line_sep>self._MockTryserverJson({'android_nexus4_perf_bisect':'stuff'})<line_sep>browser=trybot_browser_finder.PossibleTrybotBrowser('trybot-android-nexus4' finder_options)<line_sep>self._ExpectProcesses(((['git' 'rev-parse' '--abbrev-ref' 'HEAD'] (0 'br' <none>)) (['git' 'update-index' '--refresh' '-q'] (0 <none> <none> )) (['git' 'diff-index' 'HEAD'] (0 '' <none>)) (['git' 'log' 'origin/master..HEAD'] (0 '' <none>)) (['git' 'rev-parse' '--abbrev-ref' 'HEAD'] (0 'br' <none>)) (['git' 'update-index' '--refresh' '-q'] (0 <none> <none> )) (['git' 'diff-index' 'HEAD'] (0 '' <none>)) (['git' 'log' 'origin/master..HEAD'] (0 '' <none>)) ))<line_sep>browser.RunRemote()<line_sep>self.assertEquals(('No local changes found in chromium or blink trees. 
'<concat>'browser=trybot-android-nexus4 argument sends local changes to the '<concat>'perf trybot(s): '<concat>'[[\'android_nexus4_perf_bisect\']].\n') self.log_output.getvalue())<block_end><def_stmt>test_branch_checkout_fails self<block_start>finder_options=browser_options.BrowserFinderOptions()<line_sep>self._MockTryserverJson({'android_nexus4_perf_bisect':'stuff'})<line_sep>browser=trybot_browser_finder.PossibleTrybotBrowser('trybot-android-nexus4' finder_options)<line_sep>self._ExpectProcesses(((['git' 'rev-parse' '--abbrev-ref' 'HEAD'] (0 'br' <none>)) (['git' 'update-index' '--refresh' '-q'] (0 <none> <none> )) (['git' 'diff-index' 'HEAD'] (0 '' <none>)) (['git' 'log' 'origin/master..HEAD'] (0 'logs here' <none>)) (['git' 'checkout' '-b' 'telemetry-tryjob'] (1 <none> 'fatal: A branch named \'telemetry-try\' already exists.')) ))<line_sep>browser.RunRemote()<line_sep>self.assertEquals(('Error creating branch telemetry-tryjob. '<concat>'Please delete it if it exists.\n'<concat>'fatal: A branch named \'telemetry-try\' already exists.\n') self.log_output.getvalue())<block_end><def_stmt>_GetConfigForBrowser self name platform branch cfg_filename is_blink=<false><block_start>finder_options=browser_options.BrowserFinderOptions()<line_sep>bot='%s_perf_bisect'%name.replace('trybot-' '').replace('-' '_')<line_sep>self._MockTryserverJson({bot:'stuff'})<line_sep>browser=trybot_browser_finder.PossibleTrybotBrowser(name finder_options)<line_sep>first_processes=()<if_stmt>is_blink<block_start>first_processes=((['git' 'rev-parse' '--abbrev-ref' 'HEAD'] (0 'br' <none>)) (['git' 'update-index' '--refresh' '-q'] (0 <none> <none> )) (['git' 'diff-index' 'HEAD'] (0 '' <none>)) (['git' 'log' 'origin/master..HEAD'] (0 '' <none>)))<block_end>self._ExpectProcesses(first_processes+((['git' 'rev-parse' '--abbrev-ref' 'HEAD'] (0 branch <none>)) (['git' 'update-index' '--refresh' '-q'] (0 <none> <none> )) (['git' 'diff-index' 'HEAD'] (0 '' <none>)) (['git' 'log' 'origin/master..HEAD'] (0 'logs here' <none>)) (['git' 'checkout' '-b' 'telemetry-tryjob'] (0 <none> <none>)) (['git' 'branch' '--set-upstream-to' 'origin/master'] (0 <none> <none>)) (['git' 'commit' '-a' '-m' 'bisect config: %s'%platform] (0 <none> <none>)) (['git' 'cl' 'upload' '-f' '--bypass-hooks' '-m' 'CL for perf tryjob on %s'%platform] (0 'stuff https://codereview.chromium.org/12345 stuff' <none>)) (['git' 'cl' 'try' '-m' 'tryserver.chromium.perf' '-b' bot] (0 <none> <none>)) (['git' 'checkout' branch] (0 <none> <none>)) (['git' 'branch' '-D' 'telemetry-tryjob'] (0 <none> <none>))))<line_sep>self._stubs.sys.argv=['tools/perf/run_benchmark' '--browser=%s'%browser 'sunspider']<line_sep>cfg=StringIO.StringIO()<line_sep>self._stubs.open.files={cfg_filename:cfg}<line_sep>browser.RunRemote()<line_sep><return>cfg.getvalue()<block_end><def_stmt>test_config_android self<block_start>config=self._GetConfigForBrowser('trybot-android-nexus4' 'android' 'somebranch' 'tools/run-perf-test.cfg')<line_sep>self.assertEquals(('config = {\n'<concat>' "command": "./tools/perf/run_benchmark '<concat>'--browser=android-chromium sunspider",\n'<concat>' "max_time_minutes": "120",\n'<concat>' "repeat_count": "1",\n'<concat>' "target_arch": "ia32",\n'<concat>' "truncate_percent": "0"\n'<concat>'}') config)<block_end><def_stmt>test_config_mac self<block_start>config=self._GetConfigForBrowser('trybot-mac-10-9' 'mac' 'currentwork' 'tools/run-perf-test.cfg')<line_sep>self.assertEquals(('config = {\n'<concat>' "command": "./tools/perf/run_benchmark '<concat>'--browser=release 
sunspider",\n'<concat>' "max_time_minutes": "120",\n'<concat>' "repeat_count": "1",\n'<concat>' "target_arch": "ia32",\n'<concat>' "truncate_percent": "0"\n'<concat>'}') config)<block_end><def_stmt>test_config_win_x64 self<block_start>config=self._GetConfigForBrowser('trybot-win-x64' 'win-x64' 'currentwork' 'tools/run-perf-test.cfg')<line_sep>self.assertEquals(('config = {\n'<concat>' "command": "python tools\\\\perf\\\\run_benchmark '<concat>'--browser=release_x64 sunspider",\n'<concat>' "max_time_minutes": "120",\n'<concat>' "repeat_count": "1",\n'<concat>' "target_arch": "x64",\n'<concat>' "truncate_percent": "0"\n'<concat>'}') config)<block_end><def_stmt>test_config_blink self<block_start>config=self._GetConfigForBrowser('trybot-mac-10-9' 'mac' 'blinkbranch' 'Tools/run-perf-test.cfg' <true>)<line_sep>self.assertEquals(('config = {\n'<concat>' "command": "./tools/perf/run_benchmark '<concat>'--browser=release sunspider",\n'<concat>' "max_time_minutes": "120",\n'<concat>' "repeat_count": "1",\n'<concat>' "target_arch": "ia32",\n'<concat>' "truncate_percent": "0"\n'<concat>'}') config)<block_end><def_stmt>test_update_config_git_commit_tryboterror self<block_start>finder_options=browser_options.BrowserFinderOptions()<line_sep>self._MockTryserverJson({'android_nexus4_perf_bisect':'stuff'})<line_sep>browser=trybot_browser_finder.PossibleTrybotBrowser('trybot-android-nexus4' finder_options)<line_sep>self._ExpectProcesses(((['git' 'commit' '-a' '-m' 'bisect config: android'] (128 'None' 'commit failed')) (['git' 'cl' 'upload' '-f' '--bypass-hooks' '-m' 'CL for perf tryjob on android'] (0 'stuff https://codereview.chromium.org/12345 stuff' <none>)) (['git' 'cl' 'try' '-m' 'tryserver.chromium.perf' '-b' 'android_nexus4_perf_bisect'] (0 <none> <none>))))<line_sep>self._stubs.sys.argv=['tools/perf/run_benchmark' '--browser=%s'%browser 'sunspider']<line_sep>cfg_filename='tools/run-perf-test.cfg'<line_sep>cfg=StringIO.StringIO()<line_sep>self._stubs.open.files={cfg_filename:cfg}<line_sep>self.assertRaises(trybot_browser_finder.TrybotError browser._UpdateConfigAndRunTryjob 'android' cfg_filename)<block_end><def_stmt>test_update_config_git_upload_tryboterror self<block_start>finder_options=browser_options.BrowserFinderOptions()<line_sep>self._MockTryserverJson({'android_nexus4_perf_bisect':'stuff'})<line_sep>browser=trybot_browser_finder.PossibleTrybotBrowser('trybot-android-nexus4' finder_options)<line_sep>self._ExpectProcesses(((['git' 'commit' '-a' '-m' 'bisect config: android'] (0 'None' <none>)) (['git' 'cl' 'upload' '-f' '--bypass-hooks' '-m' 'CL for perf tryjob on android'] (128 <none> 'error')) (['git' 'cl' 'try' '-m' 'tryserver.chromium.perf' '-b' 'android_nexus4_perf_bisect'] (0 <none> <none>))))<line_sep>self._stubs.sys.argv=['tools/perf/run_benchmark' '--browser=%s'%browser 'sunspider']<line_sep>cfg_filename='tools/run-perf-test.cfg'<line_sep>cfg=StringIO.StringIO()<line_sep>self._stubs.open.files={cfg_filename:cfg}<line_sep>self.assertRaises(trybot_browser_finder.TrybotError browser._UpdateConfigAndRunTryjob 'android' cfg_filename)<block_end><def_stmt>test_update_config_git_try_tryboterror self<block_start>finder_options=browser_options.BrowserFinderOptions()<line_sep>self._MockTryserverJson({'android_nexus4_perf_bisect':'stuff'})<line_sep>browser=trybot_browser_finder.PossibleTrybotBrowser('trybot-android-nexus4' finder_options)<line_sep>self._ExpectProcesses(((['git' 'commit' '-a' '-m' 'bisect config: android'] (0 'None' <none>)) (['git' 'cl' 'upload' '-f' '--bypass-hooks' '-m' 'CL for 
perf tryjob on android'] (0 'stuff https://codereview.chromium.org/12345 stuff' <none>)) (['git' 'cl' 'try' '-m' 'tryserver.chromium.perf' '-b' 'android_nexus4_perf_bisect'] (128 <none> <none>))))<line_sep>self._stubs.sys.argv=['tools/perf/run_benchmark' '--browser=%s'%browser 'sunspider']<line_sep>cfg_filename='tools/run-perf-test.cfg'<line_sep>cfg=StringIO.StringIO()<line_sep>self._stubs.open.files={cfg_filename:cfg}<line_sep>self.assertRaises(trybot_browser_finder.TrybotError browser._UpdateConfigAndRunTryjob 'android' cfg_filename)<block_end><def_stmt>test_update_config_git_try self<block_start>finder_options=browser_options.BrowserFinderOptions()<line_sep>self._MockTryserverJson({'android_nexus4_perf_bisect':'stuff'})<line_sep>browser=trybot_browser_finder.PossibleTrybotBrowser('trybot-android-nexus4' finder_options)<line_sep>self._ExpectProcesses(((['git' 'commit' '-a' '-m' 'bisect config: android'] (0 'None' <none>)) (['git' 'cl' 'upload' '-f' '--bypass-hooks' '-m' 'CL for perf tryjob on android'] (0 'stuff https://codereview.chromium.org/12345 stuff' <none>)) (['git' 'cl' 'try' '-m' 'tryserver.chromium.perf' '-b' 'android_nexus4_perf_bisect'] (0 <none> <none>))))<line_sep>self._stubs.sys.argv=['tools/perf/run_benchmark' '--browser=%s'%browser 'sunspider']<line_sep>cfg_filename='tools/run-perf-test.cfg'<line_sep>cfg=StringIO.StringIO()<line_sep>self._stubs.open.files={cfg_filename:cfg}<line_sep>self.assertEquals((0 'https://codereview.chromium.org/12345') browser._UpdateConfigAndRunTryjob('android' cfg_filename))<block_end><block_end>
"""Test the utilities."""<import_from_future_stmt> absolute_import print_function division<import_stmt>pytest<import_stmt>numpy<as>np<import_stmt>tensorflow<as>tf<import_from_stmt>numpy.testing assert_allclose<import_from_stmt>safe_learning.utilities dlqr get_storage set_storage get_feed_dict unique_rows compute_trajectory <import_from_stmt>safe_learning LinearSystem<def_stmt>test_dlqr <block_start>"""Test the dlqr function."""<line_sep>true_k=np.array([[0.61803399]])<line_sep>true_p=np.array([[1.61803399]])<line_sep>k,p=dlqr(1 1 1 1)<line_sep>assert_allclose(k true_k)<line_sep>assert_allclose(p true_p)<line_sep>k,p=dlqr([[1]] [[1]] [[1]] [[1]])<line_sep>assert_allclose(k true_k)<line_sep>assert_allclose(p true_p)<block_end><class_stmt>TestStorage(object)<block_start>"""Test the class storage."""<line_sep>@pytest.fixture<def_stmt>sample_class self<block_start>"""Sample class for testing."""<class_stmt>A(object)<block_start>"""Some class."""<def_stmt>__init__ self<block_start>"""Initialize."""<line_sep>super(A self).__init__()<line_sep>self.storage={}<block_end><def_stmt>method self value index=<none><block_start>storage=get_storage(self.storage index=index)<line_sep>set_storage(self.storage [('value' value)] index=index)<line_sep><return>storage<block_end><block_end><return>A()<block_end><def_stmt>test_storage self sample_class<block_start>"""Test the storage."""<line_sep>storage=sample_class.method(5)<assert_stmt>storage<is><none><line_sep>storage=sample_class.method(4)<assert_stmt>storage['value']<eq>5<line_sep>storage=sample_class.method(<none>)<assert_stmt>storage['value']<eq>4<line_sep># Test index storage=sample_class.method(3 index='test')<assert_stmt>storage<is><none><line_sep>storage=sample_class.method(4 index='test')<assert_stmt>storage['value']<eq>3<line_sep>storage=sample_class.method(3 index='test2')<assert_stmt>storage<is><none><line_sep>storage=sample_class.method(3 index='test')<assert_stmt>storage['value']<is>4<block_end><block_end><def_stmt>test_get_feed_dict <block_start>"""Test the global get_feed_dict method."""<line_sep>graph=tf.Graph()<line_sep>feed_dict=get_feed_dict(graph)<line_sep># Initialized new dictionary <assert_stmt>feed_dict<eq>{}<line_sep># Test assignment feed_dict['test']=5<line_sep># Make sure we keep getting the same object <assert_stmt>feed_dict<is>get_feed_dict(graph)<block_end><def_stmt>test_unique_rows <block_start>"""Test the unique_rows function."""<line_sep>a=np.array([[1 1] [1 2] [1 3] [1 2] [1 3] [1 4] [2 3]])<line_sep>uniques=np.array([[1 1] [1 2] [1 3] [1 4] [2 3]])<line_sep>assert_allclose(unique_rows(a) uniques)<block_end><def_stmt>test_compute_trajectory <block_start>"""Test the compute_trajectory function."""<line_sep>A=np.array([[1. 0.1] [0. 1.]])<line_sep>B=np.array([[0.01] [0.1]])<line_sep>dynamics=LinearSystem((A B))<line_sep>Q=np.diag([1. 0.01])<line_sep>R=np.array([[0.01]])<line_sep>K,_=dlqr(A B Q R)<line_sep>policy=LinearSystem([-K])<line_sep>x0=np.array([[0.1 0.]])<with_stmt>tf.Session()<as>sess<block_start>res=compute_trajectory(dynamics policy x0 num_steps=20)<block_end>states,actions=res<line_sep>assert_allclose(states[[0] :] x0)<line_sep>assert_allclose(states[-1 :] np.array([0. 0.]) atol=0.01)<line_sep>assert_allclose(actions states[:-1].dot(-K.T))<block_end>
# Copyright The PyTorch Lightning team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Weights and Biases Logger ------------------------- """<import_stmt>operator<import_stmt>os<import_from_stmt>argparse Namespace<import_from_stmt>pathlib Path<import_from_stmt>typing Any Dict List Optional Union<import_from_stmt>weakref ReferenceType<import_stmt>torch.nn<as>nn<import_from_stmt>pytorch_lightning.callbacks.model_checkpoint ModelCheckpoint<import_from_stmt>pytorch_lightning.loggers.base LightningLoggerBase rank_zero_experiment<import_from_stmt>pytorch_lightning.utilities _module_available rank_zero_only<import_from_stmt>pytorch_lightning.utilities.exceptions MisconfigurationException<import_from_stmt>pytorch_lightning.utilities.imports _compare_version<import_from_stmt>pytorch_lightning.utilities.warnings rank_zero_warn<line_sep>_WANDB_AVAILABLE=_module_available("wandb")<line_sep>_WANDB_GREATER_EQUAL_0_10_22=_compare_version("wandb" operator.ge "0.10.22")<try_stmt><block_start><import_stmt>wandb<import_from_stmt>wandb.wandb_run Run<block_end><except_stmt>ModuleNotFoundError# needed for test mocks, these tests shall be updated <block_start>wandb,Run=<none> <none><block_end><class_stmt>WandbLogger(LightningLoggerBase)<block_start>r""" Log using `Weights and Biases <https://docs.wandb.ai/integrations/lightning>`_. **Installation and set-up** Install with pip: .. code-block:: bash pip install wandb Create a `WandbLogger` instance: .. code-block:: python from pytorch_lightning.loggers import WandbLogger wandb_logger = WandbLogger(project="MNIST") Pass the logger instance to the `Trainer`: .. code-block:: python trainer = Trainer(logger=wandb_logger) A new W&B run will be created when training starts if you have not created one manually before with `wandb.init()`. **Log metrics** Log from :class:`~pytorch_lightning.core.lightning.LightningModule`: .. code-block:: python class LitModule(LightningModule): def training_step(self, batch, batch_idx): self.log("train/loss", loss) Use directly wandb module: .. code-block:: python wandb.log({"train/loss": loss}) **Log hyper-parameters** Save :class:`~pytorch_lightning.core.lightning.LightningModule` parameters: .. code-block:: python class LitModule(LightningModule): def __init__(self, *args, **kwarg): self.save_hyperparameters() Add other config parameters: .. code-block:: python # add one parameter wandb_logger.experiment.config["key"] = value # add multiple parameters wandb_logger.experiment.config.update({key1: val1, key2: val2}) # use directly wandb module wandb.config["key"] = value wandb.config.update() **Log gradients, parameters and model topology** Call the `watch` method for automatically tracking gradients: .. 
code-block:: python # log gradients and model topology wandb_logger.watch(model) # log gradients, parameter histogram and model topology wandb_logger.watch(model, log="all") # change log frequency of gradients and parameters (100 steps by default) wandb_logger.watch(model, log_freq=500) # do not log graph (in case of errors) wandb_logger.watch(model, log_graph=False) The `watch` method adds hooks to the model which can be removed at the end of training: .. code-block:: python wandb_logger.unwatch(model) **Log model checkpoints** Log model checkpoints at the end of training: .. code-block:: python wandb_logger = WandbLogger(log_model=True) Log model checkpoints as they get created during training: .. code-block:: python wandb_logger = WandbLogger(log_model="all") Custom checkpointing can be set up through :class:`~pytorch_lightning.callbacks.ModelCheckpoint`: .. code-block:: python # log model only if `val_accuracy` increases wandb_logger = WandbLogger(log_model="all") checkpoint_callback = ModelCheckpoint(monitor="val_accuracy", mode="max") trainer = Trainer(logger=wandb_logger, callbacks=[checkpoint_callback]) `latest` and `best` aliases are automatically set to easily retrieve a model checkpoint: .. code-block:: python # reference can be retrieved in artifacts panel # "VERSION" can be a version (ex: "v2") or an alias ("latest or "best") checkpoint_reference = "USER/PROJECT/MODEL-RUN_ID:VERSION" # download checkpoint locally (if not already cached) run = wandb.init(project="MNIST") artifact = run.use_artifact(checkpoint_reference, type="model") artifact_dir = artifact.download() # load checkpoint model = LitModule.load_from_checkpoint(Path(artifact_dir) / "model.ckpt") **Log media** Log text with: .. code-block:: python # using columns and data columns = ["input", "label", "prediction"] data = [["cheese", "english", "english"], ["fromage", "french", "spanish"]] wandb_logger.log_text(key="samples", columns=columns, data=data) # using a pandas DataFrame wandb_logger.log_text(key="samples", dataframe=my_dataframe) Log images with: .. code-block:: python # using tensors, numpy arrays or PIL images wandb_logger.log_image(key="samples", images=[img1, img2]) # adding captions wandb_logger.log_image(key="samples", images=[img1, img2], caption=["tree", "person"]) # using file path wandb_logger.log_image(key="samples", images=["img_1.jpg", "img_2.jpg"]) More arguments can be passed for logging segmentation masks and bounding boxes. Refer to `Image Overlays documentation <https://docs.wandb.ai/guides/track/log/media#image-overlays>`_. **Log Tables** `W&B Tables <https://docs.wandb.ai/guides/data-vis>`_ can be used to log, query and analyze tabular data. They support any type of media (text, image, video, audio, molecule, html, etc) and are great for storing, understanding and sharing any form of data, from datasets to model predictions. .. code-block:: python columns = ["caption", "image", "sound"] data = [["cheese", wandb.Image(img_1), wandb.Audio(snd_1)], ["wine", wandb.Image(img_2), wandb.Audio(snd_2)]] wandb_logger.log_table(key="samples", columns=columns, data=data) See Also: - `Demo in Google Colab <http://wandb.me/lightning>`__ with hyperparameter search and model logging - `W&B Documentation <https://docs.wandb.ai/integrations/lightning>`__ Args: name: Display name for the run. save_dir: Path where data is saved (wandb dir by default). offline: Run offline (data can be streamed later to wandb servers). id: Sets the version, mainly used to resume a previous run. version: Same as id. 
anonymous: Enables or explicitly disables anonymous logging. project: The name of the project to which this run will belong. log_model: Log checkpoints created by :class:`~pytorch_lightning.callbacks.model_checkpoint.ModelCheckpoint` as W&B artifacts. `latest` and `best` aliases are automatically set. * if ``log_model == 'all'``, checkpoints are logged during training. * if ``log_model == True``, checkpoints are logged at the end of training, except when :paramref:`~pytorch_lightning.callbacks.model_checkpoint.ModelCheckpoint.save_top_k` ``== -1`` which also logs every checkpoint during training. * if ``log_model == False`` (default), no checkpoint is logged. prefix: A string to put at the beginning of metric keys. experiment: WandB experiment object. Automatically set when creating a run. \**kwargs: Arguments passed to :func:`wandb.init` like `entity`, `group`, `tags`, etc. Raises: ModuleNotFoundError: If required WandB package is not installed on the device. MisconfigurationException: If both ``log_model`` and ``offline`` is set to ``True``. """<line_sep>LOGGER_JOIN_CHAR="-"<def_stmt>__init__ self name:Optional[str]=<none> save_dir:Optional[str]=<none> offline:Optional[bool]=<false> id:Optional[str]=<none> anonymous:Optional[bool]=<none> version:Optional[str]=<none> project:Optional[str]=<none> log_model:Optional[bool]=<false> experiment=<none> prefix:Optional[str]="" **kwargs <block_start><if_stmt>wandb<is><none><block_start><raise>ModuleNotFoundError("You want to use `wandb` logger which is not installed yet,"<concat>" install it with `pip install wandb`."# pragma: no-cover )<block_end><if_stmt>offline<and>log_model<block_start><raise>MisconfigurationException(f"Providing log_model={log_model} and offline={offline} is an invalid configuration"<concat>" since model checkpoints cannot be uploaded in offline mode.\n"<concat>"Hint: Set `offline=False` to log your model.")<block_end><if_stmt>log_model<and><not>_WANDB_GREATER_EQUAL_0_10_22<block_start>rank_zero_warn(f"Providing log_model={log_model} requires wandb version >= 0.10.22"<concat>" for logging associated model metadata.\n"<concat>"Hint: Upgrade with `pip install --upgrade wandb`.")<block_end>super().__init__()<line_sep>self._offline=offline<line_sep>self._log_model=log_model<line_sep>self._prefix=prefix<line_sep>self._experiment=experiment<line_sep>self._logged_model_time={}<line_sep>self._checkpoint_callback=<none><line_sep># set wandb init arguments anonymous_lut={<true>:"allow" <false>:<none>}<line_sep>self._wandb_init=dict(name=name project=project id=version<or>id dir=save_dir resume="allow" anonymous=anonymous_lut.get(anonymous anonymous) )<line_sep>self._wandb_init.update(**kwargs)<line_sep># extract parameters self._save_dir=self._wandb_init.get("dir")<line_sep>self._name=self._wandb_init.get("name")<line_sep>self._id=self._wandb_init.get("id")<block_end><def_stmt>__getstate__ self<block_start>state=self.__dict__.copy()<line_sep># args needed to reload correct experiment state["_id"]=self._experiment.id<if>self._experiment<is><not><none><else><none><line_sep># cannot be pickled state["_experiment"]=<none><line_sep><return>state<block_end>@property@rank_zero_experiment<def_stmt>experiment self<arrow>Run<block_start>r""" Actual wandb object. To use wandb features in your :class:`~pytorch_lightning.core.lightning.LightningModule` do the following. Example:: .. 
code-block:: python self.logger.experiment.some_wandb_function() """<if_stmt>self._experiment<is><none><block_start><if_stmt>self._offline<block_start>os.environ["WANDB_MODE"]="dryrun"<block_end><if_stmt>wandb.run<is><none><block_start>self._experiment=wandb.init(**self._wandb_init)<block_end><else_stmt><block_start>rank_zero_warn("There is a wandb run already in progress and newly created instances of `WandbLogger` will reuse"<concat>" this run. If this is not desired, call `wandb.finish()` before instantiating `WandbLogger`.")<line_sep>self._experiment=wandb.run<block_end><block_end># define default x-axis (for latest wandb versions) <if_stmt>getattr(self._experiment "define_metric" <none>)<block_start>self._experiment.define_metric("trainer/global_step")<line_sep>self._experiment.define_metric("*" step_metric="trainer/global_step" step_sync=<true>)<block_end><return>self._experiment<block_end><def_stmt>watch self model:nn.Module log:str="gradients" log_freq:int=100 log_graph:bool=<true><block_start>self.experiment.watch(model log=log log_freq=log_freq log_graph=log_graph)<block_end>@rank_zero_only<def_stmt>log_hyperparams self params:Union[Dict[str Any] Namespace]<arrow><none><block_start>params=self._convert_params(params)<line_sep>params=self._flatten_dict(params)<line_sep>params=self._sanitize_callable_params(params)<line_sep>self.experiment.config.update(params allow_val_change=<true>)<block_end>@rank_zero_only<def_stmt>log_metrics self metrics:Dict[str float] step:Optional[int]=<none><arrow><none><block_start><assert_stmt>rank_zero_only.rank<eq>0 "experiment tried to log from global_rank != 0"<line_sep>metrics=self._add_prefix(metrics)<if_stmt>step<is><not><none><block_start>self.experiment.log({**metrics "trainer/global_step":step})<block_end><else_stmt><block_start>self.experiment.log(metrics)<block_end><block_end>@rank_zero_only<def_stmt>log_table self key:str columns:List[str]=<none> data:List[List[Any]]=<none> dataframe:Any=<none> step:Optional[int]=<none> <arrow><none><block_start>"""Log a Table containing any object type (text, image, audio, video, molecule, html, etc). Can be defined either with `columns` and `data` or with `dataframe`. """<line_sep>metrics={key:wandb.Table(columns=columns data=data dataframe=dataframe)}<line_sep>self.log_metrics(metrics step)<block_end>@rank_zero_only<def_stmt>log_text self key:str columns:List[str]=<none> data:List[List[str]]=<none> dataframe:Any=<none> step:Optional[int]=<none> <arrow><none><block_start>"""Log text as a Table. Can be defined either with `columns` and `data` or with `dataframe`. """<line_sep>self.log_table(key columns data dataframe step)<block_end>@rank_zero_only<def_stmt>log_image self key:str images:List[Any] **kwargs:str<arrow><none><block_start>"""Log images (tensors, numpy arrays, PIL Images or file paths). Optional kwargs are lists passed to each image (ex: caption, masks, boxes). 
"""<if_stmt><not>isinstance(images list)<block_start><raise>TypeError(f'Expected a list as "images", found {type(images)}')<block_end>n=len(images)<for_stmt>k,v kwargs.items()<block_start><if_stmt>len(v)<ne>n<block_start><raise>ValueError(f"Expected {n} items but only found {len(v)} for {k}")<block_end><block_end>step=kwargs.pop("step" <none>)<line_sep>kwarg_list=[{k:kwargs[k][i]<for>k kwargs.keys()}<for>i range(n)]<line_sep>metrics={key:[wandb.Image(img **kwarg)<for>img,kwarg zip(images kwarg_list)]}<line_sep>self.log_metrics(metrics step)<block_end>@property<def_stmt>save_dir self<arrow>Optional[str]<block_start>"""Gets the save directory. Returns: The path to the save directory. """<line_sep><return>self._save_dir<block_end>@property<def_stmt>name self<arrow>Optional[str]<block_start>"""Gets the name of the experiment. Returns: The name of the experiment if the experiment exists else the name given to the constructor. """<line_sep># don't create an experiment if we don't have one <return>self._experiment.project_name()<if>self._experiment<else>self._name<block_end>@property<def_stmt>version self<arrow>Optional[str]<block_start>"""Gets the id of the experiment. Returns: The id of the experiment if the experiment exists else the id given to the constructor. """<line_sep># don't create an experiment if we don't have one <return>self._experiment.id<if>self._experiment<else>self._id<block_end><def_stmt>after_save_checkpoint self checkpoint_callback:"ReferenceType[ModelCheckpoint]"<arrow><none># log checkpoints as artifacts <block_start><if_stmt>self._log_model<eq>"all"<or>self._log_model<is><true><and>checkpoint_callback.save_top_k<eq>-1<block_start>self._scan_and_log_checkpoints(checkpoint_callback)<block_end><elif_stmt>self._log_model<is><true><block_start>self._checkpoint_callback=checkpoint_callback<block_end><block_end>@rank_zero_only<def_stmt>finalize self status:str<arrow><none># log checkpoints as artifacts <block_start><if_stmt>self._checkpoint_callback<block_start>self._scan_and_log_checkpoints(self._checkpoint_callback)<block_end><block_end><def_stmt>_scan_and_log_checkpoints self checkpoint_callback:"ReferenceType[ModelCheckpoint]"<arrow><none># get checkpoints to be saved with associated score <block_start>checkpoints={checkpoint_callback.last_model_path:checkpoint_callback.current_score checkpoint_callback.best_model_path:checkpoint_callback.best_model_score **checkpoint_callback.best_k_models }<line_sep>checkpoints=sorted((Path(p).stat().st_mtime p s)<for>p,s checkpoints.items()<if>Path(p).is_file())<line_sep>checkpoints=[c<for>c checkpoints<if>c[1]<not><in>self._logged_model_time.keys()<or>self._logged_model_time[c[1]]<l>c[0]]<line_sep># log iteratively all new checkpoints <for_stmt>t,p,s checkpoints<block_start>metadata=({"score":s "original_filename":Path(p).name "ModelCheckpoint":{k:getattr(checkpoint_callback k)<for>k ["monitor" "mode" "save_last" "save_top_k" "save_weights_only" "_every_n_train_steps" ]# ensure it does not break if `ModelCheckpoint` args change <if>hasattr(checkpoint_callback k)} }<if>_WANDB_GREATER_EQUAL_0_10_22<else><none>)<line_sep>artifact=wandb.Artifact(name=f"model-{self.experiment.id}" type="model" metadata=metadata)<line_sep>artifact.add_file(p name="model.ckpt")<line_sep>aliases=["latest" "best"]<if>p<eq>checkpoint_callback.best_model_path<else>["latest"]<line_sep>self.experiment.log_artifact(artifact aliases=aliases)<line_sep># remember logged models - timestamp needed in case filename didn't change (lastkckpt or custom name) 
self._logged_model_time[p]=t<block_end><block_end><block_end>
""" Optimizer Factory w/ Custom Weight Decay Hacked together by / Copyright 2020 <NAME> """<import_stmt>torch<import_from_stmt>torch optim<as>optim<import_from_stmt>.adafactor Adafactor<import_from_stmt>.adahessian Adahessian<import_from_stmt>.adamp AdamP<import_from_stmt>.lookahead Lookahead<import_from_stmt>.nadam Nadam<import_from_stmt>.novograd NovoGrad<import_from_stmt>.nvnovograd NvNovoGrad<import_from_stmt>.radam RAdam<import_from_stmt>.rmsprop_tf RMSpropTF<import_from_stmt>.sgdp SGDP<try_stmt><block_start><import_from_stmt>apex.optimizers FusedNovoGrad FusedAdam FusedLAMB FusedSGD<line_sep>has_apex=<true><block_end><except_stmt>ImportError<block_start>has_apex=<false><block_end><def_stmt>add_weight_decay model weight_decay=1e-5 skip_list=()<block_start>decay=[]<line_sep>no_decay=[]<for_stmt>name,param model.named_parameters()<block_start><if_stmt><not>param.requires_grad<block_start><continue># frozen weights <block_end><if_stmt>len(param.shape)<eq>1<or>name.endswith(".bias")<or>name<in>skip_list<block_start>no_decay.append(param)<block_end><else_stmt><block_start>decay.append(param)<block_end><block_end><return>[{'params':no_decay 'weight_decay':0.} {'params':decay 'weight_decay':weight_decay}]<block_end><def_stmt>create_optimizer args model filter_bias_and_bn=<true><block_start>opt_lower=args.opt.lower()<line_sep>weight_decay=args.weight_decay<if_stmt>weight_decay<and>filter_bias_and_bn<block_start>skip={}<if_stmt>hasattr(model 'no_weight_decay')<block_start>skip=model.no_weight_decay()<block_end>parameters=add_weight_decay(model weight_decay skip)<line_sep>weight_decay=0.<block_end><else_stmt><block_start>parameters=model.parameters()<block_end><if_stmt>'fused'<in>opt_lower<block_start><assert_stmt>has_apex<and>torch.cuda.is_available() 'APEX and CUDA required for fused optimizers'<block_end>opt_args=dict(lr=args.lr weight_decay=weight_decay)<if_stmt>hasattr(args 'opt_eps')<and>args.opt_eps<is><not><none><block_start>opt_args['eps']=args.opt_eps<block_end><if_stmt>hasattr(args 'opt_betas')<and>args.opt_betas<is><not><none><block_start>opt_args['betas']=args.opt_betas<block_end><if_stmt>hasattr(args 'opt_args')<and>args.opt_args<is><not><none><block_start>opt_args.update(args.opt_args)<block_end>opt_split=opt_lower.split('_')<line_sep>opt_lower=opt_split[-1]<if_stmt>opt_lower<eq>'sgd'<or>opt_lower<eq>'nesterov'<block_start>opt_args.pop('eps' <none>)<line_sep>optimizer=optim.SGD(parameters momentum=args.momentum nesterov=<true> **opt_args)<block_end><elif_stmt>opt_lower<eq>'momentum'<block_start>opt_args.pop('eps' <none>)<line_sep>optimizer=optim.SGD(parameters momentum=args.momentum nesterov=<false> **opt_args)<block_end><elif_stmt>opt_lower<eq>'adam'<block_start>optimizer=optim.Adam(parameters **opt_args)<block_end><elif_stmt>opt_lower<eq>'adamw'<block_start>optimizer=optim.AdamW(parameters **opt_args)<block_end><elif_stmt>opt_lower<eq>'nadam'<block_start>optimizer=Nadam(parameters **opt_args)<block_end><elif_stmt>opt_lower<eq>'radam'<block_start>optimizer=RAdam(parameters **opt_args)<block_end><elif_stmt>opt_lower<eq>'adamp'<block_start>optimizer=AdamP(parameters wd_ratio=0.01 nesterov=<true> **opt_args)<block_end><elif_stmt>opt_lower<eq>'sgdp'<block_start>optimizer=SGDP(parameters momentum=args.momentum nesterov=<true> **opt_args)<block_end><elif_stmt>opt_lower<eq>'adadelta'<block_start>optimizer=optim.Adadelta(parameters 
**opt_args)<block_end><elif_stmt>opt_lower<eq>'adafactor'<block_start><if_stmt><not>args.lr<block_start>opt_args['lr']=<none><block_end>optimizer=Adafactor(parameters **opt_args)<block_end><elif_stmt>opt_lower<eq>'adahessian'<block_start>optimizer=Adahessian(parameters **opt_args)<block_end><elif_stmt>opt_lower<eq>'rmsprop'<block_start>optimizer=optim.RMSprop(parameters alpha=0.9 momentum=args.momentum **opt_args)<block_end><elif_stmt>opt_lower<eq>'rmsproptf'<block_start>optimizer=RMSpropTF(parameters alpha=0.9 momentum=args.momentum **opt_args)<block_end><elif_stmt>opt_lower<eq>'novograd'<block_start>optimizer=NovoGrad(parameters **opt_args)<block_end><elif_stmt>opt_lower<eq>'nvnovograd'<block_start>optimizer=NvNovoGrad(parameters **opt_args)<block_end><elif_stmt>opt_lower<eq>'fusedsgd'<block_start>opt_args.pop('eps' <none>)<line_sep>optimizer=FusedSGD(parameters momentum=args.momentum nesterov=<true> **opt_args)<block_end><elif_stmt>opt_lower<eq>'fusedmomentum'<block_start>opt_args.pop('eps' <none>)<line_sep>optimizer=FusedSGD(parameters momentum=args.momentum nesterov=<false> **opt_args)<block_end><elif_stmt>opt_lower<eq>'fusedadam'<block_start>optimizer=FusedAdam(parameters adam_w_mode=<false> **opt_args)<block_end><elif_stmt>opt_lower<eq>'fusedadamw'<block_start>optimizer=FusedAdam(parameters adam_w_mode=<true> **opt_args)<block_end><elif_stmt>opt_lower<eq>'fusedlamb'<block_start>optimizer=FusedLAMB(parameters **opt_args)<block_end><elif_stmt>opt_lower<eq>'fusednovograd'<block_start>opt_args.setdefault('betas' (0.95 0.98))<line_sep>optimizer=FusedNovoGrad(parameters **opt_args)<block_end><else_stmt><block_start><raise>ValueError("Invalid optimizer: %s"%opt_lower)<block_end><if_stmt>len(opt_split)<g>1<block_start><if_stmt>opt_split[0]<eq>'lookahead'<block_start>optimizer=Lookahead(optimizer)<block_end><block_end><return>optimizer<block_end>
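# --- illustrative usage sketch (not part of the original module) ---
# A minimal way to drive create_optimizer() with an argparse-style namespace.
# The attribute names (opt, lr, weight_decay, momentum, opt_eps, opt_betas)
# are inferred from the accesses in create_optimizer() above; the model and
# all values below are invented for illustration.
if __name__ == '__main__':
    from types import SimpleNamespace
    import torch.nn as nn

    args = SimpleNamespace(
        opt='adamw',        # a 'lookahead_' prefix (e.g. 'lookahead_adamw') would wrap the result
        lr=1e-3,
        weight_decay=0.05,
        momentum=0.9,
        opt_eps=1e-8,
        opt_betas=None,
    )
    model = nn.Sequential(nn.Linear(16, 16), nn.ReLU(), nn.Linear(16, 2))
    optimizer = create_optimizer(args, model)
    # add_weight_decay() has put biases and other 1-d parameters into the
    # zero-weight-decay group; the remaining weights keep weight_decay=0.05.
    print(optimizer)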
# Scatter plot of a gaussian distribution # with varying color and point sizes <import_from_stmt>vedo *<import_from_stmt>vedo.pyplot plot<import_stmt>numpy<as>np<line_sep>n=1000<line_sep>x=np.random.randn(n)<line_sep>y=np.random.randn(n)<line_sep># define what size must have each marker: marker_sizes=np.sin(2<times>x)/8<line_sep># define a (r,g,b) list of colors for each marker: marker_cols=np.c_[np.cos(2<times>x) np.zeros(n) np.zeros(n)]<line_sep>txt0=Text2D("A scatter plot of a\n2D gaussian distribution")<line_sep>plt0=plot(x y ma=0.3 lw=0 # ma = marker alpha marker="*" # marker style xtitle="variable A" ytitle="variable B" )<line_sep>txt1=Text2D("marker size proportional to sin(2x) ")<line_sep>plt1=plot(x y ma=0.3 lw=0 marker="*" # marker style ms=marker_sizes # VARIABLE marker sizes mc='red' # same fixed color for markers )<line_sep>txt2=Text2D("marker size proportional to sin(2x)\nred level proportional to cos(2x)")<line_sep>plt2=plot(x y ma=0.3 lw=0 marker=">" # marker style ms=marker_sizes # VARIABLE marker sizes mc=marker_cols # VARIABLE marker colors )<line_sep>show(plt0 txt0 at=0 N=3 size=(1800 500))<line_sep>show(plt1 txt1 at=1)<line_sep>show(plt2 txt2 at=2 interactive=<true>).close()<line_sep>
"""Support for Telegram bot using polling."""<import_stmt>logging<import_from_stmt>telegram Update<import_from_stmt>telegram.error NetworkError RetryAfter TelegramError TimedOut<import_from_stmt>telegram.ext CallbackContext TypeHandler Updater<import_from_stmt>homeassistant.const EVENT_HOMEASSISTANT_START EVENT_HOMEASSISTANT_STOP<import_from_stmt>. BaseTelegramBotEntity<line_sep>_LOGGER=logging.getLogger(__name__)<async_keyword><def_stmt>async_setup_platform hass bot config<block_start>"""Set up the Telegram polling platform."""<line_sep>pollbot=PollBot(hass bot config)<line_sep>hass.bus.async_listen_once(EVENT_HOMEASSISTANT_START pollbot.start_polling)<line_sep>hass.bus.async_listen_once(EVENT_HOMEASSISTANT_STOP pollbot.stop_polling)<line_sep><return><true><block_end><def_stmt>process_error update:Update context:CallbackContext<block_start>"""Telegram bot error handler."""<try_stmt><block_start><raise>context.error<block_end><except_stmt>(TimedOut NetworkError RetryAfter)# Long polling timeout or connection problem. Nothing serious. <block_start><pass><block_end><except_stmt>TelegramError<block_start>_LOGGER.error('Update "%s" caused error: "%s"' update context.error)<block_end><block_end><class_stmt>PollBot(BaseTelegramBotEntity)<block_start>""" Controls the Updater object that holds the bot and a dispatcher. The dispatcher is set up by the super class to pass telegram updates to `self.handle_update` """<def_stmt>__init__ self hass bot config<block_start>"""Create Updater and Dispatcher before calling super()."""<line_sep>self.bot=bot<line_sep>self.updater=Updater(bot=bot workers=4)<line_sep>self.dispatcher=self.updater.dispatcher<line_sep>self.dispatcher.add_handler(TypeHandler(Update self.handle_update))<line_sep>self.dispatcher.add_error_handler(process_error)<line_sep>super().__init__(hass config)<block_end><def_stmt>start_polling self event=<none><block_start>"""Start the polling task."""<line_sep>_LOGGER.debug("Starting polling")<line_sep>self.updater.start_polling()<block_end><def_stmt>stop_polling self event=<none><block_start>"""Stop the polling task."""<line_sep>_LOGGER.debug("Stopping polling")<line_sep>self.updater.stop()<block_end><block_end>
''' Created on Nov 11, 2010 @author: Mark V Systems Limited (c) Copyright 2010 Mark V Systems Limited, All rights reserved. '''<import_stmt>os<import_from_stmt>arelle XmlUtil<import_from_stmt>arelle.ModelObject ModelObject<line_sep>newRssWatchOptions={"feedSource":"" "feedSourceUri":<none> "matchTextExpr":"" "formulaFileUri":"" "logFileUri":"" "emailAddress":"" "validateXbrlRules":<false> "validateDisclosureSystemRules":<false> "validateCalcLinkbase":<false> "validateFormulaAssertions":<false> "alertMatchedFactText":<false> "alertAssertionUnsuccessful":<false> "alertValiditionError":<false> "latestPubDate":<none> }<line_sep># Note: if adding to this list keep DialogRssWatch in sync <class_stmt>ModelRssItem(ModelObject)<block_start><def_stmt>init self modelDocument<block_start>super(ModelRssItem self).init(modelDocument)<try_stmt><block_start><if_stmt>(self.modelXbrl.modelManager.rssWatchOptions.latestPubDate<and>self.pubDate<le>self.modelXbrl.modelManager.rssWatchOptions.latestPubDate)<block_start>self.status=_("tested")<block_end><else_stmt><block_start>self.status=_("not tested")<block_end><block_end><except_stmt>AttributeError<block_start>self.status=_("not tested")<block_end>self.results=<none><line_sep>self.assertions=<none><line_sep># find edgar namespace self.edgr=<none><for_stmt>elt self.iterdescendants("{*}xbrlFiling")<block_start>self.edgr=elt.qname.namespaceURI<line_sep><break><block_end><if_stmt>self.edgr<block_start>edgrPrefix="{"+self.edgr+"}"<block_end><else_stmt><block_start>edgrPrefix=""<block_end>self.edgrDescription=edgrPrefix+"description"<line_sep>self.edgrFile=edgrPrefix+"file"<line_sep>self.edgrInlineXBRL=edgrPrefix+"inlineXBRL"<line_sep>self.edgrSequence=edgrPrefix+"sequence"<line_sep>self.edgrType=edgrPrefix+"type"<line_sep>self.edgrUrl=edgrPrefix+"url"<block_end>@property<def_stmt>cikNumber self<block_start><return>XmlUtil.text(XmlUtil.descendant(self self.edgr "cikNumber"))<block_end>@property<def_stmt>accessionNumber self<block_start><return>XmlUtil.text(XmlUtil.descendant(self self.edgr "accessionNumber"))<block_end>@property<def_stmt>fileNumber self<block_start><return>XmlUtil.text(XmlUtil.descendant(self self.edgr "fileNumber"))<block_end>@property<def_stmt>companyName self<block_start><return>XmlUtil.text(XmlUtil.descendant(self self.edgr "companyName"))<block_end>@property<def_stmt>formType self<block_start><return>XmlUtil.text(XmlUtil.descendant(self self.edgr "formType"))<block_end>@property<def_stmt>pubDate self<block_start><try_stmt><block_start><return>self._pubDate<block_end><except_stmt>AttributeError<block_start><import_from_stmt>arelle.UrlUtil parseRfcDatetime<line_sep>self._pubDate=parseRfcDatetime(XmlUtil.text(XmlUtil.descendant(self <none> "pubDate")))<line_sep><return>self._pubDate<block_end><block_end>@property<def_stmt>filingDate self<block_start><try_stmt><block_start><return>self._filingDate<block_end><except_stmt>AttributeError<block_start><import_stmt>datetime<line_sep>self._filingDate=<none><line_sep>date=XmlUtil.text(XmlUtil.descendant(self self.edgr "filingDate"))<line_sep>d=date.split("/")<if_stmt>d<and>len(d)<eq>3<block_start>self._filingDate=datetime.date(_INT(d[2]) _INT(d[0]) _INT(d[1]))<block_end><return>self._filingDate<block_end><block_end>@property<def_stmt>period self<block_start>per=XmlUtil.text(XmlUtil.descendant(self self.edgr "period"))<if_stmt>per<and>len(per)<eq>8<block_start><return>"{0}-{1}-{2}".format(per[0:4] per[4:6] per[6:8])<block_end><return><none><block_end>@property<def_stmt>assignedSic 
self<block_start><return>XmlUtil.text(XmlUtil.descendant(self self.edgr "assignedSic"))<block_end>@property<def_stmt>acceptanceDatetime self<block_start><try_stmt><block_start><return>self._acceptanceDatetime<block_end><except_stmt>AttributeError<block_start><import_stmt>datetime<line_sep>self._acceptanceDatetime=<none><line_sep>date=XmlUtil.text(XmlUtil.descendant(self self.edgr "acceptanceDatetime"))<if_stmt>date<and>len(date)<eq>14<block_start>self._acceptanceDatetime=datetime.datetime(_INT(date[0:4]) _INT(date[4:6]) _INT(date[6:8]) _INT(date[8:10]) _INT(date[10:12]) _INT(date[12:14]))<block_end><return>self._acceptanceDatetime<block_end><block_end>@property<def_stmt>fiscalYearEnd self<block_start>yrEnd=XmlUtil.text(XmlUtil.descendant(self self.edgr "fiscalYearEnd"))<if_stmt>yrEnd<and>len(yrEnd)<eq>4<block_start><return>"{0}-{1}".format(yrEnd[0:2] yrEnd[2:4])<block_end><return><none><block_end>@property<def_stmt>htmlUrl self# main filing document <block_start>htmlDocElt=XmlUtil.descendant(self self.edgr "xbrlFile" attrName=self.edgrSequence attrValue="1")<if_stmt>htmlDocElt<is><not><none><block_start><return>htmlDocElt.get(self.edgrUrl)<block_end><return><none><block_end>@property<def_stmt>url self<block_start><try_stmt><block_start><return>self._url<block_end><except_stmt>AttributeError<block_start>self._url=<none><for_stmt>instDocElt XmlUtil.descendants(self self.edgr "xbrlFile")<block_start><if_stmt>instDocElt.get(self.edgrType).endswith(".INS")<or>instDocElt.get(self.edgrInlineXBRL)<eq>"true"<block_start>self._url=instDocElt.get(self.edgrUrl)<line_sep><break><block_end><block_end><return>self._url<block_end><block_end>@property<def_stmt>enclosureUrl self<block_start><return>XmlUtil.childAttr(self <none> "enclosure" "url")<block_end>@property<def_stmt>zippedUrl self<block_start>enclosure=XmlUtil.childAttr(self <none> "enclosure" "url")<if_stmt>enclosure# modify url to use zip file <block_start>_path,sep,file=(self.url<or>"").rpartition("/")<line_sep># return path + sep + self.accessionNumber + "-xbrl.zip" + sep + file <return>enclosure+sep+file<block_end><else_stmt># no zipped enclosure, just use unzipped file <block_start><return>self.url<block_end><block_end>@property<def_stmt>htmURLs self<block_start><try_stmt><block_start><return>self._htmURLs<block_end><except_stmt>AttributeError<block_start>self._htmURLs=[(instDocElt.get(self.edgrDescription) instDocElt.get(self.edgrUrl))<for>instDocElt XmlUtil.descendants(self self.edgr "xbrlFile")<if>instDocElt.get(self.edgrFile).endswith(".htm")]<line_sep><return>self._htmURLs<block_end><block_end>@property<def_stmt>primaryDocumentURL self<block_start><try_stmt><block_start><return>self._primaryDocumentURL<block_end><except_stmt>AttributeError<block_start>formType=self.formType<line_sep>self._primaryDocumentURL=<none><for_stmt>instDocElt XmlUtil.descendants(self self.edgr "xbrlFile")<block_start><if_stmt>instDocElt.get(self.edgrType)<eq>formType<block_start>self._primaryDocumentURL=instDocElt.get(self.edgrUrl)<line_sep><break><block_end><block_end><return>self._primaryDocumentURL<block_end><block_end><def_stmt>setResults self modelXbrl<block_start>self.results=[]<line_sep>self.assertionUnsuccessful=<false><line_sep># put error codes first, sorted, then assertion result (dict's) self.status="pass"<for_stmt>error modelXbrl.errors<block_start><if_stmt>isinstance(error dict)# assertion results <block_start>self.assertions=error<for_stmt>countSuccessful,countNotsuccessful 
error.items()<block_start><if_stmt>countNotsuccessful<g>0<block_start>self.assertionUnsuccessful=<true><line_sep>self.status="unsuccessful"<block_end><block_end><block_end><else_stmt># error code results <block_start>self.results.append(error)<line_sep>self.status="fail"# error code <block_end><block_end>self.results.sort()<block_end>@property<def_stmt>propertyView self<block_start><return>(("CIK" self.cikNumber) ("company" self.companyName) ("published" self.pubDate) ("form type" self.formType) ("filing date" self.filingDate) ("period" self.period) ("year end" self.fiscalYearEnd) ("status" self.status) ("instance" os.path.basename(self.url)) )<block_end><def_stmt>__repr__ self<block_start><return>("rssItem[{0}]{1})".format(self.objectId() self.propertyView))<block_end><block_end>
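# --- illustrative sketch (not part of the original module) ---
# zippedUrl above keeps the instance file name but re-roots it onto the
# filing's zip enclosure URL. The URLs below are invented purely to show the
# string manipulation; real EDGAR feed URLs will differ.
if __name__ == '__main__':
    url = "https://example.test/Archives/0000000000-21-000001/acme-20201231.xml"
    enclosure = "https://example.test/Archives/0000000000-21-000001/0000000000-21-000001-xbrl.zip"
    _path, sep, file_name = url.rpartition("/")
    print(enclosure + sep + file_name)
    # -> https://example.test/.../0000000000-21-000001-xbrl.zip/acme-20201231.xml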
# # Copyright (c) 2019, Neptune Labs Sp. z o.o. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # <import_stmt>re<class_stmt>GPUCardIndicesProvider(object)<block_start><def_stmt>__init__ self cuda_visible_devices gpu_card_count<block_start>self.__cuda_visible_devices=cuda_visible_devices<line_sep>self.__gpu_card_count=gpu_card_count<line_sep>self.__cuda_visible_devices_regex=r"^-?\d+(,-?\d+)*$"<block_end><def_stmt>get self<block_start><if_stmt>self.__is_cuda_visible_devices_correct()<block_start><return>self.__gpu_card_indices_from_cuda_visible_devices()<block_end><else_stmt><block_start><return>list(range(self.__gpu_card_count))<block_end><block_end><def_stmt>__is_cuda_visible_devices_correct self<block_start><return>self.__cuda_visible_devices<is><not><none><and>re.match(self.__cuda_visible_devices_regex self.__cuda_visible_devices)<block_end><def_stmt>__gpu_card_indices_from_cuda_visible_devices self<block_start>correct_indices=[]<line_sep># According to CUDA Toolkit specification. # https://docs.nvidia.com/cuda/cuda-c-programming-guide/index.html#env-vars <for_stmt>gpu_index_str self.__cuda_visible_devices.split(",")<block_start>gpu_index=int(gpu_index_str)<if_stmt>0<le>gpu_index<l>self.__gpu_card_count<block_start>correct_indices.append(gpu_index)<block_end><else_stmt><block_start><break><block_end><block_end><return>list(set(correct_indices))<block_end><block_end>
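# --- illustrative usage sketch (not part of the original module) ---
# Shows how GPUCardIndicesProvider (defined above) interprets
# CUDA_VISIBLE_DEVICES: indices are honoured up to the first out-of-range
# entry, and a malformed value falls back to all cards. All values below are
# invented for illustration.
if __name__ == '__main__':
    # both indices are valid for a 4-card machine
    print(GPUCardIndicesProvider('0,2', gpu_card_count=4).get())    # [0, 2]
    # '7' is out of range, so it and everything after it is ignored
    print(GPUCardIndicesProvider('1,7,2', gpu_card_count=4).get())  # [1]
    # malformed value -> fall back to every card index
    print(GPUCardIndicesProvider('gpu0', gpu_card_count=2).get())   # [0, 1]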
<import_stmt>json<import_stmt>tempfile<import_from_stmt>contextlib contextmanager<import_from_stmt>functools partial<import_from_stmt>typing List<import_from_stmt>unittest mock<import_stmt>pytest<import_from_stmt>detect_secrets.core baseline<import_from_stmt>detect_secrets.core.secrets_collection SecretsCollection<import_from_stmt>detect_secrets.pre_commit_hook main<import_from_stmt>detect_secrets.settings transient_settings<import_from_stmt>testing.mocks disable_gibberish_filter<line_sep>@pytest.fixture(autouse=<true>)<def_stmt>configure_settings <block_start><with_stmt>transient_settings({'plugins_used':[{'name':'Base64HighEntropyString' 'limit':4.5}] })<block_start><yield><block_end><block_end><def_stmt>test_file_with_secrets <block_start>assert_commit_blocked(['test_data/files/file_with_secrets.py'])<block_end><def_stmt>test_file_with_no_secrets <block_start>assert_commit_succeeds(['test_data/files/file_with_no_secrets.py'])<block_end><def_stmt>test_quit_early_if_bad_baseline <block_start><with_stmt>pytest.raises(SystemExit)<block_start>main(['test_data/files/file_with_secrets.py' '--baseline' 'does-not-exist'])<block_end><block_end><def_stmt>test_quit_if_baseline_is_changed_but_not_staged <block_start><with_stmt>mock.patch('detect_secrets.pre_commit_hook.raise_exception_if_baseline_file_is_unstaged' )<as>m<block_start>m.side_effect=ValueError<line_sep>assert_commit_blocked(['test_data/files/file_with_no_secrets.py' '--baseline' '.secrets.baseline' ])<block_end><block_end><def_stmt>test_baseline_filters_out_known_secrets <block_start>secrets=SecretsCollection()<line_sep>secrets.scan_file('test_data/each_secret.py')<assert_stmt>secrets<with_stmt>disable_gibberish_filter()<block_start><with_stmt>tempfile.NamedTemporaryFile()<as>f<block_start>baseline.save_to_file(secrets f.name)<line_sep>f.seek(0)<line_sep># This succeeds, because all the secrets are known. assert_commit_succeeds(['test_data/each_secret.py' '--baseline' f.name ])<block_end># Remove one arbitrary secret, so that it won't be the full set. secrets.data['test_data/each_secret.py'].pop()<with_stmt>tempfile.NamedTemporaryFile()<as>f<block_start>baseline.save_to_file(secrets f.name)<line_sep>f.seek(0)<line_sep># Test that it isn't the case that a baseline is provided, and everything passes. assert_commit_blocked(['test_data/each_secret.py' '--baseline' f.name ])<block_end><block_end><block_end><class_stmt>TestModifiesBaselineFromVersionChange<block_start>FILENAME='test_data/files/file_with_secrets.py'<def_stmt>test_success self<block_start><with_stmt>self.get_baseline_file()<as>f<block_start>assert_commit_blocked_with_diff_exit_code([# We use file_with_no_secrets so that we can be certain that the commit is blocked # due to the version change only. 
'test_data/files/file_with_no_secrets.py' '--baseline' f.name ])<block_end><block_end><def_stmt>test_maintains_labelled_data self<block_start><def_stmt>label_secret secrets<block_start>list(secrets[self.FILENAME])[0].is_secret=<true><line_sep><return>baseline.format_for_output(secrets)<block_end><with_stmt>self.get_baseline_file(formatter=label_secret)<as>f<block_start>assert_commit_blocked_with_diff_exit_code(['test_data/files/file_with_no_secrets.py' '--baseline' f.name ])<line_sep>f.seek(0)<line_sep>data=json.loads(f.read())<assert_stmt>data['results'][self.FILENAME][0]['is_secret']<block_end><block_end><def_stmt>test_maintains_slim_mode self<block_start><with_stmt>self.get_baseline_file(formatter=partial(baseline.format_for_output is_slim_mode=<true>) )<as>f<block_start>assert_commit_blocked_with_diff_exit_code(['test_data/files/file_with_no_secrets.py' '--baseline' f.name ])<line_sep>f.seek(0)<assert_stmt>b'line_number'<not><in>f.read()<block_end><block_end>@contextmanager<def_stmt>get_baseline_file self formatter=baseline.format_for_output<block_start>secrets=SecretsCollection()<line_sep>secrets.scan_file(self.FILENAME)<with_stmt>tempfile.NamedTemporaryFile()<as>f<block_start><with_stmt>mock.patch('detect_secrets.core.baseline.VERSION' '0.0.1')<block_start>data=formatter(secrets)<block_end># Simulating old version data['plugins_used'][0]['base64_limit']=data['plugins_used'][0].pop('limit')<line_sep>baseline.save_to_file(data f.name)<line_sep><yield>f<block_end><block_end><block_end><class_stmt>TestLineNumberChanges<block_start>FILENAME='test_data/files/file_with_secrets.py'<def_stmt>test_modifies_baseline self modified_baseline<block_start><with_stmt>tempfile.NamedTemporaryFile()<as>f<block_start>baseline.save_to_file(modified_baseline f.name)<line_sep>assert_commit_blocked_with_diff_exit_code([self.FILENAME '--baseline' f.name ])<block_end><block_end><def_stmt>test_does_not_modify_slim_baseline self modified_baseline<block_start><with_stmt>tempfile.NamedTemporaryFile()<as>f<block_start>baseline.save_to_file(baseline.format_for_output(modified_baseline is_slim_mode=<true>) f.name )<line_sep>assert_commit_succeeds([self.FILENAME '--baseline' f.name ])<block_end><block_end>@pytest.fixture<def_stmt>modified_baseline self<block_start>secrets=SecretsCollection()<line_sep>secrets.scan_file(self.FILENAME)<for_stmt>_,secret secrets<block_start>secret.line_number<augadd>1<block_end><yield>secrets<block_end><block_end><def_stmt>assert_commit_succeeds command:List[str]<block_start><assert_stmt>main(command)<eq>0<block_end><def_stmt>assert_commit_blocked command:List[str]<block_start><assert_stmt>main(command)<eq>1<block_end><def_stmt>assert_commit_blocked_with_diff_exit_code command:List[str]<block_start><assert_stmt>main(command)<eq>3<block_end>
<import_from_stmt>numpy arange searchsorted array eye ones<import_from_stmt>numpy.linalg norm# type: ignore <import_from_stmt>pyNastran.bdf.field_writer_8 print_card_8<import_from_stmt>pyNastran.bdf.bdf_interface.assign_type integer integer_or_blank<import_from_stmt>pyNastran.dev.bdf_vectorized.cards.elements.solid.chexa8 quad_area_centroid volume8<import_from_stmt>pyNastran.dev.bdf_vectorized.cards.elements.solid.solid_element SolidElement<class_stmt>CHEXA20(SolidElement)<block_start>type='CHEXA20'<line_sep>nnodes=20<def_stmt>__init__ self model<block_start>""" Defines the CHEXA object. Parameters ---------- model : BDF the BDF object """<line_sep>SolidElement.__init__(self model)<block_end><def_stmt>add_card self card comment=''<block_start>i=self.i<line_sep>eid=integer(card 1 'element_id')<if_stmt>comment<block_start>self.set_comment(eid comment)<block_end>#: Element ID self.element_id[i]=eid<line_sep>#: Property ID self.property_id[i]=integer(card 2 'property_id')<line_sep>#: Node IDs nids=array([integer(card 3 'node_id_1') integer(card 4 'node_id_2') integer(card 5 'node_id_3') integer(card 6 'node_id_4') integer(card 7 'node_id_5') integer(card 8 'node_id_6') integer(card 9 'node_id_7') integer(card 10 'node_id_8') integer_or_blank(card 11 'node_id_9' 0) integer_or_blank(card 12 'node_id_10' 0) integer_or_blank(card 13 'node_id_11' 0) integer_or_blank(card 14 'node_id_12' 0) integer_or_blank(card 15 'node_id_13' 0) integer_or_blank(card 16 'node_id_14' 0) integer_or_blank(card 17 'node_id_15' 0) integer_or_blank(card 18 'node_id_16' 0) integer_or_blank(card 19 'node_id_17' 0) integer_or_blank(card 20 'node_id_18' 0) integer_or_blank(card 21 'node_id_19' 0) integer_or_blank(card 22 'node_id_20' 0)] dtype='int32')<line_sep>self.node_ids[i :]=nids<assert_stmt>len(card)<le>23 'len(CHEXA20 card) = %i\ncard=%s'%(len(card) card)<line_sep>self.i<augadd>1<block_end><def_stmt>build self<block_start><if_stmt>self.n<block_start>i=self.element_id.argsort()<line_sep>self.element_id=self.element_id[i]<line_sep>self.property_id=self.property_id[i]<line_sep>self.node_ids=self.node_ids[i :]<line_sep>self._cards=[]<block_end><else_stmt><block_start>self.element_id=array([] dtype='int32')<line_sep>self.property_id=array([] dtype='int32')<block_end><block_end><def_stmt>update self maps<block_start>""" maps = { 'node_id' : nid_map, 'property' : pid_map, } """<if_stmt>self.n<block_start>eid_map=maps['element']<line_sep>nid_map=maps['node']<line_sep>pid_map=maps['property']<for_stmt>i,(eid pid nids) enumerate(zip(self.element_id self.property_id self.node_ids))<block_start>print(self.print_card(i))<line_sep>self.element_id[i]=eid_map[eid]<line_sep>self.property_id[i]=pid_map[pid]<line_sep>self.node_ids[i 0]=nid_map[nids[0]]<line_sep>self.node_ids[i 1]=nid_map[nids[1]]<line_sep>self.node_ids[i 2]=nid_map[nids[2]]<line_sep>self.node_ids[i 3]=nid_map[nids[3]]<line_sep>self.node_ids[i 4]=nid_map[nids[4]]<line_sep>self.node_ids[i 5]=nid_map[nids[5]]<line_sep>self.node_ids[i 6]=nid_map[nids[6]]<line_sep>self.node_ids[i 7]=nid_map[nids[7]]<line_sep>self.node_ids[i 8]=nid_map[nids[8]]<line_sep>self.node_ids[i 9]=nid_map[nids[9]]<line_sep>self.node_ids[i 10]=nid_map[nids[10]]<line_sep>self.node_ids[i 11]=nid_map[nids[11]]<line_sep>self.node_ids[i 12]=nid_map[nids[12]]<line_sep>self.node_ids[i 13]=nid_map[nids[13]]<line_sep>self.node_ids[i 14]=nid_map[nids[14]]<line_sep>self.node_ids[i 15]=nid_map[nids[15]]<line_sep>self.node_ids[i 16]=nid_map[nids[16]]<line_sep>self.node_ids[i 
17]=nid_map[nids[17]]<line_sep>self.node_ids[i 18]=nid_map[nids[18]]<line_sep>self.node_ids[i 19]=nid_map[nids[19]]<block_end><block_end><block_end><def_stmt>get_mass_matrix self i model positions index0s is_lumped=<true><block_start>nnodes=8<line_sep>ndof=3<times>nnodes<line_sep>pid=self.property_id[i]<line_sep>rho=self.model.elements.properties_solid.psolid.get_density_by_property_id(pid)[0]<line_sep>n0,n1,n2,n3,n4,n5,n6,n7=self.node_ids[i :]<line_sep>V=volume8(positions[self.node_ids[i 0]] positions[self.node_ids[i 1]] positions[self.node_ids[i 2]] positions[self.node_ids[i 3]] positions[self.node_ids[i 4]] positions[self.node_ids[i 5]] positions[self.node_ids[i 6]] positions[self.node_ids[i 7]] )<line_sep>mass=rho<times>V<if_stmt>is_lumped<block_start>mi=mass/4.<line_sep>nnodes=4<line_sep>M=eye(ndof dtype='float32')<block_end><else_stmt><block_start>mi=mass/20.<line_sep>M=ones((ndof ndof) dtype='float32')<for_stmt>i range(nnodes)<block_start>j=i<times>3<line_sep>M[j:j+3 j:j+3]=2.<block_end><block_end>M<augmul>mi<line_sep>dofs,nijv=self.get_dofs_nijv(index0s n0 n1 n2 n3 n4 n5 n6 n7)<line_sep><return>M dofs nijv<block_end><def_stmt>get_stiffness_matrix self i model positions index0s<block_start><return>K dofs nijv<block_end><def_stmt>get_dofs_nijv self index0s n0 n1 n2 n3 n4 n5 n6 n7<block_start>i0=index0s[n0]<line_sep>i1=index0s[n1]<line_sep>i2=index0s[n2]<line_sep>i3=index0s[n3]<line_sep>i4=index0s[n4]<line_sep>i5=index0s[n5]<line_sep>i6=index0s[n6]<line_sep>i7=index0s[n7]<line_sep>dofs=array([i0 i0+1 i0+2 i1 i1+1 i1+2 i2 i2+1 i2+2 i3 i3+1 i3+2 i4 i4+1 i4+2 i5 i5+1 i5+2 i6 i6+1 i6+2 i7 i7+1 i7+2 ] 'int32')<line_sep>nijv=[# translation (n0 1) (n0 2) (n0 3) (n1 1) (n1 2) (n1 3) (n2 1) (n2 2) (n2 3) (n3 1) (n3 2) (n3 3) (n4 1) (n4 2) (n4 3) (n5 1) (n5 2) (n5 3) (n6 1) (n6 2) (n6 3) (n7 1) (n7 2) (n7 3) ]<line_sep><return>dofs nijv<block_end><def_stmt>_verify self xref=<true><block_start>eid=self.eid<line_sep>pid=self.Pid()<line_sep>nids=self.node_ids<assert_stmt>isinstance(eid int)<assert_stmt>isinstance(pid int)<for_stmt>i,nid enumerate(nids)<block_start><assert_stmt>isinstance(nid int) 'nid%i is not an integer; nid=%s'%(i nid)<block_end><if_stmt>xref<block_start>c=self.centroid()<line_sep>v=self.volume()<assert_stmt>isinstance(v float)<for_stmt>i range(3)<block_start><assert_stmt>isinstance(c[i] float)<block_end><block_end><block_end><def_stmt>get_node_indicies self i=<none><block_start><if_stmt>i<is><none><block_start>i1=self.model.grid.get_node_index_by_node_id(self.node_ids[: 0])<line_sep>i2=self.model.grid.get_node_index_by_node_id(self.node_ids[: 1])<line_sep>i3=self.model.grid.get_node_index_by_node_id(self.node_ids[: 2])<line_sep>i4=self.model.grid.get_node_index_by_node_id(self.node_ids[: 3])<line_sep>i5=self.model.grid.get_node_index_by_node_id(self.node_ids[: 4])<line_sep>i6=self.model.grid.get_node_index_by_node_id(self.node_ids[: 5])<line_sep>i7=self.model.grid.get_node_index_by_node_id(self.node_ids[: 6])<line_sep>i8=self.model.grid.get_node_index_by_node_id(self.node_ids[: 7])<block_end><else_stmt><block_start>i1=self.model.grid.get_node_index_by_node_id(self.node_ids[i 0])<line_sep>i2=self.model.grid.get_node_index_by_node_id(self.node_ids[i 1])<line_sep>i3=self.model.grid.get_node_index_by_node_id(self.node_ids[i 2])<line_sep>i4=self.model.grid.get_node_index_by_node_id(self.node_ids[i 3])<line_sep>i5=self.model.grid.get_node_index_by_node_id(self.node_ids[i 4])<line_sep>i6=self.model.grid.get_node_index_by_node_id(self.node_ids[i 
5])<line_sep>i7=self.model.grid.get_node_index_by_node_id(self.node_ids[i 6])<line_sep>i8=self.model.grid.get_node_index_by_node_id(self.node_ids[i 7])<block_end><return>i1 i2 i3 i4 i5 i6 i7 i8<block_end><def_stmt>_get_node_locations_by_index self i xyz_cid0<block_start>""" Parameters ---------- i : (nnodes, ) int ndarray; None -> all node IDs xyz_cid0 : (nnodes, 3) float ndarray; default=None -> calculate the GRIDs in CORD2R=0 """<line_sep>grid=self.model.grid<line_sep>get_node_index_by_node_id=self.model.grid.get_node_index_by_node_id<line_sep>node_ids=self.node_ids<line_sep>msg=', which is required by %s'%self.type<line_sep>i1,i2,i3,i4,i5,i6,i7,i8=self.get_node_indicies(i)<line_sep>n1=xyz_cid0[i1 :]<line_sep>n2=xyz_cid0[i2 :]<line_sep>n3=xyz_cid0[i3 :]<line_sep>n4=xyz_cid0[i4 :]<line_sep>n5=xyz_cid0[i5 :]<line_sep>n6=xyz_cid0[i6 :]<line_sep>n7=xyz_cid0[i7 :]<line_sep>n8=xyz_cid0[i8 :]<line_sep><return>n1 n2 n3 n4 n5 n6 n7 n8<block_end><def_stmt>get_volume_by_element_id self element_id=<none> xyz_cid0=<none> total=<false><block_start>""" Gets the volume for one or more elements. Parameters ---------- element_id : (nelements, ) int ndarray; default=None the elements to consider xyz_cid0 : (nnodes, 3) float ndarray; default=None -> calculate the GRIDs in CORD2R=0 total : bool; default=False should the volume be summed Notes ----- Volume for a CHEXA is the average area of two opposing faces times the length between the centroids of those points """<line_sep>nodes=self._get_node_locations_by_element_id(element_id xyz_cid0)<line_sep>n1,n2,n3,n4,n5,n6,n7,n8=nodes<line_sep>volume=volume8(n1 n2 n3 n4 n5 n6 n7 n8)<if_stmt>total<block_start>volume=abs(volume).sum()<block_end><else_stmt><block_start>volume=abs(volume)<block_end><return>volume<block_end><def_stmt>get_centroid_volume self element_id=<none> xyz_cid0=<none> total=<false><block_start>""" Gets the centroid and volume for one or more elements. Parameters ---------- element_id : (nelements, ) int ndarray; default=None the elements to consider xyz_cid0 : (nnodes, 3) float ndarray; default=None -> calculate the GRIDs in CORD2R=0 total : bool; default=False should the volume be summed; centroid be averaged .. seealso:: CHEXA20.get_volume_by_element_id() and CHEXA20.get_centroid_by_element_id() for more information. """<line_sep>nodes=self._get_node_locations_by_element_id(element_id xyz_cid0)<line_sep>n1,n2,n3,n4,n5,n6,n7,n8=nodes<line_sep>(A1 c1)=quad_area_centroid(n1 n2 n3 n4)<line_sep>(A2 c2)=quad_area_centroid(n5 n6 n7 n8)<line_sep>centroid=(c1<times>A1+c2<times>A2)/(A1+A2)<line_sep>volume=(A1+A2)/2.<times>norm(c1-c2 axis=1)<if_stmt>total<block_start>centroid=centroid.mean()<line_sep>volume=abs(volume).sum()<block_end><else_stmt><block_start>volume=abs(volume)<block_end><assert_stmt>volume.min()<g>0.0 'volume.min() = %f'%volume.min()<line_sep><return>centroid volume<block_end><def_stmt>get_centroid_by_element_id self element_id=<none> xyz_cid0=<none> total=<false><block_start>""" Gets the centroid for one or more elements. 
Parameters ---------- element_id : (nelements, ) int ndarray; default=None the elements to consider xyz_cid0 : (nnodes, 3) float ndarray; default=None -> calculate the GRIDs in CORD2R=0 total : bool; default=False should the centroid be averaged """<line_sep>nodes=self._get_node_locations_by_element_id(element_id xyz_cid0)<line_sep>n1,n2,n3,n4,n5,n6,n7,n8=nodes<line_sep>(A1 c1)=quad_area_centroid(n1 n2 n3 n4)<line_sep>(A2 c2)=quad_area_centroid(n5 n6 n7 n8)<line_sep>centroid=(c1<times>A1+c2<times>A2)/(A1+A2)<if_stmt>total<block_start>centroid=centroid.mean(axis=0)<block_end><return>centroid<block_end><def_stmt>get_face_nodes self nid nid_opposite<block_start><raise>NotImplementedError()<line_sep>#nids = self.node_ids[:4] #indx = nids.index(nid_opposite) #nids.pop(indx) #return nids <block_end><def_stmt>write_card self bdf_file size=8 element_id=<none><block_start><if_stmt>self.n<block_start><if_stmt>element_id<is><none><block_start>i=arange(self.n)<block_end><else_stmt><block_start>i=searchsorted(self.element_id element_id)<block_end><for_stmt>(eid pid n) zip(self.element_id[i] self.property_id[i] self.node_ids[i])<block_start><if_stmt>eid<in>self._comments<block_start>bdf_file.write(self._comments[eid])<block_end>n=[ni<if>ni<ne>0<else><none><for>ni n]<line_sep>card=['CHEXA' eid pid n[0] n[1] n[2] n[3] n[4] n[5] n[6] n[7] n[8] n[9] n[10] n[11] n[12] n[13] n[14] n[15] n[16] n[17] n[18] n[19]]<line_sep>bdf_file.write(print_card_8(card))<block_end><block_end><block_end><block_end>
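# --- illustrative sketch (not part of the original module) ---
# The centroid/volume docstrings above describe the CHEXA approximation:
# average the areas of two opposing quad faces and multiply by the distance
# between their centroids. The helper below re-derives that formula with
# plain NumPy for a unit cube; it is only a sketch, not the library's own
# quad_area_centroid()/volume8() implementation.
import numpy as np


def _hexa_volume_sketch(bottom, top):
    """Approximate hexa volume from two opposing 4-node faces."""
    def quad_area_and_centroid(quad):
        p1, p2, p3, p4 = quad
        # split the quad into two triangles and sum their areas
        a1 = 0.5 * np.linalg.norm(np.cross(p2 - p1, p3 - p1))
        a2 = 0.5 * np.linalg.norm(np.cross(p3 - p1, p4 - p1))
        return a1 + a2, quad.mean(axis=0)

    area1, c1 = quad_area_and_centroid(bottom)
    area2, c2 = quad_area_and_centroid(top)
    return 0.5 * (area1 + area2) * np.linalg.norm(c1 - c2)


if __name__ == '__main__':
    bottom = np.array([[0., 0., 0.], [1., 0., 0.], [1., 1., 0.], [0., 1., 0.]])
    top = bottom + np.array([0., 0., 1.])
    print(_hexa_volume_sketch(bottom, top))  # 1.0 for the unit cube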
<import_stmt>queue<import_stmt>struct<import_from_stmt>smartmicro.Protocols.udt.udtUatResponseV1 UATv1Response<line_sep># from smartmicro.Protocols.udt.udtUatResponseV2 import UATv2Response # from smartmicro.Protocols.udt.udtUatResponseV3 import UATv3Response <import_from_stmt>smartmicro.Protocols.udt.udtUatResponseV4 UATv4Response<import_from_stmt>smartmicro.Services.basicCanServices.canService CanIDService<class_stmt>uatResponseService(CanIDService)# ---------------------------------------------------------------------------------------------------------------- # # function: initialization # # ---------------------------------------------------------------------------------------------------------------- # <block_start><def_stmt>__init__ self<block_start>""" The function provides all necessary variables and instances to deal with the udt sub-service uat responses. """<line_sep># init super class CanIDService.__init__(self)<line_sep># provide receive queue self.recvQueue=queue.Queue()<line_sep># provide decode dictionary self.decDict=dict()<line_sep># provide udt identifier referring to uat response service self.uatRespIdList=[17000 17001 17002 17003 17004 17005 17006 17007 17008 17009 17010 17011 17012 17013 17014 17015 17016 17017 17018]<line_sep># set decode functions self.__regDecodeFunctions()<block_end># ---------------------------------------------------------------------------------------------------------------- # # function: __regDecodeFunctions # # ---------------------------------------------------------------------------------------------------------------- # <def_stmt>__regDecodeFunctions self<block_start>""" The function registers all decode functions into one dictionary. Returns ------- """<line_sep># register decode functions self.decDict["2"]=UATv1Response.decode<line_sep># self.decDict["3"] = UATv2Response.decode # self.decDict["4"] = UATv3Response.decode self.decDict["5"]=UATv4Response.decode<block_end># ---------------------------------------------------------------------------------------------------------------- # # function: getUdtIdentifier # # ---------------------------------------------------------------------------------------------------------------- # <def_stmt>getUdtIdentifier self<block_start>""" The function returns a list of used udt identifier for this response service. 
Returns ------- uatRespIdList : list list of used uat response identifier """<line_sep><return>self.uatRespIdList<block_end># ---------------------------------------------------------------------------------------------------------------- # # function: getMessage # # ---------------------------------------------------------------------------------------------------------------- # <def_stmt>getMessage self timeout=<none><block_start>""" The function decodes received uat responses Parameters ---------- timeout : integer timeout in [s] Returns ------- """<line_sep># wait for header header=self.__waitForHeader(timeout)<line_sep># decode uat response version msgList,respVersion=self.__uatVersionCtrl(header timeout)<line_sep># decode uat response decResp=self.decDict[str(respVersion)](msgList)<line_sep><return>decResp<block_end># ---------------------------------------------------------------------------------------------------------------- # # function: __waitForHeader # # ---------------------------------------------------------------------------------------------------------------- # <def_stmt>__waitForHeader self timeout=<none><block_start>""" The function waits for the header of the response. Parameters ---------- timeout : integer timeout in [s] Returns ------- header : bytearray header message of the response """<line_sep># init default udt index udtIndex=0<line_sep># set default header header=bytearray(8)<line_sep># run as long as header is not found <while_stmt>udtIndex<ne>17000# get header from queue <block_start>header=self.recvQueue.get(block=<true> timeout=timeout)['data']<line_sep># extract udt index udtIndex=struct.unpack('<H' header[0:2])[0]<block_end><return>header<block_end># ---------------------------------------------------------------------------------------------------------------- # # function: __waitForHeader # # ---------------------------------------------------------------------------------------------------------------- # <def_stmt>__uatVersionCtrl self header timeout=<none><block_start>""" The function decodes the corresponding uat version for further response decoding. Additional the corresponding messages for the response will be collected. 
Parameters ---------- header : bytearray header message used to determine next steps timeout : integer timeout in [s] Returns ------- msgList : list list of bytearrays uatRespVersion : integer current version of uat response """<line_sep># decode uat response version uatRespVersion=struct.unpack("<B" header[2:3])[0]<line_sep># UDT_UATv1Response <if_stmt>uatRespVersion<eq>2# repetition header plus 3 data packages <block_start>remaining_datapackages=4<block_end># UDT_UATv2Response # elif uatRespVersion == 3: # # repetition header plus 4 data packages # remaining_datapackages = 6 # # UDT_UATv3Response # elif uatRespVersion == 4: # # repetition header plus 7 data packages # remaining_datapackages = 8 # UDT_UATv4Response <elif_stmt>uatRespVersion<eq>5<block_start>numberOfInstructions=header[5]<line_sep># (Number of instructions * 3) data packages remaining_datapackages=numberOfInstructions<times>3<block_end><else_stmt><block_start><raise>TypeError("unsupported UDT-UAT response index received")<block_end># provide list of response messages msgList=[header]<for_stmt>nsgIdx range(0 remaining_datapackages)<block_start>msgList.append(self.recvQueue.get(block=<true> timeout=timeout)['data'])<block_end><return>msgList uatRespVersion<block_end># ---------------------------------------------------------------------------------------------------------------- # # function: clearQueue # # ---------------------------------------------------------------------------------------------------------------- # <def_stmt>clearQueue self<block_start>""" Flushes the recvQueue. Returns ------- None """<while_stmt>self.isEmpty()<is><false><block_start>self.getMessage()<block_end><block_end><block_end>
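To make the byte layout assumed by __waitForHeader and __uatVersionCtrl explicit (a little-endian UDT index in bytes 0-1, the response version in byte 2 and, for UATv4 responses, the instruction count in byte 5), here is a standalone sketch with invented field values; it only mirrors the unpacking done above and is not part of the service.

import struct

header = bytearray(8)
struct.pack_into('<H', header, 0, 17000)  # UDT index of the header message
struct.pack_into('<B', header, 2, 5)      # uat response version (UATv4)
header[5] = 2                             # number of instructions (example value)

udt_index = struct.unpack('<H', header[0:2])[0]   # -> 17000
version = struct.unpack('<B', header[2:3])[0]     # -> 5
remaining = header[5] * 3                         # -> 6 further data packages expected
print(udt_index, version, remaining)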
<import_stmt>torch.nn<as>nn<class_stmt>BaseSequenceGenerator(nn.Module)<block_start><def_stmt>__init__ self<block_start>super(BaseSequenceGenerator self).__init__()<block_end><def_stmt>generate_dummy_input self lr_size<block_start>""" used to compute per-step FLOPs and speed; returns random tensors that can be taken as input of <forward> """<line_sep><return><none><block_end><def_stmt>forward self *args **kwargs<block_start>""" forward pass for a single frame """<line_sep><pass><block_end><def_stmt>forward_sequence self lr_data<block_start>""" forward pass for a whole sequence (for training) """<line_sep><pass><block_end><def_stmt>infer_sequence self lr_data device<block_start>""" infer for a whole sequence (for inference) """<line_sep><pass><block_end><block_end><class_stmt>BaseSequenceDiscriminator(nn.Module)<block_start><def_stmt>__init__ self<block_start>super(BaseSequenceDiscriminator self).__init__()<block_end><def_stmt>forward self *args **kwargs<block_start>""" forward pass for a single frame """<line_sep><pass><block_end><def_stmt>forward_sequence self data args_dict<block_start>""" forward pass for a whole sequence (for training) """<line_sep><pass><block_end><block_end>
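A minimal, hypothetical subclass showing how the interface above is intended to be filled in; the single-convolution model and the frame-by-frame reshaping are placeholders invented for this sketch, not taken from the project.

import torch
import torch.nn as nn

class ToyGenerator(BaseSequenceGenerator):
    def __init__(self, in_nc=3, out_nc=3):
        super(ToyGenerator, self).__init__()
        self.conv = nn.Conv2d(in_nc, out_nc, 3, padding=1)

    def generate_dummy_input(self, lr_size):
        c, h, w = lr_size
        return torch.rand(1, c, h, w)

    def forward(self, lr):
        return self.conv(lr)

    def forward_sequence(self, lr_data):
        # lr_data: (n, t, c, h, w) -> run every frame through the same network
        n, t, c, h, w = lr_data.shape
        out = self.forward(lr_data.view(n * t, c, h, w))
        return out.view(n, t, -1, h, w)

    def infer_sequence(self, lr_data, device):
        with torch.no_grad():
            return self.forward_sequence(lr_data.to(device)).cpu()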
""" Example: Daily point data access Meteorological data provided by Meteostat (https://dev.meteostat.net) under the terms of the Creative Commons Attribution-NonCommercial 4.0 International Public License. The code is licensed under the MIT license. """<import_from_stmt>datetime datetime<import_stmt>matplotlib.pyplot<as>plt<import_from_stmt>meteostat Point Daily<line_sep># Set time period start=datetime(2018 1 1)<line_sep>end=datetime(2018 12 31)<line_sep># Create Point for Vancouver, BC vancouver=Point(49.2497 -123.1193 70)<line_sep># Get daily data for 2018 data=Daily(vancouver start end)<line_sep>data=data.fetch()<line_sep># Plot line chart including average, minimum and maximum temperature data.plot(y=['tavg' 'tmin' 'tmax'])<line_sep>plt.show()<line_sep>
<import_from_future_stmt> absolute_import<line_sep>__copyright__="Copyright 2016-2020, Netflix, Inc."<line_sep>__license__="BSD+Patent"<import_stmt>unittest<import_from_stmt>vmaf.core.feature_assembler FeatureAssembler<import_from_stmt>vmaf.core.feature_extractor VmafFeatureExtractor FeatureExtractor MomentFeatureExtractor<import_from_stmt>test.testutil set_default_576_324_videos_for_testing<class_stmt>FeatureAssemblerTest(unittest.TestCase)<block_start><def_stmt>tearDown self<block_start><if_stmt>hasattr(self 'fassembler')<block_start>self.fassembler.remove_results()<block_end><pass><block_end><def_stmt>test_get_fextractor_subclasses self<block_start>fextractor_subclasses=FeatureExtractor.get_subclasses_recursively()<line_sep>self.assertTrue(VmafFeatureExtractor<in>fextractor_subclasses)<line_sep>self.assertTrue(MomentFeatureExtractor<in>fextractor_subclasses)<block_end><def_stmt>test_feature_assembler_whole_feature self<block_start>ref_path,dis_path,asset,asset_original=set_default_576_324_videos_for_testing()<line_sep>self.fassembler=FeatureAssembler(feature_dict={'VMAF_feature':'all'} feature_option_dict=<none> assets=[asset asset_original] logger=<none> fifo_mode=<true> delete_workdir=<true> result_store=<none> optional_dict=<none> optional_dict2=<none> parallelize=<true> processes=<none> )<line_sep>self.fassembler.run()<line_sep>results=self.fassembler.results<line_sep>self.assertAlmostEqual(results[0]['VMAF_feature_vif_score'] 0.44609306249999997 places=4)<line_sep>self.assertAlmostEqual(results[0]['VMAF_feature_motion_score'] 4.0498253541666669 places=4)<line_sep>self.assertAlmostEqual(results[0]['VMAF_feature_adm2_score'] 0.9345149030293786 places=4)<line_sep>self.assertAlmostEqual(results[0]['VMAF_feature_ansnr_score'] 23.509571520833333 places=4)<line_sep>self.assertAlmostEqual(results[1]['VMAF_feature_vif_score'] 1.0 places=4)<line_sep>self.assertAlmostEqual(results[1]['VMAF_feature_motion_score'] 4.0498253541666669 places=4)<line_sep>self.assertAlmostEqual(results[1]['VMAF_feature_adm2_score'] 1.0 places=4)<line_sep>self.assertAlmostEqual(results[1]['VMAF_feature_ansnr_score'] 31.271439270833337 places=4)<block_end><def_stmt>test_feature_assembler_whole_feature_processes self<block_start>ref_path,dis_path,asset,asset_original=set_default_576_324_videos_for_testing()<line_sep>self.fassembler=FeatureAssembler(feature_dict={'VMAF_feature':'all'} feature_option_dict=<none> assets=[asset asset_original] logger=<none> fifo_mode=<true> delete_workdir=<true> result_store=<none> optional_dict=<none> optional_dict2=<none> parallelize=<true> processes=1 )<line_sep>self.fassembler.run()<line_sep>results=self.fassembler.results<line_sep>self.assertAlmostEqual(results[0]['VMAF_feature_vif_score'] 0.44609306249999997 places=4)<line_sep>self.assertAlmostEqual(results[0]['VMAF_feature_motion_score'] 4.0498253541666669 places=4)<line_sep>self.assertAlmostEqual(results[0]['VMAF_feature_adm2_score'] 0.9345149030293786 places=4)<line_sep>self.assertAlmostEqual(results[0]['VMAF_feature_ansnr_score'] 23.509571520833333 places=4)<line_sep>self.assertAlmostEqual(results[1]['VMAF_feature_vif_score'] 1.0 places=4)<line_sep>self.assertAlmostEqual(results[1]['VMAF_feature_motion_score'] 4.0498253541666669 places=4)<line_sep>self.assertAlmostEqual(results[1]['VMAF_feature_adm2_score'] 1.0 places=4)<line_sep>self.assertAlmostEqual(results[1]['VMAF_feature_ansnr_score'] 31.271439270833337 places=4)<block_end><def_stmt>test_feature_assembler_selected_atom_feature 
self<block_start>ref_path,dis_path,asset,asset_original=set_default_576_324_videos_for_testing()<line_sep>self.fassembler=FeatureAssembler(feature_dict={'VMAF_feature':['vif' 'motion']} feature_option_dict=<none> assets=[asset asset_original] logger=<none> fifo_mode=<true> delete_workdir=<true> result_store=<none> optional_dict=<none> optional_dict2=<none> parallelize=<true> )<line_sep>self.fassembler.run()<line_sep>results=self.fassembler.results<line_sep>self.assertAlmostEqual(results[0]['VMAF_feature_vif_score'] 0.44609306249999997 places=4)<line_sep>self.assertAlmostEqual(results[0]['VMAF_feature_motion_score'] 4.0498253541666669 places=4)<line_sep>self.assertAlmostEqual(results[1]['VMAF_feature_vif_score'] 1.0 places=4)<line_sep>self.assertAlmostEqual(results[1]['VMAF_feature_motion_score'] 4.0498253541666669 places=4)<with_stmt>self.assertRaises(KeyError)<block_start>results[0]['VMAF_feature_ansnr_scores']<block_end><with_stmt>self.assertRaises(KeyError)<block_start>results[0]['VMAF_feature_ansnr_score']<block_end><with_stmt>self.assertRaises(KeyError)<block_start>results[0]['VMAF_feature_adm_scores']<block_end><with_stmt>self.assertRaises(KeyError)<block_start>results[0]['VMAF_feature_adm_score']<block_end><block_end><block_end><if_stmt>__name__<eq>'__main__'<block_start>unittest.main(verbosity=2)<block_end>
<import_stmt>os<import_stmt>os.path<as>osp<import_stmt>shutil<import_stmt>tempfile<import_from_stmt>distutils.version LooseVersion<import_from_stmt>pkg_resources parse_requirements resource_filename<import_from_stmt>typing List<import_stmt>click<import_stmt>pip<import_from_stmt>mim.click get_official_package param2lowercase<import_from_stmt>mim.commands.uninstall uninstall<import_from_stmt>mim.utils DEFAULT_URL MODULE2PKG PKG2MODULE PKG2PROJECT WHEEL_URL call_command echo_success echo_warning get_installed_version get_latest_version get_package_version get_release_version get_torch_cuda_version highlighted_error is_installed is_version_equal parse_url split_package_version <line_sep>@click.command('install')@click.argument('package' type=str autocompletion=get_official_package callback=param2lowercase)@click.option('-f' '--find' 'find_url' type=str help='Url for finding package.')@click.option('--default-timeout' 'timeout' type=int default=45 help='Set the socket timeout (default 15 seconds).')@click.option('-y' '--yes' 'is_yes' is_flag=<true> help='Don’t ask for confirmation of uninstall deletions.')@click.option('--user' 'is_user_dir' is_flag=<true> help='Install to the Python user install directory')@click.option('-e' '--editable' 'is_editable' is_flag=<true> help='Install a package in editable mode.')<def_stmt>cli package:str find_url:str='' timeout:int=30 is_yes:bool=<false> is_user_dir:bool=<false> is_editable:bool=<false> <arrow><none><block_start>"""Install package. Example: \b # install latest version of mmcv-full > mim install mmcv-full # wheel # install 1.3.1 > mim install mmcv-full==1.3.1 # install master branch > mim install mmcv-full -f https://github.com/open-mmlab/mmcv.git # install latest version of mmcls > mim install mmcls # install 0.11.0 > mim install mmcls==0.11.0 # v0.11.0 # install master branch > mim install mmcls -f https://github.com/open-mmlab/mmclassification.git # install local repo > git clone https://github.com/open-mmlab/mmclassification.git > cd mmclassification > mim install . # install extension based on OpenMMLab > mim install mmcls-project -f https://github.com/xxx/mmcls-project.git """<line_sep>install(package find_url timeout is_yes=is_yes is_user_dir=is_user_dir is_editable=is_editable)<block_end><def_stmt>install package:str find_url:str='' timeout:int=15 is_yes:bool=<false> is_user_dir:bool=<false> is_editable:bool=<false><arrow><none><block_start>"""Install a package by wheel or from github. Args: package (str): The name of installed package, such as mmcls. find_url (str): Url for finding package. If finding is not provided, program will infer the find_url as much as possible. Default: ''. timeout (int): The socket timeout. Default: 15. is_yes (bool): Don’t ask for confirmation of uninstall deletions. Default: False. is_usr_dir (bool): Install to the Python user install directory for environment variables and user configuration. Default: False. is_editable (bool): Install a package in editable mode. Default: False. 
"""<line_sep>target_pkg,target_version=split_package_version(package)<line_sep># whether install from local repo <if_stmt>looks_like_path(target_pkg)<block_start><if_stmt>is_installable_dir(target_pkg)<block_start>is_install_local_repo=<true><block_end><else_stmt><block_start><raise>ValueError(highlighted_error(f'{target_pkg} is not a installable directory'))<block_end><block_end><else_stmt><block_start>is_install_local_repo=<false><block_end># whether install master branch from github is_install_master=bool(<not>target_version<and>find_url)<line_sep># get target version <if_stmt>target_pkg<in>PKG2PROJECT<block_start>latest_version=get_latest_version(target_pkg timeout)<if_stmt>target_version<block_start><if_stmt>LooseVersion(target_version)<g>LooseVersion(latest_version)<block_start>error_msg=(f'target_version=={target_version} should not be'<concat>f' greater than latest_version=={latest_version}')<line_sep><raise>ValueError(highlighted_error(error_msg))<block_end><block_end><else_stmt><block_start>target_version=latest_version<block_end><block_end># check local environment whether package existed <if_stmt>is_install_master<or>is_install_local_repo<block_start><pass><block_end><elif_stmt>is_installed(target_pkg)<and>target_version<block_start>existed_version=get_installed_version(target_pkg)<if_stmt>is_version_equal(existed_version target_version)<block_start>echo_warning(f'{target_pkg}=={existed_version} existed.')<line_sep><return><none><block_end><else_stmt><block_start><if_stmt>is_yes<block_start>uninstall(target_pkg is_yes)<block_end><else_stmt><block_start>confirm_msg=(f'{target_pkg}=={existed_version} has been '<concat>f'installed, but want to install {target_pkg}=='<concat>f'{target_version}, do you want to uninstall '<concat>f'{target_pkg}=={existed_version} and '<concat>f'install {target_pkg}=={target_version}? ')<if_stmt>click.confirm(confirm_msg)<block_start>uninstall(target_pkg <true>)<block_end><else_stmt><block_start>echo_warning(f'skip {target_pkg}')<line_sep><return><none><block_end><block_end><block_end><block_end># try to infer find_url if possible <if_stmt><not>find_url<block_start>find_url=infer_find_url(target_pkg)<block_end><if_stmt>is_install_local_repo<block_start>repo_root=osp.abspath(target_pkg)<line_sep>module_name,target_version=get_package_version(repo_root)<if_stmt><not>module_name<block_start><raise>FileNotFoundError(highlighted_error(f'version.py is missed in {repo_root}'))<block_end>target_pkg=MODULE2PKG.get(module_name module_name)<if_stmt>target_pkg<eq>'mmcv'<and>os.getenv('MMCV_WITH_OPS' '0')<eq>'1'<block_start>target_pkg='mmcv-full'<block_end>echo_success(f'installing {target_pkg} from local repo.')<line_sep>install_from_repo(repo_root package=target_pkg timeout=timeout is_yes=is_yes is_user_dir=is_user_dir is_editable=is_editable)<block_end><elif_stmt>find_url<and>find_url.find('git')<ge>0<or>is_install_master<block_start>install_from_github(target_pkg target_version find_url timeout is_yes is_user_dir is_install_master)<block_end><else_stmt># if installing from wheel failed, it will try to install package by # building from source if possible. 
<block_start><try_stmt><block_start>install_from_wheel(target_pkg target_version find_url timeout is_user_dir)<block_end><except_stmt>RuntimeError<as>error<block_start><if_stmt>target_pkg<in>PKG2PROJECT<block_start>find_url=f'{DEFAULT_URL}/{PKG2PROJECT[target_pkg]}.git'<if_stmt>target_version<block_start>target_pkg=f'{target_pkg}=={target_version}'<block_end><if_stmt>is_yes<block_start>install(target_pkg find_url timeout is_yes is_user_dir)<block_end><else_stmt><block_start>confirm_msg=(f'install {target_pkg} from wheel, but it '<concat>'failed. Do you want to build it from '<concat>'source if possible?')<if_stmt>click.confirm(confirm_msg)<block_start>install(target_pkg find_url timeout is_yes is_user_dir)<block_end><else_stmt><block_start><raise>RuntimeError(highlighted_error(f'Failed to install {target_pkg}.'))<block_end><block_end><block_end><else_stmt><block_start><raise>RuntimeError(highlighted_error(error))<block_end><block_end><block_end>echo_success(f'Successfully installed {target_pkg}.')<block_end><def_stmt>looks_like_path name:str<arrow>bool<block_start>"""Checks whether the string "looks like" a path on the filesystem. This does not check whether the target actually exists, only judge from the appearance. Args: name (str): The string to be checked. """<if_stmt>osp.sep<in>name<block_start><return><true><block_end><if_stmt>osp.altsep<is><not><none><and>osp.altsep<in>name<block_start><return><true><block_end><if_stmt>name.startswith('.')<block_start><return><true><block_end><return><false><block_end><def_stmt>is_installable_dir name:str<arrow>bool<block_start>"""Check whether path is a directory containing setup.py. Args: name (str): The string to be checked. """<line_sep>path=osp.abspath(name)<if_stmt>osp.isdir(path)<block_start>setup_py=osp.join(path 'setup.py')<line_sep><return>osp.isfile(setup_py)<block_end><else_stmt><block_start><return><false><block_end><block_end><def_stmt>infer_find_url package:str<arrow>str<block_start>"""Try to infer find_url if possible. If package is the official package, the find_url can be inferred. Args: package (str): The name of package, such as mmcls. """<line_sep>find_url=''<if_stmt>package<in>WHEEL_URL<block_start>torch_v,cuda_v=get_torch_cuda_version()<line_sep># In order to avoid builiding mmcv-full from source, we ignore the # difference among micro version because there are usually no big # changes among micro version. For example, the mmcv-full built in # pytorch 1.8.0 also works on 1.8.1 or other versions. major,minor,*_=torch_v.split('.')<line_sep>torch_v='.'.join([major minor '0'])<if_stmt>cuda_v.isdigit()<block_start>cuda_v=f'cu{cuda_v}'<block_end>find_url=WHEEL_URL[package].format(cuda_version=cuda_v torch_version=f'torch{torch_v}')<block_end><elif_stmt>package<in>PKG2PROJECT<block_start>find_url=(f'{DEFAULT_URL}/{PKG2PROJECT[package]}.git')<block_end><return>find_url<block_end><def_stmt>parse_dependencies path:str<arrow>list<block_start>"""Parse dependencies from repo/requirements/mminstall.txt. Args: path (str): Path of mminstall.txt. 
"""<def_stmt>_get_proper_version package version op<block_start>releases=get_release_version(package)<if_stmt>op<eq>'>'<block_start><for_stmt>r_v releases<block_start><if_stmt>LooseVersion(r_v)<g>LooseVersion(version)<block_start><return>r_v<block_end><block_end><else_stmt><block_start><raise>ValueError(highlighted_error(f'invalid min version of {package}'))<block_end><block_end><elif_stmt>op<eq>'<'<block_start><for_stmt>r_v releases[::-1]<block_start><if_stmt>LooseVersion(r_v)<l>LooseVersion(version)<block_start><return>r_v<block_end><block_end><else_stmt><block_start><raise>ValueError(highlighted_error(f'invalid max version of {package}'))<block_end><block_end><block_end>dependencies=[]<with_stmt>open(path 'r')<as>fr<block_start><for_stmt>requirement parse_requirements(fr)<block_start>pkg_name=requirement.project_name<line_sep>min_version=''<line_sep>max_version=''<for_stmt>op,version requirement.specs<block_start><if_stmt>op<eq>'=='<block_start>min_version=max_version=version<line_sep><break><block_end><elif_stmt>op<eq>'>='<block_start>min_version=version<block_end><elif_stmt>op<eq>'>'<block_start>min_version=_get_proper_version(pkg_name version '>')<block_end><elif_stmt>op<eq>'<='<block_start>max_version=version<block_end><elif_stmt>op<eq>'<'<block_start>max_version=_get_proper_version(pkg_name version '<')<block_end><block_end>dependencies.append([pkg_name min_version max_version])<block_end><block_end><return>dependencies<block_end><def_stmt>install_dependencies dependencies:List[List[str]] timeout:int=15 is_yes:bool=<false> is_user_dir:bool=<false><arrow><none><block_start>"""Install dependencies, such as mmcls depends on mmcv. Args: dependencies (list): The list of dependency. timeout (int): The socket timeout. Default: 15. is_yes (bool): Don’t ask for confirmation of uninstall deletions. Default: False. is_usr_dir (bool): Install to the Python user install directory for environment variables and user configuration. Default: False. """<for_stmt>target_pkg,min_v,max_v dependencies<block_start>target_version=max_v<line_sep>latest_version=get_latest_version(target_pkg timeout)<if_stmt><not>target_version<or>LooseVersion(target_version)<g>LooseVersion(latest_version)<block_start>target_version=latest_version<block_end><if_stmt>is_installed(target_pkg)<block_start>existed_version=get_installed_version(target_pkg)<if_stmt>(LooseVersion(min_v)<le>LooseVersion(existed_version)<le>LooseVersion(target_version))<block_start><continue><block_end><block_end>echo_success(f'installing dependency: {target_pkg}')<line_sep>target_pkg=f'{target_pkg}=={target_version}'<line_sep>install(target_pkg timeout=timeout is_yes=is_yes is_user_dir=is_user_dir)<block_end>echo_success('Successfully installed dependencies.')<block_end><def_stmt>install_from_repo repo_root:str * package:str='' timeout:int=15 is_yes:bool=<false> is_user_dir:bool=<false> is_editable:bool=<false><block_start>"""Install package from local repo. Args: repo_root (str): The root of repo. package (str): The name of installed package. Default: ''. timeout (int): The socket timeout. Default: 15. is_yes (bool): Don’t ask for confirmation of uninstall deletions. Default: False. is_usr_dir (bool): Install to the Python user install directory for environment variables and user configuration. Default: False. is_editable (bool): Install a package in editable mode. Default: False. 
"""<def_stmt>copy_file_to_package # rename the model_zoo.yml to model-index.yml but support both of them # for backward compatibility <block_start>filenames=['tools' 'configs' 'model_zoo.yml' 'model-index.yml']<line_sep>module_name=PKG2MODULE.get(package package)<line_sep># configs, tools and model-index.yml will be copied to package/.mim mim_root=resource_filename(module_name '.mim')<line_sep>os.makedirs(mim_root exist_ok=<true>)<for_stmt>filename filenames<block_start>src_path=osp.join(repo_root filename)<line_sep>dst_path=osp.join(mim_root filename)<if_stmt>osp.exists(src_path)<block_start><if_stmt>osp.islink(dst_path)<block_start>os.unlink(dst_path)<block_end><if_stmt>osp.isfile(src_path)<block_start>shutil.copyfile(src_path dst_path)<block_end><elif_stmt>osp.isdir(src_path)<block_start><if_stmt>osp.exists(dst_path)<block_start>shutil.rmtree(dst_path)<block_end>shutil.copytree(src_path dst_path)<block_end><block_end><block_end><block_end><def_stmt>link_file_to_package # When user installs package with editable mode, we should create # symlinks to package, which will synchronize the modified files. # Besides, rename the model_zoo.yml to model-index.yml but support both # of them for backward compatibility <block_start>filenames=['tools' 'configs' 'model_zoo.yml' 'model-index.yml']<line_sep>module_name=PKG2MODULE.get(package package)<line_sep>pkg_root=osp.join(repo_root module_name)<line_sep># configs, tools and model-index.yml will be linked to package/.mim mim_root=osp.join(pkg_root '.mim')<line_sep>os.makedirs(mim_root exist_ok=<true>)<for_stmt>filename filenames<block_start>src_path=osp.join(repo_root filename)<line_sep>dst_path=osp.join(mim_root filename)<if_stmt>osp.exists(dst_path)<block_start><continue><block_end><if_stmt>osp.exists(src_path)<block_start><if_stmt>osp.isfile(dst_path)<or>osp.islink(dst_path)<block_start>os.remove(dst_path)<block_end><elif_stmt>osp.isdir(dst_path)<block_start>shutil.rmtree(dst_path)<block_end>os.symlink(src_path dst_path)<block_end><block_end><block_end># install dependencies. For example, # install mmcls should install mmcv-full first if it is not installed or # its(mmcv) verison does not match. 
mminstall_path=osp.join(repo_root 'requirements' 'mminstall.txt')<if_stmt>osp.exists(mminstall_path)<block_start>dependencies=parse_dependencies(mminstall_path)<if_stmt>dependencies<block_start>install_dependencies(dependencies timeout is_yes is_user_dir)<block_end><block_end>third_dependencies=osp.join(repo_root 'requirements' 'build.txt')<if_stmt>osp.exists(third_dependencies)<block_start>dep_cmd=['python' '-m' 'pip' 'install' '-r' third_dependencies '--default-timeout' f'{timeout}']<if_stmt>is_user_dir<block_start>dep_cmd.append('--user')<block_end>call_command(dep_cmd)<block_end>install_cmd=['python' '-m' 'pip' 'install']<if_stmt>is_editable<block_start>install_cmd.append('-e')<block_end><else_stmt># solving issues related to out-of-tree builds # more datails at https://github.com/pypa/pip/issues/7555 <block_start><if_stmt>LooseVersion(pip.__version__)<ge>LooseVersion('21.1.1')<block_start>install_cmd.append('--use-feature=in-tree-build')<block_end><block_end>install_cmd.append(repo_root)<if_stmt>is_user_dir<block_start>install_cmd.append('--user')<block_end># The issue is caused by the import order of numpy and torch # Please refer to github.com/pytorch/pytorch/issue/37377 os.environ['MKL_SERVICE_FORCE_INTEL']='1'<if_stmt>package<in>WHEEL_URL<block_start>echo_success(f'compiling {package} with "MMCV_WITH_OPS=1"')<line_sep>os.environ['MMCV_WITH_OPS']='1'<block_end>call_command(install_cmd)<if_stmt>is_editable<block_start>link_file_to_package()<block_end><else_stmt><block_start>copy_file_to_package()<block_end><block_end><def_stmt>install_from_github package:str version:str='' find_url:str='' timeout:int=15 is_yes:bool=<false> is_user_dir:bool=<false> is_install_master:bool=<false><arrow><none><block_start>"""Install package from github. Args: package (str): The name of installed package, such as mmcls. version (str): Version of package. Default: ''. find_url (str): Url for finding package. If finding is not provided, program will infer the find_url as much as possible. Default: ''. timeout (int): The socket timeout. Default: 15. is_yes (bool): Don’t ask for confirmation of uninstall deletions. Default: False. is_usr_dir (bool): Install to the Python user install directory for environment variables and user configuration. Default: False. is_install_master (bool): Whether install master branch. If it is True, process will install master branch. If it is False, process will install the specified version. Default: False. """<line_sep>click.echo(f'installing {package} from {find_url}.')<line_sep>_,repo=parse_url(find_url)<line_sep>clone_cmd=['git' 'clone' find_url]<if_stmt><not>is_install_master<block_start>clone_cmd.extend(['-b' f'v{version}'])<block_end><with_stmt>tempfile.TemporaryDirectory()<as>temp_root<block_start>repo_root=osp.join(temp_root repo)<line_sep>clone_cmd.append(repo_root)<line_sep>call_command(clone_cmd)<line_sep>install_from_repo(repo_root package=package timeout=timeout is_yes=is_yes is_user_dir=is_user_dir)<block_end><block_end><def_stmt>install_from_wheel package:str version:str='' find_url:str='' timeout:int=15 is_user_dir:bool=<false><arrow><none><block_start>"""Install wheel from find_url. Args: package (str): The name of installed package, such as mmcls. version (str): Version of package. Default: ''. find_url (str): Url for finding package. If finding is not provided, program will infer the find_url as much as possible. Default: ''. timeout (int): The socket timeout. Default: 15. 
is_usr_dir (bool): Install to the Python user install directory for environment variables and user configuration. Default: False. """<line_sep>click.echo(f'installing {package} from wheel.')<line_sep>install_cmd=['python' '-m' 'pip' '--default-timeout' f'{timeout}' 'install']<if_stmt>version<block_start>install_cmd.append(f'{package}=={version}')<block_end><else_stmt><block_start>install_cmd.append(package)<block_end><if_stmt>find_url<block_start>install_cmd.extend(['-f' find_url])<block_end><if_stmt>is_user_dir<block_start>install_cmd.append('--user')<block_end>call_command(install_cmd)<block_end>
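As an illustration of what parse_dependencies and install_dependencies above exchange, assume a made-up requirements/mminstall.txt like the one sketched in the comments below; the version numbers are invented and only the shape of the returned triples matters.

# hypothetical requirements/mminstall.txt:
#     mmcv-full>=1.3.1,<=1.3.17
#     mmdet==2.17.0
deps = parse_dependencies('requirements/mminstall.txt')
# deps would then look like:
#     [['mmcv-full', '1.3.1', '1.3.17'],
#      ['mmdet', '2.17.0', '2.17.0']]
install_dependencies(deps, timeout=15)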
# nl80211 definitions # Copyright (c) 2014, <NAME> <<EMAIL>> # # This software may be distributed under the terms of the BSD license. # See README for more details. <import_stmt>binascii<import_stmt>struct<line_sep>nl80211_cmd={'GET_WIPHY':1 'SET_WIPHY':2 'NEW_WIPHY':3 'DEL_WIPHY':4 'GET_INTERFACE':5 'SET_INTERFACE':6 'NEW_INTERFACE':7 'DEL_INTERFACE':8 'GET_KEY':9 'SET_KEY':10 'NEW_KEY':11 'DEL_KEY':12 'GET_BEACON':13 'SET_BEACON':14 'START_AP':15 'STOP_AP':16 'GET_STATION':17 'SET_STATION':18 'NEW_STATION':19 'DEL_STATION':20 'GET_MPATH':21 'SET_MPATH':22 'NEW_MPATH':23 'DEL_MPATH':24 'SET_BSS':25 'SET_REG':26 'REQ_SET_REG':27 'GET_MESH_CONFIG':28 'SET_MESH_CONFIG':29 'SET_MGMT_EXTRA_IE[RESERVED]':30 'GET_REG':31 'GET_SCAN':32 'TRIGGER_SCAN':33 'NEW_SCAN_RESULTS':34 'SCAN_ABORTED':35 'REG_CHANGE':36 'AUTHENTICATE':37 'ASSOCIATE':38 'DEAUTHENTICATE':39 'DISASSOCIATE':40 'MICHAEL_MIC_FAILURE':41 'REG_BEACON_HINT':42 'JOIN_IBSS':43 'LEAVE_IBSS':44 'TESTMODE':45 'CONNECT':46 'ROAM':47 'DISCONNECT':48 'SET_WIPHY_NETNS':49 'GET_SURVEY':50 'NEW_SURVEY_RESULTS':51 'SET_PMKSA':52 'DEL_PMKSA':53 'FLUSH_PMKSA':54 'REMAIN_ON_CHANNEL':55 'CANCEL_REMAIN_ON_CHANNEL':56 'SET_TX_BITRATE_MASK':57 'REGISTER_FRAME':58 'FRAME':59 'FRAME_TX_STATUS':60 'SET_POWER_SAVE':61 'GET_POWER_SAVE':62 'SET_CQM':63 'NOTIFY_CQM':64 'SET_CHANNEL':65 'SET_WDS_PEER':66 'FRAME_WAIT_CANCEL':67 'JOIN_MESH':68 'LEAVE_MESH':69 'UNPROT_DEAUTHENTICATE':70 'UNPROT_DISASSOCIATE':71 'NEW_PEER_CANDIDATE':72 'GET_WOWLAN':73 'SET_WOWLAN':74 'START_SCHED_SCAN':75 'STOP_SCHED_SCAN':76 'SCHED_SCAN_RESULTS':77 'SCHED_SCAN_STOPPED':78 'SET_REKEY_OFFLOAD':79 'PMKSA_CANDIDATE':80 'TDLS_OPER':81 'TDLS_MGMT':82 'UNEXPECTED_FRAME':83 'PROBE_CLIENT':84 'REGISTER_BEACONS':85 'UNEXPECTED_4ADDR_FRAME':86 'SET_NOACK_MAP':87 'CH_SWITCH_NOTIFY':88 'START_P2P_DEVICE':89 'STOP_P2P_DEVICE':90 'CONN_FAILED':91 'SET_MCAST_RATE':92 'SET_MAC_ACL':93 'RADAR_DETECT':94 'GET_PROTOCOL_FEATURES':95 'UPDATE_FT_IES':96 'FT_EVENT':97 'CRIT_PROTOCOL_START':98 'CRIT_PROTOCOL_STOP':99 'GET_COALESCE':100 'SET_COALESCE':101 'CHANNEL_SWITCH':102 'VENDOR':103 'SET_QOS_MAP':104 }<line_sep>nl80211_attr={'WIPHY':1 'WIPHY_NAME':2 'IFINDEX':3 'IFNAME':4 'IFTYPE':5 'MAC':6 'KEY_DATA':7 'KEY_IDX':8 'KEY_CIPHER':9 'KEY_SEQ':10 'KEY_DEFAULT':11 'BEACON_INTERVAL':12 'DTIM_PERIOD':13 'BEACON_HEAD':14 'BEACON_TAIL':15 'STA_AID':16 'STA_FLAGS':17 'STA_LISTEN_INTERVAL':18 'STA_SUPPORTED_RATES':19 'STA_VLAN':20 'STA_INFO':21 'WIPHY_BANDS':22 'MNTR_FLAGS':23 'MESH_ID':24 'STA_PLINK_ACTION':25 'MPATH_NEXT_HOP':26 'MPATH_INFO':27 'BSS_CTS_PROT':28 'BSS_SHORT_PREAMBLE':29 'BSS_SHORT_SLOT_TIME':30 'HT_CAPABILITY':31 'SUPPORTED_IFTYPES':32 'REG_ALPHA2':33 'REG_RULES':34 'MESH_CONFIG':35 'BSS_BASIC_RATES':36 'WIPHY_TXQ_PARAMS':37 'WIPHY_FREQ':38 'WIPHY_CHANNEL_TYPE':39 'KEY_DEFAULT_MGMT':40 'MGMT_SUBTYPE':41 'IE':42 'MAX_NUM_SCAN_SSIDS':43 'SCAN_FREQUENCIES':44 'SCAN_SSIDS':45 'GENERATION':46 'BSS':47 'REG_INITIATOR':48 'REG_TYPE':49 'SUPPORTED_COMMANDS':50 'FRAME':51 'SSID':52 'AUTH_TYPE':53 'REASON_CODE':54 'KEY_TYPE':55 'MAX_SCAN_IE_LEN':56 'CIPHER_SUITES':57 'FREQ_BEFORE':58 'FREQ_AFTER':59 'FREQ_FIXED':60 'WIPHY_RETRY_SHORT':61 'WIPHY_RETRY_LONG':62 'WIPHY_FRAG_THRESHOLD':63 'WIPHY_RTS_THRESHOLD':64 'TIMED_OUT':65 'USE_MFP':66 'STA_FLAGS2':67 'CONTROL_PORT':68 'TESTDATA':69 'PRIVACY':70 'DISCONNECTED_BY_AP':71 'STATUS_CODE':72 'CIPHER_SUITES_PAIRWISE':73 'CIPHER_SUITE_GROUP':74 'WPA_VERSIONS':75 'AKM_SUITES':76 'REQ_IE':77 'RESP_IE':78 'PREV_BSSID':79 'KEY':80 'KEYS':81 'PID':82 '4ADDR':83 
'SURVEY_INFO':84 'PMKID':85 'MAX_NUM_PMKIDS':86 'DURATION':87 'COOKIE':88 'WIPHY_COVERAGE_CLASS':89 'TX_RATES':90 'FRAME_MATCH':91 'ACK':92 'PS_STATE':93 'CQM':94 'LOCAL_STATE_CHANGE':95 'AP_ISOLATE':96 'WIPHY_TX_POWER_SETTING':97 'WIPHY_TX_POWER_LEVEL':98 'TX_FRAME_TYPES':99 'RX_FRAME_TYPES':100 'FRAME_TYPE':101 'CONTROL_PORT_ETHERTYPE':102 'CONTROL_PORT_NO_ENCRYPT':103 'SUPPORT_IBSS_RSN':104 'WIPHY_ANTENNA_TX':105 'WIPHY_ANTENNA_RX':106 'MCAST_RATE':107 'OFFCHANNEL_TX_OK':108 'BSS_HT_OPMODE':109 'KEY_DEFAULT_TYPES':110 'MAX_REMAIN_ON_CHANNEL_DURATION':111 'MESH_SETUP':112 'WIPHY_ANTENNA_AVAIL_TX':113 'WIPHY_ANTENNA_AVAIL_RX':114 'SUPPORT_MESH_AUTH':115 'STA_PLINK_STATE':116 'WOWLAN_TRIGGERS':117 'WOWLAN_TRIGGERS_SUPPORTED':118 'SCHED_SCAN_INTERVAL':119 'INTERFACE_COMBINATIONS':120 'SOFTWARE_IFTYPES':121 'REKEY_DATA':122 'MAX_NUM_SCHED_SCAN_SSIDS':123 'MAX_SCHED_SCAN_IE_LEN':124 'SCAN_SUPP_RATES':125 'HIDDEN_SSID':126 'IE_PROBE_RESP':127 'IE_ASSOC_RESP':128 'STA_WME':129 'SUPPORT_AP_UAPSD':130 'ROAM_SUPPORT':131 'SCHED_SCAN_MATCH':132 'MAX_MATCH_SETS':133 'PMKSA_CANDIDATE':134 'TX_NO_CCK_RATE':135 'TDLS_ACTION':136 'TDLS_DIALOG_TOKEN':137 'TDLS_OPERATION':138 'TDLS_SUPPORT':139 'TDLS_EXTERNAL_SETUP':140 'DEVICE_AP_SME':141 'DONT_WAIT_FOR_ACK':142 'FEATURE_FLAGS':143 'PROBE_RESP_OFFLOAD':144 'PROBE_RESP':145 'DFS_REGION':146 'DISABLE_HT':147 'HT_CAPABILITY_MASK':148 'NOACK_MAP':149 'INACTIVITY_TIMEOUT':150 'RX_SIGNAL_DBM':151 'BG_SCAN_PERIOD':152 'WDEV':153 'USER_REG_HINT_TYPE':154 'CONN_FAILED_REASON':155 'SAE_DATA':156 'VHT_CAPABILITY':157 'SCAN_FLAGS':158 'CHANNEL_WIDTH':159 'CENTER_FREQ1':160 'CENTER_FREQ2':161 'P2P_CTWINDOW':162 'P2P_OPPPS':163 'LOCAL_MESH_POWER_MODE':164 'ACL_POLICY':165 'MAC_ADDRS':166 'MAC_ACL_MAX':167 'RADAR_EVENT':168 'EXT_CAPA':169 'EXT_CAPA_MASK':170 'STA_CAPABILITY':171 'STA_EXT_CAPABILITY':172 'PROTOCOL_FEATURES':173 'SPLIT_WIPHY_DUMP':174 'DISABLE_VHT':175 'VHT_CAPABILITY_MASK':176 'MDID':177 'IE_RIC':178 'CRIT_PROT_ID':179 'MAX_CRIT_PROT_DURATION':180 'PEER_AID':181 'COALESCE_RULE':182 'CH_SWITCH_COUNT':183 'CH_SWITCH_BLOCK_TX':184 'CSA_IES':185 'CSA_C_OFF_BEACON':186 'CSA_C_OFF_PRESP':187 'RXMGMT_FLAGS':188 'STA_SUPPORTED_CHANNELS':189 'STA_SUPPORTED_OPER_CLASSES':190 'HANDLE_DFS':191 'SUPPORT_5_MHZ':192 'SUPPORT_10_MHZ':193 'OPMODE_NOTIF':194 'VENDOR_ID':195 'VENDOR_SUBCMD':196 'VENDOR_DATA':197 'VENDOR_EVENTS':198 'QOS_MAP':199 'MAC_HINT':200 'WIPHY_FREQ_HINT':201 'MAX_AP_ASSOC_STA':202 }<def_stmt>build_nl80211_attr id val<block_start>attr=struct.pack("@HH" 4+len(val) nl80211_attr[id])+val<if_stmt>len(attr)%4<ne>0<block_start>attr<augadd>'\0'<times>(4-(len(attr)%4))<block_end><return>attr<block_end><def_stmt>build_nl80211_attr_u32 id val<block_start><return>build_nl80211_attr(id struct.pack("@I" val))<block_end><def_stmt>build_nl80211_attr_u16 id val<block_start><return>build_nl80211_attr(id struct.pack("@H" val))<block_end><def_stmt>build_nl80211_attr_u8 id val<block_start><return>build_nl80211_attr(id struct.pack("@B" val))<block_end><def_stmt>build_nl80211_attr_flag id<block_start><return>build_nl80211_attr(id '')<block_end><def_stmt>build_nl80211_attr_mac id val<block_start>addr=struct.unpack('6B' binascii.unhexlify(val.replace(':' '')))<line_sep>aval=struct.pack('<6B' *addr)<line_sep><return>build_nl80211_attr(id aval)<block_end><def_stmt>parse_nl80211_attrs msg<block_start>attrs={}<while_stmt>len(msg)<ge>4<block_start>alen,attr=struct.unpack("@HH" msg[0:4])<if_stmt>alen<l>4<block_start><raise>Exception("Too short nl80211 
attribute")<block_end>alen<augsub>4<line_sep>msg=msg[4:]<if_stmt>alen<g>len(msg)<block_start><raise>Exception("nl80211 attribute underflow")<block_end>attrs[attr]=msg[0:alen]<line_sep>msg=msg[alen:]<block_end><return>attrs<block_end>
<import_stmt>json<import_stmt>logging<import_from_stmt>urllib.parse urljoin<import_from_stmt>typing Optional<import_from_stmt>fastapi FastAPI Path Query Form HTTPException<import_from_stmt>fastapi.middleware.cors CORSMiddleware<import_from_stmt>followthemoney.types registry<import_from_stmt>starlette.responses RedirectResponse<import_from_stmt>followthemoney model<import_from_stmt>followthemoney.exc InvalidData<import_from_stmt>api.osapi.data get_freebase_entity get_freebase_property get_matchable_schemata <import_from_stmt>opensanctions.core.entity Entity<import_from_stmt>opensanctions.core.logs configure_logging<import_from_stmt>osapi settings<import_from_stmt>osapi.models EntityResponse SearchResponse<import_from_stmt>osapi.data dataset resolver<import_from_stmt>osapi.data get_loader get_index get_schemata<import_from_stmt>osapi.data get_freebase_type get_freebase_types<import_from_stmt>osapi.util match_prefix<line_sep>log=logging.getLogger(__name__)<line_sep>app=FastAPI(title="OpenSanctions Matching API" version=settings.VERSION contact=settings.CONTACT )<line_sep>app.add_middleware(CORSMiddleware allow_origins=["*"] allow_credentials=<true> allow_methods=["*"] allow_headers=["*"] )<line_sep>configure_logging(level=logging.INFO)<line_sep>@app.on_event("startup")<async_keyword><def_stmt>startup_event <block_start>loader=get_loader()<line_sep>get_index(loader)<block_end>@app.get("/")<async_keyword><def_stmt>index <block_start>"""Get system configuration information."""<line_sep>loader=get_loader()<line_sep>index=get_index(loader)<line_sep><return>{"dataset":dataset.to_dict() "model":model.to_dict() "index":{"terms":len(index.terms) "tokens":len(index.inverted)} }<block_end>@app.get("/healthz")<async_keyword><def_stmt>healthz <block_start>"""No-op basic health check."""<line_sep><return>{"status":"ok"}<block_end>@app.get("/entities/{entity_id}" response_model=EntityResponse)<async_keyword><def_stmt>get_entity entity_id:str=Path(<none> title="The ID of the entity to retrieve")<block_start>"""Retrieve a single entity by its ID."""<line_sep>loader=get_loader()<line_sep>canonical_id=resolver.get_canonical(entity_id)<if_stmt>canonical_id<ne>entity_id<block_start>url=app.url_path_for("get_entity" entity_id=canonical_id)<line_sep><return>RedirectResponse(url=url)<block_end>entity=loader.get_entity(entity_id)<if_stmt>entity<is><none><block_start><raise>HTTPException(status_code=404 detail="No such entity!")<block_end><return>entity.to_nested_dict(loader)<block_end>@app.get("/search" response_model=SearchResponse)<async_keyword><def_stmt>search q:str schema:str=Query(settings.BASE_SCHEMA title="Types of entities that can match") limit:int=Query(10 title="Number of results to return") fuzzy:bool=Query(<false> title="Enable n-gram matching of partial names") nested:bool=Query(<false> title="Include adjacent entities in response") <block_start>"""Search matching entities based on a simple piece of text, e.g. 
a name."""<line_sep>loader=get_loader()<line_sep>index=get_index(loader)<line_sep>query=Entity(schema)<line_sep>query.add("name" q)<line_sep>query.add("notes" q)<line_sep>results=[]<for_stmt>result,score index.match_entities(query limit=limit fuzzy=fuzzy)<block_start>result_data=<none><if_stmt>nested<block_start>result_data=result.to_nested_dict(loader)<block_end><else_stmt><block_start>result_data=result.to_dict()<block_end>result_data["score"]=score<line_sep>results.append(result_data)<block_end><return>{"results":results}<block_end>@app.get("/reconcile")<def_stmt>reconcile queries:Optional[str]=<none><block_start>"""Reconciliation API, emulates Google Refine API. This endpoint can be used to bulk match entities against the system using an end-user application like [OpenRefine](https://openrefine.org). See: [Reconciliation API docs](https://reconciliation-api.github.io/specs/latest/#structure-of-a-reconciliation-query) """<if_stmt>queries<is><not><none><block_start><return>reconcile_queries(queries)<block_end>base_url=urljoin(settings.ENDPOINT_URL "/reconcile")<line_sep><return>{"versions":["0.2"] "name":f"{dataset.title} ({app.title})" "identifierSpace":"https://opensanctions.org/reference/#schema" "schemaSpace":"https://opensanctions.org/reference/#schema" "view":{"url":("https://opensanctions.org/entities/{{id}}/")} "suggest":{"entity":{"service_url":base_url "service_path":"/suggest/entity" } "type":{"service_url":base_url "service_path":"/suggest/type" } "property":{"service_url":base_url "service_path":"/suggest/property" } } "defaultTypes":get_freebase_types() }<block_end>@app.post("/reconcile")<def_stmt>reconcile_post queries:str=Form("")<block_start>"""Reconciliation API, emulates Google Refine API."""<line_sep><return>reconcile_queries(queries)<block_end><def_stmt>reconcile_queries queries# multiple requests in one query <block_start><try_stmt><block_start>queries=json.loads(queries)<block_end><except_stmt>ValueError<block_start><raise>HTTPException(status_code=400 detail="Cannot decode query")<block_end>results={}<for_stmt>k,q queries.items()<block_start>results[k]=reconcile_query(q)<block_end># log.info("RESULTS: %r" % results) <return>results<block_end><def_stmt>reconcile_query query<block_start>"""Reconcile operation for a single query."""<line_sep># log.info("Reconcile: %r", query) limit=int(query.get("limit" 5))<line_sep>type=query.get("type" settings.BASE_SCHEMA)<line_sep>loader=get_loader()<line_sep>index=get_index(loader)<line_sep>proxy=Entity(type)<line_sep>proxy.add("name" query.get("query"))<line_sep>proxy.add("notes" query.get("query"))<for_stmt>p query.get("properties" [])<block_start>prop=model.get_qname(p.get("pid"))<if_stmt>prop<is><none><block_start><continue><block_end><try_stmt><block_start>proxy.add_cast(prop.schema prop.name p.get("v") fuzzy=<true>)<block_end><except_stmt>InvalidData<block_start>log.exception("Invalid property is set.")<block_end><block_end>results=[]<line_sep># log.info("QUERY %r %s", proxy.to_dict(), limit) <for_stmt>result,score index.match_entities(proxy limit=limit fuzzy=<true>)<block_start>results.append(get_freebase_entity(result score))<block_end><return>{"result":results}<block_end>@app.get("/reconcile/suggest/entity")<def_stmt>reconcile_suggest_entity prefix:str="" limit:int=10<block_start>"""Suggest an entity API, emulates Google Refine API. 
This is functionally very similar to the basic search API, but returns data in the structure assumed by the [Reconciliation API](https://reconciliation-api.github.io/specs/latest/#suggest-services). Searches are conducted based on name and text content, using all matchable entities in the system index."""<line_sep>loader=get_loader()<line_sep>index=get_index(loader)<line_sep>query=Entity(settings.BASE_SCHEMA)<line_sep>query.add("name" prefix)<line_sep>query.add("notes" prefix)<line_sep>results=[]<for_stmt>result,score index.match_entities(query limit=limit fuzzy=<true>)<block_start>results.append(get_freebase_entity(result score))<block_end><return>{"code":"/api/status/ok" "status":"200 OK" "prefix":prefix "result":results }<block_end>@app.get("/reconcile/suggest/property")<def_stmt>reconcile_suggest_property prefix:str=""<block_start>"""Given a search prefix, return all the type/schema properties which match the given text. This is used to auto-complete property selection for detail filters in OpenRefine."""<line_sep>matches=[]<for_stmt>prop model.properties<block_start><if_stmt><not>prop.schema.is_a(settings.BASE_SCHEMA)<block_start><continue><block_end><if_stmt>prop.hidden<or>prop.type<eq>prop.type<eq>registry.entity<block_start><continue><block_end><if_stmt>match_prefix(prefix prop.name prop.label)<block_start>matches.append(get_freebase_property(prop))<block_end><block_end><return>{"code":"/api/status/ok" "status":"200 OK" "prefix":prefix "result":matches }<block_end>@app.get("/reconcile/suggest/type")<def_stmt>suggest_type prefix:str=""<block_start>"""Given a search prefix, return all the types (i.e. schema) which match the given text. This is used to auto-complete type selection for the configuration of reconciliation in OpenRefine."""<line_sep>matches=[]<for_stmt>schema get_matchable_schemata()<block_start><if_stmt>match_prefix(prefix schema.name schema.label)<block_start>matches.append(get_freebase_type(schema))<block_end><block_end><return>{"code":"/api/status/ok" "status":"200 OK" "prefix":prefix "result":matches }<block_end>
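For orientation, a client such as OpenRefine posts the queries form field handled by reconcile_queries as a JSON object along these lines; the name, schema and property id below are example values only.

queries = {
    "q0": {
        "query": "John Doe",
        "type": "Person",
        "limit": 5,
        "properties": [
            {"pid": "Person:birthDate", "v": "1975-04-21"},
        ],
    }
}
# POST /reconcile with form data: queries=json.dumps(queries)
# the response maps each key back to its matches: {"q0": {"result": [...]}}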
<import_from_stmt>twisted.internet.defer inlineCallbacks returnValue<import_from_stmt>vumi.utils http_request_full<import_from_stmt>vumi.message TransportUserMessage<import_from_stmt>vumi.transports.mtech_ussd MtechUssdTransport<import_from_stmt>vumi.transports.mtech_ussd.mtech_ussd MtechUssdResponse<import_from_stmt>vumi.transports.tests.helpers TransportHelper<import_from_stmt>vumi.tests.helpers VumiTestCase<class_stmt>TestMtechUssdTransport(VumiTestCase)<block_start>@inlineCallbacks<def_stmt>setUp self<block_start>self.config={'transport_type':'ussd' 'ussd_string_prefix':'*120*666#' 'web_path':"/foo" 'web_host':"127.0.0.1" 'web_port':0 'username':'testuser' 'password':'<PASSWORD>' }<line_sep>self.tx_helper=self.add_helper(TransportHelper(MtechUssdTransport))<line_sep>self.transport=<yield>self.tx_helper.get_transport(self.config)<line_sep>self.transport_url=self.transport.get_transport_url().rstrip('/')<line_sep>self.url="%s%s"%(self.transport_url self.config['web_path'])<line_sep><yield>self.transport.session_manager.redis._purge_all()<block_end># just in case <def_stmt>make_ussd_request_full self session_id **kwargs<block_start>lines=['<?xml version="1.0" encoding="UTF-8"?>' '<page version="2.0">' ' <session_id>%s</session_id>'%(session_id ) ]<for_stmt>k,v kwargs.items()<block_start>lines.append(' <%s>%s</%s>'%(k v k))<block_end>lines.append('</page>')<line_sep>data='\n'.join(lines)<line_sep><return>http_request_full(self.url data method='POST')<block_end><def_stmt>make_ussd_request self session_id **kwargs<block_start><return>self.make_ussd_request_full(session_id **kwargs).addCallback(<lambda>r:r.delivered_body)<block_end>@inlineCallbacks<def_stmt>reply_to_message self content **kw<block_start>[msg]=<yield>self.tx_helper.wait_for_dispatched_inbound(1)<line_sep><yield>self.tx_helper.make_dispatch_reply(msg content **kw)<line_sep>returnValue(msg)<block_end>@inlineCallbacks<def_stmt>test_empty_request self<block_start>response=<yield>http_request_full(self.url "" method='POST')<line_sep>self.assertEqual(response.code 400)<block_end>@inlineCallbacks<def_stmt>test_bad_request self<block_start>response=<yield>http_request_full(self.url "blah" method='POST')<line_sep>self.assertEqual(response.code 400)<block_end>@inlineCallbacks<def_stmt>test_inbound_new_continue self<block_start>sid='a41739890287485d968ea66e8b44bfd3'<line_sep>response_d=self.make_ussd_request(sid mobile_number='2348085832481' page_id='0' data='testmenu' gate='gateid')<line_sep>msg=<yield>self.reply_to_message("OK\n1 < 2")<line_sep>self.assertEqual(msg['transport_name'] self.tx_helper.transport_name)<line_sep>self.assertEqual(msg['transport_type'] "ussd")<line_sep>self.assertEqual(msg['transport_metadata'] {"session_id":sid})<line_sep>self.assertEqual(msg['session_event'] TransportUserMessage.SESSION_NEW)<line_sep>self.assertEqual(msg['from_addr'] '2348085832481')<line_sep># self.assertEqual(msg['to_addr'], '*120*666#') self.assertEqual(msg['content'] 'testmenu')<line_sep>response=<yield>response_d<line_sep>correct_response=''.join(["<?xml version='1.0' encoding='UTF-8'?>" '<page version="2.0">' '<session_id>a41739890287485d968ea66e8b44bfd3</session_id>' '<div>OK<br />1 &lt; 2</div>' '<navigation>' '<link accesskey="*" pageId="indexX" />' '</navigation>' '</page>' ])<line_sep>self.assertEqual(response correct_response)<block_end>@inlineCallbacks<def_stmt>test_inbound_resume_continue self<block_start>sid='a41739890287485d968ea66e8b44bfd3'<line_sep><yield>self.transport.save_session(sid '2348085832481' 
'*120*666#')<line_sep>response_d=self.make_ussd_request(sid page_id="indexX" data="foo")<line_sep>msg=<yield>self.reply_to_message("OK")<line_sep>self.assertEqual(msg['transport_name'] self.tx_helper.transport_name)<line_sep>self.assertEqual(msg['transport_type'] "ussd")<line_sep>self.assertEqual(msg['transport_metadata'] {"session_id":sid})<line_sep>self.assertEqual(msg['session_event'] TransportUserMessage.SESSION_RESUME)<line_sep>self.assertEqual(msg['from_addr'] '2348085832481')<line_sep>self.assertEqual(msg['to_addr'] '*120*666#')<line_sep>self.assertEqual(msg['content'] 'foo')<line_sep>response=<yield>response_d<line_sep>correct_response=''.join(["<?xml version='1.0' encoding='UTF-8'?>" '<page version="2.0">' '<session_id>a41739890287485d968ea66e8b44bfd3</session_id>' '<div>OK</div>' '<navigation>' '<link accesskey="*" pageId="indexX" />' '</navigation>' '</page>' ])<line_sep>self.assertEqual(response correct_response)<block_end>@inlineCallbacks<def_stmt>test_nack self<block_start>msg=<yield>self.tx_helper.make_dispatch_outbound("outbound")<line_sep>[nack]=<yield>self.tx_helper.wait_for_dispatched_events(1)<line_sep>self.assertEqual(nack['user_message_id'] msg['message_id'])<line_sep>self.assertEqual(nack['sent_message_id'] msg['message_id'])<line_sep>self.assertEqual(nack['nack_reason'] 'Missing in_reply_to, content or session_id')<block_end>@inlineCallbacks<def_stmt>test_inbound_missing_session self<block_start>sid='a41739890287485d968ea66e8b44bfd3'<line_sep>response=<yield>self.make_ussd_request_full(sid page_id="indexX" data="foo")<line_sep>self.assertEqual(400 response.code)<line_sep>self.assertEqual('' response.delivered_body)<block_end>@inlineCallbacks<def_stmt>test_inbound_new_and_resume self<block_start>sid='a41739890287485d968ea66e8b44bfd3'<line_sep>response_d=self.make_ussd_request(sid mobile_number='2348085832481' page_id='0' data='testmenu' gate='gateid')<line_sep>msg=<yield>self.reply_to_message("OK\n1 < 2")<line_sep>self.assertEqual(msg['transport_name'] self.tx_helper.transport_name)<line_sep>self.assertEqual(msg['transport_type'] "ussd")<line_sep>self.assertEqual(msg['transport_metadata'] {"session_id":sid})<line_sep>self.assertEqual(msg['session_event'] TransportUserMessage.SESSION_NEW)<line_sep>self.assertEqual(msg['from_addr'] '2348085832481')<line_sep># self.assertEqual(msg['to_addr'], '*120*666#') self.assertEqual(msg['content'] 'testmenu')<line_sep>response=<yield>response_d<line_sep>correct_response=''.join(["<?xml version='1.0' encoding='UTF-8'?>" '<page version="2.0">' '<session_id>a41739890287485d968ea66e8b44bfd3</session_id>' '<div>OK<br />1 &lt; 2</div>' '<navigation>' '<link accesskey="*" pageId="indexX" />' '</navigation>' '</page>' ])<line_sep>self.assertEqual(response correct_response)<line_sep>self.tx_helper.clear_all_dispatched()<line_sep>response_d=self.make_ussd_request(sid page_id="indexX" data="foo")<line_sep>msg=<yield>self.reply_to_message("OK")<line_sep>self.assertEqual(msg['transport_name'] self.tx_helper.transport_name)<line_sep>self.assertEqual(msg['transport_type'] "ussd")<line_sep>self.assertEqual(msg['transport_metadata'] {"session_id":sid})<line_sep>self.assertEqual(msg['session_event'] TransportUserMessage.SESSION_RESUME)<line_sep>self.assertEqual(msg['from_addr'] '2348085832481')<line_sep>self.assertEqual(msg['to_addr'] 'gateid')<line_sep>self.assertEqual(msg['content'] 'foo')<line_sep>response=<yield>response_d<line_sep>correct_response=''.join(["<?xml version='1.0' encoding='UTF-8'?>" '<page version="2.0">' 
'<session_id>a41739890287485d968ea66e8b44bfd3</session_id>' '<div>OK</div>' '<navigation>' '<link accesskey="*" pageId="indexX" />' '</navigation>' '</page>' ])<line_sep>self.assertEqual(response correct_response)<block_end>@inlineCallbacks<def_stmt>test_inbound_resume_close self<block_start>sid='a41739890287485d968ea66e8b44bfd3'<line_sep><yield>self.transport.save_session(sid '2348085832481' '*120*666#')<line_sep>response_d=self.make_ussd_request(sid page_id="indexX" data="foo")<line_sep>msg=<yield>self.reply_to_message("OK" continue_session=<false>)<line_sep>self.assertEqual(msg['transport_name'] self.tx_helper.transport_name)<line_sep>self.assertEqual(msg['transport_type'] "ussd")<line_sep>self.assertEqual(msg['transport_metadata'] {"session_id":sid})<line_sep>self.assertEqual(msg['session_event'] TransportUserMessage.SESSION_RESUME)<line_sep>self.assertEqual(msg['from_addr'] '2348085832481')<line_sep>self.assertEqual(msg['to_addr'] '*120*666#')<line_sep>self.assertEqual(msg['content'] 'foo')<line_sep>response=<yield>response_d<line_sep>correct_response=''.join(["<?xml version='1.0' encoding='UTF-8'?>" '<page version="2.0">' '<session_id>a41739890287485d968ea66e8b44bfd3</session_id>' '<div>OK</div>' '</page>' ])<line_sep>self.assertEqual(response correct_response)<block_end>@inlineCallbacks<def_stmt>test_inbound_cancel self<block_start>sid='a41739890287485d968ea66e8b44bfd3'<line_sep><yield>self.transport.save_session(sid '2348085832481' '*120*666#')<line_sep>response=<yield>self.make_ussd_request(sid status="1")<line_sep>correct_response=''.join(["<?xml version='1.0' encoding='UTF-8'?>" '<page version="2.0">' '<session_id>a41739890287485d968ea66e8b44bfd3</session_id>' '</page>' ])<line_sep>self.assertEqual(response correct_response)<block_end><block_end><class_stmt>TestMtechUssdResponse(VumiTestCase)<block_start><def_stmt>setUp self<block_start>self.mur=MtechUssdResponse("sid123")<block_end><def_stmt>assert_message_xml self *lines<block_start>xml_str=''.join(["<?xml version='1.0' encoding='UTF-8'?>"]+list(lines))<line_sep>self.assertEqual(self.mur.to_xml() xml_str)<block_end><def_stmt>test_empty_response self<block_start>self.assert_message_xml('<page version="2.0">' '<session_id>sid123</session_id>' '</page>')<block_end><def_stmt>test_free_text self<block_start>self.mur.add_text("Please enter your name")<line_sep>self.mur.add_freetext_option()<line_sep>self.assert_message_xml('<page version="2.0">' '<session_id>sid123</session_id>' '<div>Please enter your name</div>' '<navigation><link accesskey="*" pageId="indexX" /></navigation>' '</page>')<block_end><def_stmt>test_menu_options self<block_start>self.mur.add_text("Please choose:")<line_sep>self.mur.add_menu_item('chicken' '1')<line_sep>self.mur.add_menu_item('beef' '2')<line_sep>self.assert_message_xml('<page version="2.0">' '<session_id>sid123</session_id>' '<div>Please choose:</div>' '<navigation>' '<link accesskey="1" pageId="index1">chicken</link>' '<link accesskey="2" pageId="index2">beef</link>' '</navigation>' '</page>')<block_end><def_stmt>test_menu_options_title self<block_start>self.mur.add_title("LUNCH")<line_sep>self.mur.add_text("Please choose:")<line_sep>self.mur.add_menu_item('chicken' '1')<line_sep>self.mur.add_menu_item('beef' '2')<line_sep>self.assert_message_xml('<page version="2.0">' '<session_id>sid123</session_id>' '<title>LUNCH</title>' '<div>Please choose:</div>' '<navigation>' '<link accesskey="1" pageId="index1">chicken</link>' '<link accesskey="2" pageId="index2">beef</link>' '</navigation>' 
'</page>')<block_end><block_end>
# Copyright (c) 2012 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. <import_stmt>re<import_from_stmt>telemetry.core util<import_from_stmt>telemetry.core exceptions<import_from_stmt>telemetry.page.actions page_action<def_stmt>_EscapeSelector selector<block_start><return>selector.replace('\'' '\\\'')<block_end><class_stmt>ClickElementAction(page_action.PageAction)<block_start><def_stmt>__init__ self attributes=<none><block_start>super(ClickElementAction self).__init__(attributes)<block_end><def_stmt>RunAction self page tab previous_action<block_start><def_stmt>DoClick <block_start><if_stmt>hasattr(self 'selector')<block_start>code=('document.querySelector(\''+_EscapeSelector(self.selector)+'\').click();')<try_stmt><block_start>tab.ExecuteJavaScript(code)<block_end><except_stmt>exceptions.EvaluateException<block_start><raise>page_action.PageActionFailed('Cannot find element with selector '+self.selector)<block_end><block_end><elif_stmt>hasattr(self 'text')<block_start>callback_code='function(element) { element.click(); }'<try_stmt><block_start>util.FindElementAndPerformAction(tab self.text callback_code)<block_end><except_stmt>exceptions.EvaluateException<block_start><raise>page_action.PageActionFailed('Cannot find element with text '+self.text)<block_end><block_end><elif_stmt>hasattr(self 'xpath')<block_start>code=('document.evaluate("%s",'<concat>'document,'<concat>'null,'<concat>'XPathResult.FIRST_ORDERED_NODE_TYPE,'<concat>'null)'<concat>'.singleNodeValue.click()'%re.escape(self.xpath))<try_stmt><block_start>tab.ExecuteJavaScript(code)<block_end><except_stmt>exceptions.EvaluateException<block_start><raise>page_action.PageActionFailed('Cannot find element with xpath '+self.xpath)<block_end><block_end><else_stmt><block_start><raise>page_action.PageActionFailed('No condition given to click_element')<block_end><block_end>DoClick()<block_end><block_end>
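# A minimal sketch (not part of Telemetry, assumes nothing beyond the code above):
# the three branches of DoClick only differ in how they locate the element; the
# 'selector' branch builds a small JavaScript snippet around the escaped selector
# before handing it to tab.ExecuteJavaScript. The helper and selector value below
# are made-up illustrations of that string construction.
def build_click_js(selector):
    escaped = selector.replace("'", "\\'")  # same replacement _EscapeSelector performs
    return "document.querySelector('" + escaped + "').click();"

print(build_click_js("button.submit"))
# -> document.querySelector('button.submit').click();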
# encoding: utf8 <import_from_future_stmt> unicode_literals<try_stmt><block_start><import_stmt>tkinter<as>tk<import_from_stmt>tkinter.scrolledtext ScrolledText<block_end><except_stmt>ImportError<block_start><import_stmt>Tkinter<as>tk<import_from_stmt>ScrolledText ScrolledText<block_end><import_from_stmt>pygubu.builder.builderobject BuilderObject register_widget<import_from_stmt>pygubu.builder.tkstdwidgets TKText<class_stmt>TkinterScrolledTextBO(TKText)<block_start>class_=ScrolledText<block_end>register_widget('pygubu.builder.widgets.tkinterscrolledtext' TkinterScrolledTextBO 'ScrolledText' ('Control & Display' 'tk'))<line_sep>
""" In an input of unsorted integer array, find the number of elements that can be searched using binary search The idea is the an element is binary searchable if the elements to the left of it are smaller than it and the elements to the right of it are bigger than it So maintain two arrays - left_max and right_min such that in i'th index - * left_max[i] contains the max element between 0 and i-1 (left to right movement) * right_min[i] contains the min element between i+1 and n-1 (right to left movement) Now for every element in the array, if its index its i, then it is binary searchable if left_max[i] < arr[i] < right_min[i] """<import_stmt>sys<def_stmt>get_searchable_numbers arr n<block_start>left_max=[<none>]<times>n<line_sep>right_min=[<none>]<times>n<line_sep>left_max[0]=float('-inf')<line_sep>right_min[n-1]=float('inf')<for_stmt>i range(1 n)<block_start>left_max[i]=max(left_max[i-1] arr[i-1])<block_end><for_stmt>i range(len(arr)-2 -1 -1)<block_start>right_min[i]=min(right_min[i+1] arr[i+1])<block_end>res=[]<line_sep>count=0<for_stmt>i range(0 n)<block_start>num=arr[i]<line_sep>left=left_max[i]<line_sep>right=right_min[i]<if_stmt>left<l>num<l>right<block_start>res.append(num)<line_sep>count<augadd>1<block_end><block_end><return>count res<block_end><if_stmt>__name__<eq>'__main__'#arr = [5,1,4,3,6,8,10,7,9] <block_start>arr=[4 1 3 9 8 10 11]<line_sep>count,res=get_searchable_numbers(arr len(arr))<line_sep>print(count res)<block_end>
"""pypyr step that merges the input mappings into context. Whereas contextcopy and contextsetf overwrites values that are in context already, contextmerge merges its input into context, preserving the existing hierarchy while just updating the values where specified in the contextmerge input. Applies string interpolation as it merges. String interpolation applies to keys and values. """<import_stmt>logging<line_sep># logger means the log level will be set correctly logger=logging.getLogger(__name__)<def_stmt>run_step context<block_start>"""Merge hierarchy into context with substitutions. context is a dictionary or dictionary-like. context['contextMerge'] must exist. It's a dictionary. Will iterate context['contextMerge'] and save the values as new keys to the context where they exist already, and add these as new values where they don't already exist. While it's doing so, it will leave unspecified values in the existing hierarchy untouched. List merging is purely additive, with no checks for uniqueness or already existing list items. E.g context [0,1,2] with contextMerge=[2,3,4] will result in [0,1,2,2,3,4] Keep this in mind especially where complex types like dicts nest inside a list - a merge will always add a new dict list item, not merge it into whatever dicts might exist on the list already. For example, say input context is: key1: value1 key2: value2 key3: k31: value31 k32: value32 contextMerge: key2: 'aaa_{key1}_zzz' key3: k33: value33 key4: 'bbb_{key2}_yyy' This will result in return context: key1: value1 key2: aaa_value1_zzz key3: k31: value31 k32: value32 k33: value33 key4: <KEY> """<line_sep>logger.debug("started")<line_sep>context.assert_key_has_value(key='contextMerge' caller=__name__)<line_sep>context.merge(context['contextMerge'])<line_sep>logger.info("merged %d context items." len(context['contextMerge']))<line_sep>logger.debug("done")<block_end>
"""A collection of CLI commands for working with Kedro micro-packages."""<import_stmt>re<import_stmt>shutil<import_stmt>sys<import_stmt>tarfile<import_stmt>tempfile<import_from_stmt>importlib import_module<import_from_stmt>pathlib Path<import_from_stmt>typing Iterable List Optional Set Tuple Union<import_stmt>click<import_stmt>pkg_resources<import_from_stmt>rope.base.project Project<import_from_stmt>rope.contrib generate<import_from_stmt>rope.refactor.move MoveModule<import_from_stmt>rope.refactor.rename Rename<import_from_stmt>kedro.framework.cli.pipeline _assert_pkg_name_ok _check_pipeline_name _get_artifacts_to_package _sync_dirs <import_from_stmt>kedro.framework.cli.utils KedroCliError _clean_pycache call command_with_verbosity env_option python_call <import_from_stmt>kedro.framework.startup ProjectMetadata<line_sep>_SETUP_PY_TEMPLATE="""# -*- coding: utf-8 -*- from setuptools import setup, find_packages setup( name="{name}", version="{version}", description="Micro-package `{name}`", packages=find_packages(), include_package_data=True, install_requires={install_requires}, ) """<def_stmt>_check_module_path ctx param value# pylint: disable=unused-argument <block_start><if_stmt>value<and><not>re.match(r"^[\w.]+$" value)<block_start>message=("The micro-package location you provided is not a valid Python module path")<line_sep><raise>KedroCliError(message)<block_end><return>value<block_end># pylint: disable=missing-function-docstring @click.group(name="Kedro")<def_stmt>micropkg_cli # pragma: no cover <block_start><pass><block_end>@micropkg_cli.group()<def_stmt>micropkg <block_start>"""Commands for working with micro-packages."""<block_end>@command_with_verbosity(micropkg "pull")@click.argument("package_path" nargs=1 required=<false>)@click.option("--all" "-a" "all_flag" is_flag=<true> help="Pull and unpack all micro-packages in the `pyproject.toml` package manifest section." )@env_option(help="Environment to install the micro-package configuration to. Defaults to `base`.")@click.option("--alias" type=str default="" help="Rename the package.")@click.option("-d" "--destination" type=click.Path(file_okay=<false> dir_okay=<false>) default=<none> help="Module location where to unpack under." )@click.option("--fs-args" type=click.Path(exists=<true> file_okay=<true> dir_okay=<false> readable=<true> resolve_path=<true>) default=<none> help="Location of a configuration file for the fsspec filesystem used to pull the package." 
)@click.pass_obj# this will pass the metadata as first argument <def_stmt>pull_package # pylint:disable=unused-argument, too-many-arguments metadata:ProjectMetadata package_path env alias destination fs_args all_flag **kwargs <arrow><none><block_start>"""Pull and unpack a modular pipeline and other micro-packages in your project."""<if_stmt><not>package_path<and><not>all_flag<block_start>click.secho("Please specify a package path or add '--all' to pull all micro-packages in the "<concat>"`pyproject.toml` package manifest section.")<line_sep>sys.exit(1)<block_end><if_stmt>all_flag<block_start>_pull_packages_from_manifest(metadata)<line_sep><return><block_end>_pull_package(package_path metadata env=env alias=alias destination=destination fs_args=fs_args )<line_sep>as_alias=f" as `{alias}`"<if>alias<else>""<line_sep>message=f"Micro-package {package_path} pulled and unpacked{as_alias}!"<line_sep>click.secho(message fg="green")<block_end># pylint: disable=too-many-arguments, too-many-locals <def_stmt>_pull_package package_path:str metadata:ProjectMetadata env:str=<none> alias:str=<none> destination:str=<none> fs_args:str=<none> <block_start><with_stmt>tempfile.TemporaryDirectory()<as>temp_dir<block_start>temp_dir_path=Path(temp_dir).resolve()<line_sep>_unpack_sdist(package_path temp_dir_path fs_args)<line_sep>sdist_file_name=Path(package_path).name.rstrip(".tar.gz")<line_sep>egg_info_file=list((temp_dir_path/sdist_file_name).glob("*.egg-info"))<if_stmt>len(egg_info_file)<ne>1<block_start><raise>KedroCliError(f"More than 1 or no egg-info files found from {package_path}. "<concat>f"There has to be exactly one egg-info directory.")<block_end>package_name=egg_info_file[0].stem<line_sep>package_requirements=temp_dir_path/sdist_file_name/"setup.py"<line_sep># Finds a string representation of 'install_requires' list from setup.py reqs_list_pattern=r"install_requires\=(.*?)\,\n"<line_sep>list_reqs=re.findall(reqs_list_pattern package_requirements.read_text(encoding="utf-8"))<line_sep># Finds all elements from the above string representation of a list reqs_element_pattern=r"\'(.*?)\'"<line_sep>package_reqs=re.findall(reqs_element_pattern list_reqs[0])<if_stmt>package_reqs<block_start>requirements_txt=metadata.source_dir/"requirements.txt"<line_sep>_append_package_reqs(requirements_txt package_reqs package_name)<block_end>_clean_pycache(temp_dir_path)<line_sep>_install_files(metadata package_name temp_dir_path/sdist_file_name env alias destination )<block_end><block_end><def_stmt>_pull_packages_from_manifest metadata:ProjectMetadata<arrow><none># pylint: disable=import-outside-toplevel <block_start><import_stmt>anyconfig# for performance reasons config_dict=anyconfig.load(metadata.config_file)<line_sep>config_dict=config_dict["tool"]["kedro"]<line_sep>build_specs=config_dict.get("micropkg" {}).get("pull")<if_stmt><not>build_specs<block_start>click.secho("Nothing to pull. Please update the `pyproject.toml` package manifest section." fg="yellow" )<line_sep><return><block_end><for_stmt>package_path,specs build_specs.items()<block_start><if_stmt>"alias"<in>specs<block_start>_assert_pkg_name_ok(specs["alias"].split(".")[-1])<block_end>_pull_package(package_path metadata **specs)<line_sep>click.secho(f"Pulled and unpacked `{package_path}`!")<block_end>click.secho("Micro-packages pulled and unpacked!" 
fg="green")<block_end><def_stmt>_package_micropkgs_from_manifest metadata:ProjectMetadata<arrow><none># pylint: disable=import-outside-toplevel <block_start><import_stmt>anyconfig# for performance reasons config_dict=anyconfig.load(metadata.config_file)<line_sep>config_dict=config_dict["tool"]["kedro"]<line_sep>build_specs=config_dict.get("micropkg" {}).get("package")<if_stmt><not>build_specs<block_start>click.secho("Nothing to package. Please update the `pyproject.toml` package manifest section." fg="yellow" )<line_sep><return><block_end><for_stmt>package_name,specs build_specs.items()<block_start><if_stmt>"alias"<in>specs<block_start>_assert_pkg_name_ok(specs["alias"])<block_end>_package_micropkg(package_name metadata **specs)<line_sep>click.secho(f"Packaged `{package_name}` micro-package!")<block_end>click.secho("Micro-packages packaged!" fg="green")<block_end>@micropkg.command("package")@env_option(help="Environment where the micro-package configuration lives. Defaults to `base`.")@click.option("--alias" type=str default="" callback=_check_pipeline_name help="Alternative name to package under." )@click.option("-d" "--destination" type=click.Path(resolve_path=<true> file_okay=<false>) help="Location where to create the source distribution file. Defaults to `dist/`." )@click.option("--all" "-a" "all_flag" is_flag=<true> help="Package all micro-packages in the `pyproject.toml` package manifest section." )@click.argument("module_path" nargs=1 required=<false> callback=_check_module_path)@click.pass_obj# this will pass the metadata as first argument <def_stmt>package_micropkg metadata:ProjectMetadata module_path env alias destination all_flag# pylint: disable=too-many-arguments <block_start>"""Package up a modular pipeline or micro-package as a Python source distribution."""<if_stmt><not>module_path<and><not>all_flag<block_start>click.secho("Please specify a micro-package name or add '--all' to package all micro-packages in "<concat>"the `pyproject.toml` package manifest section.")<line_sep>sys.exit(1)<block_end><if_stmt>all_flag<block_start>_package_micropkgs_from_manifest(metadata)<line_sep><return><block_end>result_path=_package_micropkg(module_path metadata alias=alias destination=destination env=env)<line_sep>as_alias=f" as `{alias}`"<if>alias<else>""<line_sep>message=(f"`{metadata.package_name}.{module_path}` packaged{as_alias}! "<concat>f"Location: {result_path}")<line_sep>click.secho(message fg="green")<block_end><def_stmt>_get_fsspec_filesystem location:str fs_args:Optional[str]# pylint: disable=import-outside-toplevel <block_start><import_stmt>anyconfig<import_stmt>fsspec<import_from_stmt>kedro.io.core get_protocol_and_path<line_sep>protocol,_=get_protocol_and_path(location)<line_sep>fs_args_config=anyconfig.load(fs_args)<if>fs_args<else>{}<try_stmt><block_start><return>fsspec.filesystem(protocol **fs_args_config)<block_end><except_stmt>Exception<as>exc# pylint: disable=broad-except # Specified protocol is not supported by `fsspec` # or requires extra dependencies <block_start>click.secho(str(exc) fg="red")<line_sep>click.secho("Trying to use 'pip download'..." 
fg="red")<line_sep><return><none><block_end><block_end><def_stmt>_unpack_sdist location:str destination:Path fs_args:Optional[str]<arrow><none><block_start>filesystem=_get_fsspec_filesystem(location fs_args)<if_stmt>location.endswith(".tar.gz")<and>filesystem<and>filesystem.exists(location)<block_start><with_stmt>filesystem.open(location)<as>fs_file<block_start><with_stmt>tarfile.open(fileobj=fs_file mode="r:gz")<as>tar_file<block_start>tar_file.extractall(destination)<block_end><block_end><block_end><else_stmt><block_start>python_call("pip" ["download" "--no-deps" "--dest" str(destination) location])<line_sep>sdist_file=list(destination.glob("*.tar.gz"))<line_sep># `--no-deps` should fetch only one source distribution file, and CLI should fail if that's # not the case. <if_stmt>len(sdist_file)<ne>1<block_start>file_names=[sf.name<for>sf sdist_file]<line_sep><raise>KedroCliError(f"More than 1 or no sdist files found: {file_names}. "<concat>f"There has to be exactly one source distribution file.")<block_end><with_stmt>tarfile.open(sdist_file[0] "r:gz")<as>fs_file<block_start>fs_file.extractall(destination)<block_end><block_end><block_end><def_stmt>_rename_files conf_source:Path old_name:str new_name:str<block_start>config_files_to_rename=(each<for>each conf_source.rglob("*")<if>each.is_file()<and>old_name<in>each.name)<for_stmt>config_file config_files_to_rename<block_start>new_config_name=config_file.name.replace(old_name new_name)<line_sep>config_file.rename(config_file.parent/new_config_name)<block_end><block_end><def_stmt>_refactor_code_for_unpacking project:Project package_path:Path tests_path:Path alias:Optional[str] destination:Optional[str] project_metadata:ProjectMetadata <arrow>Tuple[Path Path]<block_start>"""This is the reverse operation of `_refactor_code_for_package`, i.e we go from: <temp_dir> # also the root of the Rope project |__ <micro_package> # or <alias> |__ __init__.py |__ tests # only tests for <micro_package> |__ __init__.py |__ tests.py to: <temp_dir> # also the root of the Rope project |__ <project_package> |__ __init__.py |__ <path_to_micro_package> |__ __init__.py |__ <micro_package> |__ __init__.py |__ tests |__ __init__.py |__ <path_to_micro_package> |__ __init__.py |__ <micro_package> |__ __init__.py """<def_stmt>_move_package_with_conflicting_name target:Path original_name:str desired_name:str=<none><arrow>Path<block_start>_rename_package(project original_name "tmp_name")<line_sep>full_path=_create_nested_package(project target)<line_sep>_move_package(project "tmp_name" target.as_posix())<line_sep>desired_name=desired_name<or>original_name<line_sep>_rename_package(project (target/"tmp_name").as_posix() desired_name)<line_sep><return>full_path<block_end>package_name=package_path.stem<line_sep>package_target=Path(project_metadata.package_name)<line_sep>tests_target=Path("tests")<if_stmt>destination<block_start>destination_path=Path(destination)<line_sep>package_target=package_target/destination_path<line_sep>tests_target=tests_target/destination_path<block_end><if_stmt>alias<and>alias<ne>package_name<block_start>_rename_package(project package_name alias)<line_sep>package_name=alias<block_end><if_stmt>package_name<eq>project_metadata.package_name<block_start>full_path=_move_package_with_conflicting_name(package_target package_name)<block_end><else_stmt><block_start>full_path=_create_nested_package(project package_target)<line_sep>_move_package(project package_name 
package_target.as_posix())<block_end>refactored_package_path=full_path/package_name<if_stmt><not>tests_path.exists()<block_start><return>refactored_package_path tests_path<block_end># we can't rename the tests package to <package_name> # because it will conflict with existing top-level package; # hence we give it a temp name, create the expected # nested folder structure, move the contents there, # then rename the temp name to <package_name>. full_path=_move_package_with_conflicting_name(tests_target original_name="tests" desired_name=package_name)<line_sep>refactored_tests_path=full_path/package_name<line_sep><return>refactored_package_path refactored_tests_path<block_end><def_stmt>_install_files # pylint: disable=too-many-arguments, too-many-locals project_metadata:ProjectMetadata package_name:str source_path:Path env:str=<none> alias:str=<none> destination:str=<none> <block_start>env=env<or>"base"<line_sep>package_source,test_source,conf_source=_get_package_artifacts(source_path package_name)<if_stmt>conf_source.is_dir()<and>alias<block_start>_rename_files(conf_source package_name alias)<block_end>module_path=alias<or>package_name<if_stmt>destination<block_start>module_path=f"{destination}.{module_path}"<block_end>package_dest,test_dest,conf_dest=_get_artifacts_to_package(project_metadata module_path=module_path env=env)<if_stmt>conf_source.is_dir()<block_start>_sync_dirs(conf_source conf_dest)<line_sep># `config` dir was packaged under `package_name` directory with # `kedro micropkg package`. Since `config` was already synced, # we don't want to copy it again when syncing the package, so we remove it. shutil.rmtree(str(conf_source))<block_end>project=Project(source_path)<line_sep>refactored_package_source,refactored_test_source=_refactor_code_for_unpacking(project package_source test_source alias destination project_metadata)<line_sep>project.close()<if_stmt>refactored_test_source.is_dir()<block_start>_sync_dirs(refactored_test_source test_dest)<block_end># Sync everything under package directory, except `config` # since it has already been copied. 
<if_stmt>refactored_package_source.is_dir()<block_start>_sync_dirs(refactored_package_source package_dest)<block_end><block_end><def_stmt>_find_config_files source_config_dir:Path glob_patterns:List[str]<arrow>List[Tuple[Path str]]<block_start>config_files=[]# type: List[Tuple[Path, str]] <if_stmt>source_config_dir.is_dir()<block_start>config_files=[(path path.parent.relative_to(source_config_dir).as_posix())<for>glob_pattern glob_patterns<for>path source_config_dir.glob(glob_pattern)<if>path.is_file()]<block_end><return>config_files<block_end><def_stmt>_get_default_version metadata:ProjectMetadata micropkg_module_path:str<arrow>str# default to micropkg package version <block_start><try_stmt><block_start>micropkg_module=import_module(f"{metadata.package_name}.{micropkg_module_path}")<line_sep><return>micropkg_module.__version__# type: ignore <block_end><except_stmt>(AttributeError ModuleNotFoundError)# if micropkg version doesn't exist, take the project one <block_start>project_module=import_module(f"{metadata.package_name}")<line_sep><return>project_module.__version__<block_end><block_end># type: ignore <def_stmt>_package_micropkg micropkg_module_path:str metadata:ProjectMetadata alias:str=<none> destination:str=<none> env:str=<none> <arrow>Path<block_start>micropkg_name=micropkg_module_path.split(".")[-1]<line_sep>package_dir=metadata.source_dir/metadata.package_name<line_sep>env=env<or>"base"<line_sep>package_source,package_tests,package_conf=_get_artifacts_to_package(metadata module_path=micropkg_module_path env=env)<line_sep># as the source distribution will only contain parameters, we aren't listing other # config files not to confuse users and avoid useless file copies configs_to_package=_find_config_files(package_conf [f"parameters*/**/{micropkg_name}.yml" f"parameters*/**/{micropkg_name}/**/*"] )<line_sep>source_paths=(package_source package_tests configs_to_package)<line_sep># Check that micropkg directory exists and not empty _validate_dir(package_source)<line_sep>destination=Path(destination)<if>destination<else>metadata.project_path/"dist"<line_sep>version=_get_default_version(metadata micropkg_module_path)<line_sep>_generate_sdist_file(micropkg_name=micropkg_name destination=destination.resolve() source_paths=source_paths version=version metadata=metadata alias=alias )<line_sep>_clean_pycache(package_dir)<line_sep>_clean_pycache(metadata.project_path)<line_sep><return>destination<block_end><def_stmt>_validate_dir path:Path<arrow><none><block_start><if_stmt><not>path.is_dir()<block_start><raise>KedroCliError(f"Directory '{path}' doesn't exist.")<block_end><if_stmt><not>list(path.iterdir())<block_start><raise>KedroCliError(f"'{path}' is an empty directory.")<block_end><block_end><def_stmt>_get_sdist_name name version<block_start><return>f"{name}-{version}.tar.gz"<block_end><def_stmt>_sync_path_list source:List[Tuple[Path str]] target:Path<arrow><none><block_start><for_stmt>source_path,suffix source<block_start>target_with_suffix=(target/suffix).resolve()<line_sep>_sync_dirs(source_path target_with_suffix)<block_end><block_end><def_stmt>_make_install_requires requirements_txt:Path<arrow>List[str]<block_start>"""Parses each line of requirements.txt into a version specifier valid to put in install_requires."""<if_stmt><not>requirements_txt.exists()<block_start><return>[]<block_end>requirements=pkg_resources.parse_requirements(requirements_txt.read_text())<line_sep><return>[str(requirement)<for>requirement requirements]<block_end><def_stmt>_create_nested_package project:Project 
package_path:Path<arrow>Path# fails if parts of the path exists already <block_start>packages=package_path.parts<line_sep>parent=generate.create_package(project packages[0])<line_sep>nested_path=Path(project.address)/packages[0]<for_stmt>package packages[1:]<block_start>parent=generate.create_package(project package sourcefolder=parent)<line_sep>nested_path=nested_path/package<block_end><return>nested_path<block_end><def_stmt>_move_package project:Project source:str target:str<arrow><none><block_start>""" Move a Python package, refactoring relevant imports along the way. A target of empty string means moving to the root of the `project`. Args: project: rope.base.Project holding the scope of the refactoring. source: Name of the Python package to be moved. Can be a fully qualified module path relative to the `project` root, e.g. "package.pipelines.pipeline" or "package/pipelines/pipeline". target: Destination of the Python package to be moved. Can be a fully qualified module path relative to the `project` root, e.g. "package.pipelines.pipeline" or "package/pipelines/pipeline". """<line_sep>src_folder=project.get_module(source).get_resource()<line_sep>target_folder=project.get_module(target).get_resource()<line_sep>change=MoveModule(project src_folder).get_changes(dest=target_folder)<line_sep>project.do(change)<block_end><def_stmt>_rename_package project:Project old_name:str new_name:str<arrow><none><block_start>""" Rename a Python package, refactoring relevant imports along the way, as well as references in comments. Args: project: rope.base.Project holding the scope of the refactoring. old_name: Old module name. Can be a fully qualified module path, e.g. "package.pipelines.pipeline" or "package/pipelines/pipeline", relative to the `project` root. new_name: New module name. Can't be a fully qualified module path. """<line_sep>folder=project.get_folder(old_name)<line_sep>change=Rename(project folder).get_changes(new_name docs=<true>)<line_sep>project.do(change)<block_end><def_stmt>_refactor_code_for_package project:Project package_path:Path tests_path:Path alias:Optional[str] project_metadata:ProjectMetadata <arrow><none><block_start>"""In order to refactor the imports properly, we need to recreate the same nested structure as in the project. Therefore, we create: <temp_dir> # also the root of the Rope project |__ <project_package> |__ __init__.py |__ <path_to_micro_package> |__ __init__.py |__ <micro_package> |__ __init__.py |__ tests |__ __init__.py |__ path_to_micro_package |__ __init__.py |__ <micro_package> |__ __init__.py We then move <micro_package> outside of package src to top level ("") in temp_dir, and rename folder & imports if alias provided. For tests, we need to extract all the contents of <micro_package> at into top-level `tests` folder. This is not possible in one go with the Rope API, so we have to do it in a bit of a hacky way. We rename <micro_package> to a `tmp_name` and move it at top-level ("") in temp_dir. We remove the old `tests` folder and rename `tmp_name` to `tests`. 
The final structure should be: <temp_dir> # also the root of the Rope project |__ <micro_package> # or <alias> |__ __init__.py |__ tests # only tests for <micro_package> |__ __init__.py |__ test.py """<def_stmt>_move_package_with_conflicting_name target:Path conflicting_name:str<block_start>tmp_name="tmp_name"<line_sep>tmp_module=target.parent/tmp_name<line_sep>_rename_package(project target.as_posix() tmp_name)<line_sep>_move_package(project tmp_module.as_posix() "")<line_sep>shutil.rmtree(Path(project.address)/conflicting_name)<line_sep>_rename_package(project tmp_name conflicting_name)<block_end># Copy source in appropriate folder structure package_target=package_path.relative_to(project_metadata.source_dir)<line_sep>full_path=_create_nested_package(project package_target)<line_sep># overwrite=True to update the __init__.py files generated by create_package _sync_dirs(package_path full_path overwrite=<true>)<line_sep># Copy tests in appropriate folder structure <if_stmt>tests_path.exists()<block_start>tests_target=tests_path.relative_to(project_metadata.source_dir)<line_sep>full_path=_create_nested_package(project tests_target)<line_sep># overwrite=True to update the __init__.py files generated by create_package _sync_dirs(tests_path full_path overwrite=<true>)<block_end># Refactor imports in src/package_name/.../micro_package # and imports of `micro_package` in tests. micro_package_name=package_target.stem<if_stmt>micro_package_name<eq>project_metadata.package_name<block_start>_move_package_with_conflicting_name(package_target micro_package_name)<block_end><else_stmt><block_start>_move_package(project package_target.as_posix() "")<line_sep>shutil.rmtree(Path(project.address)/project_metadata.package_name)<block_end><if_stmt>alias<block_start>_rename_package(project micro_package_name alias)<block_end><if_stmt>tests_path.exists()# we can't move the relevant tests folder as is because # it will conflict with the top-level package <micro_package>; # we can't rename it "tests" and move it, because it will conflict # with the existing "tests" folder at top level; # hence we give it a temp name, move it, delete tests/ and # rename the temp name to tests. 
<block_start>_move_package_with_conflicting_name(tests_target "tests")<block_end><block_end>_SourcePathType=Union[Path List[Tuple[Path str]]]<line_sep># pylint: disable=too-many-arguments,too-many-locals <def_stmt>_generate_sdist_file micropkg_name:str destination:Path source_paths:Tuple[_SourcePathType <ellipsis>] version:str metadata:ProjectMetadata alias:str=<none> <arrow><none><block_start>package_name=alias<or>micropkg_name<line_sep>package_source,tests_source,conf_source=source_paths<with_stmt>tempfile.TemporaryDirectory()<as>temp_dir<block_start>temp_dir_path=Path(temp_dir).resolve()<line_sep>project=Project(temp_dir_path)# project where to do refactoring _refactor_code_for_package(project package_source tests_source alias metadata# type: ignore )<line_sep>project.close()<line_sep># Copy & "refactor" config _,_,conf_target=_get_package_artifacts(temp_dir_path package_name)<line_sep>_sync_path_list(conf_source conf_target)# type: ignore <if_stmt>conf_target.is_dir()<and>alias<block_start>_rename_files(conf_target micropkg_name alias)<block_end># Build a setup.py on the fly <try_stmt><block_start>install_requires=_make_install_requires(package_source/"requirements.txt"# type: ignore )<block_end><except_stmt>Exception<as>exc<block_start>click.secho("FAILED" fg="red")<line_sep>cls=exc.__class__<line_sep><raise>KedroCliError(f"{cls.__module__}.{cls.__qualname__}: {exc}")<from>exc<block_end>_generate_manifest_file(temp_dir_path)<line_sep>setup_file=_generate_setup_file(package_name version install_requires temp_dir_path)<line_sep>package_file=destination/_get_sdist_name(name=package_name version=version)<if_stmt>package_file.is_file()<block_start>click.secho(f"Package file {package_file} will be overwritten!" fg="yellow")<block_end># python setup.py sdist --formats=gztar --dist-dir <destination> call([sys.executable str(setup_file.resolve()) "sdist" "--formats=gztar" "--dist-dir" str(destination) ] cwd=temp_dir )<block_end><block_end><def_stmt>_generate_manifest_file output_dir:Path<block_start>manifest_file=output_dir/"MANIFEST.in"<line_sep>manifest_file.write_text(""" global-include README.md global-include config/parameters* global-include config/**/parameters* global-include config/parameters*/** global-include config/parameters*/**/* """)<block_end><def_stmt>_generate_setup_file package_name:str version:str install_requires:List[str] output_dir:Path<arrow>Path<block_start>setup_file=output_dir/"setup.py"<line_sep>setup_file_context=dict(name=package_name version=version install_requires=install_requires)<line_sep>setup_file.write_text(_SETUP_PY_TEMPLATE.format(**setup_file_context))<line_sep><return>setup_file<block_end><def_stmt>_get_package_artifacts source_path:Path package_name:str<arrow>Tuple[Path Path Path]<block_start>"""From existing package, returns in order: source_path, tests_path, config_path """<line_sep>artifacts=(source_path/package_name source_path/"tests" # package_data (non-python files) needs to live inside one of the packages source_path/package_name/"config" )<line_sep><return>artifacts<block_end><def_stmt>_append_package_reqs requirements_txt:Path package_reqs:List[str] package_name:str<arrow><none><block_start>"""Appends micro-package requirements to project level 
requirements.txt"""<line_sep>incoming_reqs=_safe_parse_requirements(package_reqs)<if_stmt>requirements_txt.is_file()<block_start>existing_reqs=_safe_parse_requirements(requirements_txt.read_text())<line_sep>reqs_to_add=set(incoming_reqs)-set(existing_reqs)<if_stmt><not>reqs_to_add<block_start><return><block_end>sorted_reqs=sorted(str(req)<for>req reqs_to_add)<line_sep>sep="\n"<with_stmt>open(requirements_txt "a" encoding="utf-8")<as>file<block_start>file.write(f"\n\n# Additional requirements from micro-package `{package_name}`:\n")<line_sep>file.write(sep.join(sorted_reqs))<block_end>click.secho(f"Added the following requirements from micro-package `{package_name}` to "<concat>f"requirements.txt:\n{sep.join(sorted_reqs)}")<block_end><else_stmt><block_start>click.secho("No project requirements.txt found. Copying contents from project requirements.txt...")<line_sep>sorted_reqs=sorted(str(req)<for>req incoming_reqs)<line_sep>sep="\n"<with_stmt>open(requirements_txt "a" encoding="utf-8")<as>file<block_start>file.write(sep.join(sorted_reqs))<block_end><block_end>click.secho("Use `kedro build-reqs` to compile and `pip install -r src/requirements.lock` to install "<concat>"the updated list of requirements.")<block_end><def_stmt>_safe_parse_requirements requirements:Union[str Iterable[str]]<arrow>Set[pkg_resources.Requirement]<block_start>"""Safely parse a requirement or set of requirements. This effectively replaces pkg_resources.parse_requirements, which blows up with a ValueError as soon as it encounters a requirement it cannot parse (e.g. `-r requirements.txt`). This way we can still extract all the parseable requirements out of a set containing some unparseable requirements. """<line_sep>parseable_requirements=set()<for_stmt>requirement pkg_resources.yield_lines(requirements)<block_start><try_stmt><block_start>parseable_requirements.add(pkg_resources.Requirement.parse(requirement))<block_end><except_stmt>ValueError<block_start><continue><block_end><block_end><return>parseable_requirements<block_end>
""" Simple plot In this section, we want to draw the cosine and sine functions on the same plot. Starting from the default settings, we'll enrich the figure step by step to make it nicer. First step is to get the data for the sine and cosine functions: :lesson goal file: goal01.py """<import_stmt>numpy<as>np<import_stmt>matplotlib.pyplot<as>plt<line_sep>x=np.linspace(-np.pi np.pi 256 endpoint=<true>)<line_sep>c,s=np.cos(x) np.sin(x)<line_sep># x is now a numpy array with 256 values ranging from -pi to +pi # (included). c is the cosine (256 values) and s is the sine # (256 values). # To see the plot in PyCharm, first run this file normally. # That should show the plot in a new window. If it shows up in # the tool window inside PyCharm, you should probably disable # the Python Scientific mode under File: Settings. # Next, choose Run: Start Live Turtle. That should show you two # plots: the current plot and the goal plot. # Can you add the sine data to make the first plot match the # second one? plt.plot(x c)# Copy this line and change it. # Once they match exactly, the goal plot should disappear. # Then you can open lesson 2. plt.show()<line_sep>
<import_from_stmt>app.models.agent Agent<import_from_stmt>app.models.agent_config AgentConfig<import_from_stmt>app.models.agent_script AgentScript<import_from_stmt>app.models.config_item ConfigItem<import_from_stmt>app.models.natlas_services NatlasServices<import_from_stmt>app.models.rescan_task RescanTask<import_from_stmt>app.models.scope_item ScopeItem<import_from_stmt>app.models.tag Tag<import_from_stmt>app.models.user User<import_from_stmt>app.models.user_invitation UserInvitation<import_from_stmt>app.models.scope_log ScopeLog<line_sep>__all__=["Agent" "AgentConfig" "AgentScript" "ConfigItem" "NatlasServices" "RescanTask" "ScopeItem" "ScopeLog" "Tag" "User" "UserInvitation" ]<line_sep>
<import_stmt>FWCore.ParameterSet.Config<as>cms<import_from_stmt>RecoPixelVertexing.PixelTrackFitting.pixelTracks_cfi pixelTracks<as>_pixelTracks<import_from_stmt>RecoTauTag.HLTProducers.trackingRegionsFromBeamSpotAndL2Tau_cfi trackingRegionsFromBeamSpotAndL2Tau<line_sep># Note from new seeding framework migration # Previously the TrackingRegion was set as a parameter of PixelTrackProducer # Now the TrackingRegion EDProducer must be inserted in a sequence, and set as an input to HitPairEDProducer pixelTracksL2Tau=_pixelTracks.clone(passLabel='pixelTracksL2Tau')<line_sep>
# Copyright (c) 2017-present, Facebook, Inc. # All rights reserved. # # This source code is licensed under the license found in the LICENSE file in # the root directory of this source tree. An additional grant of patent rights # can be found in the PATENTS file in the same directory. <import_stmt>importlib<import_stmt>os<import_from_stmt>.fairseq_criterion FairseqCriterion<line_sep>CRITERION_REGISTRY={}<line_sep>CRITERION_CLASS_NAMES=set()<def_stmt>build_criterion args task<block_start><return>CRITERION_REGISTRY[args.criterion](args task)<block_end><def_stmt>register_criterion name<block_start>"""Decorator to register a new criterion."""<def_stmt>register_criterion_cls cls<block_start><if_stmt>name<in>CRITERION_REGISTRY<block_start><raise>ValueError('Cannot register duplicate criterion ({})'.format(name))<block_end><if_stmt><not>issubclass(cls FairseqCriterion)<block_start><raise>ValueError('Criterion ({}: {}) must extend FairseqCriterion'.format(name cls.__name__))<block_end><if_stmt>cls.__name__<in>CRITERION_CLASS_NAMES# We use the criterion class name as a unique identifier in # checkpoints, so all criterions must have unique class names. <block_start><raise>ValueError('Cannot register criterion with duplicate class name ({})'.format(cls.__name__))<block_end>CRITERION_REGISTRY[name]=cls<line_sep>CRITERION_CLASS_NAMES.add(cls.__name__)<line_sep><return>cls<block_end><return>register_criterion_cls<block_end># automatically import any Python files in the criterions/ directory <for_stmt>file os.listdir(os.path.dirname(__file__))<block_start><if_stmt>file.endswith('.py')<and><not>file.startswith('_')<block_start>module=file[:file.find('.py')]<line_sep>importlib.import_module('fairseq.criterions.'+module)<block_end><block_end>
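# Schematic example (not a real fairseq criterion): how a new loss would hook
# into the registry defined above. The class name and criterion name are made
# up, and the body is elided because FairseqCriterion's full interface is not
# shown here.
from fairseq.criterions import FairseqCriterion, register_criterion

@register_criterion('my_toy_criterion')
class MyToyCriterion(FairseqCriterion):
    """Placeholder; a real criterion implements the loss computation."""

# Once this module is imported, build_criterion(args, task) constructs
# MyToyCriterion whenever args.criterion == 'my_toy_criterion'.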
<import_from_future_stmt> print_function<import_stmt>sys<import_stmt>os<import_stmt>shutil<import_stmt>time<import_stmt>argparse<import_stmt>logging<import_stmt>hashlib<import_stmt>copy<import_stmt>torch<import_stmt>torch.nn.functional<as>F<import_stmt>torch.optim<as>optim<import_stmt>torch.backends.cudnn<as>cudnn<import_stmt>numpy<as>np<import_stmt>sparselearning<import_from_stmt>sparselearning.core Masking CosineDecay LinearDecay<import_from_stmt>sparselearning.models AlexNet VGG16 LeNet_300_100 LeNet_5_Caffe WideResNet<import_from_stmt>sparselearning.utils get_mnist_dataloaders get_cifar10_dataloaders plot_class_feature_histograms<import_from_stmt>extensions magnitude_variance_pruning variance_redistribution<line_sep>cudnn.benchmark=<true><line_sep>cudnn.deterministic=<true><if_stmt><not>os.path.exists('./models')<block_start>os.mkdir('./models')<block_end><if_stmt><not>os.path.exists('./logs')<block_start>os.mkdir('./logs')<block_end>logger=<none><line_sep>models={}<line_sep>models['lenet5']=(LeNet_5_Caffe [])<line_sep>models['lenet300-100']=(LeNet_300_100 [])<line_sep>models['alexnet-s']=(AlexNet ['s' 10])<line_sep>models['alexnet-b']=(AlexNet ['b' 10])<line_sep>models['vgg-c']=(VGG16 ['C' 10])<line_sep>models['vgg-d']=(VGG16 ['D' 10])<line_sep>models['vgg-like']=(VGG16 ['like' 10])<line_sep>models['wrn-28-2']=(WideResNet [28 2 10 0.3])<line_sep>models['wrn-22-8']=(WideResNet [22 8 10 0.3])<line_sep>models['wrn-16-8']=(WideResNet [16 8 10 0.3])<line_sep>models['wrn-16-10']=(WideResNet [16 10 10 0.3])<def_stmt>setup_logger args<block_start><global>logger<if_stmt>logger<eq><none><block_start>logger=logging.getLogger()<block_end><else_stmt># wish there was a logger.close() <block_start><for_stmt>handler logger.handlers[:]# make a copy of the list <block_start>logger.removeHandler(handler)<block_end><block_end>args_copy=copy.deepcopy(args)<line_sep># copy to get a clean hash # use the same log file hash if iterations or verbose are different # these flags do not change the results args_copy.iters=1<line_sep>args_copy.verbose=<false><line_sep>args_copy.log_interval=1<line_sep>args_copy.seed=0<line_sep>log_path='./logs/{0}_{1}_{2}.log'.format(args.model args.density hashlib.md5(str(args_copy).encode('utf-8')).hexdigest()[:8])<line_sep>logger.setLevel(logging.INFO)<line_sep>formatter=logging.Formatter(fmt='%(asctime)s: %(message)s' datefmt='%H:%M:%S')<line_sep>fh=logging.FileHandler(log_path)<line_sep>fh.setFormatter(formatter)<line_sep>logger.addHandler(fh)<block_end><def_stmt>print_and_log msg<block_start><global>logger<line_sep>print(msg)<line_sep>logger.info(msg)<block_end><def_stmt>train args model device train_loader optimizer epoch lr_scheduler mask=<none><block_start>model.train()<for_stmt>batch_idx,(data target) enumerate(train_loader)<block_start><if_stmt>lr_scheduler<is><not><none><block_start>lr_scheduler.step()<block_end>data,target=data.to(device) target.to(device)<if_stmt>args.fp16<block_start>data=data.half()<block_end>optimizer.zero_grad()<line_sep>output=model(data)<line_sep>loss=F.nll_loss(output target)<if_stmt>args.fp16<block_start>optimizer.backward(loss)<block_end><else_stmt><block_start>loss.backward()<block_end><if_stmt>mask<is><not><none><block_start>mask.step()<block_end><else_stmt><block_start>optimizer.step()<block_end><if_stmt>batch_idx%args.log_interval<eq>0<block_start>print_and_log('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(epoch batch_idx<times>len(data) len(train_loader)<times>args.batch_size 100.<times>batch_idx/len(train_loader) 
loss.item()))<block_end><block_end><block_end><def_stmt>evaluate args model device test_loader is_test_set=<false><block_start>model.eval()<line_sep>test_loss=0<line_sep>correct=0<line_sep>n=0<with_stmt>torch.no_grad()<block_start><for_stmt>data,target test_loader<block_start>data,target=data.to(device) target.to(device)<if_stmt>args.fp16<block_start>data=data.half()<block_end>model.t=target<line_sep>output=model(data)<line_sep>test_loss<augadd>F.nll_loss(output target reduction='sum').item()# sum up batch loss pred=output.argmax(dim=1 keepdim=<true>)# get the index of the max log-probability correct<augadd>pred.eq(target.view_as(pred)).sum().item()<line_sep>n<augadd>target.shape[0]<block_end><block_end>test_loss<augdiv>float(n)<line_sep>print_and_log('\n{}: Average loss: {:.4f}, Accuracy: {}/{} ({:.3f}%)\n'.format('Test evaluation'<if>is_test_set<else>'Evaluation' test_loss correct n 100.<times>correct/float(n)))<line_sep><return>correct/float(n)<block_end><def_stmt>save_checkpoint state is_best filename='checkpoint.pth.tar'<block_start>torch.save(state filename)<if_stmt>is_best<block_start>shutil.copyfile(filename 'model_best.pth.tar')<block_end><block_end><def_stmt>main # Training settings <block_start>parser=argparse.ArgumentParser(description='PyTorch MNIST Example')<line_sep>parser.add_argument('--batch-size' type=int default=100 metavar='N' help='input batch size for training (default: 100)')<line_sep>parser.add_argument('--test-batch-size' type=int default=100 metavar='N' help='input batch size for testing (default: 100)')<line_sep>parser.add_argument('--epochs' type=int default=100 metavar='N' help='number of epochs to train (default: 100)')<line_sep>parser.add_argument('--lr' type=float default=0.1 metavar='LR' help='learning rate (default: 0.1)')<line_sep>parser.add_argument('--momentum' type=float default=0.9 metavar='M' help='SGD momentum (default: 0.9)')<line_sep>parser.add_argument('--no-cuda' action='store_true' default=<false> help='disables CUDA training')<line_sep>parser.add_argument('--seed' type=int default=17 metavar='S' help='random seed (default: 17)')<line_sep>parser.add_argument('--log-interval' type=int default=100 metavar='N' help='how many batches to wait before logging training status')<line_sep>parser.add_argument('--optimizer' type=str default='sgd' help='The optimizer to use. Default: sgd. Options: sgd, adam.')<line_sep>parser.add_argument('--save-model' type=str default='./models/model.pt' help='For Saving the current Model')<line_sep>parser.add_argument('--data' type=str default='mnist')<line_sep>parser.add_argument('--decay_frequency' type=int default=25000)<line_sep>parser.add_argument('--l1' type=float default=0.0)<line_sep>parser.add_argument('--fp16' action='store_true' help='Run in fp16 mode.')<line_sep>parser.add_argument('--valid_split' type=float default=0.1)<line_sep>parser.add_argument('--resume' type=str)<line_sep>parser.add_argument('--start-epoch' type=int default=1)<line_sep>parser.add_argument('--model' type=str default='')<line_sep>parser.add_argument('--l2' type=float default=5.0e-4)<line_sep>parser.add_argument('--iters' type=int default=1 help='How many times the model should be run after each other. 
Default=1')<line_sep>parser.add_argument('--save-features' action='store_true' help='Resumes a saved model and saves its feature data to disk for plotting.')<line_sep>parser.add_argument('--bench' action='store_true' help='Enables the benchmarking of layers and estimates sparse speedups')<line_sep>parser.add_argument('--max-threads' type=int default=10 help='How many threads to use for data loading.')<line_sep>parser.add_argument('--decay-schedule' type=str default='cosine' help='The decay schedule for the pruning rate. Default: cosine. Choose from: cosine, linear.')<line_sep>sparselearning.core.add_sparse_args(parser)<line_sep>args=parser.parse_args()<line_sep>setup_logger(args)<line_sep>print_and_log(args)<if_stmt>args.fp16<block_start><try_stmt><block_start><import_from_stmt>apex.fp16_utils FP16_Optimizer<block_end><except_stmt><block_start>print('WARNING: apex not installed, ignoring --fp16 option')<line_sep>args.fp16=<false><block_end><block_end>use_cuda=<not>args.no_cuda<and>torch.cuda.is_available()<line_sep>device=torch.device("cuda"<if>use_cuda<else>"cpu")<line_sep>print_and_log('\n\n')<line_sep>print_and_log('='<times>80)<line_sep>torch.manual_seed(args.seed)<for_stmt>i range(args.iters)<block_start>print_and_log("\nIteration start: {0}/{1}\n".format(i+1 args.iters))<if_stmt>args.data<eq>'mnist'<block_start>train_loader,valid_loader,test_loader=get_mnist_dataloaders(args validation_split=args.valid_split)<block_end><else_stmt><block_start>train_loader,valid_loader,test_loader=get_cifar10_dataloaders(args args.valid_split max_threads=args.max_threads)<block_end><if_stmt>args.model<not><in>models<block_start>print('You need to select an existing model via the --model argument. Available models include: ')<for_stmt>key models<block_start>print('\t{0}'.format(key))<block_end><raise>Exception('You need to select a model')<block_end><else_stmt><block_start>cls,cls_args=models[args.model]<line_sep>model=cls(*(cls_args+[args.save_features args.bench])).to(device)<line_sep>print_and_log(model)<line_sep>print_and_log('='<times>60)<line_sep>print_and_log(args.model)<line_sep>print_and_log('='<times>60)<line_sep>print_and_log('='<times>60)<line_sep>print_and_log('Prune mode: {0}'.format(args.prune))<line_sep>print_and_log('Growth mode: {0}'.format(args.growth))<line_sep>print_and_log('Redistribution mode: {0}'.format(args.redistribution))<line_sep>print_and_log('='<times>60)<block_end># add custom prune/growth/redisribution here <if_stmt>args.prune<eq>'magnitude_variance'<block_start>print('Using magnitude-variance pruning. Switching to Adam optimizer...')<line_sep>args.prune=magnitude_variance_pruning<line_sep>args.optimizer='adam'<block_end><if_stmt>args.redistribution<eq>'variance'<block_start>print('Using variance redistribution. 
Switching to Adam optimizer...')<line_sep>args.redistribution=variance_redistribution<line_sep>args.optimizer='adam'<block_end>optimizer=<none><if_stmt>args.optimizer<eq>'sgd'<block_start>optimizer=optim.SGD(model.parameters() lr=args.lr momentum=args.momentum weight_decay=args.l2 nesterov=<true>)<block_end><elif_stmt>args.optimizer<eq>'adam'<block_start>optimizer=optim.Adam(model.parameters() lr=args.lr weight_decay=args.l2)<block_end><else_stmt><block_start>print('Unknown optimizer: {0}'.format(args.optimizer))<line_sep><raise>Exception('Unknown optimizer.')<block_end>lr_scheduler=optim.lr_scheduler.StepLR(optimizer args.decay_frequency gamma=0.1)<if_stmt>args.resume<block_start><if_stmt>os.path.isfile(args.resume)<block_start>print_and_log("=> loading checkpoint '{}'".format(args.resume))<line_sep>checkpoint=torch.load(args.resume)<line_sep>args.start_epoch=checkpoint['epoch']<line_sep>model.load_state_dict(checkpoint['state_dict'])<line_sep>optimizer.load_state_dict(checkpoint['optimizer'])<line_sep>print_and_log("=> loaded checkpoint '{}' (epoch {})".format(args.resume checkpoint['epoch']))<line_sep>print_and_log('Testing...')<line_sep>evaluate(args model device test_loader)<line_sep>model.feats=[]<line_sep>model.densities=[]<line_sep>plot_class_feature_histograms(args model device train_loader optimizer)<block_end><else_stmt><block_start>print_and_log("=> no checkpoint found at '{}'".format(args.resume))<block_end><block_end><if_stmt>args.fp16<block_start>print('FP16')<line_sep>optimizer=FP16_Optimizer(optimizer static_loss_scale=<none> dynamic_loss_scale=<true> dynamic_loss_args={'init_scale':2<power>16})<line_sep>model=model.half()<block_end>mask=<none><if_stmt><not>args.dense<block_start><if_stmt>args.decay_schedule<eq>'cosine'<block_start>decay=CosineDecay(args.prune_rate len(train_loader)<times>(args.epochs))<block_end><elif_stmt>args.decay_schedule<eq>'linear'<block_start>decay=LinearDecay(args.prune_rate len(train_loader)<times>(args.epochs))<block_end>mask=Masking(optimizer decay prune_rate=args.prune_rate prune_mode=args.prune growth_mode=args.growth redistribution_mode=args.redistribution verbose=args.verbose fp16=args.fp16)<line_sep>mask.add_module(model density=args.density)<block_end><for_stmt>epoch range(1 args.epochs+1)<block_start>t0=time.time()<line_sep>train(args model device train_loader optimizer epoch lr_scheduler mask)<if_stmt>args.valid_split<g>0.0<block_start>val_acc=evaluate(args model device valid_loader)<block_end>save_checkpoint({'epoch':epoch+1 'state_dict':model.state_dict() 'optimizer':optimizer.state_dict()} is_best=<false> filename=args.save_model)<if_stmt><not>args.dense<and>epoch<l>args.epochs<block_start>mask.at_end_of_epoch()<block_end>print_and_log('Current learning rate: {0}. Time taken for epoch: {1:.2f} seconds.\n'.format(optimizer.param_groups[0]['lr'] time.time()-t0))<block_end>evaluate(args model device test_loader is_test_set=<true>)<line_sep>print_and_log("\nIteration end: {0}/{1}\n".format(i+1 args.iters))<block_end><block_end><if_stmt>__name__<eq>'__main__'<block_start>main()<block_end>
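# Example invocation (illustrative). Only flags defined in the argument parser
# above are used; the sparsity flags added by sparselearning.core.add_sparse_args
# (density, prune rate, growth/redistribution modes, ...) are not spelled out in
# this file, so they are omitted here rather than guessed:
#
#   python main.py --model wrn-16-8 --data cifar10 --epochs 100 \
#       --batch-size 100 --lr 0.1 --optimizer sgd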
<import_stmt>os<import_stmt>pytest<import_from_stmt>django.core management<import_stmt>static_precompiler.settings<import_from_stmt>static_precompiler.management.commands compilestatic<def_stmt>test_get_scanned_dirs <block_start><assert_stmt>compilestatic.get_scanned_dirs()<eq>sorted([os.path.join(os.path.dirname(__file__) "compilestatic") os.path.join(os.path.dirname(__file__) "staticfiles_dir") os.path.join(os.path.dirname(__file__) "staticfiles_dir_with_prefix") static_precompiler.settings.STATIC_ROOT ])<block_end>@[email protected]("verbosity" (0 1 ) )<def_stmt>test_compilestatic_command verbosity capsys monkeypatch tmpdir<block_start>monkeypatch.setattr("static_precompiler.management.commands.compilestatic.get_scanned_dirs" <lambda>:(os.path.join(os.path.dirname(__file__) "compilestatic") ) )<line_sep>monkeypatch.setattr("static_precompiler.settings.ROOT" tmpdir.strpath)<line_sep>management.call_command("compilestatic" verbosity=verbosity)<line_sep>output_path=os.path.join(tmpdir.strpath static_precompiler.settings.OUTPUT_DIR)<line_sep>compiled_files=[]<for_stmt>root,dirs,files os.walk(output_path)<block_start><for_stmt>filename files<block_start>compiled_files.append(os.path.join(root[len(output_path):].lstrip("/") filename))<block_end><block_end>compiled_files.sort()<assert_stmt>compiled_files<eq>["coffee/test.js" "less/test.css" "scss/test.css" ]<line_sep>stdout,_=capsys.readouterr()<if_stmt>verbosity<ge>1<block_start><assert_stmt>stdout<eq>("Compiled 'coffee/test.coffee' to 'COMPILED/coffee/test.js'\n"<concat>"Compiled 'less/test.less' to 'COMPILED/less/test.css'\n"<concat>"Compiled 'scss/test.scss' to 'COMPILED/scss/test.css'\n")<block_end><else_stmt><block_start><assert_stmt>stdout<eq>""<block_end><block_end>@pytest.mark.skip("Re-enable when pytest-django>3.1.2 is released")@pytest.mark.django_db<def_stmt>test_ignore_dependencies_option django_assert_num_queries monkeypatch tmpdir<block_start>monkeypatch.setattr("static_precompiler.management.commands.compilestatic.get_scanned_dirs" <lambda>:(os.path.join(os.path.dirname(__file__) "compilestatic") ) )<line_sep>monkeypatch.setattr("static_precompiler.settings.ROOT" tmpdir.strpath)<with_stmt>django_assert_num_queries(0)<block_start>management.call_command("compilestatic" ignore_dependencies=<true>)<block_end><block_end>@pytest.mark.django_db<def_stmt>test_delete_stale_files monkeypatch tmpdir<block_start>output_path=os.path.join(tmpdir.strpath static_precompiler.settings.OUTPUT_DIR)<if_stmt><not>os.path.exists(output_path)<block_start>os.makedirs(output_path)<block_end>unmanaged_file=os.path.join(tmpdir.strpath "unmanaged.js")<with_stmt>open(unmanaged_file "w+")<as>f<block_start>f.write("unmanaged")<block_end><with_stmt>open(os.path.join(output_path "stale.js") "w+")<as>f<block_start>f.write("stale")<block_end>monkeypatch.setattr("static_precompiler.management.commands.compilestatic.get_scanned_dirs" <lambda>:(os.path.join(os.path.dirname(__file__) "compilestatic") ) )<line_sep>monkeypatch.setattr("static_precompiler.settings.ROOT" tmpdir.strpath)<line_sep>management.call_command("compilestatic" delete_stale_files=<true>)<line_sep>compiled_files=[]<for_stmt>root,dirs,files os.walk(output_path)<block_start><for_stmt>filename files<block_start>compiled_files.append(os.path.join(root[len(output_path):].lstrip("/") filename))<block_end><block_end>compiled_files.sort()<assert_stmt>compiled_files<eq>["coffee/test.js" "less/test.css" "scss/test.css" ]<line_sep># Files outside of `COMPILED` directory are untouched 
<assert_stmt>os.path.exists(unmanaged_file)<block_end>
# Data sources database(thermoLibraries=['primaryThermoLibrary'] reactionLibraries=[] seedMechanisms=[] kineticsDepositories=['training'] kineticsFamilies='default' kineticsEstimator='rate rules' )<line_sep># List of species species(label='ethane' reactive=<true> structure=SMILES("CC") )<line_sep>species(label='O2' reactive=<true> structure=SMILES('[O][O]'))<line_sep>species(label='N2' reactive=<false> structure=SMILES('N#N') )<line_sep># Reaction systems simpleReactor(temperature=[(1000 'K') (1500 'K')] pressure=[(1.0 'bar') (10.0 'bar')] nSims=3 initialMoleFractions={"ethane":[0.05 0.15] "O2":0.1 "N2":0.9 } terminationConversion={'ethane':0.1 } terminationTime=(1e1 's') balanceSpecies="N2" )<line_sep>simulator(atol=1e-16 rtol=1e-8 )<line_sep>model(toleranceKeepInEdge=0.001 toleranceMoveToCore=0.01 toleranceInterruptSimulation=1e8 maximumEdgeSpecies=20 filterReactions=<true> minCoreSizeForPrune=5 )<line_sep>options(units='si' generateOutputHTML=<false> generatePlots=<false> saveEdgeSpecies=<false> saveSimulationProfiles=<false> )<line_sep>
# # Copyright (c) 2008 <NAME> <EMAIL> # # forms.py 31-Jul-2011 # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # under the License. # # """ .. _WTForms: http://wtforms.simplecodes.com/ A simple wrapper for WTForms_. Basically we only need to map the request handler's `arguments` to the `wtforms.form.Form` input. Quick example:: from wtforms import TextField, validators from tornadotools.forms import Form class SampleForm(Form): username = TextField('Username', [ validators.Length(min=4, message="Too short") ]) email = TextField('Email', [ validators.Length(min=4, message="Not a valid mail address"), validators.Email() ]) Then, in the `RequestHandler`:: def get(self): form = SampleForm(self) if form.validate(): # do something with form.username or form.email pass self.render('template.html', form=form) """<import_from_stmt>wtforms Form<class_stmt>Form(Form)<block_start>""" `WTForms` wrapper for Tornado. """<def_stmt>__init__ self formdata=<none> obj=<none> prefix='' **kwargs<block_start>""" Wrap the `formdata` with the `TornadoInputWrapper` and call the base constuctor. """<line_sep>self._handler=formdata<line_sep>super(Form self).__init__(TornadoInputWrapper(formdata) obj=obj prefix=prefix **kwargs)<block_end><def_stmt>_get_translations self<block_start><return>TornadoLocaleWrapper(self._handler.get_user_locale())<block_end><block_end><class_stmt>TornadoInputWrapper(object)<block_start><def_stmt>__init__ self handler<block_start>self._handler=handler<block_end><def_stmt>__iter__ self<block_start><return>iter(self._handler.request.arguments)<block_end><def_stmt>__len__ self<block_start><return>len(self._handler.request.arguments)<block_end><def_stmt>__contains__ self name<block_start><return>(name<in>self._handler.request.arguments)<block_end><def_stmt>getlist self name<block_start><return>self._handler.get_arguments(name)<block_end><block_end><class_stmt>TornadoLocaleWrapper(object)<block_start><def_stmt>__init__ self locale<block_start>self.locale=locale<block_end><def_stmt>gettext self message<block_start><return>self.locale.translate(message)<if>self.locale<else>message<block_end><def_stmt>ngettext self message plural_message count<block_start><return>self.locale.translate(message plural_message count)<if>self.locale<else>message<block_end><block_end>
<import_from_stmt>setuptools setup<line_sep>setup(name="rich-click" install_requires=["click>=7" "rich>=10.7.0" "importlib-metadata; python_version < '3.8'" ] extras_require={"typer":"typer>=0.4" "dev":"pre-commit" } package_data={"rich_click":["py.typed"]} )<line_sep>
# Copyright (C) The Arvados Authors. All rights reserved. # # SPDX-License-Identifier: Apache-2.0 <import_from_future_stmt> absolute_import<import_stmt>arvados<import_stmt>sys<import_from_stmt>. run_test_server<import_from_stmt>. arvados_testutil<as>tutil<import_from_stmt>. manifest_examples<import_from_stmt>.performance.performance_profiler profiled<class_stmt>CollectionBenchmark(run_test_server.TestCaseWithServers tutil.ArvadosBaseTestCase manifest_examples.ManifestExamples)<block_start>MAIN_SERVER={}<line_sep>TEST_BLOCK_SIZE=0<line_sep>@classmethod<def_stmt>list_recursive cls coll parent_name=<none><block_start><if_stmt>parent_name<is><none><block_start>current_name=coll.stream_name()<block_end><else_stmt><block_start>current_name='{}/{}'.format(parent_name coll.name)<block_end><try_stmt><block_start><for_stmt>name coll<block_start><for_stmt>item cls.list_recursive(coll[name] current_name)<block_start><yield>item<block_end><block_end><block_end><except_stmt>TypeError<block_start><yield>current_name<block_end><block_end>@classmethod<def_stmt>setUpClass cls<block_start>super(CollectionBenchmark cls).setUpClass()<line_sep>run_test_server.authorize_with('active')<line_sep>cls.api_client=arvados.api('v1')<line_sep>cls.keep_client=arvados.KeepClient(api_client=cls.api_client local_store=cls.local_store)<block_end>@profiled<def_stmt>profile_new_collection_from_manifest self manifest_text<block_start><return>arvados.collection.Collection(manifest_text)<block_end>@profiled<def_stmt>profile_new_collection_from_server self uuid<block_start><return>arvados.collection.Collection(uuid)<block_end>@profiled<def_stmt>profile_new_collection_copying_bytes_from_collection self src<block_start>dst=arvados.collection.Collection()<with_stmt>tutil.mock_keep_responses('x'<times>self.TEST_BLOCK_SIZE 200)<block_start><for_stmt>name self.list_recursive(src)<block_start><with_stmt>src.open(name 'rb')<as>srcfile dst.open(name 'wb')<as>dstfile<block_start>dstfile.write(srcfile.read())<block_end><block_end>dst.save_new()<block_end><block_end>@profiled<def_stmt>profile_new_collection_copying_files_from_collection self src<block_start>dst=arvados.collection.Collection()<with_stmt>tutil.mock_keep_responses('x'<times>self.TEST_BLOCK_SIZE 200)<block_start><for_stmt>name self.list_recursive(src)<block_start>dst.copy(name name src)<block_end>dst.save_new()<block_end><block_end>@profiled<def_stmt>profile_collection_list_files self coll<block_start><return>sum(1<for>name self.list_recursive(coll))<block_end><def_stmt>test_medium_sized_manifest self<block_start>"""Exercise manifest-handling code. Currently, this test puts undue emphasis on some code paths that don't reflect typical use because the contrived example manifest has some unusual characteristics: * Block size is zero. * Every block is identical, so block caching patterns are unrealistic. * Every file begins and ends at a block boundary. 
"""<line_sep>specs={'streams':100 'files_per_stream':100 'blocks_per_file':20 'bytes_per_block':self.TEST_BLOCK_SIZE }<line_sep>my_manifest=self.make_manifest(**specs)<line_sep>coll=self.profile_new_collection_from_manifest(my_manifest)<line_sep>coll.save_new()<line_sep>self.profile_new_collection_from_server(coll.manifest_locator())<line_sep>num_items=self.profile_collection_list_files(coll)<line_sep>self.assertEqual(num_items specs['streams']<times>specs['files_per_stream'])<line_sep>self.profile_new_collection_copying_bytes_from_collection(coll)<line_sep>self.profile_new_collection_copying_files_from_collection(coll)<block_end><block_end>
{"targets":[{"target_name":"cld-c" "type":"static_library" "include_dirs":["internal" ] "sources":["internal/cldutil.cc" "internal/cldutil_shared.cc" "internal/compact_lang_det.cc" "internal/compact_lang_det_hint_code.cc" "internal/compact_lang_det_impl.cc" "internal/debug.cc" "internal/fixunicodevalue.cc" "internal/generated_entities.cc" "internal/generated_language.cc" "internal/generated_ulscript.cc" "internal/getonescriptspan.cc" "internal/lang_script.cc" "internal/offsetmap.cc" "internal/scoreonescriptspan.cc" "internal/tote.cc" "internal/utf8statetable.cc" "internal/cld_generated_cjk_uni_prop_80.cc" "internal/cld2_generated_cjk_compatible.cc" "internal/cld_generated_cjk_delta_bi_32.cc" "internal/generated_distinct_bi_0.cc" "internal/cld2_generated_quad0122.cc" "internal/cld2_generated_deltaocta0122.cc" "internal/cld2_generated_deltaoctachrome.cc" "internal/cld2_generated_distinctocta0122.cc" "internal/cld2_generated_distinctoctachrome.cc" "internal/cld2_generated_quadchrome_16.cc" "internal/cld2_generated_quadchrome_2.cc" "internal/cld_generated_score_quad_octa_0122.cc" "internal/cld_generated_score_quad_octa_2.cc"] "defines":[] "cflags_cc":["-w" "-std=gnu++98"] "cflags_cc!":["-std=gnu++0x"] "link_settings":{"ldflags":["-z" "muldefs"]} "xcode_settings":{"OTHER_CFLAGS":["-w"] 'CLANG_CXX_LANGUAGE_STANDARD':'c++98'}}]}<line_sep>
<import_from_stmt>pythonforandroid.recipe PythonRecipe<class_stmt>BabelRecipe(PythonRecipe)<block_start>name='babel'<line_sep>version='2.2.0'<line_sep>url='https://pypi.python.org/packages/source/B/Babel/Babel-{version}.tar.gz'<line_sep>depends=['setuptools' 'pytz']<line_sep>call_hostpython_via_targetpython=<false><line_sep>install_in_hostpython=<true><block_end>recipe=BabelRecipe()<line_sep>
<import_from_future_stmt> absolute_import division print_function unicode_literals<import_stmt>argparse<import_stmt>sys<import_stmt>matplotlib.pyplot<as>plt<import_stmt>numpy<as>np<import_stmt>tensorflow<as>tf<import_stmt>yaml<import_from_stmt>recognition.backbones.resnet_v1 ResNet_v1_50<import_from_stmt>recognition.data.generate_data GenerateData<import_from_stmt>recognition.models.models MyModel<import_from_stmt>recognition.predict get_embeddings<line_sep>tf.enable_eager_execution()<class_stmt>Valid_Data<block_start><def_stmt>__init__ self model data<block_start>self.model=model<line_sep>self.data=data<block_end>@staticmethod<def_stmt>_cal_cos_sim emb1 emb2<block_start><return>tf.reduce_sum(emb1<times>emb2 axis=-1)<block_end><def_stmt>_get_sim_label self<block_start>sims=<none><line_sep>labels=<none><for_stmt>image1,image2,label self.data<block_start>emb1=get_embeddings(self.model image1)<line_sep>emb2=get_embeddings(self.model image2)<line_sep>sim=self._cal_cos_sim(emb1 emb2)<if_stmt>sims<is><none><block_start>sims=sim<block_end><else_stmt><block_start>sims=tf.concat([sims sim] axis=0)<block_end><if_stmt>labels<is><none><block_start>labels=label<block_end><else_stmt><block_start>labels=tf.concat([labels label] axis=0)<block_end><block_end><return>sims labels<block_end>@staticmethod<def_stmt>_cal_metric sim label thresh<block_start>tp=tn=fp=fn=0<line_sep>predict=tf.greater_equal(sim thresh)<for_stmt>i range(len(predict))<block_start><if_stmt>predict[i]<and>label[i]<block_start>tp<augadd>1<block_end><elif_stmt>predict[i]<and><not>label[i]<block_start>fp<augadd>1<block_end><elif_stmt><not>predict[i]<and>label[i]<block_start>fn<augadd>1<block_end><else_stmt><block_start>tn<augadd>1<block_end><block_end>acc=(tp+tn)/len(predict)<line_sep>p=0<if>tp+fp<eq>0<else>tp/(tp+fp)<line_sep>r=0<if>tp+fn<eq>0<else>tp/(tp+fn)<line_sep>fpr=0<if>fp+tn<eq>0<else>fp/(fp+tn)<line_sep><return>acc p r fpr<block_end><def_stmt>_cal_metric_fpr self sim label below_fpr=0.001<block_start>acc=p=r=thresh=0<for_stmt>t np.linspace(-1 1 100)<block_start>thresh=t<line_sep>acc,p,r,fpr=self._cal_metric(sim label thresh)<if_stmt>fpr<le>below_fpr<block_start><break><block_end><block_end><return>acc p r thresh<block_end><def_stmt>get_metric self thresh=0.2 below_fpr=0.001<block_start>sim,label=self._get_sim_label()<line_sep>acc,p,r,fpr=self._cal_metric(sim label thresh)<line_sep>acc_fpr,p_fpr,r_fpr,thresh_fpr=self._cal_metric_fpr(sim label below_fpr)<line_sep><return>acc p r fpr acc_fpr p_fpr r_fpr thresh_fpr<block_end><def_stmt>draw_curve self<block_start>P=[]<line_sep>R=[]<line_sep>TPR=[]<line_sep>FPR=[]<line_sep>sim,label=self._get_sim_label()<for_stmt>thresh np.linspace(-1 1 100)<block_start>acc,p,r,fpr=self._cal_metric(sim label thresh)<line_sep>P.append(p)<line_sep>R.append(r)<line_sep>TPR.append(r)<line_sep>FPR.append(fpr)<block_end>plt.axis([0 1 0 1])<line_sep>plt.xlabel("R")<line_sep>plt.ylabel("P")<line_sep>plt.plot(R P color="r" linestyle="--" marker="*" linewidth=1.0)<line_sep>plt.show()<line_sep>plt.axis([0 1 0 1])<line_sep>plt.xlabel("FRP")<line_sep>plt.ylabel("TPR")<line_sep>plt.plot(FPR TPR color="r" linestyle="--" marker="*" linewidth=1.0)<line_sep>plt.show()<block_end><block_end><def_stmt>parse_args argv<block_start>parser=argparse.ArgumentParser(description='valid model')<line_sep>parser.add_argument('--config_path' type=str help='path to config path' default='configs/config.yaml')<line_sep>args=parser.parse_args(argv)<line_sep><return>args<block_end><def_stmt>main 
<block_start>args=parse_args(sys.argv[1:])<line_sep># logger.info(args) <with_stmt>open(args.config_path)<as>cfg<block_start>config=yaml.load(cfg Loader=yaml.FullLoader)<block_end>gd=GenerateData(config)<line_sep>valid_data=gd.get_val_data(config['valid_num'])<line_sep>model=MyModel(ResNet_v1_50 embedding_size=config['embedding_size'])<import_stmt>os<line_sep>ckpt_dir=os.path.expanduser(config['ckpt_dir'])<line_sep>ckpt=tf.train.Checkpoint(backbone=model.backbone)<line_sep>ckpt.restore(tf.train.latest_checkpoint(ckpt_dir)).expect_partial()<line_sep>print("Restored from {}".format(tf.train.latest_checkpoint(ckpt_dir)))<line_sep>vd=Valid_Data(model valid_data)<line_sep>acc,p,r,fpr,acc_fpr,p_fpr,r_fpr,thresh_fpr=vd.get_metric(0.2 0.001)<line_sep>print(acc p r fpr acc_fpr p_fpr r_fpr thresh_fpr)<line_sep>vd.draw_curve()<block_end><if_stmt>__name__<eq>'__main__'# logger.info("hello, insightface/recognition") <block_start>main()<block_end>
<import_from_stmt>fuzzconfig FuzzConfig<import_stmt>nonrouting<import_stmt>pytrellis<import_stmt>fuzzloops<import_stmt>interconnect<line_sep>cfg=FuzzConfig(job="GSR" family="ECP5" device="LFE5U-45F" ncl="empty.ncl" tiles=["MIB_R71C4:EFB0_PICB0" "MIB_R34C41:VIQ_BUF"])<def_stmt>get_substs gsrmode="ACTIVE_LOW" syncmode="NONE"<block_start><if_stmt>gsrmode<eq>"NONE"<block_start>comment="//"<block_end><else_stmt><block_start>comment=""<block_end><if_stmt>syncmode<eq>"NONE"<block_start>syncmode="#OFF"<block_end><return>dict(comment=comment gsrmode=gsrmode syncmode=syncmode)<block_end><def_stmt>main <block_start>pytrellis.load_database("../../../database")<line_sep>cfg.setup()<line_sep>empty_bitfile=cfg.build_design(cfg.ncl {})<line_sep>cfg.ncl="gsr.ncl"<line_sep>nonrouting.fuzz_enum_setting(cfg "GSR.GSRMODE" ["NONE" "ACTIVE_LOW" "ACTIVE_HIGH"] <lambda>x:get_substs(gsrmode=x) empty_bitfile <false>)<line_sep>nonrouting.fuzz_enum_setting(cfg "GSR.SYNCMODE" ["NONE" "ASYNC" "SYNC"] <lambda>x:get_substs(syncmode=x) empty_bitfile <false>)<for_stmt>rcfg,rc,prefix [(FuzzConfig(job="GSR" family="ECP5" device="LFE5U-25F" ncl="gsr_routing_25k.ncl" tiles=["MIB_R50C4:EFB0_PICB0"]) "R49C4" "25K_") (FuzzConfig(job="GSR" family="ECP5" device="LFE5U-45F" ncl="gsr_routing.ncl" tiles=["MIB_R71C4:EFB0_PICB0"]) "R70C4" "45K_") (FuzzConfig(job="GSR" family="ECP5" device="LFE5U-85F" ncl="gsr_routing_85k.ncl" tiles=["MIB_R95C4:EFB0_PICB0"]) "R94C4" "85K_") ]<block_start>rcfg.setup()<line_sep>interconnect.fuzz_interconnect_with_netnames(rcfg ["{}_JGSR_GSR".format(rc) "{}_JCLK_GSR".format(rc)] bidir=<true> nonlocal_prefix=prefix)<block_end><block_end><if_stmt>__name__<eq>"__main__"<block_start>main()<block_end>
# Copyright 2020 Huawei Technologies Co., Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """quantized_avg_pool test case"""<import_stmt>os<import_stmt>pytest<import_from_stmt>tests.common.base TestBase<import_from_stmt>tests.common.test_run.ascend.quantized_avg_pool_run quantized_avg_pool_run<class_stmt>TestQuantizedAvgPool(TestBase)<block_start>"""test case class for quantized_avg_pool"""<def_stmt>setup self<block_start>case_name="test_akg_quantized_avg_pool_001"<line_sep>case_path=os.getcwd()<line_sep># params init self.params_init(case_name case_path)<line_sep>"""setup case parameters for test"""<line_sep>self.caseresult=<true><line_sep>self._log.info("=================%s Setup case=================" self.casename)<line_sep>self.testarg_mini=[# testflag, opfunc, (shape, dtype1, shape_list, dtype2, # ksize, strides, padding, data_format, # quant_algo, scale_mode, scale_sqrt), dimArgs ("qavgpool_mini_01" quantized_avg_pool_run ((1 1 16 16 16) "float16" ((1 ) (1 )) "float16" (1 1 4 4 1) (1 1 3 3 1) "VALID" "NC1HWC0" [1 0] 2 0)) ("qavgpool_mini_02" quantized_avg_pool_run ((1 1 16 16 16) "float16" ((1 ) (1 )) "float16" (1 1 4 4) (1 1 3 3) "VALID" "NCHW" [1 0] 2 0)) ("qavgpool_mini_03" quantized_avg_pool_run ((1 1 16 16 16) "float16" ((1 ) (1 )) "float16" (1 4 4 1) (1 3 3 1) "VALID" "NHWC" [1 0] 2 0)) ("qavgpool_mini_04" quantized_avg_pool_run ((1 1 16 16 16) "float16" <none> <none> (1 1 4 4 1) (1 1 3 3 1) "VALID" "NC1HWC0" <none> <none> <none>)) ("qavgpool_mini_05" quantized_avg_pool_run ((1 1 16 16 16) "float16" ((1 ) (1 )) "float16" (1 1 4 4 1) (1 1 3 3 1) "VALID" "NC1HWC0" [0 0] 2 0)) ("qavgpool_mini_06" quantized_avg_pool_run ((1 1 16 16 16) "float16" ((1 ) (1 )) "float16" (1 1 4 4 1) (1 1 3 3 1) "SAME" "NC1HWC0" [1 0] 2 0)) ("qavgpool_mini_07" quantized_avg_pool_run ((1 1 16 16 16) "float16" ((1 ) (1 )) "float16" (1 1 4 4) (1 1 3 3) "SAME" "NCHW" [1 0] 2 0)) ("qavgpool_mini_08" quantized_avg_pool_run ((1 1 16 16 16) "float16" ((1 ) (1 )) "float16" (1 4 4 1) (1 3 3 1) "SAME" "NHWC" [1 0] 2 0)) ("qavgpool_mini_09" quantized_avg_pool_run ((1 1 16 16 16) "float16" <none> <none> (1 1 4 4 1) (1 1 3 3 1) "SAME" "NC1HWC0" <none> <none> <none>)) ("qavgpool_mini_10" quantized_avg_pool_run ((1 1 16 16 16) "float16" ((1 ) (1 )) "float16" (1 1 4 4 1) (1 1 3 3 1) "SAME" "NC1HWC0" [0 0] 2 0)) ]<line_sep>self.testarg_cloud=[("qavgpool_mini_05" quantized_avg_pool_run ((1 1 64 64 16) "float16" <none> <none> (1 1 4 4 1) (1 1 3 3 1) "VALID" "NC1HWC0" <none> <none> <none>)) ("qavgpool_mini_05" quantized_avg_pool_run ((1 1 64 64 16) "float16" ((1 ) (1 )) "float16" (1 1 4 4 1) (1 1 3 3 1) "VALID" "NC1HWC0" [0 0] 2 0)) ("qavgpool_cld_big" quantized_avg_pool_run ((32 4 112 112 16) "float16" ((1 ) (1 )) "float16" (1 1 3 3 1) (1 1 2 2 1) "SAME" "NC1HWC0" [0 0] 2 0)) ("qavgpool_cld_big" quantized_avg_pool_run ((32 4 112 112 16) "float16" ((1 ) (1 )) "float16" (1 1 3 3 1) (1 1 2 2 1) "SAME" "NC1HWC0" [1 0] 2 0)) ]<block_end>@[email protected][email protected][email protected]_onecard<def_stmt>test_mini_run 
self<block_start>"""run case for mini"""<line_sep>self.common_run(self.testarg_mini[0:3])<block_end><def_stmt>test_cloud_run self<block_start>"""run case for cloud"""<line_sep>self.common_run(self.testarg_cloud)<block_end><def_stmt>teardown self<block_start>"""clean environment"""<line_sep>self._log.info("=============%s Teardown===========" self.casename)<block_end><block_end>
<import_stmt>os<import_stmt>time<import_stmt>torch<import_stmt>glob<import_stmt>numpy<as>np<import_from_stmt>torch.utils.data Dataset<import_from_stmt>torch.nn.utils.rnn pad_sequence<import_from_stmt>dataclasses dataclass<import_from_stmt>transformers.data.data_collator DataCollatorMixin<import_from_stmt>fengshen.data.MMapIndexDataset MMapIndexDataset<def_stmt>safe_check a type='uint8'<block_start>d={'uint8':[0 255] 'uint16':[0 65535]}<line_sep>range=d[type]<for_stmt>l a<block_start><for_stmt>e l<block_start><assert_stmt>e<ge>range[0]<and>e<le>range[1]<block_end><block_end><block_end>@dataclass<class_stmt>CBartDataCollator(DataCollatorMixin)<block_start>tokenizer:<none><line_sep>return_tensors:str="pt"<def_stmt>__init__ self args<block_start>self.masked_lm=args.masked_lm<line_sep>self.encoder_loss_type=args.encoder_loss_type<block_end>@staticmethod<def_stmt>create_decoder_inputs encoder_inputs encoder_labels mask_token_id<block_start>""" :param encoder_inputs: list, each element is an int :param encoder_labels: list, each element is an int :return: """<line_sep>decoder_inputs=[]<for_stmt>i,l zip(encoder_inputs encoder_labels)<block_start><if_stmt>l<eq>0<block_start>decoder_inputs.append(i)<block_end><elif_stmt>l<eq>1<block_start>decoder_inputs.append(mask_token_id)<block_end><else_stmt><block_start>decoder_inputs<augadd>[mask_token_id]<times>(l-1)<line_sep>decoder_inputs.append(i)<block_end><block_end><return>torch.tensor(decoder_inputs dtype=torch.long)<block_end><def_stmt>torch_call self features<block_start>encoder_inputs=[s[0]<for>s features]<line_sep>encoder_labels=[s[1]<for>s features]<line_sep>decoder_labels=[s[2]<for>s features]<line_sep># Mask to avoid performing attention on padding token indices in encoder_inputs. _mask=pad_sequence(encoder_inputs batch_first=<true> padding_value=-100)<line_sep>attention_mask=torch.zeros(_mask.shape dtype=torch.float32)<line_sep>attention_mask=attention_mask.masked_fill(_mask<ne>-100 1)<line_sep>encoder_inputs=pad_sequence(encoder_inputs batch_first=<true> padding_value=self.tokenizer.pad_token_id)<line_sep>encoder_labels=pad_sequence(encoder_labels batch_first=<true> padding_value=-100)<if_stmt>self.encoder_loss_type<eq>1# labels for mse loss <block_start>encoder_labels=encoder_labels.float()<block_end>decoder_labels=pad_sequence(decoder_labels batch_first=<true> padding_value=-100)<line_sep># avoid computing loss on the first token, i.e. bos_token decoder_labels[: 0]=-100<line_sep># this method is for non-autoregressive decoding. 
decoder_inputs=[self.create_decoder_inputs(s[0] s[1] self.tokenizer.mask_token_id)<for>s features]<line_sep># replace the eos_token_id with pad_token_id <for_stmt>i,_ enumerate(decoder_inputs)<block_start>decoder_inputs[i][-1]=self.tokenizer.pad_token_id<block_end>decoder_inputs=pad_sequence(decoder_inputs batch_first=<true> padding_value=self.tokenizer.pad_token_id)<line_sep># create decoder_inputs by shifting the decoder_labels right, _tmp=decoder_inputs.clone()<line_sep>decoder_inputs[: 1:]=_tmp[: :-1]<line_sep>decoder_inputs[: 0]=self.tokenizer.eos_token_id<line_sep># construct labels for masked lm loss masked_lm_labels=decoder_labels.clone()<line_sep>masked_lm_labels[_tmp<ne>self.tokenizer.mask_token_id]=-100<if_stmt>self.masked_lm<block_start>decoder_labels=masked_lm_labels<block_end><return>{"input_ids":encoder_inputs "encoder_labels":encoder_labels "decoder_input_ids":decoder_inputs "labels":decoder_labels "attention_mask":attention_mask }<block_end><block_end><class_stmt>BARTDataset(Dataset)<block_start><def_stmt>__init__ self dataset mode tokenizer=<none> num_labels=-1 insert_mode=-1 max_sentence_length=40 encoder_loss_type=0 statistics=<true><block_start>self.encoder_loss_type=encoder_loss_type<assert_stmt>mode<in>["train" "test" 'dev']<line_sep>self.mode=mode<if_stmt>self.mode<eq>'test'<or>self.mode<eq>'dev'<block_start>self.is_train=<false><block_end><else_stmt><block_start>self.is_train=<true><block_end>self.tokenizer=tokenizer<line_sep>self.max_sentence_length=max_sentence_length+2# the bos and eos tokens self.input_dataset=[]<line_sep>self.encoder_labels_dataset=[]<line_sep>self.decoder_labels_dataset=[]<line_sep>data_dict_path_format='/cognitive_comp/gaoxinyu/data/{}/{}_synthetic_max_insert_label{}_insert_mode{}_*.pt'.format(dataset mode num_labels-2 insert_mode)<line_sep>data_dict_paths=glob.glob(data_dict_path_format)<for_stmt>data_dict_path data_dict_paths<block_start><if_stmt>os.path.exists(data_dict_path)<block_start>print(f'''Loading data from {data_dict_path}''' flush=<true>)<line_sep>filename=''.join(data_dict_path.rsplit('.pt' 1))<line_sep>self.input_dataset<augadd>[MMapIndexDataset(filename+"_incorrect_input_ids_list")]<line_sep>self.encoder_labels_dataset<augadd>[MMapIndexDataset(filename+"_label_ids_list")]<line_sep>self.decoder_labels_dataset<augadd>[MMapIndexDataset(filename+"_target_ids_list")]<block_end><else_stmt><block_start>print(f'Please create the synthetic datafile {data_dict_path} with create_synthetic_data.py.')<block_end><block_end>self.len=0<for_stmt>ds self.input_dataset<block_start>self.len<augadd>len(ds)<block_end># TODO make sure the encoder loss weighting logic applys to every rank ! <if_stmt>statistics# print('Statistics for sentence length:') # lengths = [len(e) for e in self.decoder_labels] # (unique, counts) = np.unique(lengths, return_counts=True) # for k, v in zip(unique,counts): # print(f'sentence length{k}: {v}') # print('Statistics for sentence labels:') <block_start>labels=[]<line_sep># too slow!! 
# for ds in self.encoder_labels_dataset: # for i in range(0, len(ds)): # labels.extend(ds.__getitem__(i)) # use only one dataset to calc <for_stmt>i self.encoder_labels_dataset[0]<block_start>labels.extend(i)<block_end>print(len(labels))<line_sep>(unique counts)=np.unique(labels return_counts=<true>)<line_sep>all_label_counts=0<for_stmt>k,v zip(unique counts)<block_start>print(f'Label {k}: {v}')<line_sep>all_label_counts<augadd>v<block_end># ZZ: calculate weights for differnet labels, labels with higher numbers get lower weights proportionally! revert_label_weights=1/np.array([v/all_label_counts<for>k,v zip(unique counts)])<line_sep>self.label_weights=revert_label_weights/np.sum(revert_label_weights)<block_end><else_stmt># ZZ: if statistics is not triggered, manually assign weights to different class <block_start><if_stmt>num_labels<eq>7# the cross entropy loss weighst does not need to sum to 1 <block_start>self.label_weights=[0.01 0.05 0.1 0.1 0.5 0.5 0.5]<block_end><else_stmt><block_start>self.label_weights=[1/num_labels]<times>num_labels<block_end><block_end>print(f"label weights for encoder will be {self.label_weights}")<block_end><def_stmt>__getitem__ self idx<block_start><for_stmt>i range(0 len(self.input_dataset))<block_start><if_stmt>idx<ge>len(self.input_dataset[i])<block_start>idx<augsub>len(self.input_dataset[i])<block_end><else_stmt><block_start><break><block_end><block_end><return>torch.tensor(self.input_dataset[i].__getitem__(idx) dtype=torch.long) torch.tensor(self.encoder_labels_dataset[i].__getitem__(idx) dtype=torch.long) torch.tensor(self.decoder_labels_dataset[i].__getitem__(idx) dtype=torch.long)<block_end><def_stmt>__len__ self<block_start><return>self.len<block_end><def_stmt>create_decoder_inputs self encoder_inputs encoder_labels mask_token_id<block_start>""" :param encoder_inputs: list, each element is an int :param encoder_labels: list, each element is an int :return: """<line_sep>decoder_inputs=[]<for_stmt>i,l zip(encoder_inputs encoder_labels)<block_start><if_stmt>l<eq>0<block_start>decoder_inputs.append(i)<block_end><elif_stmt>l<eq>1<block_start>decoder_inputs.append(mask_token_id)<block_end><else_stmt><block_start>decoder_inputs<augadd>[mask_token_id]<times>(l-1)<line_sep>decoder_inputs.append(i)<block_end><block_end><return>torch.tensor(decoder_inputs dtype=torch.long)<block_end><def_stmt>create_mini_batch self samples<block_start>encoder_inputs=[s[0]<for>s samples]<line_sep>encoder_labels=[s[1]<for>s samples]<line_sep>decoder_labels=[s[2]<for>s samples]<line_sep># Mask to avoid performing attention on padding token indices in encoder_inputs. _mask=pad_sequence(encoder_inputs batch_first=<true> padding_value=-100)<line_sep>attention_mask=torch.zeros(_mask.shape dtype=torch.float32)<line_sep>attention_mask=attention_mask.masked_fill(_mask<ne>-100 1)<line_sep>encoder_inputs=pad_sequence(encoder_inputs batch_first=<true> padding_value=self.tokenizer.pad_token_id)<line_sep>encoder_labels=pad_sequence(encoder_labels batch_first=<true> padding_value=-100)<if_stmt>self.encoder_loss_type<eq>1# labels for mse loss <block_start>encoder_labels=encoder_labels.float()<block_end>decoder_labels=pad_sequence(decoder_labels batch_first=<true> padding_value=-100)<line_sep># avoid computing loss on the first token, i.e. bos_token decoder_labels[: 0]=-100<line_sep># this method is for non-autoregressive decoding. 
decoder_inputs=[self.create_decoder_inputs(s[0] s[1] self.tokenizer.mask_token_id)<for>s samples]<line_sep># replace the eos_token_id with pad_token_id <for_stmt>i,_ enumerate(decoder_inputs)<block_start>decoder_inputs[i][-1]=self.tokenizer.pad_token_id<block_end>decoder_inputs=pad_sequence(decoder_inputs batch_first=<true> padding_value=self.tokenizer.pad_token_id)<line_sep># create decoder_inputs by shifting the decoder_labels right, _tmp=decoder_inputs.clone()<line_sep>decoder_inputs[: 1:]=_tmp[: :-1]<line_sep>decoder_inputs[: 0]=self.tokenizer.eos_token_id<line_sep># construct labels for masked lm loss masked_lm_labels=decoder_labels.clone()<line_sep>masked_lm_labels[_tmp<ne>self.tokenizer.mask_token_id]=-100<line_sep><return>{"input_ids":encoder_inputs "encoder_labels":encoder_labels "decoder_input_ids":decoder_inputs "labels":decoder_labels "attention_mask":attention_mask }<block_end><block_end><def_stmt>get_train_dev_dataset args tokenizer<block_start>trainset=BARTDataset(args.dataset "train" tokenizer=tokenizer num_labels=args.num_labels insert_mode=args.insert_mode encoder_loss_type=args.encoder_loss_type)<line_sep>testset=BARTDataset(args.dataset mode='dev' tokenizer=tokenizer num_labels=args.num_labels insert_mode=args.insert_mode encoder_loss_type=args.encoder_loss_type)<line_sep><return>trainset testset<block_end>
<import_stmt>torch<import_stmt>torchvision<import_from_stmt>..models.model Discriminator Generator<import_from_stmt>.backends *<if_stmt>TENSORBOARD_LOGGING<eq>1<block_start><import_from_stmt>tensorboardX SummaryWriter<block_end><if_stmt>VISDOM_LOGGING<eq>1<block_start><import_stmt>visdom<block_end>__all__=["Visualize" "LossVisualize" "MetricVisualize" "GradientVisualize" "ImageVisualize" ]<class_stmt>Visualize(object)<block_start>r"""Base class for all Visualizations. Args: visualize_list (list, optional): List of the functions needed for visualization. visdom_port (int, optional): Port to log using ``visdom``. The visdom server needs to be manually started at this port else an error will be thrown and the code will crash. This is ignored if ``VISDOM_LOGGING`` is ``0``. log_dir (str, optional): Directory where TensorboardX should store the logs. This is ignored if ``TENSORBOARD_LOGGING`` is ``0``. writer (tensorboardX.SummaryWriter, optonal): Send a `SummaryWriter` if you don't want to start a new SummaryWriter. """<def_stmt>__init__ self visualize_list visdom_port=8097 log_dir=<none> writer=<none><block_start>self.logs={}<for_stmt>item visualize_list<block_start>name=type(item).__name__<line_sep>self.logs[name]=[]<block_end>self.step=1<if_stmt>TENSORBOARD_LOGGING<eq>1<block_start>self._build_tensorboard(log_dir writer)<block_end><if_stmt>VISDOM_LOGGING<eq>1<block_start>self._build_visdom(visdom_port)<block_end><block_end><def_stmt>_build_tensorboard self log_dir writer<block_start>r"""Starts the tensorboard logging utilities. Args: log_dir (str, optional): Directory where TensorboardX should store the logs. writer (tensorboardX.SummaryWriter, optonal): Send a `SummaryWriter` if you don't want to start a new SummaryWriter. """<line_sep>self.writer=SummaryWriter(log_dir)<if>writer<is><none><else>writer<block_end><def_stmt>_build_visdom self port<block_start>r"""Starts the visdom logging utilities. Args: port (int, optional): Port to log using ``visdom``. A deafult server is started at port ``8097``. So manually a new server has to be started if the post is changed. """<line_sep>self.vis=visdom.Visdom(port=port)<block_end><def_stmt>step_update self<block_start>r"""Helper function which updates the step at the end of one print iteration. """<line_sep>self.step<augadd>1<block_end><def_stmt>log_tensorboard self<block_start>r"""Tensorboard logging function. Needs to be defined in the subclass :raises NotImplementedError: """<line_sep><raise>NotImplementedError<block_end><def_stmt>log_console self<block_start>r"""Console logging function. Needs to be defined in the subclass :raises NotImplementedError: """<line_sep><raise>NotImplementedError<block_end><def_stmt>log_visdom self<block_start>r"""Visdom logging function. Needs to be defined in the subclass :raises NotImplementedError: """<line_sep><raise>NotImplementedError<block_end><def_stmt>__call__ self *args lock_console=<false> lock_tensorboard=<false> lock_visdom=<false> **kwargs<block_start><if_stmt><not>lock_console<and>CONSOLE_LOGGING<eq>1<block_start>self.log_console(*args **kwargs)<block_end><if_stmt><not>lock_tensorboard<and>TENSORBOARD_LOGGING<eq>1<block_start>self.log_tensorboard(*args **kwargs)<block_end><if_stmt><not>lock_visdom<and>VISDOM_LOGGING<eq>1<block_start>self.log_visdom(*args **kwargs)<block_end>self.step_update()<block_end><block_end><class_stmt>LossVisualize(Visualize)<block_start>r"""This class provides the Visualizations for Generator and Discriminator Losses. 
Args: visualize_list (list, optional): List of the functions needed for visualization. visdom_port (int, optional): Port to log using ``visdom``. The visdom server needs to be manually started at this port else an error will be thrown and the code will crash. This is ignored if ``VISDOM_LOGGING`` is ``0``. log_dir (str, optional): Directory where TensorboardX should store the logs. This is ignored if ``TENSORBOARD_LOGGING`` is ``0``. writer (tensorboardX.SummaryWriter, optonal): Send a `SummaryWriter` if you don't want to start a new SummaryWriter. """<def_stmt>log_tensorboard self running_losses<block_start>r"""Tensorboard logging function. This function logs the following: - ``Running Discriminator Loss`` - ``Running Generator Loss`` - ``Running Losses`` - Loss Values of the individual Losses. Args: running_losses (dict): A dict with 2 items namely, ``Running Discriminator Loss``, and ``Running Generator Loss``. """<line_sep>self.writer.add_scalar("Running Discriminator Loss" running_losses["Running Discriminator Loss"] self.step )<line_sep>self.writer.add_scalar("Running Generator Loss" running_losses["Running Generator Loss"] self.step )<line_sep>self.writer.add_scalars("Running Losses" running_losses self.step)<for_stmt>name,value self.logs.items()<block_start>val=value[-1]<if_stmt>type(val)<is>tuple<block_start>self.writer.add_scalar("Losses/{}-Generator".format(name) val[0] self.step)<line_sep>self.writer.add_scalar("Losses/{}-Discriminator".format(name) val[1] self.step)<block_end><else_stmt><block_start>self.writer.add_scalar("Losses/{}".format(name) val self.step)<block_end><block_end><block_end><def_stmt>log_console self running_losses<block_start>r"""Console logging function. This function logs the mean ``generator`` and ``discriminator`` losses. Args: running_losses (dict): A dict with 2 items namely, ``Running Discriminator Loss``, and ``Running Generator Loss``. """<for_stmt>name,val running_losses.items()<block_start>print("Mean {} : {}".format(name val))<block_end><block_end><def_stmt>log_visdom self running_losses<block_start>r"""Visdom logging function. This function logs the following: - ``Running Discriminator Loss`` - ``Running Generator Loss`` - ``Running Losses`` - Loss Values of the individual Losses. Args: running_losses (dict): A dict with 2 items namely, ``Running Discriminator Loss``, and ``Running Generator Loss``. 
"""<line_sep>self.vis.line([running_losses["Running Discriminator Loss"]] [self.step] win="Running Discriminator Loss" update="append" opts=dict(title="Running Discriminator Loss" xlabel="Time Step" ylabel="Running Loss" ) )<line_sep>self.vis.line([running_losses["Running Generator Loss"]] [self.step] win="Running Generator Loss" update="append" opts=dict(title="Running Generator Loss" xlabel="Time Step" ylabel="Running Loss" ) )<line_sep>self.vis.line([[running_losses["Running Discriminator Loss"] running_losses["Running Generator Loss"] ]] [self.step] win="Running Losses" update="append" opts=dict(title="Running Losses" xlabel="Time Step" ylabel="Running Loss" legend=["Discriminator" "Generator"] ) )<for_stmt>name,value self.logs.items()<block_start>val=value[-1]<if_stmt>type(val)<is>tuple<block_start>name1="{}-Generator".format(name)<line_sep>name2="{}-Discriminator".format(name)<line_sep>self.vis.line([val[0]] [self.step] win=name1 update="append" opts=dict(title=name1 xlabel="Time Step" ylabel="Loss Value") )<line_sep>self.vis.line([val[1]] [self.step] win=name2 update="append" opts=dict(title=name2 xlabel="Time Step" ylabel="Loss Value") )<block_end><else_stmt><block_start>self.vis.line([val] [self.step] win=name update="append" opts=dict(title=name xlabel="Time Step" ylabel="Loss Value") )<block_end><block_end><block_end><def_stmt>__call__ self trainer **kwargs<block_start>running_generator_loss=(trainer.loss_information["generator_losses"]/trainer.loss_information["generator_iters"])<line_sep>running_discriminator_loss=(trainer.loss_information["discriminator_losses"]/trainer.loss_information["discriminator_iters"])<line_sep>running_losses={"Running Discriminator Loss":running_discriminator_loss "Running Generator Loss":running_generator_loss }<line_sep>super(LossVisualize self).__call__(running_losses **kwargs)<block_end><block_end><class_stmt>MetricVisualize(Visualize)<block_start>r"""This class provides the Visualizations for Metrics. Args: visualize_list (list, optional): List of the functions needed for visualization. visdom_port (int, optional): Port to log using ``visdom``. The visdom server needs to be manually started at this port else an error will be thrown and the code will crash. This is ignored if ``VISDOM_LOGGING`` is ``0``. log_dir (str, optional): Directory where TensorboardX should store the logs. This is ignored if ``TENSORBOARD_LOGGING`` is ``0``. writer (tensorboardX.SummaryWriter, optonal): Send a `SummaryWriter` if you don't want to start a new SummaryWriter. """<def_stmt>log_tensorboard self<block_start>r"""Tensorboard logging function. This function logs the values of the individual metrics."""<for_stmt>name,value self.logs.items()<block_start>self.writer.add_scalar("Metrics/{}".format(name) value[-1] self.step)<block_end><block_end><def_stmt>log_console self<block_start>r"""Console logging function. This function logs the mean metrics."""<for_stmt>name,val self.logs.items()<block_start>print("{} : {}".format(name val[-1]))<block_end><block_end><def_stmt>log_visdom self<block_start>r"""Visdom logging function. This function logs the values of the individual metrics."""<for_stmt>name,value self.logs.items()<block_start>self.vis.line([value[-1]] [self.step] win=name update="append" opts=dict(title=name xlabel="Time Step" ylabel="Metric Value") )<block_end><block_end><block_end><class_stmt>GradientVisualize(Visualize)<block_start>r"""This class provides the Visualizations for the Gradients. 
Args: visualize_list (list, optional): List of the functions needed for visualization. visdom_port (int, optional): Port to log using ``visdom``. The visdom server needs to be manually started at this port else an error will be thrown and the code will crash. This is ignored if ``VISDOM_LOGGING`` is ``0``. log_dir (str, optional): Directory where TensorboardX should store the logs. This is ignored if ``TENSORBOARD_LOGGING`` is ``0``. writer (tensorboardX.SummaryWriter, optonal): Send a `SummaryWriter` if you don't want to start a new SummaryWriter. """<def_stmt>__init__ self visualize_list visdom_port=8097 log_dir=<none> writer=<none><block_start><if_stmt>visualize_list<is><none><or>len(visualize_list)<eq>0<block_start><raise>Exception("Gradient Visualizer requires list of model names")<block_end>self.logs={}<for_stmt>item visualize_list<block_start>self.logs[item]=[0.0]<block_end>self.step=1<if_stmt>TENSORBOARD_LOGGING<eq>1<block_start>self._build_tensorboard(log_dir writer)<block_end><if_stmt>VISDOM_LOGGING<eq>1<block_start>self._build_visdom(visdom_port)<block_end><block_end><def_stmt>log_tensorboard self name<block_start>r"""Tensorboard logging function. This function logs the values of the individual gradients. Args: name (str): Name of the model whose gradients are to be logged. """<line_sep>self.writer.add_scalar("Gradients/{}".format(name) self.logs[name][len(self.logs[name])-1] self.step )<block_end><def_stmt>log_console self name<block_start>r"""Console logging function. This function logs the mean gradients. Args: name (str): Name of the model whose gradients are to be logged. """<line_sep>print("{} Gradients : {}".format(name self.logs[name][len(self.logs[name])-1]))<block_end><def_stmt>log_visdom self name<block_start>r"""Visdom logging function. This function logs the values of the individual gradients. Args: name (str): Name of the model whose gradients are to be logged. """<line_sep>self.vis.line([self.logs[name][len(self.logs[name])-1]] [self.step] win=name update="append" opts=dict(title=name xlabel="Time Step" ylabel="Gradient") )<block_end><def_stmt>update_grads self name model eps=1e-5<block_start>r"""Updates the gradient logs. Args: name (str): Name of the model. model (torch.nn.Module): Either a ``torchgan.models.Generator`` or a ``torchgan.models.Discriminator`` or their subclass. eps (float, optional): Tolerance value. """<line_sep>gradsum=0.0<for_stmt>p model.parameters()<block_start><if_stmt>p.grad<is><not><none><block_start>gradsum<augadd>torch.sum(p.grad<power>2).clone().item()<block_end><block_end><if_stmt>gradsum<g>eps<block_start>self.logs[name][len(self.logs[name])-1]<augadd>gradsum<line_sep>model.zero_grad()<block_end><block_end><def_stmt>report_end_epoch self<block_start>r"""Prints to the console at the end of the epoch."""<if_stmt>CONSOLE_LOGGING<eq>1<block_start><for_stmt>key,val self.logs.items()<block_start>print("{} Mean Gradients : {}".format(key sum(val)/len(val)))<block_end><block_end><block_end><def_stmt>__call__ self trainer **kwargs<block_start><for_stmt>name trainer.model_names<block_start>super(GradientVisualize self).__call__(name **kwargs)<line_sep>self.logs[name].append(0.0)<block_end><block_end><block_end><class_stmt>ImageVisualize(Visualize)<block_start>r"""This class provides the Logging for the Images. Args: trainer (torchgan.trainer.Trainer): The base trainer used for training. visdom_port (int, optional): Port to log using ``visdom``. 
The visdom server needs to be manually started at this port else an error will be thrown and the code will crash. This is ignored if ``VISDOM_LOGGING`` is ``0``. log_dir (str, optional): Directory where TensorboardX should store the logs. This is ignored if ``TENSORBOARD_LOGGING`` is ``0``. writer (tensorboardX.SummaryWriter, optonal): Send a `SummaryWriter` if you don't want to start a new SummaryWriter. test_noise (torch.Tensor, optional): If provided then it will be used as the noise for image sampling. nrow (int, optional): Number of rows in which the image is to be stored. """<def_stmt>__init__ self trainer visdom_port=8097 log_dir=<none> writer=<none> test_noise=<none> nrow=8 <block_start>super(ImageVisualize self).__init__([] visdom_port=visdom_port log_dir=log_dir writer=writer)<line_sep>self.test_noise=[]<for_stmt>model trainer.model_names<block_start><if_stmt>isinstance(getattr(trainer model) Generator)<block_start>self.test_noise.append(getattr(trainer model).sampler(trainer.sample_size trainer.device)<if>test_noise<is><none><else>test_noise)<block_end><block_end>self.step=1<line_sep>self.nrow=nrow<block_end><def_stmt>log_tensorboard self trainer image model<block_start>r"""Logs a generated image in tensorboard at the end of an epoch. Args: trainer (torchgan.trainer.Trainer): The base trainer used for training. image (Image): The generated image. model (str): The name of the model which generated the ``image``. """<line_sep>self.writer.add_image("Generated Samples/{}".format(model) image self.step)<block_end><def_stmt>log_console self trainer image model<block_start>r"""Saves a generated image at the end of an epoch. The path where the image is being stored is controlled by the ``trainer``. Args: trainer (torchgan.trainer.Trainer): The base trainer used for training. image (Image): The generated image. model (str): The name of the model which generated the ``image``. """<line_sep>save_path="{}/epoch{}_{}.png".format(trainer.recon self.step model)<line_sep>print("Generating and Saving Images to {}".format(save_path))<line_sep>torchvision.utils.save_image(image save_path)<block_end><def_stmt>log_visdom self trainer image model<block_start>r"""Logs a generated image in visdom at the end of an epoch. Args: trainer (torchgan.trainer.Trainer): The base trainer used for training. image (Image): The generated image. model (str): The name of the model which generated the ``image``. """<line_sep>self.vis.image(image opts=dict(caption="Generated Samples/{}".format(model)))<block_end><def_stmt>__call__ self trainer **kwargs<block_start>pos=0<for_stmt>model trainer.model_names<block_start><if_stmt>isinstance(getattr(trainer model) Generator)<block_start>generator=getattr(trainer model)<with_stmt>torch.no_grad()<block_start>image=generator(*self.test_noise[pos])<line_sep>image=torchvision.utils.make_grid(image nrow=self.nrow normalize=<true> range=(-1 1))<line_sep>super(ImageVisualize self).__call__(trainer image model **kwargs)<block_end>self.step<augsub>1<line_sep>pos=pos+1<block_end><block_end>self.step<augadd>1<if>pos<g>0<else>0<block_end><block_end>
<import_stmt>re<line_sep># simple code to extract serial from xml # use regex and findall method <with_stmt>open('keys.txt')<as>fp<block_start>data=fp.read()<line_sep>p=re.compile(r'\w{5}-\w{5}-\w{5}-\w{5}-\w{5}')<line_sep>keys=p.findall(data)<block_end><with_stmt>open('newkey.csv' 'w')<as>fp<block_start>fp.writelines('win7pro-key\n')<for_stmt>key keys<block_start>fp.write(key+'\n')<block_end><block_end>
""" Copyright (c) 2021 Intel Corporation Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """<import_stmt>numpy<as>np<import_stmt>cv2<import_from_stmt>tqdm tqdm<import_from_stmt>place_recognition_demo.common crop_resize<import_from_stmt>openvino.inference_engine IECore# pylint: disable=no-name-in-module <class_stmt>IEModel# pylint: disable=too-few-public-methods <block_start>""" Class that allows working with Inference Engine model. """<def_stmt>__init__ self model_path device cpu_extension<block_start>ie=IECore()<if_stmt>cpu_extension<and>device<eq>'CPU'<block_start>ie.add_extension(cpu_extension 'CPU')<block_end>self.net=ie.read_network(model_path model_path.with_suffix('.bin'))<line_sep>self.input_name=next(iter(self.net.input_info))<line_sep>self.output_name=next(iter(self.net.outputs))<line_sep>self.input_size=self.net.input_info[self.input_name].input_data.shape<line_sep>self.exec_net=ie.load_network(network=self.net device_name=device)<block_end><def_stmt>predict self image<block_start>''' Takes input image and returns L2-normalized embedding vector. '''<assert_stmt>len(image.shape)<eq>4<line_sep>image=np.transpose(image (0 3 1 2))<line_sep>out=self.exec_net.infer(inputs={self.input_name:image})[self.output_name]<line_sep><return>out<block_end><block_end><class_stmt>PlaceRecognition<block_start>""" Class representing Place Recognition algorithm. """<def_stmt>__init__ self model_path device gallery_path cpu_extension gallery_size<block_start>self.impaths=(list(gallery_path.rglob("*.jpg")))[:gallery_size<or><none>]<line_sep>self.model=IEModel(model_path device cpu_extension)<line_sep>self.input_size=self.model.input_size[2:]<line_sep>self.embeddings=self.compute_gallery_embeddings()<block_end><def_stmt>compute_embedding self image<block_start>''' Takes input image and computes embedding vector. '''<line_sep>image=crop_resize(image self.input_size)<line_sep>embedding=self.model.predict(image)<line_sep><return>embedding<block_end><def_stmt>search_in_gallery self embedding<block_start>''' Takes input embedding vector and searches it in the gallery. '''<line_sep>distances=np.linalg.norm(embedding-self.embeddings axis=1 ord=2)<line_sep>sorted_indexes=np.argsort(distances)<line_sep><return>sorted_indexes distances<block_end><def_stmt>compute_gallery_embeddings self<block_start>''' Computes embedding vectors for the gallery images. '''<line_sep>images=[]<for_stmt>full_path tqdm(self.impaths desc='Reading gallery images.')<block_start>image=cv2.imread(str(full_path))<if_stmt>image<is><none><block_start>print("ERROR: cannot process image, full_path =" str(full_path))<line_sep><continue><block_end>image=crop_resize(image self.input_size)<line_sep>images.append(image)<block_end>embeddings=np.vstack([self.model.predict(image)<for>image tqdm(images desc='Computing embeddings of gallery images.')])<line_sep><return>embeddings<block_end><block_end>
# terrascript/resource/cappyzawa/concourse.py # Automatically generated by tools/makecode.py (24-Sep-2021 15:14:28 UTC) <import_stmt>terrascript<class_stmt>concourse_team(terrascript.Resource)<block_start><pass><block_end>__all__=["concourse_team" ]<line_sep>
<import_from_stmt>fontTools ttLib<line_sep>superclass=ttLib.getTableClass("hmtx")<class_stmt>table__v_m_t_x(superclass)<block_start>headerTag='vhea'<line_sep>advanceName='height'<line_sep>sideBearingName='tsb'<line_sep>numberOfMetricsName='numberOfVMetrics'<block_end>
<import_stmt>pandas<as>pd<import_stmt>tushare<as>ts<import_from_stmt>StockAnalysisSystem.core.config TS_TOKEN<import_from_stmt>StockAnalysisSystem.core.Utility.common *<import_from_stmt>StockAnalysisSystem.core.Utility.time_utility *<import_from_stmt>StockAnalysisSystem.core.Utility.CollectorUtility *<line_sep># ------------------------------------------------------- Fields ------------------------------------------------------- FIELDS={'Market.SecuritiesInfo':{'ts_code':'TS代码' 'symbol':'股票代码' 'name':'股票名称' 'area':'所在地域' 'industry':'所属行业' 'fullname':'股票全称' 'enname':'英文全称' 'market':'市场类型' # 主板/中小板/创业板/科创板 'exchange':'交易所代码' 'curr_type':'交易货币' 'list_status':'上市状态' # L上市;D退市;P暂停上市 'list_date':'上市日期' 'delist_date':'退市日期' 'is_hs':'是否沪深港通标的' # N否;H沪股通;S深股通 } 'Market.IndexInfo':{'ts_code':'TS代码' 'name':'简称' 'fullname':'指数全称' 'market':'市场' 'publisher':'发布方' 'index_type':'指数风格' 'category':'指数类别' 'base_date':'基期' 'base_point':'基点' 'list_date':'发布日期' 'weight_rule':'加权方式' 'desc':'描述' 'exp_date':'终止日期' } 'Market.TradeCalender':{'exchange':'交易所' # SSE上交所;SZSE深交所 'cal_date':'日历日期' 'is_open':'是否交易' # 0休市;1交易 'pretrade_date':'上一个交易日' } 'Market.NamingHistory':{'ts_code':'TS代码' 'name':'证券名称' 'start_date':'开始日期' 'end_date':'结束日期' 'ann_date':'公告日期' 'change_reason':'变更原因' } 'Market.IndexComponent':{'ts_code':'TS代码' 'symbol':'股票代码' 'name':'股票名称' 'area':'所在地域' 'industry':'所属行业' 'fullname':'股票全称' 'enname':'英文全称' 'market':'市场类型' # 主板/中小板/创业板/科创板 'exchange':'交易所代码' 'curr_type':'交易货币' 'list_status':'上市状态' # L上市;D退市;P暂停上市 'list_date':'上市日期' 'delist_date':'退市日期' 'is_hs':'是否沪深港通标的' # N否;H沪股通;S深股通 } 'Market.SecuritiesTags':{} }<line_sep># -------------------------------------------------------- Prob -------------------------------------------------------- <def_stmt>plugin_prob <arrow>dict<block_start><return>{'plugin_name':'market_data_tushare_pro' 'plugin_version':'0.0.0.1' 'tags':['tusharepro']}<block_end><def_stmt>plugin_adapt uri:str<arrow>bool<block_start><return>uri<in>FIELDS.keys()<block_end><def_stmt>plugin_capacities <arrow>list<block_start><return>list(FIELDS.keys())<block_end># ---------------------------------------------------------------------------------------------------------------------- # stock_basic: https://tushare.pro/document/2?doc_id=25 <def_stmt>__fetch_securities_info **kwargs<arrow>pd.DataFrame<or><none><block_start>result=check_execute_test_flag(**kwargs)<if_stmt>result<is><none><block_start>pro=ts.pro_api(TS_TOKEN)<line_sep># If we specify the exchange parameter, it raises error. 
result=pro.stock_basic(fields=list(FIELDS.get('Market.SecuritiesInfo').keys()))<block_end>check_execute_dump_flag(result **kwargs)<if_stmt>result<is><not><none><block_start>convert_ts_date_field(result 'list_date' 'listing_date')<line_sep>convert_ts_date_field(result 'delist_date')<line_sep># result['list_date'] = pd.to_datetime(result['list_date'], format='%Y-%m-%d') # result['delist_date'] = pd.to_datetime(result['delist_date'], format='%Y-%m-%d') # result['listing_date'] = pd.to_datetime(result['list_date'], format='%Y-%m-%d') <if_stmt>'code'<not><in>result.columns<block_start>result['code']=result['ts_code'].apply(<lambda>val:val.split('.')[0])<block_end><if_stmt>'exchange'<not><in>result.columns<block_start>result['exchange']=result['ts_code'].apply(<lambda>val:val.split('.')[1])<line_sep>result['exchange']=result['exchange'].apply(<lambda>val:'SSE'<if>val<eq>'SH'<else>val)<line_sep>result['exchange']=result['exchange'].apply(<lambda>val:'SZSE'<if>val<eq>'SZ'<else>val)<block_end>result['stock_identity']=result['code']+'.'+result['exchange']<block_end><return>result<block_end># concept_detail: https://tushare.pro/document/2?doc_id=126 <def_stmt>__fetch_stock_concept **kwargs<arrow>pd.DataFrame<or><none><block_start>ts_code=pickup_ts_code(kwargs)<line_sep>result=check_execute_test_flag(**kwargs)<if_stmt>result<is><none><block_start>pro=ts.pro_api(TS_TOKEN)<line_sep>ts_delay('concept_detail')<line_sep>result=pro.concept_detail(ts_code=ts_code fields=['id' 'concept_name' 'ts_code' 'name' 'in_date' 'out_date'])<block_end>check_execute_dump_flag(result **kwargs)<if_stmt>result<is><not><none><block_start>convert_ts_code_field(result)<line_sep># del result['ts_code'] # result['ts_concept'] = result.to_dict('records') # result['stock_identity'] = ts_code_to_stock_identity(ts_code) <block_end><return>result<block_end># index_basic: https://tushare.pro/document/2?doc_id=94 <def_stmt>__fetch_indexes_info **kwargs<arrow>pd.DataFrame<or><none><block_start>SUPPORT_MARKETS=['SSE' 'SZSE' 'CSI' 'CICC' 'SW' 'MSCI' 'OTH']<line_sep>result=check_execute_test_flag(**kwargs)<if_stmt>result<is><none><block_start>pro=ts.pro_api(TS_TOKEN)<line_sep>result=<none><for_stmt>market SUPPORT_MARKETS<block_start>sub_result=pro.index_basic(market=market fields=list(FIELDS.get('Market.IndexInfo').keys()))<line_sep>result=pd.concat([result sub_result])<block_end><block_end>check_execute_dump_flag(result **kwargs)<if_stmt>result<is><not><none><block_start>result['exchange']=result['market']<line_sep>result['code']=result['ts_code'].apply(<lambda>val:val.split('.')[0])<line_sep>result['listing_date']=pd.to_datetime(result['list_date'] format='%Y-%m-%d')<line_sep>result['index_identity']=result['code'].astype(str)+'.'+result['exchange']<block_end><return>result<block_end># trade_cal: https://tushare.pro/document/2?doc_id=26 <def_stmt>__fetch_trade_calender **kwargs<arrow>pd.DataFrame<or><none><block_start>exchange=kwargs.get('exchange' '')<if_stmt>str_available(exchange)<and>exchange<not><in>['SSE' 'SZSE' 'A-SHARE']<block_start><return><none><block_end>result=check_execute_test_flag(**kwargs)<if_stmt>result<is><none><block_start>time_serial=kwargs.get('trade_date' <none>)<line_sep>since,until=normalize_time_serial(time_serial default_since() today())<line_sep>ts_since=since.strftime('%Y%m%d')<line_sep>ts_until=until.strftime('%Y%m%d')<line_sep>pro=ts.pro_api(TS_TOKEN)<line_sep># If we specify the exchange parameter, it raises error. 
result=pro.trade_cal('' start_date=ts_since end_date=ts_until)<block_end>check_execute_dump_flag(result **kwargs)<if_stmt>result<is><not><none><block_start>result.rename(columns={'exchange':'exchange' 'cal_date':'trade_date' 'is_open':'status'} inplace=<true>)<line_sep># Because tushare only supports SSE and the SSE/SZSE calendars are the same <if_stmt>exchange<eq>'SZSE'<or>exchange<eq>'A-SHARE'<block_start>result.drop(result[result.exchange<ne>'SSE'].index inplace=<true>)<line_sep>result['exchange']=exchange<block_end><else_stmt><block_start>result.drop(result[result.exchange<ne>exchange].index inplace=<true>)<block_end>result['trade_date']=pd.to_datetime(result['trade_date'])<block_end><return>result<block_end># namechange: https://tushare.pro/document/2?doc_id=100 <def_stmt>__fetch_naming_history **kwargs<block_start>result=check_execute_test_flag(**kwargs)<if_stmt>result<is><none><block_start>ts_code=pickup_ts_code(kwargs)<line_sep>period=kwargs.get('naming_date')<line_sep>since,until=normalize_time_serial(period default_since() today())<line_sep>ts_since=since.strftime('%Y%m%d')<line_sep>ts_until=until.strftime('%Y%m%d')<line_sep>pro=ts.pro_api(TS_TOKEN)<line_sep># This interface allows at most 100 requests per minute ts_delay('namechange')<line_sep>result=pro.namechange(ts_code=ts_code start_date=ts_since end_date=ts_until fields='ts_code,name,start_date,end_date,ann_date,change_reason')<block_end>check_execute_dump_flag(result **kwargs)<if_stmt>result<is><not><none><block_start><if_stmt>'start_date'<in>result.columns<block_start>result['naming_date']=pd.to_datetime(result['start_date'] format='%Y-%m-%d')<block_end><if_stmt>'stock_identity'<not><in>result.columns<block_start>result['stock_identity']=result['ts_code'].apply(ts_code_to_stock_identity)<block_end><block_end><return>result<block_end># ---------------------------------------------------------------------------------------------------------------------- <def_stmt>query **kwargs<arrow>pd.DataFrame<or><none><block_start>uri=kwargs.get('uri')<if_stmt>uri<eq>'Market.SecuritiesInfo'<block_start><return>__fetch_securities_info(**kwargs)<block_end><elif_stmt>uri<eq>'Market.IndexInfo'<block_start><return>__fetch_indexes_info(**kwargs)<block_end><elif_stmt>uri<eq>'Market.TradeCalender'<block_start><return>__fetch_trade_calender(**kwargs)<block_end><elif_stmt>uri<eq>'Market.NamingHistory'<block_start><return>__fetch_naming_history(**kwargs)<block_end><elif_stmt>uri<eq>'Market.IndexComponent'<block_start><return><none><block_end><elif_stmt>uri<eq>'Market.SecuritiesTags'<block_start><return>__fetch_stock_concept(**kwargs)<block_end><else_stmt><block_start><return><none><block_end><block_end><def_stmt>validate **kwargs<arrow>bool<block_start>nop(kwargs)<line_sep><return><true><block_end><def_stmt>fields <arrow>dict<block_start><return>FIELDS<block_end>
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. <import_from_stmt>typing Optional<import_from_stmt>airflow.exceptions AirflowException<import_from_stmt>airflow.models BaseOperator<import_from_stmt>airflow.providers.tableau.hooks.tableau TableauHook TableauJobFailedException TableauJobFinishCode <line_sep>RESOURCES_METHODS={'datasources':['delete' 'refresh'] 'groups':['delete'] 'projects':['delete'] 'schedule':['delete'] 'sites':['delete'] 'subscriptions':['delete'] 'tasks':['delete' 'run'] 'users':['remove'] 'workbooks':['delete' 'refresh'] }<class_stmt>TableauOperator(BaseOperator)<block_start>""" Execute a Tableau API Resource https://tableau.github.io/server-client-python/docs/api-ref .. seealso:: For more information on how to use this operator, take a look at the guide: :ref:`howto/operator:TableauOperator` :param resource: The name of the resource to use. :type resource: str :param method: The name of the resource's method to execute. :type method: str :param find: The reference of resource that will receive the action. :type find: str :param match_with: The resource field name to be matched with find parameter. :type match_with: Optional[str] :param site_id: The id of the site where the workbook belongs to. :type site_id: Optional[str] :param blocking_refresh: By default will be blocking means it will wait until it has finished. :type blocking_refresh: bool :param check_interval: time in seconds that the job should wait in between each instance state checks until operation is completed :type check_interval: float :param tableau_conn_id: The :ref:`Tableau Connection id <howto/connection:tableau>` containing the credentials to authenticate to the Tableau Server. :type tableau_conn_id: str """<def_stmt>__init__ self * resource:str method:str find:str match_with:str='id' site_id:Optional[str]=<none> blocking_refresh:bool=<true> check_interval:float=20 tableau_conn_id:str='tableau_default' **kwargs <arrow><none><block_start>super().__init__(**kwargs)<line_sep>self.resource=resource<line_sep>self.method=method<line_sep>self.find=find<line_sep>self.match_with=match_with<line_sep>self.check_interval=check_interval<line_sep>self.site_id=site_id<line_sep>self.blocking_refresh=blocking_refresh<line_sep>self.tableau_conn_id=tableau_conn_id<block_end><def_stmt>execute self context:dict<arrow>str<block_start>""" Executes the Tableau API resource and pushes the job id or downloaded file URI to xcom. :param context: The task context during execution. :type context: dict :return: the id of the job that executes the extract refresh or downloaded file URI. :rtype: str """<line_sep>available_resources=RESOURCES_METHODS.keys()<if_stmt>self.resource<not><in>available_resources<block_start>error_message=f'Resource not found! 
Available Resources: {available_resources}'<line_sep><raise>AirflowException(error_message)<block_end>available_methods=RESOURCES_METHODS[self.resource]<if_stmt>self.method<not><in>available_methods<block_start>error_message=f'Method not found! Available methods for {self.resource}: {available_methods}'<line_sep><raise>AirflowException(error_message)<block_end><with_stmt>TableauHook(self.site_id self.tableau_conn_id)<as>tableau_hook<block_start>resource=getattr(tableau_hook.server self.resource)<line_sep>method=getattr(resource self.method)<line_sep>resource_id=self._get_resource_id(tableau_hook)<line_sep>response=method(resource_id)<block_end><if_stmt>self.method<eq>'refresh'<block_start>job_id=response.id<if_stmt>self.blocking_refresh<block_start><if_stmt><not>tableau_hook.wait_for_state(job_id=job_id check_interval=self.check_interval target_state=TableauJobFinishCode.SUCCESS )<block_start><raise>TableauJobFailedException(f'The Tableau Refresh {self.resource} Job failed!')<block_end><block_end><return>job_id<block_end><block_end><def_stmt>_get_resource_id self tableau_hook:TableauHook<arrow>str<block_start><if_stmt>self.match_with<eq>'id'<block_start><return>self.find<block_end><for_stmt>resource tableau_hook.get_all(resource_name=self.resource)<block_start><if_stmt>getattr(resource self.match_with)<eq>self.find<block_start>resource_id=resource.id<line_sep>self.log.info('Found matching with id %s' resource_id)<line_sep><return>resource_id<block_end><block_end><raise>AirflowException(f'{self.resource} with {self.match_with} {self.find} not found!')<block_end><block_end>
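# A short usage sketch for TableauOperator inside an Airflow DAG. The DAG id,
# workbook name and connection id below are placeholders, not values from this file.
from datetime import datetime

from airflow import DAG

with DAG(dag_id='tableau_refresh_example', start_date=datetime(2021, 1, 1),
         schedule_interval=None, catchup=False) as dag:
    refresh_workbook = TableauOperator(
        task_id='refresh_sales_workbook',
        resource='workbooks',        # one of RESOURCES_METHODS
        method='refresh',            # pushes a job id and optionally waits for it
        find='Sales Dashboard',      # resource to act on ...
        match_with='name',           # ... matched by name instead of id
        blocking_refresh=True,
        check_interval=30,
        tableau_conn_id='tableau_default',
    )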
# Copyright (c) Facebook, Inc. and its affiliates. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. <import_stmt>os<import_stmt>shutil<import_stmt>tempfile<import_stmt>unittest<import_from_stmt>programs.buck_tool BuckToolException<import_from_stmt>programs.java_lookup _get_java_path_for_highest_minor_version get_java_path<import_from_stmt>programs.subprocutils which<line_sep>ANY_JAVA_VERSION=8<line_sep>JAVA_VERSION_THAT_OBVIOUSLY_CANT_EXIST_LOCALLY=500<class_stmt>TestJavaPath(unittest.TestCase)<block_start><def_stmt>setUp self<block_start>self.java_home=tempfile.mkdtemp()<line_sep>self.java_exec="java.exe"<if>os.name<eq>"nt"<else>"java"<line_sep>bin_dir=os.path.join(self.java_home "bin")<line_sep>os.mkdir(bin_dir)<line_sep>open(os.path.join(bin_dir self.java_exec) "w")<block_end><def_stmt>test_with_java_home_valid self<block_start>os.environ["JAVA_HOME"]=self.java_home<line_sep>self.assertEqual(get_java_path(ANY_JAVA_VERSION).lower() os.path.join(self.java_home "bin" self.java_exec).lower() )<block_end><def_stmt>test_with_java_home_invalid self<block_start>os.environ["JAVA_HOME"]="/nosuchfolder/89aabebc-42cb-4cd8-bcf7-d964371daf3e"<line_sep>self.assertRaises(BuckToolException)<block_end><def_stmt>test_without_java_home self<block_start>self.assertEquals(get_java_path(JAVA_VERSION_THAT_OBVIOUSLY_CANT_EXIST_LOCALLY).lower() which("java").lower() )<block_end><def_stmt>test_java_home_for_wrong_version_ignored self<block_start>os.environ["JAVA_HOME"]=("/Library/Java/JavaVirtualMachines/jdk-"+str(JAVA_VERSION_THAT_OBVIOUSLY_CANT_EXIST_LOCALLY+1)+".jdk/Contents/Home")<line_sep>self.assertEquals(get_java_path(JAVA_VERSION_THAT_OBVIOUSLY_CANT_EXIST_LOCALLY).lower() which("java").lower() )<block_end><def_stmt>test_java_home_for_wrong_version_not_ignored_if_respect_java_home_set self<block_start>os.environ["JAVA_HOME"]=("/Library/Java/JavaVirtualMachines/jdk-"+str(JAVA_VERSION_THAT_OBVIOUSLY_CANT_EXIST_LOCALLY+1)+".jdk/Contents/Home")<line_sep>os.environ["BUCK_RESPECT_JAVA_HOME"]="1"<line_sep>self.assertRaises(BuckToolException)<block_end><def_stmt>test_java_8_highest_version_lookup self<block_start>java_base_path=tempfile.mkdtemp()<line_sep>os.mkdir(os.path.join(java_base_path "jdk1.7.0"))<line_sep>os.mkdir(os.path.join(java_base_path "jdk1.8.0_100"))<line_sep>os.mkdir(os.path.join(java_base_path "jdk1.8.0_200"))<line_sep>os.mkdir(os.path.join(java_base_path "jdk1.8.1"))<line_sep>os.mkdir(os.path.join(java_base_path "jdk1.8.1_100"))<line_sep>self.assertEquals(_get_java_path_for_highest_minor_version(java_base_path 8) os.path.join(java_base_path "jdk1.8.1_100") )<block_end><def_stmt>test_openjdk_8_highest_version_lookup self<block_start>java_base_path=tempfile.mkdtemp()<line_sep>os.mkdir(os.path.join(java_base_path "adoptopenjdk-7.jdk"))<line_sep>os.mkdir(os.path.join(java_base_path "adoptopenjdk-8.jdk"))<line_sep>os.mkdir(os.path.join(java_base_path "adoptopenjdk-9.jdk"))<line_sep>os.mkdir(os.path.join(java_base_path 
"adoptopenjdk-18.jdk"))<line_sep>self.assertEquals(_get_java_path_for_highest_minor_version(java_base_path 8) os.path.join(java_base_path "adoptopenjdk-8.jdk") )<block_end><def_stmt>test_java_11_highest_version_lookup self<block_start>java_base_path=tempfile.mkdtemp()<line_sep>os.mkdir(os.path.join(java_base_path "jdk-10.0.1"))<line_sep>os.mkdir(os.path.join(java_base_path "jdk-11.0.1"))<line_sep>os.mkdir(os.path.join(java_base_path "jdk-11.0.2"))<line_sep>os.mkdir(os.path.join(java_base_path "jdk-11.0.2_100"))<line_sep>os.mkdir(os.path.join(java_base_path "jdk-11.0.2_200"))<line_sep>os.mkdir(os.path.join(java_base_path "jdk-12"))<line_sep>os.mkdir(os.path.join(java_base_path "jdk-13"))<line_sep>self.assertEquals(_get_java_path_for_highest_minor_version(java_base_path 11) os.path.join(java_base_path "jdk-11.0.2_200") )<block_end><def_stmt>test_openjdk_11_highest_version_lookup self<block_start>java_base_path=tempfile.mkdtemp()<line_sep>os.mkdir(os.path.join(java_base_path "adoptopenjdk-7.jdk"))<line_sep>os.mkdir(os.path.join(java_base_path "adoptopenjdk-8.jdk"))<line_sep>os.mkdir(os.path.join(java_base_path "adoptopenjdk-9.jdk"))<line_sep>os.mkdir(os.path.join(java_base_path "adoptopenjdk-10.jdk"))<line_sep>os.mkdir(os.path.join(java_base_path "adoptopenjdk-11.jdk"))<line_sep>os.mkdir(os.path.join(java_base_path "adoptopenjdk-11.0.2.jdk"))<line_sep>os.mkdir(os.path.join(java_base_path "adoptopenjdk-12.jdk"))<line_sep>self.assertEquals(_get_java_path_for_highest_minor_version(java_base_path 11) os.path.join(java_base_path "adoptopenjdk-11.0.2.jdk") )<line_sep>self.assertEquals(_get_java_path_for_highest_minor_version(java_base_path 12) os.path.join(java_base_path "adoptopenjdk-12.jdk") )<block_end><def_stmt>tearDown self<block_start><if_stmt>"JAVA_HOME"<in>os.environ<block_start><del_stmt>os.environ["JAVA_HOME"]<block_end><if_stmt>"BUCK_RESPECT_JAVA_HOME"<in>os.environ<block_start><del_stmt>os.environ["BUCK_RESPECT_JAVA_HOME"]<block_end>shutil.rmtree(self.java_home)<block_end><block_end><if_stmt>__name__<eq>"__main__"<block_start>unittest.main()<block_end>
__all__=['calculate_metric_by_expr' 'infer_eval_log_metrics']<import_stmt>ast<import_stmt>operator<as>op<import_from_stmt>autogluon.core.constants MULTICLASS BINARY REGRESSION<line_sep># supported operators operators={ast.Add:op.add ast.Sub:op.sub ast.Mult:op.mul ast.Div:op.truediv ast.Pow:op.pow ast.BitXor:op.xor ast.USub:op.neg}<def_stmt>infer_eval_log_metrics problem_type eval_metric=<none><block_start>"""Decide default evaluation, stopping, and logging metrics (based on type of prediction problem). Parameters ---------- problem_type Type of the problem. Either regression, multiclass, or binary eval_metric The eval metric provided by the user Returns ------- eval_metric The updated evaluation metric log_metrics The updated logging metric """<if_stmt>problem_type<eq>MULTICLASS<block_start><if_stmt>eval_metric<is><none><block_start>eval_metric='acc'<block_end>log_metrics=['acc' 'log_loss']<block_end><elif_stmt>problem_type<eq>BINARY<block_start><if_stmt>eval_metric<is><none><block_start>eval_metric='acc'<block_end>log_metrics=['f1' 'mcc' 'roc_auc' 'acc' 'log_loss']<block_end><elif_stmt>problem_type<eq>REGRESSION<block_start><if_stmt>eval_metric<is><none><block_start>eval_metric='rmse'<block_end>log_metrics=['r2' 'rmse' 'mae']<block_end><else_stmt><block_start><raise>NotImplementedError('The problem type is not supported yet!')<block_end><if_stmt>eval_metric<not><in>log_metrics<block_start>log_metrics.append(eval_metric)<block_end><return>eval_metric log_metrics<block_end><def_stmt>eval_math_expr expr<block_start>"""Evaluate an expression Parameters ---------- expr expression Returns ------- ret Returned value Examples -------- >>> eval_math_expr('2^6') 4 >>> eval_math_expr('2**6') 64 >>> eval_math_expr('1 + 2*3**(4^5) / (6 + -7)') -5.0 """<line_sep><return>eval_(ast.parse(expr mode='eval').body)<block_end><def_stmt>eval_ node<block_start><if_stmt>isinstance(node ast.Num)# <number> <block_start><return>node.n<block_end><elif_stmt>isinstance(node ast.BinOp)# <left> <operator> <right> <block_start><return>operators[type(node.op)](eval_(node.left) eval_(node.right))<block_end><elif_stmt>isinstance(node ast.UnaryOp)# <operator> <operand> e.g., -1 <block_start><return>operators[type(node.op)](eval_(node.operand))<block_end><else_stmt><block_start><raise>TypeError(node)<block_end><block_end><def_stmt>calculate_metric_by_expr label_metric_scores:dict label_names:list expr:str<arrow>float<block_start>"""Calculate the metric scores based on the given expression. Parameters ---------- label_metric_scores All metric scores reported in the validation phase. It will be a dict of metric scores. label_names Name of the labels expr The expression. Supports different possibilities: - A single metric like 'acc', 'f1', or 'auc' This means to use this value as the final result. If there are multiple labels, we use the average of all the individual metrics - Combined metric, we use the syntax `label.metric_name` to describe the metric of a given label - expr = (acc + f1) / 2 The average of the accuracy and f1 of all labels - expr = (label1.auc + label2.auc) / 2 The average of the auc of "label1" and the auc of "label2" - expr = 0.8 * intent.acc + 0.2 * slot.f1 0.8 * the accuracy of a label named "intent" + 0.2 * the f1 score of a label named "slot" - expr = 0.1 * label1.f1 + 0.9 * acc 0.1 * the F1 of label 1 + 0.9 * the average accuracy Returns ------- score The returned score. 
"""<line_sep>original_expr=expr<line_sep>possible_metric_names=set()<for_stmt>label_name label_names<block_start><assert_stmt>label_name<in>label_metric_scores 'Invalid label_metric_scores,'<concat>' all provided labels should be in the aggregated label metric scores. '<concat>'label_names={}, label_metric_scores={}'.format(label_names label_metric_scores)<line_sep>metric_scores=label_metric_scores[label_name]<for_stmt>metric_name,value, metric_scores.items()<block_start>expr=expr.replace('{}.{}'.format(label_name metric_name) str(value))<line_sep>possible_metric_names.add(metric_name)<block_end><block_end><for_stmt>metric_name possible_metric_names<block_start><if_stmt>metric_name<in>expr<block_start>avg_metric=0<for_stmt>label_name label_names<block_start>avg_metric<augadd>label_metric_scores[label_name][metric_name]<block_end>avg_metric<augdiv>len(label_names)<line_sep>expr=expr.replace(metric_name str(avg_metric))<block_end><block_end><try_stmt><block_start>ret=eval_math_expr(expr)<block_end><except_stmt>Exception<block_start><raise>ValueError('Cannot successfully parse the given expression. '<concat>'The original expression = "{}". After the parsing, it becomes {} but '<concat>'still cannot be evalauted.'.format(original_expr expr))<block_end><return>ret<block_end>
<import_stmt>numpy<as>np<line_sep># Hyperparameters x=0.1<line_sep>noise=0.1<line_sep>print("x: %f"%x)<line_sep>print("noise: %f"%noise)<line_sep># Simulated training loss loss=np.sin(5<times>x)<times>(1-np.tanh(x<power>2))+np.random.randn()<times>noise<line_sep>print("loss: %f"%loss)<line_sep>
<import_from_stmt>.plugin_file *<import_from_stmt>.plugin_configuration *<line_sep>
<import_stmt>torch<import_from_stmt>torch_geometric.nn ChebConv<import_from_stmt>torch_geometric.testing is_full_test<def_stmt>test_cheb_conv <block_start>in_channels,out_channels=(16 32)<line_sep>edge_index=torch.tensor([[0 0 0 1 2 3] [1 2 3 0 0 0]])<line_sep>num_nodes=edge_index.max().item()+1<line_sep>edge_weight=torch.rand(edge_index.size(1))<line_sep>x=torch.randn((num_nodes in_channels))<line_sep>conv=ChebConv(in_channels out_channels K=3)<assert_stmt>conv.__repr__()<eq>'ChebConv(16, 32, K=3, normalization=sym)'<line_sep>out1=conv(x edge_index)<assert_stmt>out1.size()<eq>(num_nodes out_channels)<line_sep>out2=conv(x edge_index edge_weight)<assert_stmt>out2.size()<eq>(num_nodes out_channels)<line_sep>out3=conv(x edge_index edge_weight lambda_max=3.0)<assert_stmt>out3.size()<eq>(num_nodes out_channels)<if_stmt>is_full_test()<block_start>jit=torch.jit.script(conv.jittable())<assert_stmt>jit(x edge_index).tolist()<eq>out1.tolist()<assert_stmt>jit(x edge_index edge_weight).tolist()<eq>out2.tolist()<assert_stmt>jit(x edge_index edge_weight lambda_max=torch.tensor(3.0)).tolist()<eq>out3.tolist()<block_end>batch=torch.tensor([0 0 1 1])<line_sep>edge_index=torch.tensor([[0 1 2 3] [1 0 3 2]])<line_sep>num_nodes=edge_index.max().item()+1<line_sep>edge_weight=torch.rand(edge_index.size(1))<line_sep>x=torch.randn((num_nodes in_channels))<line_sep>lambda_max=torch.tensor([2.0 3.0])<line_sep>out4=conv(x edge_index edge_weight batch)<assert_stmt>out4.size()<eq>(num_nodes out_channels)<line_sep>out5=conv(x edge_index edge_weight batch lambda_max)<assert_stmt>out5.size()<eq>(num_nodes out_channels)<if_stmt>is_full_test()<block_start><assert_stmt>jit(x edge_index edge_weight batch).tolist()<eq>out4.tolist()<assert_stmt>jit(x edge_index edge_weight batch lambda_max).tolist()<eq>out5.tolist()<block_end><block_end>
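# A brief, hedged sketch of ChebConv outside the test: LaplacianLambdaMax
# precomputes lambda_max per graph instead of relying on the default of 2.0
# used with symmetric normalization.
import torch
from torch_geometric.data import Data
from torch_geometric.nn import ChebConv
from torch_geometric.transforms import LaplacianLambdaMax

edge_index = torch.tensor([[0, 1, 1, 2], [1, 0, 2, 1]])
data = Data(x=torch.randn(3, 16), edge_index=edge_index)
data = LaplacianLambdaMax(normalization='sym')(data)

conv = ChebConv(16, 32, K=3)
out = conv(data.x, data.edge_index, lambda_max=data.lambda_max)
print(out.shape)  # torch.Size([3, 32])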
# built-in <import_stmt>re<import_from_stmt>logging getLogger<import_from_stmt>typing TYPE_CHECKING Optional<line_sep># external <import_from_stmt>packaging.markers Marker<import_from_stmt>yaspin yaspin<line_sep># app <import_from_stmt>..context_tools nullcontext<import_from_stmt>..models RootDependency<import_from_stmt>._conflict analyze_conflict<if_stmt>TYPE_CHECKING# project <block_start><import_from_stmt>dephell.controllers._graph Graph<import_from_stmt>dephell.controllers._mutator Mutator<block_end>logger=getLogger('dephell.resolver')<line_sep>REX_BASE_VERSION=re.compile(r'[0-9\.]+')<class_stmt>Resolver<block_start><def_stmt>__init__ self graph:'Graph' mutator:'Mutator'<arrow><none><block_start>self.graph=graph<line_sep>self.mutator=mutator<block_end><def_stmt>apply self parent recursive:bool=<false><block_start>""" Returns conflicting (incompatible) dependency. """<for_stmt>new_dep parent.dependencies<block_start>other_dep=self.graph.get(new_dep.name)<if_stmt>other_dep<is><none># add new dep to graph <block_start>other_dep=new_dep.copy()<line_sep>self.graph.add(other_dep)<block_end><elif_stmt>isinstance(other_dep RootDependency)# if some of the dependencies cyclicaly depends on root # then ignore these deps <block_start><continue><block_end><else_stmt># if dep is locked, but not used, let's just unlock it <block_start><if_stmt>other_dep.locked<and><not>other_dep.used<block_start>other_dep.unlock()<block_end># merge deps <try_stmt><block_start>other_dep<augadd>new_dep<block_end><except_stmt>TypeError# conflict happened <block_start><return>other_dep<block_end># `recursive` used only in re-application of dependencies, # when the graph already was built before. <if_stmt>recursive<block_start>self.apply(other_dep recursive=<true>)<block_end><block_end># check <if_stmt><not>other_dep.compat<block_start><return>other_dep<block_end><block_end>parent.applied=<true><block_end><def_stmt>unapply self dep * force:bool=<true> soft:bool=<false><arrow><none><block_start>""" force -- unapply deps that not applied yet soft -- do not mark dep as not applied. 
"""<if_stmt><not>force<and><not>dep.applied<block_start><return><block_end># it must be before actual unapplying to avoid recursion on circular dependencies <if_stmt><not>soft<block_start>dep.applied=<false><block_end><for_stmt>child dep.dependencies<block_start>child_name=child.name<line_sep>child=self.graph.get(child_name)<if_stmt>child<is><none><block_start>logger.debug('child not found' extra=dict(dep=dep.name child=child_name))<line_sep><continue><block_end># unapply current dependency for child child.unapply(dep.name)<line_sep># unapply child because he is modified self.unapply(child force=<false> soft=soft)<block_end><if_stmt><not>soft<and>dep.locked<block_start>dep.unlock()<block_end><block_end><def_stmt>resolve self debug:bool=<false> silent:bool=<false> level:Optional[int]=<none><arrow>bool<block_start><if_stmt>silent<block_start>spinner=nullcontext(type('Mock' () {}))<block_end><else_stmt><block_start>spinner=yaspin(text='resolving...')<block_end><with_stmt>spinner<as>spinner<block_start><while_stmt><true><block_start>resolved=self._resolve(debug=debug silent=silent level=level spinner=spinner)<if_stmt>resolved<is><none><block_start><continue><block_end>self.graph.clear()# remove unused deps from graph <return>resolved<block_end><block_end><block_end><def_stmt>_resolve self debug:bool silent:bool level:Optional[int] spinner<arrow>Optional[bool]<block_start><if_stmt>silent<block_start>logger.debug('next iteration' extra=dict(layers=len(self.graph._layers) mutations=self.mutator.mutations ))<block_end><else_stmt><block_start>spinner.text='layers: {layers}, mutations: {mutations}'.format(layers=len(self.graph._layers) mutations=self.mutator.mutations )<block_end># get not applied deps deps=self.graph.get_leafs(level=level)<line_sep># if we already build deps for all nodes in graph <if_stmt><not>deps<block_start><return><true><block_end># check python version <for_stmt>dep deps<block_start><if_stmt><not>dep.python_compat<block_start>self.graph.conflict=dep<line_sep><return><false><block_end><block_end>no_conflicts=self._apply_deps(deps debug=debug)<if_stmt>no_conflicts<block_start><return><none><block_end># if we have conflict, try to mutate graph groups=self.mutator.mutate(self.graph)<line_sep># if cannot mutate <if_stmt>groups<is><none><block_start><return><false><block_end>self.graph.conflict=<none><line_sep># apply mutation <for_stmt>group groups<block_start>dep=self.graph.get(group.name)<if_stmt>dep.group.number<ne>group.number<block_start>logger.debug('mutated' extra=dict(group_from=str(dep.group) group_to=str(group) ))<line_sep>self.unapply(dep)<line_sep>dep.group=group<block_end><block_end><return><none><block_end><def_stmt>apply_envs self envs:set deep:bool=<true><arrow><none><block_start>"""Filter out dependencies from the graph by the given envs. deep: Helps to avoid fetching dependencies (hence the network requests). Set it to False for not resolved graph to make filtering faster. 
"""<if_stmt><not>any(root.dependencies<for>root self.graph.get_layer(0))<block_start>logger.debug('no dependencies, nothing to filter')<line_sep><return><block_end>layer=self.graph.get_layer(1)<line_sep># Unapply deps that we don't need <for_stmt>dep layer<block_start><if_stmt><not>dep.applied<block_start><continue><block_end><if_stmt>dep.envs&envs<block_start><continue><block_end><if_stmt>dep.inherited_envs&envs<block_start><continue><block_end>logger.debug('unapply by envs' extra=dict(dep=dep.name envs=envs))<line_sep># without `soft=True` all deps of this dep will be marked as unapplied # and ignored in Requirement.from_graph. # It's bad behavior because deps of this dep can be required for other # deps that won't be unapplied. <if_stmt>deep<block_start>self.unapply(dep soft=<true>)<block_end>dep.applied=<false><block_end># Some child deps can be unapplied from other child deps, but we need them. # For example, if we need A, but don't need B, and A and B depends on C, # then C will be unapplied from B. Let's return B in the graph by reapplying A. <for_stmt>dep self.graph<block_start><if_stmt><not>dep.applied<block_start><continue><block_end><if_stmt><not>(dep.envs|dep.inherited_envs)&envs<block_start><continue><block_end>logger.debug('reapply' extra=dict(dep=dep.name envs=envs))<if_stmt>deep<block_start>self.apply(dep recursive=<true>)<block_end>dep.applied=<true><block_end><block_end><def_stmt>apply_markers self python<arrow><none><block_start>implementation=python.implementation<if_stmt>implementation<eq>'python'<block_start>implementation='cpython'<block_end># get only base part of python version because `packagings` drops # all markers for python prereleases python_version=str(python.version)<line_sep>match=REX_BASE_VERSION.match(python_version)<if_stmt>match<block_start>python_version=match.group()<block_end><for_stmt>dep self.graph<block_start><if_stmt><not>dep.applied<block_start><continue><block_end><if_stmt><not>dep.marker<block_start><continue><block_end>fit=Marker(str(dep.marker)).evaluate(dict(python_version=python_version implementation_name=implementation ))<if_stmt>fit<block_start><continue><block_end>self.unapply(dep soft=<true>)<line_sep>dep.applied=<false><block_end><block_end><def_stmt>_apply_deps self deps debug:bool=<false><arrow>bool<block_start><for_stmt>dep deps<block_start>conflict=self.apply(dep)<if_stmt>conflict<is><none><block_start><continue><block_end>logger.debug('conflict' extra=dict(dep=conflict.name constraint=conflict.constraint ))<line_sep>self.graph.conflict=conflict.copy()<if_stmt>debug<block_start>print(analyze_conflict(resolver=self suffix=str(self.mutator.mutations) ))<block_end># Dep can be partialy applied. Clean it. self.unapply(dep)<line_sep><return><false><block_end># only if all deps applied <return><true><block_end><block_end>
<import_stmt>logging<import_stmt>os<import_stmt>pytest<import_from_stmt>kubeflow.kubeflow.ci kfctl_go_test_utils<as>kfctl_util<import_from_stmt>kubeflow.testing util<line_sep>@pytest.mark.skipif(os.getenv("JOB_TYPE")<eq>"presubmit" reason="test second apply doesn't run in presubmits")<def_stmt>test_second_apply record_xml_attribute app_path<block_start>"""Test that we can run kfctl apply again with error. Args: kfctl_path: The path to kfctl binary. app_path: The app dir of kubeflow deployment. """<line_sep>_,kfctl_path=kfctl_util.get_kfctl_go_build_dir_binary_path()<if_stmt><not>os.path.exists(kfctl_path)<block_start>msg="kfctl Go binary not found: {path}".format(path=kfctl_path)<line_sep>logging.error(msg)<line_sep><raise>RuntimeError(msg)<block_end>util.run([kfctl_path "apply" "-V" "-f="+os.path.join(app_path "tmp.yaml")] cwd=app_path)<block_end>
<import_stmt>abc<import_stmt>logging<import_stmt>six<import_from_stmt>collections namedtuple<import_from_stmt>urllib3.response HTTPHeaderDict<import_from_stmt>..utils parse_bool parse_int parse_real<line_sep>logger=logging.getLogger(__name__)<class_stmt>CaseInsensitiveDict(HTTPHeaderDict)<block_start><def_stmt>add self key val<block_start>self[key]=val<block_end><def_stmt>__getitem__ self key<block_start><return>self._container[key.lower()][1]<block_end><def_stmt>__repr__ self<block_start><return>str(dict(self.items()))<block_end><def_stmt>copy self<block_start><return>CaseInsensitiveDict(self._container.values())<block_end><block_end><class_stmt>Bool(namedtuple('Bool' 'version_from,version_till'))<block_start>@staticmethod<def_stmt>transform name value<block_start><if_stmt>parse_bool(value)<is><not><none><block_start><return>value<block_end>logger.warning('Removing bool parameter=%s from the config due to the invalid value=%s' name value)<block_end><block_end>@six.add_metaclass(abc.ABCMeta)<class_stmt>Number(namedtuple('Number' 'version_from,version_till,min_val,max_val,unit'))<block_start>@[email protected]<def_stmt>parse value unit<block_start>"""parse value"""<block_end><def_stmt>transform self name value<block_start>num_value=self.parse(value self.unit)<if_stmt>num_value<is><not><none><block_start><if_stmt>num_value<l>self.min_val<block_start>logger.warning('Value=%s of parameter=%s is too low, increasing to %s%s' value name self.min_val self.unit<or>'')<line_sep><return>self.min_val<block_end><if_stmt>num_value<g>self.max_val<block_start>logger.warning('Value=%s of parameter=%s is too big, decreasing to %s%s' value name self.max_val self.unit<or>'')<line_sep><return>self.max_val<block_end><return>value<block_end>logger.warning('Removing %s parameter=%s from the config due to the invalid value=%s' self.__class__.__name__.lower() name value)<block_end><block_end><class_stmt>Integer(Number)<block_start>@staticmethod<def_stmt>parse value unit<block_start><return>parse_int(value unit)<block_end><block_end><class_stmt>Real(Number)<block_start>@staticmethod<def_stmt>parse value unit<block_start><return>parse_real(value unit)<block_end><block_end><class_stmt>Enum(namedtuple('Enum' 'version_from,version_till,possible_values'))<block_start><def_stmt>transform self name value<block_start><if_stmt>str(value).lower()<in>self.possible_values<block_start><return>value<block_end>logger.warning('Removing enum parameter=%s from the config due to the invalid value=%s' name value)<block_end><block_end><class_stmt>EnumBool(Enum)<block_start><def_stmt>transform self name value<block_start><if_stmt>parse_bool(value)<is><not><none><block_start><return>value<block_end><return>super(EnumBool self).transform(name value)<block_end><block_end><class_stmt>String(namedtuple('String' 'version_from,version_till'))<block_start>@staticmethod<def_stmt>transform name value<block_start><return>value<block_end><block_end># Format: # key - parameter name # value - tuple or multiple tuples if something was changing in GUC across postgres versions parameters=CaseInsensitiveDict({'allow_system_table_mods':Bool(90300 <none>) 'application_name':String(90300 <none>) 'archive_command':String(90300 <none>) 'archive_mode':(Bool(90300 90500) EnumBool(90500 <none> ('always' ))) 'archive_timeout':Integer(90300 <none> 0 1073741823 's') 'array_nulls':Bool(90300 <none>) 'authentication_timeout':Integer(90300 <none> 1 600 's') 'autovacuum':Bool(90300 <none>) 'autovacuum_analyze_scale_factor':Real(90300 <none> 0 100 <none>) 
'autovacuum_analyze_threshold':Integer(90300 <none> 0 2147483647 <none>) 'autovacuum_freeze_max_age':Integer(90300 <none> 100000 2000000000 <none>) 'autovacuum_max_workers':(Integer(90300 90600 1 8388607 <none>) Integer(90600 <none> 1 262143 <none>)) 'autovacuum_multixact_freeze_max_age':Integer(90300 <none> 10000 2000000000 <none>) 'autovacuum_naptime':Integer(90300 <none> 1 2147483 's') 'autovacuum_vacuum_cost_delay':(Integer(90300 120000 -1 100 'ms') Real(120000 <none> -1 100 'ms')) 'autovacuum_vacuum_cost_limit':Integer(90300 <none> -1 10000 <none>) 'autovacuum_vacuum_insert_scale_factor':Real(130000 <none> 0 100 <none>) 'autovacuum_vacuum_insert_threshold':Integer(130000 <none> -1 2147483647 <none>) 'autovacuum_vacuum_scale_factor':Real(90300 <none> 0 100 <none>) 'autovacuum_vacuum_threshold':Integer(90300 <none> 0 2147483647 <none>) 'autovacuum_work_mem':Integer(90400 <none> -1 2147483647 'kB') 'backend_flush_after':Integer(90600 <none> 0 256 '8kB') 'backslash_quote':EnumBool(90300 <none> ('safe_encoding' )) 'backtrace_functions':String(130000 <none>) 'bgwriter_delay':Integer(90300 <none> 10 10000 'ms') 'bgwriter_flush_after':Integer(90600 <none> 0 256 '8kB') 'bgwriter_lru_maxpages':(Integer(90300 100000 0 1000 <none>) Integer(100000 <none> 0 1073741823 <none>)) 'bgwriter_lru_multiplier':Real(90300 <none> 0 10 <none>) 'bonjour':Bool(90300 <none>) 'bonjour_name':String(90300 <none>) 'bytea_output':Enum(90300 <none> ('escape' 'hex')) 'check_function_bodies':Bool(90300 <none>) 'checkpoint_completion_target':Real(90300 <none> 0 1 <none>) 'checkpoint_flush_after':Integer(90600 <none> 0 256 '8kB') 'checkpoint_segments':Integer(90300 90500 1 2147483647 <none>) 'checkpoint_timeout':(Integer(90300 90600 30 3600 's') Integer(90600 <none> 30 86400 's')) 'checkpoint_warning':Integer(90300 <none> 0 2147483647 's') 'client_connection_check_interval':Integer(140000 <none> '0' '2147483647' 'ms') 'client_encoding':String(90300 <none>) 'client_min_messages':Enum(90300 <none> ('debug5' 'debug4' 'debug3' 'debug2' 'debug1' 'log' 'notice' 'warning' 'error')) 'cluster_name':String(90500 <none>) 'commit_delay':Integer(90300 <none> 0 100000 <none>) 'commit_siblings':Integer(90300 <none> 0 1000 <none>) 'compute_query_id':EnumBool(140000 <none> ('auto' )) 'config_file':String(90300 <none>) 'constraint_exclusion':EnumBool(90300 <none> ('partition' )) 'cpu_index_tuple_cost':Real(90300 <none> 0 1.79769e+308 <none>) 'cpu_operator_cost':Real(90300 <none> 0 1.79769e+308 <none>) 'cpu_tuple_cost':Real(90300 <none> 0 1.79769e+308 <none>) 'cursor_tuple_fraction':Real(90300 <none> 0 1 <none>) 'data_directory':String(90300 <none>) 'data_sync_retry':Bool(90400 <none>) 'DateStyle':String(90300 <none>) 'db_user_namespace':Bool(90300 <none>) 'deadlock_timeout':Integer(90300 <none> 1 2147483647 'ms') 'debug_pretty_print':Bool(90300 <none>) 'debug_print_parse':Bool(90300 <none>) 'debug_print_plan':Bool(90300 <none>) 'debug_print_rewritten':Bool(90300 <none>) 'default_statistics_target':Integer(90300 <none> 1 10000 <none>) 'default_table_access_method':String(120000 <none>) 'default_tablespace':String(90300 <none>) 'default_text_search_config':String(90300 <none>) 'default_toast_compression':Enum(140000 <none> ('pglz' 'lz4')) 'default_transaction_deferrable':Bool(90300 <none>) 'default_transaction_isolation':Enum(90300 <none> ('serializable' 'repeatable read' 'read committed' 'read uncommitted')) 'default_transaction_read_only':Bool(90300 <none>) 'default_with_oids':Bool(90300 120000) 'dynamic_library_path':String(90300 <none>) 
'dynamic_shared_memory_type':(Enum(90400 120000 ('posix' 'sysv' 'mmap' 'none')) Enum(120000 <none> ('posix' 'sysv' 'mmap'))) 'effective_cache_size':Integer(90300 <none> 1 2147483647 '8kB') 'effective_io_concurrency':Integer(90300 <none> 0 1000 <none>) 'enable_async_append':Bool(140000 <none>) 'enable_bitmapscan':Bool(90300 <none>) 'enable_gathermerge':Bool(100000 <none>) 'enable_hashagg':Bool(90300 <none>) 'enable_hashjoin':Bool(90300 <none>) 'enable_incremental_sort':Bool(130000 <none>) 'enable_indexonlyscan':Bool(90300 <none>) 'enable_indexscan':Bool(90300 <none>) 'enable_material':Bool(90300 <none>) 'enable_mergejoin':Bool(90300 <none>) 'enable_nestloop':Bool(90300 <none>) 'enable_parallel_append':Bool(110000 <none>) 'enable_parallel_hash':Bool(110000 <none>) 'enable_partition_pruning':Bool(110000 <none>) 'enable_partitionwise_aggregate':Bool(110000 <none>) 'enable_partitionwise_join':Bool(110000 <none>) 'enable_seqscan':Bool(90300 <none>) 'enable_sort':Bool(90300 <none>) 'enable_tidscan':Bool(90300 <none>) 'escape_string_warning':Bool(90300 <none>) 'event_source':String(90300 <none>) 'exit_on_error':Bool(90300 <none>) 'extension_destdir':String(140000 <none>) 'external_pid_file':String(90300 <none>) 'extra_float_digits':Integer(90300 <none> -15 3 <none>) 'force_parallel_mode':EnumBool(90600 <none> ('regress' )) 'from_collapse_limit':Integer(90300 <none> 1 2147483647 <none>) 'fsync':Bool(90300 <none>) 'full_page_writes':Bool(90300 <none>) 'geqo':Bool(90300 <none>) 'geqo_effort':Integer(90300 <none> 1 10 <none>) 'geqo_generations':Integer(90300 <none> 0 2147483647 <none>) 'geqo_pool_size':Integer(90300 <none> 0 2147483647 <none>) 'geqo_seed':Real(90300 <none> 0 1 <none>) 'geqo_selection_bias':Real(90300 <none> 1.5 2 <none>) 'geqo_threshold':Integer(90300 <none> 2 2147483647 <none>) 'gin_fuzzy_search_limit':Integer(90300 <none> 0 2147483647 <none>) 'gin_pending_list_limit':Integer(90500 <none> 64 2147483647 'kB') 'hash_mem_multiplier':Real(130000 <none> 1 1000 <none>) 'hba_file':String(90300 <none>) 'hot_standby':Bool(90300 <none>) 'hot_standby_feedback':Bool(90300 <none>) 'huge_pages':EnumBool(90400 <none> ('try' )) 'huge_page_size':Integer(140000 <none> '0' '2147483647' 'kB') 'ident_file':String(90300 <none>) 'idle_in_transaction_session_timeout':Integer(90600 <none> 0 2147483647 'ms') 'idle_session_timeout':Integer(140000 <none> '0' '2147483647' 'ms') 'ignore_checksum_failure':Bool(90300 <none>) 'ignore_invalid_pages':Bool(130000 <none>) 'ignore_system_indexes':Bool(90300 <none>) 'IntervalStyle':Enum(90300 <none> ('postgres' 'postgres_verbose' 'sql_standard' 'iso_8601')) 'jit':Bool(110000 <none>) 'jit_above_cost':Real(110000 <none> -1 1.79769e+308 <none>) 'jit_debugging_support':Bool(110000 <none>) 'jit_dump_bitcode':Bool(110000 <none>) 'jit_expressions':Bool(110000 <none>) 'jit_inline_above_cost':Real(110000 <none> -1 1.79769e+308 <none>) 'jit_optimize_above_cost':Real(110000 <none> -1 1.79769e+308 <none>) 'jit_profiling_support':Bool(110000 <none>) 'jit_provider':String(110000 <none>) 'jit_tuple_deforming':Bool(110000 <none>) 'join_collapse_limit':Integer(90300 <none> 1 2147483647 <none>) 'krb_caseins_users':Bool(90300 <none>) 'krb_server_keyfile':String(90300 <none>) 'krb_srvname':String(90300 90400) 'lc_messages':String(90300 <none>) 'lc_monetary':String(90300 <none>) 'lc_numeric':String(90300 <none>) 'lc_time':String(90300 <none>) 'listen_addresses':String(90300 <none>) 'local_preload_libraries':String(90300 <none>) 'lock_timeout':Integer(90300 <none> 0 2147483647 'ms') 
'lo_compat_privileges':Bool(90300 <none>) 'log_autovacuum_min_duration':Integer(90300 <none> -1 2147483647 'ms') 'log_checkpoints':Bool(90300 <none>) 'log_connections':Bool(90300 <none>) 'log_destination':String(90300 <none>) 'log_directory':String(90300 <none>) 'log_disconnections':Bool(90300 <none>) 'log_duration':Bool(90300 <none>) 'log_error_verbosity':Enum(90300 <none> ('terse' 'default' 'verbose')) 'log_executor_stats':Bool(90300 <none>) 'log_file_mode':Integer(90300 <none> 0 511 <none>) 'log_filename':String(90300 <none>) 'logging_collector':Bool(90300 <none>) 'log_hostname':Bool(90300 <none>) 'logical_decoding_work_mem':Integer(130000 <none> 64 2147483647 'kB') 'log_line_prefix':String(90300 <none>) 'log_lock_waits':Bool(90300 <none>) 'log_min_duration_sample':Integer(130000 <none> -1 2147483647 'ms') 'log_min_duration_statement':Integer(90300 <none> -1 2147483647 'ms') 'log_min_error_statement':Enum(90300 <none> ('debug5' 'debug4' 'debug3' 'debug2' 'debug1' 'info' 'notice' 'warning' 'error' 'log' 'fatal' 'panic')) 'log_min_messages':Enum(90300 <none> ('debug5' 'debug4' 'debug3' 'debug2' 'debug1' 'info' 'notice' 'warning' 'error' 'log' 'fatal' 'panic')) 'log_parameter_max_length':Integer(130000 <none> -1 1073741823 'B') 'log_parameter_max_length_on_error':Integer(130000 <none> -1 1073741823 'B') 'log_parser_stats':Bool(90300 <none>) 'log_planner_stats':Bool(90300 <none>) 'log_recovery_conflict_waits':Bool(140000 <none>) 'log_replication_commands':Bool(90500 <none>) 'log_rotation_age':Integer(90300 <none> 0 35791394 'min') 'log_rotation_size':Integer(90300 <none> 0 2097151 'kB') 'log_statement':Enum(90300 <none> ('none' 'ddl' 'mod' 'all')) 'log_statement_sample_rate':Real(130000 <none> 0 1 <none>) 'log_statement_stats':Bool(90300 <none>) 'log_temp_files':Integer(90300 <none> -1 2147483647 'kB') 'log_timezone':String(90300 <none>) 'log_transaction_sample_rate':Real(120000 <none> 0 1 <none>) 'log_truncate_on_rotation':Bool(90300 <none>) 'maintenance_io_concurrency':Integer(130000 <none> 0 1000 <none>) 'maintenance_work_mem':Integer(90300 <none> 1024 2147483647 'kB') 'max_connections':(Integer(90300 90600 1 8388607 <none>) Integer(90600 <none> 1 262143 <none>)) 'max_files_per_process':(Integer(90300 130000 25 2147483647 <none>) Integer(130000 <none> 64 2147483647 <none>)) 'max_locks_per_transaction':Integer(90300 <none> 10 2147483647 <none>) 'max_logical_replication_workers':Integer(100000 <none> 0 262143 <none>) 'max_parallel_maintenance_workers':Integer(110000 <none> 0 1024 <none>) 'max_parallel_workers':Integer(100000 <none> 0 1024 <none>) 'max_parallel_workers_per_gather':Integer(90600 <none> 0 1024 <none>) 'max_pred_locks_per_page':Integer(100000 <none> 0 2147483647 <none>) 'max_pred_locks_per_relation':Integer(100000 <none> -2147483648 2147483647 <none>) 'max_pred_locks_per_transaction':Integer(90300 <none> 10 2147483647 <none>) 'max_prepared_transactions':(Integer(90300 90600 0 8388607 <none>) Integer(90600 <none> 0 262143 <none>)) 'max_replication_slots':(Integer(90400 90600 0 8388607 <none>) Integer(90600 <none> 0 262143 <none>)) 'max_slot_wal_keep_size':Integer(130000 <none> -1 2147483647 'MB') 'max_stack_depth':Integer(90300 <none> 100 2147483647 'kB') 'max_standby_archive_delay':Integer(90300 <none> -1 2147483647 'ms') 'max_standby_streaming_delay':Integer(90300 <none> -1 2147483647 'ms') 'max_sync_workers_per_subscription':Integer(100000 <none> 0 262143 <none>) 'max_wal_senders':(Integer(90300 90600 0 8388607 <none>) Integer(90600 <none> 0 262143 <none>)) 
'max_wal_size':(Integer(90500 100000 2 2147483647 '16MB') Integer(100000 <none> 2 2147483647 'MB')) 'max_worker_processes':(Integer(90400 90600 1 8388607 <none>) Integer(90600 <none> 0 262143 <none>)) 'min_dynamic_shared_memory':Integer(140000 <none> '0' '2147483647' 'MB') 'min_parallel_index_scan_size':Integer(100000 <none> 0 715827882 '8kB') 'min_parallel_relation_size':Integer(90600 100000 0 715827882 '8kB') 'min_parallel_table_scan_size':Integer(100000 <none> 0 715827882 '8kB') 'min_wal_size':(Integer(90500 100000 2 2147483647 '16MB') Integer(100000 <none> 2 2147483647 'MB')) 'old_snapshot_threshold':Integer(90600 <none> -1 86400 'min') 'operator_precedence_warning':Bool(90500 140000) 'parallel_leader_participation':Bool(110000 <none>) 'parallel_setup_cost':Real(90600 <none> 0 1.79769e+308 <none>) 'parallel_tuple_cost':Real(90600 <none> 0 1.79769e+308 <none>) 'password_encryption':(Bool(90300 100000) Enum(100000 <none> ('md5' 'scram-sha-256'))) 'plan_cache_mode':Enum(120000 <none> ('auto' 'force_generic_plan' 'force_custom_plan')) 'port':Integer(90300 <none> 1 65535 <none>) 'post_auth_delay':Integer(90300 <none> 0 2147 's') 'pre_auth_delay':Integer(90300 <none> 0 60 's') 'quote_all_identifiers':Bool(90300 <none>) 'random_page_cost':Real(90300 <none> 0 1.79769e+308 <none>) 'recovery_init_sync_method':Enum(140000 <none> ('fsync' 'syncfs')) 'remove_temp_files_after_crash':Bool(140000 <none>) 'replacement_sort_tuples':Integer(90600 110000 0 2147483647 <none>) 'restart_after_crash':Bool(90300 <none>) 'row_security':Bool(90500 <none>) 'search_path':String(90300 <none>) 'seq_page_cost':Real(90300 <none> 0 1.79769e+308 <none>) 'session_preload_libraries':String(90400 <none>) 'session_replication_role':Enum(90300 <none> ('origin' 'replica' 'local')) 'shared_buffers':Integer(90300 <none> 16 1073741823 '8kB') 'shared_memory_type':Enum(120000 <none> ('sysv' 'mmap')) 'shared_preload_libraries':String(90300 <none>) 'sql_inheritance':Bool(90300 100000) 'ssl':Bool(90300 <none>) 'ssl_ca_file':String(90300 <none>) 'ssl_cert_file':String(90300 <none>) 'ssl_ciphers':String(90300 <none>) 'ssl_crl_dir':String(140000 <none>) 'ssl_crl_file':String(90300 <none>) 'ssl_dh_params_file':String(100000 <none>) 'ssl_ecdh_curve':String(90400 <none>) 'ssl_key_file':String(90300 <none>) 'ssl_max_protocol_version':Enum(120000 <none> ('' 'tlsv1' 'tlsv1.1' 'tlsv1.2' 'tlsv1.3')) 'ssl_min_protocol_version':Enum(120000 <none> ('tlsv1' 'tlsv1.1' 'tlsv1.2' 'tlsv1.3')) 'ssl_passphrase_command':String(110000 <none>) 'ssl_passphrase_command_supports_reload':Bool(110000 <none>) 'ssl_prefer_server_ciphers':Bool(90400 <none>) 'ssl_renegotiation_limit':Integer(90300 90500 0 2147483647 'kB') 'standard_conforming_strings':Bool(90300 <none>) 'statement_timeout':Integer(90300 <none> 0 2147483647 'ms') 'stats_temp_directory':String(90300 <none>) 'superuser_reserved_connections':(Integer(90300 90600 0 8388607 <none>) Integer(90600 <none> 0 262143 <none>)) 'synchronize_seqscans':Bool(90300 <none>) 'synchronous_commit':(EnumBool(90300 90600 ('local' 'remote_write')) EnumBool(90600 <none> ('local' 'remote_write' 'remote_apply'))) 'synchronous_standby_names':String(90300 <none>) 'syslog_facility':Enum(90300 <none> ('local0' 'local1' 'local2' 'local3' 'local4' 'local5' 'local6' 'local7')) 'syslog_ident':String(90300 <none>) 'syslog_sequence_numbers':Bool(90600 <none>) 'syslog_split_messages':Bool(90600 <none>) 'tcp_keepalives_count':Integer(90300 <none> 0 2147483647 <none>) 'tcp_keepalives_idle':Integer(90300 <none> 0 2147483647 's') 
'tcp_keepalives_interval':Integer(90300 <none> 0 2147483647 's') 'tcp_user_timeout':Integer(120000 <none> 0 2147483647 'ms') 'temp_buffers':Integer(90300 <none> 100 1073741823 '8kB') 'temp_file_limit':Integer(90300 <none> -1 2147483647 'kB') 'temp_tablespaces':String(90300 <none>) 'TimeZone':String(90300 <none>) 'timezone_abbreviations':String(90300 <none>) 'trace_notify':Bool(90300 <none>) 'trace_recovery_messages':Enum(90300 <none> ('debug5' 'debug4' 'debug3' 'debug2' 'debug1' 'log' 'notice' 'warning' 'error')) 'trace_sort':Bool(90300 <none>) 'track_activities':Bool(90300 <none>) 'track_activity_query_size':(Integer(90300 110000 100 102400 <none>) Integer(110000 130000 100 102400 'B') Integer(130000 <none> 100 1048576 'B')) 'track_commit_timestamp':Bool(90500 <none>) 'track_counts':Bool(90300 <none>) 'track_functions':Enum(90300 <none> ('none' 'pl' 'all')) 'track_io_timing':Bool(90300 <none>) 'track_wal_io_timing':Bool(140000 <none>) 'transaction_deferrable':Bool(90300 <none>) 'transaction_isolation':Enum(90300 <none> ('serializable' 'repeatable read' 'read committed' 'read uncommitted')) 'transaction_read_only':Bool(90300 <none>) 'transform_null_equals':Bool(90300 <none>) 'unix_socket_directories':String(90300 <none>) 'unix_socket_group':String(90300 <none>) 'unix_socket_permissions':Integer(90300 <none> 0 511 <none>) 'update_process_title':Bool(90300 <none>) 'vacuum_cleanup_index_scale_factor':Real(110000 140000 0 1e+10 <none>) 'vacuum_cost_delay':(Integer(90300 120000 0 100 'ms') Real(120000 <none> 0 100 'ms')) 'vacuum_cost_limit':Integer(90300 <none> 1 10000 <none>) 'vacuum_cost_page_dirty':Integer(90300 <none> 0 10000 <none>) 'vacuum_cost_page_hit':Integer(90300 <none> 0 10000 <none>) 'vacuum_cost_page_miss':Integer(90300 <none> 0 10000 <none>) 'vacuum_defer_cleanup_age':Integer(90300 <none> 0 1000000 <none>) 'vacuum_failsafe_age':Integer(140000 <none> '0' '2100000000' <none>) 'vacuum_freeze_min_age':Integer(90300 <none> 0 1000000000 <none>) 'vacuum_freeze_table_age':Integer(90300 <none> 0 2000000000 <none>) 'vacuum_multixact_failsafe_age':Integer(140000 <none> '0' '2100000000' <none>) 'vacuum_multixact_freeze_min_age':Integer(90300 <none> 0 1000000000 <none>) 'vacuum_multixact_freeze_table_age':Integer(90300 <none> 0 2000000000 <none>) 'wal_buffers':Integer(90300 <none> -1 262143 '8kB') 'wal_compression':Bool(90500 <none>) 'wal_consistency_checking':String(100000 <none>) 'wal_init_zero':Bool(120000 <none>) 'wal_keep_segments':Integer(90300 130000 0 2147483647 <none>) 'wal_keep_size':Integer(130000 <none> 0 2147483647 'MB') 'wal_level':(Enum(90300 90400 ('minimal' 'archive' 'hot_standby')) Enum(90400 90600 ('minimal' 'archive' 'hot_standby' 'logical')) Enum(90600 <none> ('minimal' 'replica' 'logical'))) 'wal_log_hints':Bool(90400 <none>) 'wal_receiver_create_temp_slot':Bool(130000 <none>) 'wal_receiver_status_interval':Integer(90300 <none> 0 2147483 's') 'wal_receiver_timeout':Integer(90300 <none> 0 2147483647 'ms') 'wal_recycle':Bool(120000 <none>) 'wal_retrieve_retry_interval':Integer(90500 <none> 1 2147483647 'ms') 'wal_sender_timeout':Integer(90300 <none> 0 2147483647 'ms') 'wal_skip_threshold':Integer(130000 <none> 0 2147483647 'kB') 'wal_sync_method':Enum(90300 <none> ('fsync' 'fdatasync' 'open_sync' 'open_datasync')) 'wal_writer_delay':Integer(90300 <none> 1 10000 'ms') 'wal_writer_flush_after':Integer(90600 <none> 0 2147483647 '8kB') 'work_mem':Integer(90300 <none> 64 2147483647 'kB') 'xmlbinary':Enum(90300 <none> ('base64' 'hex')) 'xmloption':Enum(90300 <none> ('content' 
'document')) 'zero_damaged_pages':Bool(90300 <none>)})<line_sep>recovery_parameters=CaseInsensitiveDict({'archive_cleanup_command':String(90300 <none>) 'pause_at_recovery_target':Bool(90300 90500) 'primary_conninfo':String(90300 <none>) 'primary_slot_name':String(90400 <none>) 'promote_trigger_file':String(120000 <none>) 'recovery_end_command':String(90300 <none>) 'recovery_min_apply_delay':Integer(90400 <none> 0 2147483647 'ms') 'recovery_target':Enum(90400 <none> ('immediate' '')) 'recovery_target_action':Enum(90500 <none> ('pause' 'promote' 'shutdown')) 'recovery_target_inclusive':Bool(90300 <none>) 'recovery_target_lsn':String(100000 <none>) 'recovery_target_name':String(90400 <none>) 'recovery_target_time':String(90300 <none>) 'recovery_target_timeline':String(90300 <none>) 'recovery_target_xid':String(90300 <none>) 'restore_command':String(90300 <none>) 'standby_mode':Bool(90300 120000) 'trigger_file':String(90300 120000)})<def_stmt>_transform_parameter_value validators version name value<block_start>validators=validators.get(name)<if_stmt>validators<block_start><for_stmt>validator (validators<if>isinstance(validators[0] tuple)<else>[validators])<block_start><if_stmt>version<ge>validator.version_from<and>(validator.version_till<is><none><or>version<l>validator.version_till)<block_start><return>validator.transform(name value)<block_end><block_end><block_end>logger.warning('Removing unexpected parameter=%s value=%s from the config' name value)<block_end><def_stmt>transform_postgresql_parameter_value version name value<block_start><if_stmt>'.'<in>name<block_start><return>value<block_end><if_stmt>name<in>recovery_parameters<block_start><return><none><block_end><return>_transform_parameter_value(parameters version name value)<block_end><def_stmt>transform_recovery_parameter_value version name value<block_start><return>_transform_parameter_value(recovery_parameters version name value)<block_end>
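# A few worked calls against the validator tables above. Version numbers use the
# PostgreSQL server_version_num style (130000 == 13.0); behaviour follows
# _transform_parameter_value: out-of-range numbers are clamped, invalid values
# make the function return None so the parameter is dropped from the config.
print(transform_postgresql_parameter_value(130000, 'autovacuum_naptime', '60'))
# -> '60'  (within the allowed 1..2147483 s range, returned unchanged)

print(transform_postgresql_parameter_value(130000, 'max_connections', '0'))
# -> 1     (below the minimum, increased to min_val)

print(transform_postgresql_parameter_value(130000, 'wal_level', 'bogus'))
# -> None  (not a valid enum value, parameter is removed)

print(transform_postgresql_parameter_value(130000, 'restore_command', '/bin/true'))
# -> None  (recovery parameter, rejected here)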
# -*- coding: utf-8 -*- """ Unit tests for the synchrofact detection app """<import_stmt>unittest<import_stmt>neo<import_stmt>numpy<as>np<import_stmt>quantities<as>pq<import_from_stmt>elephant utils<import_from_stmt>numpy.testing assert_array_equal<class_stmt>TestUtils(unittest.TestCase)<block_start><def_stmt>test_check_neo_consistency self<block_start>self.assertRaises(TypeError utils.check_neo_consistency [] object_type=neo.SpikeTrain)<line_sep>self.assertRaises(TypeError utils.check_neo_consistency [neo.SpikeTrain([1]<times>pq.s t_stop=2<times>pq.s) np.arange(2)] object_type=neo.SpikeTrain)<line_sep>self.assertRaises(ValueError utils.check_neo_consistency [neo.SpikeTrain([1]<times>pq.s t_start=1<times>pq.s t_stop=2<times>pq.s) neo.SpikeTrain([1]<times>pq.s t_start=0<times>pq.s t_stop=2<times>pq.s)] object_type=neo.SpikeTrain)<line_sep>self.assertRaises(ValueError utils.check_neo_consistency [neo.SpikeTrain([1]<times>pq.s t_stop=2<times>pq.s) neo.SpikeTrain([1]<times>pq.s t_stop=3<times>pq.s)] object_type=neo.SpikeTrain)<line_sep>self.assertRaises(ValueError utils.check_neo_consistency [neo.SpikeTrain([1]<times>pq.ms t_stop=2000<times>pq.ms) neo.SpikeTrain([1]<times>pq.s t_stop=2<times>pq.s)] object_type=neo.SpikeTrain)<block_end><def_stmt>test_round_binning_errors self<block_start><with_stmt>self.assertWarns(UserWarning)<block_start>n_bins=utils.round_binning_errors(0.999999 tolerance=1e-6)<line_sep>self.assertEqual(n_bins 1)<block_end>self.assertEqual(utils.round_binning_errors(0.999999 tolerance=<none>) 0)<line_sep>array=np.array([0 0.7 1-1e-8 1-1e-9])<with_stmt>self.assertWarns(UserWarning)<block_start>corrected=utils.round_binning_errors(array.copy())<line_sep>assert_array_equal(corrected [0 0 1 1])<block_end>assert_array_equal(utils.round_binning_errors(array.copy() tolerance=<none>) [0 0 0 0])<block_end><block_end><if_stmt>__name__<eq>'__main__'<block_start>unittest.main()<block_end>
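# For reference, the rounding behaviour exercised by test_round_binning_errors in
# plain numbers: a value within `tolerance` of the next integer is rounded up
# (with a warning), otherwise the number of bins is simply floored.
from elephant import utils

print(utils.round_binning_errors(0.999999, tolerance=1e-6))   # 1 (corrected upward)
print(utils.round_binning_errors(0.999999, tolerance=None))   # 0 (plain floor)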
<import_stmt>pickle<import_stmt>pytest<import_from_stmt>spans daterangeset datetimerangeset floatrange floatrangeset intrange intrangeset strrangeset timedeltarangeset<def_stmt>test_empty <block_start><assert_stmt><not>intrangeset([])<block_end><def_stmt>test_non_empty <block_start><assert_stmt>intrangeset([intrange(1 5)])<block_end>@pytest.mark.parametrize("rangeset, span" [(intrangeset([intrange(1 5) intrange(10 15)]) intrange(1 15)) (intrangeset([]) intrange.empty()) ])<def_stmt>test_span rangeset span<block_start><assert_stmt>rangeset.span()<eq>span<block_end><def_stmt>test_iteration <block_start>ranges=[intrange(1 5) intrange(10 15)]<assert_stmt>list(intrangeset(ranges))<eq>ranges<block_end><def_stmt>test_copy <block_start>rset=intrangeset([intrange(1 5) intrange(10 15)])<line_sep>rcopy=rset.copy()<assert_stmt>list(rset)<eq>list(rcopy)<assert_stmt>rset._list<is><not>rcopy._list<block_end>@pytest.mark.parametrize("value" [intrange(1 5) intrange(5 10) intrange.empty() 1 5 ])<def_stmt>test_contains value<block_start><assert_stmt>intrangeset([intrange(1 10)]).contains(value)<block_end>@pytest.mark.parametrize("value" [intrange(5 15) 10 ])<def_stmt>test_not_contains value<block_start><assert_stmt><not>intrangeset([intrange(1 10)]).contains(value)<block_end>@pytest.mark.parametrize("rset" [intrangeset([]) intrangeset([intrange(1 5)]) ])<def_stmt>test_contains_empty rset<block_start><assert_stmt>rset.contains(intrange.empty())<block_end><def_stmt>test_contains_type_check <block_start><with_stmt>pytest.raises(ValueError)<block_start>intrangeset([]).contains(1.0)<block_end><with_stmt>pytest.raises(ValueError)<block_start>intrangeset([]).contains(floatrangeset([]))<block_end><block_end><def_stmt>test_add <block_start>rset=intrangeset([intrange(1 15)])<line_sep>rset.add(intrange(5 15))<assert_stmt>list(rset)<eq>[intrange(1 15)]<with_stmt>pytest.raises(TypeError)<block_start>rset.add(floatrange(1.0))<block_end><block_end><def_stmt>test_remove <block_start>rset=intrangeset([intrange(upper=1) intrange(5)])<line_sep>rset.remove(intrange(10 15))<assert_stmt>rset<eq>intrangeset([intrange(upper=1) intrange(5 10) intrange(15)])<line_sep># Test deletion of empty set temp=rset.copy()<line_sep>temp.remove(intrange.empty())<assert_stmt>rset<eq>temp<line_sep># Test total deletion rset.remove(intrange())<assert_stmt>rset<eq>intrangeset([])<line_sep># Test deletion on empty set temp=intrangeset([])<line_sep>temp.remove(intrange(1 5))<assert_stmt>temp<eq>intrangeset([])<with_stmt>pytest.raises(TypeError)<block_start>rset.remove(floatrange(1.0))<block_end><block_end><def_stmt>test_invert <block_start>rset=intrangeset([intrange(1 5) intrange(10 15)])<line_sep>rset_inv=intrangeset([intrange(upper=1) intrange(5 10) intrange(15)])<assert_stmt>~rset<eq>rset_inv<assert_stmt>rset<eq>~~rset<block_end><def_stmt>test_union <block_start>a=intrangeset([intrange(1 5) intrange(20 30)])<line_sep>b=intrangeset([intrange(5 10) intrange(20 100)])<line_sep>union=[intrange(1 10) intrange(20 100)]<assert_stmt>list(a.union(b))<eq>union<assert_stmt>list(a|b)<eq>union<with_stmt>pytest.raises(TypeError)<block_start>intrangeset([]).union(intrange())<block_end><assert_stmt>intrangeset([]).__or__(intrange())<is>NotImplemented<block_end><def_stmt>test_difference <block_start>a=intrangeset([intrange(1 5) intrange(20 30)])<line_sep>b=intrangeset([intrange(5 10) intrange(20 100)])<line_sep>difference=[intrange(1 
5)]<assert_stmt>list(a.difference(b))<eq>difference<assert_stmt>list(a-b)<eq>difference<with_stmt>pytest.raises(TypeError)<block_start>intrangeset([]).difference(intrange())<block_end><assert_stmt>intrangeset([]).__sub__(intrange())<is>NotImplemented<block_end><def_stmt>test_intersection <block_start>a=intrangeset([intrange(1 5) intrange(20 30)])<line_sep>b=intrangeset([intrange(5 10) intrange(20 100)])<line_sep>intersection=[intrange(20 30)]<assert_stmt>list(a.intersection(b))<eq>intersection<assert_stmt>list(a&b)<eq>intersection<assert_stmt><not>intrangeset([intrange(1 5)]).intersection(intrangeset([intrange(5 10)]))<with_stmt>pytest.raises(TypeError)<block_start>intrangeset([]).intersection(intrange())<block_end><assert_stmt>intrangeset([]).__and__(intrange())<is>NotImplemented<block_end><def_stmt>test_values <block_start>values=intrangeset([intrange(1 5) intrange(10 15)]).values()<assert_stmt>list(values)<eq>list(range(1 5))+list(range(10 15))<block_end>@pytest.mark.parametrize("span, repr_str" [(intrangeset([]) "intrangeset([])") (intrangeset([intrange(1)]) "intrangeset([intrange(1)])") ])<def_stmt>test_repr span repr_str<block_start><assert_stmt>repr(span)<eq>repr_str<block_end><def_stmt>test_pickling <block_start>span=intrangeset([intrange(1 10) intrange(20 30)])<assert_stmt>span<eq>pickle.loads(pickle.dumps(span))<block_end><def_stmt>test_equal <block_start>range_a=intrange(1 5)<line_sep>range_b=intrange(10 15)<assert_stmt>intrangeset([range_a range_b])<eq>intrangeset([range_a range_b])<assert_stmt><not>intrangeset([range_a range_b])<eq>intrangeset([range_a])<assert_stmt><not>intrangeset([range_a])<eq>"foo"<block_end><def_stmt>test_less_than <block_start>range_a=intrange(1 5)<line_sep>range_b=intrange(10 15)<assert_stmt><not>intrangeset([range_a range_b])<l>intrangeset([range_a])<assert_stmt>intrangeset([range_a range_b])<l>intrangeset([range_b])<assert_stmt><not>intrangeset([range_a range_b])<le>intrangeset([range_a])<assert_stmt><not>intrangeset([range_a])<eq>"foo"<block_end><def_stmt>test_greater_than <block_start>range_a=intrange(1 5)<line_sep>range_b=intrange(10 15)<assert_stmt>intrangeset([range_a range_b])<g>intrangeset([range_a])<assert_stmt><not>intrangeset([range_a range_b])<g>intrangeset([range_b])<assert_stmt>intrangeset([range_b])<g>intrangeset([range_a range_b])<assert_stmt>intrangeset([range_a range_b])<ge>intrangeset([range_a])<block_end><def_stmt>test_bug3_intersection <block_start>""" `Bug #3 <https://github.com/runfalk/spans/issues/3>`_ """<line_sep>range_a=intrange(1 5)<line_sep>range_b=intrange(5 10)<line_sep>range_c=intrange(10 15)<line_sep>rangeset_a=intrangeset([range_a range_c])<line_sep>rangeset_b=intrangeset([range_b])<line_sep>rangeset_c=intrangeset([range_c])<line_sep>rangeset_empty=intrangeset([])<assert_stmt>rangeset_a.intersection(rangeset_b rangeset_c)<eq>rangeset_empty<block_end><def_stmt>test_bug4_empty_set_iteration <block_start>""" `Bug #4 <https://github.com/runfalk/spans/issues/4>`_ """<assert_stmt>list(intrangeset([]))<eq>[]<block_end>@pytest.mark.parametrize("cls" [daterangeset datetimerangeset intrangeset floatrangeset strrangeset timedeltarangeset ])<def_stmt>test_bug10_missing_slots_in_cls_hierarchy cls<block_start>""" `Bug #10 <https://github.com/runfalk/spans/issues/10`_ """<for_stmt>c cls.mro()<block_start><if_stmt>c<is>object<block_start><continue><block_end><assert_stmt>hasattr(c "__slots__")<block_end><block_end><def_stmt>test_bug14_pickle_not_working_for_rangesets <block_start>""" `Bug #14 
<https://github.com/runfalk/spans/issues/14`_ """<line_sep># If __getstate__ returns a falsy value __setstate__ will not be called # when loading the value again, which is why this bug occured range_set=floatrangeset([])<line_sep>pickled=pickle.dumps(range_set protocol=1)<line_sep>pickle.loads(pickled)<assert_stmt>range_set<eq>pickle.loads(pickled)<line_sep># We need to ensure that code pickled using protocol 1 by spans versions # before 1.1.0 still loads old_data=(b"ccopy_reg\n_reconstructor\nq\x00(cspans.settypes\nfloatrangeset\n"<concat>b"q\x01c__builtin__\nobject\nq\x02Ntq\x03Rq\x04]q\x05h\x00(cspans."<concat>b"types\nfloatrange\nq\x06h\x02Ntq\x07Rq\x08}q\tX\x06\x00\x00\x00_"<concat>b"rangeq\nh\x00(cspans.types\n_internal_range\nq\x0bc__builtin__\n"<concat>b"tuple\nq\x0c(G?\xf0\x00\x00\x00\x00\x00\x00NI01\nI00\nI00\ntq\rtq"<concat>b"\x0eRq\x0fsbab.")<assert_stmt>pickle.loads(old_data)<eq>floatrangeset([floatrange(1.0)])<block_end>
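# Quick interactive recap of the set algebra exercised by the tests above.
from spans import intrange, intrangeset

a = intrangeset([intrange(1, 5), intrange(20, 30)])
b = intrangeset([intrange(5, 10), intrange(20, 100)])

print(list(a | b))   # union:        [1, 10) and [20, 100)
print(list(a - b))   # difference:   [1, 5)
print(list(a & b))   # intersection: [20, 30)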
<import_stmt>pybithumb<import_stmt>numpy<as>np<line_sep>df=pybithumb.get_ohlcv("BTC")<line_sep>df['range']=(df['high']-df['low'])<times>0.5<line_sep>df['target']=df['open']+df['range'].shift(1)<line_sep>df['ror']=np.where(df['high']<g>df['target'] df['close']/df['target'] 1)<line_sep>df.to_excel("trade.xlsx")<line_sep>
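# A possible next step for the volatility-breakout backtest above (not in the
# original script): aggregate the per-day returns into a holding-period return
# and a maximum drawdown before inspecting trade.xlsx.
df['hpr'] = df['ror'].cumprod()                                         # cumulative return
df['dd'] = (df['hpr'].cummax() - df['hpr']) / df['hpr'].cummax() * 100  # drawdown in %
print("HPR:", df['hpr'].iloc[-1], " MDD(%):", df['dd'].max())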
<import_from_stmt>math ceil floor<import_from_stmt>typing Dict List Optional<import_stmt>numpy<as>np<import_stmt>pytorch_lightning<as>pl<import_stmt>torch<import_from_stmt>torch.utils.data DataLoader Dataset<import_from_stmt>scvi REGISTRY_KEYS settings<import_from_stmt>scvi.data AnnDataManager<import_from_stmt>scvi.data._utils get_anndata_attribute<import_from_stmt>scvi.dataloaders._ann_dataloader AnnDataLoader BatchSampler<import_from_stmt>scvi.dataloaders._semi_dataloader SemiSupervisedDataLoader<import_from_stmt>scvi.model._utils parse_use_gpu_arg<def_stmt>validate_data_split n_samples:int train_size:float validation_size:Optional[float]=<none><block_start>""" Check data splitting parameters and return n_train and n_val. Parameters ---------- n_samples Number of samples to split train_size Size of train set. Need to be: 0 < train_size <= 1. validation_size Size of validation set. Need to be 0 <= validation_size < 1 """<if_stmt>train_size<g>1.0<or>train_size<le>0.0<block_start><raise>ValueError("Invalid train_size. Must be: 0 < train_size <= 1")<block_end>n_train=ceil(train_size<times>n_samples)<if_stmt>validation_size<is><none><block_start>n_val=n_samples-n_train<block_end><elif_stmt>validation_size<ge>1.0<or>validation_size<l>0.0<block_start><raise>ValueError("Invalid validation_size. Must be 0 <= validation_size < 1")<block_end><elif_stmt>(train_size+validation_size)<g>1<block_start><raise>ValueError("train_size + validation_size must be between 0 and 1")<block_end><else_stmt><block_start>n_val=floor(n_samples<times>validation_size)<block_end><if_stmt>n_train<eq>0<block_start><raise>ValueError("With n_samples={}, train_size={} and validation_size={}, the "<concat>"resulting train set will be empty. Adjust any of the "<concat>"aforementioned parameters.".format(n_samples train_size validation_size))<block_end><return>n_train n_val<block_end><class_stmt>DataSplitter(pl.LightningDataModule)<block_start>""" Creates data loaders ``train_set``, ``validation_set``, ``test_set``. If ``train_size + validation_set < 1`` then ``test_set`` is non-empty. Parameters ---------- adata_manager :class:`~scvi.data.AnnDataManager` object that has been created via ``setup_anndata``. train_size float, or None (default is 0.9) validation_size float, or None (default is None) use_gpu Use default GPU if available (if None or True), or index of GPU to use (if int), or name of GPU (if str, e.g., `'cuda:0'`), or use CPU (if False). **kwargs Keyword args for data loader. If adata has labeled data, data loader class is :class:`~scvi.dataloaders.SemiSupervisedDataLoader`, else data loader class is :class:`~scvi.dataloaders.AnnDataLoader`. 
Examples -------- >>> adata = scvi.data.synthetic_iid() >>> scvi.model.SCVI.setup_anndata(adata) >>> adata_manager = scvi.model.SCVI(adata).adata_manager >>> splitter = DataSplitter(adata) >>> splitter.setup() >>> train_dl = splitter.train_dataloader() """<def_stmt>__init__ self adata_manager:AnnDataManager train_size:float=0.9 validation_size:Optional[float]=<none> use_gpu:bool=<false> **kwargs <block_start>super().__init__()<line_sep>self.adata_manager=adata_manager<line_sep>self.train_size=float(train_size)<line_sep>self.validation_size=validation_size<line_sep>self.data_loader_kwargs=kwargs<line_sep>self.use_gpu=use_gpu<line_sep>self.n_train,self.n_val=validate_data_split(self.adata_manager.adata.n_obs self.train_size self.validation_size)<block_end><def_stmt>setup self stage:Optional[str]=<none><block_start>"""Split indices in train/test/val sets."""<line_sep>n_train=self.n_train<line_sep>n_val=self.n_val<line_sep>random_state=np.random.RandomState(seed=settings.seed)<line_sep>permutation=random_state.permutation(self.adata_manager.adata.n_obs)<line_sep>self.val_idx=permutation[:n_val]<line_sep>self.train_idx=permutation[n_val:(n_val+n_train)]<line_sep>self.test_idx=permutation[(n_val+n_train):]<line_sep>gpus,self.device=parse_use_gpu_arg(self.use_gpu return_device=<true>)<line_sep>self.pin_memory=(<true><if>(settings.dl_pin_memory_gpu_training<and>gpus<ne>0)<else><false>)<block_end><def_stmt>train_dataloader self<block_start><return>AnnDataLoader(self.adata_manager indices=self.train_idx shuffle=<true> drop_last=3 pin_memory=self.pin_memory **self.data_loader_kwargs )<block_end><def_stmt>val_dataloader self<block_start><if_stmt>len(self.val_idx)<g>0<block_start><return>AnnDataLoader(self.adata_manager indices=self.val_idx shuffle=<false> drop_last=3 pin_memory=self.pin_memory **self.data_loader_kwargs )<block_end><else_stmt><block_start><pass><block_end><block_end><def_stmt>test_dataloader self<block_start><if_stmt>len(self.test_idx)<g>0<block_start><return>AnnDataLoader(self.adata_manager indices=self.test_idx shuffle=<false> drop_last=3 pin_memory=self.pin_memory **self.data_loader_kwargs )<block_end><else_stmt><block_start><pass><block_end><block_end><block_end><class_stmt>SemiSupervisedDataSplitter(pl.LightningDataModule)<block_start>""" Creates data loaders ``train_set``, ``validation_set``, ``test_set``. If ``train_size + validation_set < 1`` then ``test_set`` is non-empty. The ratio between labeled and unlabeled data in adata will be preserved in the train/test/val sets. Parameters ---------- adata_manager :class:`~scvi.data.AnnDataManager` object that has been created via ``setup_anndata``. train_size float, or None (default is 0.9) validation_size float, or None (default is None) n_samples_per_label Number of subsamples for each label class to sample per epoch use_gpu Use default GPU if available (if None or True), or index of GPU to use (if int), or name of GPU (if str, e.g., `'cuda:0'`), or use CPU (if False). **kwargs Keyword args for data loader. If adata has labeled data, data loader class is :class:`~scvi.dataloaders.SemiSupervisedDataLoader`, else data loader class is :class:`~scvi.dataloaders.AnnDataLoader`. 
Examples -------- >>> adata = scvi.data.synthetic_iid() >>> scvi.model.SCVI.setup_anndata(adata, labels_key="labels") >>> adata_manager = scvi.model.SCVI(adata).adata_manager >>> unknown_label = 'label_0' >>> splitter = SemiSupervisedDataSplitter(adata, unknown_label) >>> splitter.setup() >>> train_dl = splitter.train_dataloader() """<def_stmt>__init__ self adata_manager:AnnDataManager train_size:float=0.9 validation_size:Optional[float]=<none> n_samples_per_label:Optional[int]=<none> use_gpu:bool=<false> **kwargs <block_start>super().__init__()<line_sep>self.adata_manager=adata_manager<line_sep>self.train_size=float(train_size)<line_sep>self.validation_size=validation_size<line_sep>self.data_loader_kwargs=kwargs<line_sep>self.n_samples_per_label=n_samples_per_label<line_sep>labels_state_registry=adata_manager.get_state_registry(REGISTRY_KEYS.LABELS_KEY)<line_sep>labels=get_anndata_attribute(adata_manager.adata adata_manager.data_registry.labels.attr_name labels_state_registry.original_key ).ravel()<line_sep>self.unlabeled_category=labels_state_registry.unlabeled_category<line_sep>self._unlabeled_indices=np.argwhere(labels<eq>self.unlabeled_category).ravel()<line_sep>self._labeled_indices=np.argwhere(labels<ne>self.unlabeled_category).ravel()<line_sep>self.data_loader_kwargs=kwargs<line_sep>self.use_gpu=use_gpu<block_end><def_stmt>setup self stage:Optional[str]=<none><block_start>"""Split indices in train/test/val sets."""<line_sep>n_labeled_idx=len(self._labeled_indices)<line_sep>n_unlabeled_idx=len(self._unlabeled_indices)<if_stmt>n_labeled_idx<ne>0<block_start>n_labeled_train,n_labeled_val=validate_data_split(n_labeled_idx self.train_size self.validation_size)<line_sep>rs=np.random.RandomState(seed=settings.seed)<line_sep>labeled_permutation=rs.choice(self._labeled_indices len(self._labeled_indices) replace=<false>)<line_sep>labeled_idx_val=labeled_permutation[:n_labeled_val]<line_sep>labeled_idx_train=labeled_permutation[n_labeled_val:(n_labeled_val+n_labeled_train)]<line_sep>labeled_idx_test=labeled_permutation[(n_labeled_val+n_labeled_train):]<block_end><else_stmt><block_start>labeled_idx_test=[]<line_sep>labeled_idx_train=[]<line_sep>labeled_idx_val=[]<block_end><if_stmt>n_unlabeled_idx<ne>0<block_start>n_unlabeled_train,n_unlabeled_val=validate_data_split(n_unlabeled_idx self.train_size self.validation_size)<line_sep>rs=np.random.RandomState(seed=settings.seed)<line_sep>unlabeled_permutation=rs.choice(self._unlabeled_indices len(self._unlabeled_indices))<line_sep>unlabeled_idx_val=unlabeled_permutation[:n_unlabeled_val]<line_sep>unlabeled_idx_train=unlabeled_permutation[n_unlabeled_val:(n_unlabeled_val+n_unlabeled_train)]<line_sep>unlabeled_idx_test=unlabeled_permutation[(n_unlabeled_val+n_unlabeled_train):]<block_end><else_stmt><block_start>unlabeled_idx_train=[]<line_sep>unlabeled_idx_val=[]<line_sep>unlabeled_idx_test=[]<block_end>indices_train=np.concatenate((labeled_idx_train unlabeled_idx_train))<line_sep>indices_val=np.concatenate((labeled_idx_val unlabeled_idx_val))<line_sep>indices_test=np.concatenate((labeled_idx_test unlabeled_idx_test))<line_sep>self.train_idx=indices_train.astype(int)<line_sep>self.val_idx=indices_val.astype(int)<line_sep>self.test_idx=indices_test.astype(int)<line_sep>gpus=parse_use_gpu_arg(self.use_gpu 
return_device=<false>)<line_sep>self.pin_memory=(<true><if>(settings.dl_pin_memory_gpu_training<and>gpus<ne>0)<else><false>)<if_stmt>len(self._labeled_indices)<ne>0<block_start>self.data_loader_class=SemiSupervisedDataLoader<line_sep>dl_kwargs={"n_samples_per_label":self.n_samples_per_label }<block_end><else_stmt><block_start>self.data_loader_class=AnnDataLoader<line_sep>dl_kwargs={}<block_end>self.data_loader_kwargs.update(dl_kwargs)<block_end><def_stmt>train_dataloader self<block_start><return>self.data_loader_class(self.adata_manager indices=self.train_idx shuffle=<true> drop_last=3 pin_memory=self.pin_memory **self.data_loader_kwargs )<block_end><def_stmt>val_dataloader self<block_start><if_stmt>len(self.val_idx)<g>0<block_start><return>self.data_loader_class(self.adata_manager indices=self.val_idx shuffle=<false> drop_last=3 pin_memory=self.pin_memory **self.data_loader_kwargs )<block_end><else_stmt><block_start><pass><block_end><block_end><def_stmt>test_dataloader self<block_start><if_stmt>len(self.test_idx)<g>0<block_start><return>self.data_loader_class(self.adata_manager indices=self.test_idx shuffle=<false> drop_last=3 pin_memory=self.pin_memory **self.data_loader_kwargs )<block_end><else_stmt><block_start><pass><block_end><block_end><block_end><class_stmt>DeviceBackedDataSplitter(DataSplitter)<block_start>""" Creates loaders for data that is already on device, e.g., GPU. If ``train_size + validation_set < 1`` then ``test_set`` is non-empty. Parameters ---------- adata_manager :class:`~scvi.data.AnnDataManager` object that has been created via ``setup_anndata``. train_size float, or None (default is 0.9) validation_size float, or None (default is None) use_gpu Use default GPU if available (if None or True), or index of GPU to use (if int), or name of GPU (if str, e.g., `'cuda:0'`), or use CPU (if False). shuffle if ``True``, shuffles indices before sampling for training set shuffle_test_val Shuffle test and validation indices. batch_size batch size of each iteration. 
If `None`, do not minibatch Examples -------- >>> adata = scvi.data.synthetic_iid() >>> scvi.model.SCVI.setup_anndata(adata) >>> adata_manager = scvi.model.SCVI(adata).adata_manager >>> splitter = DeviceBackedDataSplitter(adata) >>> splitter.setup() >>> train_dl = splitter.train_dataloader() """<def_stmt>__init__ self adata_manager:AnnDataManager train_size:float=1.0 validation_size:Optional[float]=<none> use_gpu:bool=<false> shuffle:bool=<false> shuffle_test_val:bool=<false> batch_size:Optional[int]=<none> **kwargs <block_start>super().__init__(adata_manager=adata_manager train_size=train_size validation_size=validation_size use_gpu=use_gpu **kwargs )<line_sep>self.batch_size=batch_size<line_sep>self.shuffle=shuffle<line_sep>self.shuffle_test_val=shuffle_test_val<block_end><def_stmt>setup self stage:Optional[str]=<none><block_start>super().setup()<if_stmt>self.shuffle<is><false><block_start>self.train_idx=np.sort(self.train_idx)<line_sep>self.val_idx=(np.sort(self.val_idx)<if>len(self.val_idx)<g>0<else>self.val_idx)<line_sep>self.test_idx=(np.sort(self.test_idx)<if>len(self.test_idx)<g>0<else>self.test_idx)<block_end>self.train_tensor_dict=self._get_tensor_dict(self.train_idx device=self.device)<line_sep>self.test_tensor_dict=self._get_tensor_dict(self.test_idx device=self.device)<line_sep>self.val_tensor_dict=self._get_tensor_dict(self.val_idx device=self.device)<block_end><def_stmt>_get_tensor_dict self indices device<block_start><if_stmt>len(indices)<is><not><none><and>len(indices)<g>0<block_start>dl=AnnDataLoader(self.adata_manager indices=indices batch_size=len(indices) shuffle=<false> pin_memory=self.pin_memory **self.data_loader_kwargs )<line_sep># will only have one minibatch <for_stmt>batch dl<block_start>tensor_dict=batch<block_end><for_stmt>k,v tensor_dict.items()<block_start>tensor_dict[k]=v.to(device)<block_end><return>tensor_dict<block_end><else_stmt><block_start><return><none><block_end><block_end><def_stmt>_make_dataloader self tensor_dict:Dict[str torch.Tensor] shuffle<block_start><if_stmt>tensor_dict<is><none><block_start><return><none><block_end>dataset=_DeviceBackedDataset(tensor_dict)<line_sep>indices=np.arange(len(dataset))<line_sep>bs=self.batch_size<if>self.batch_size<is><not><none><else>len(indices)<line_sep>sampler=BatchSampler(shuffle=shuffle indices=indices batch_size=bs)<line_sep><return>DataLoader(dataset sampler=sampler batch_size=<none>)<block_end><def_stmt>train_dataloader self<block_start><return>self._make_dataloader(self.train_tensor_dict self.shuffle)<block_end><def_stmt>test_dataloader self<block_start><return>self._make_dataloader(self.test_tensor_dict self.shuffle_test_val)<block_end><def_stmt>val_dataloader self<block_start><return>self._make_dataloader(self.val_tensor_dict self.shuffle_test_val)<block_end><block_end><class_stmt>_DeviceBackedDataset(Dataset)<block_start><def_stmt>__init__ self tensor_dict:Dict[str torch.Tensor]<block_start>self.data=tensor_dict<block_end><def_stmt>__getitem__ self idx:List[int]<arrow>Dict[str torch.Tensor]<block_start>return_dict={}<for_stmt>key,value self.data.items()<block_start>return_dict[key]=value[idx]<block_end><return>return_dict<block_end><def_stmt>__len__ self<block_start><for_stmt>_,value self.data.items()<block_start><return>len(value)<block_end><block_end><block_end>
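# --- Minimal usage sketch for the splitters defined above (assumes scvi-tools is
# installed and that DataSplitter is importable from scvi.dataloaders). It follows the
# docstring examples, but passes the AnnDataManager that the constructor expects.
import scvi
from scvi.dataloaders import DataSplitter

adata = scvi.data.synthetic_iid()
scvi.model.SCVI.setup_anndata(adata)
adata_manager = scvi.model.SCVI(adata).adata_manager

# 80/10/10 split; the remaining 10% of cells becomes the test set
splitter = DataSplitter(adata_manager, train_size=0.8, validation_size=0.1)
splitter.setup()
train_dl = splitter.train_dataloader()
val_dl = splitter.val_dataloader()
test_dl = splitter.test_dataloader()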
<import_stmt>os<import_stmt>pathlib<import_stmt>subprocess<import_stmt>sys<import_stmt>pytest<line_sep>@pytest.fixture()<def_stmt>attack_domain <block_start><return>"enterprise-attack"<block_end>@pytest.fixture()<def_stmt>dir_location <block_start>cwd=os.getcwd()<if_stmt>"tests"<in>cwd<block_start><return>os.path.dirname(cwd)<block_end><else_stmt><block_start><return>cwd<block_end><block_end>@pytest.mark.parametrize("attack_version" ["v8.2" "v9.0"])@pytest.mark.parametrize("rev" ["nist800-53-r4" "nist800-53-r5"])<def_stmt>test_list_mappings dir_location attack_domain attack_version rev<block_start>"""Tests list_mappings.py with both framework entries"""<line_sep>rx_controls=pathlib.Path(dir_location "frameworks" f"ATT&CK-{attack_version}" rev "stix" f"{rev}-controls.json")<line_sep>rx_mappings=pathlib.Path(dir_location "frameworks" f"ATT&CK-{attack_version}" rev "stix" f"{rev}-mappings.json")<line_sep>output_location=pathlib.Path(dir_location "frameworks" f"ATT&CK-{attack_version}" rev f"{rev}-mappings.xlsx")<line_sep>script_location=f"{dir_location}/src/list_mappings.py"<line_sep>child_process=subprocess.Popen([sys.executable script_location "-controls" str(rx_controls) "-mappings" str(rx_mappings) "-domain" attack_domain "-version" attack_version "-output" str(output_location) ])<line_sep>child_process.wait(timeout=240)<assert_stmt>child_process.returncode<eq>0<block_end>@pytest.mark.parametrize("attack_version" ["v8.2" "v9.0"])@pytest.mark.parametrize("rev" ["nist800-53-r4" "nist800-53-r5"])<def_stmt>test_mappings_to_heatmaps dir_location attack_domain attack_version rev<block_start>"""Tests mappings_to_heatmaps.py with both framework entries"""<line_sep>rx_controls=pathlib.Path(dir_location "frameworks" f"ATT&CK-{attack_version}" rev "stix" f"{rev}-controls.json")<line_sep>rx_mappings=pathlib.Path(dir_location "frameworks" f"ATT&CK-{attack_version}" rev "stix" f"{rev}-mappings.json")<line_sep>output_location=pathlib.Path(dir_location "frameworks" f"ATT&CK-{attack_version}" rev "layers")<line_sep>script_location=f"{dir_location}/src/mappings_to_heatmaps.py"<line_sep>child_process=subprocess.Popen([sys.executable script_location "-framework" rev "-controls" str(rx_controls) "-mappings" str(rx_mappings) "-domain" attack_domain "-version" attack_version "-output" str(output_location) "--clear" "--build-directory" ])<line_sep>child_process.wait(timeout=90)<assert_stmt>child_process.returncode<eq>0<block_end>@pytest.mark.parametrize("attack_version" ["v8.2" "v9.0"])@pytest.mark.parametrize("rev" ["nist800-53-r4" "nist800-53-r5"])<def_stmt>test_substitute dir_location attack_domain attack_version rev<block_start>"""Tests substitute.py with both frameworks"""<line_sep>rx_controls=pathlib.Path(dir_location "frameworks" f"ATT&CK-{attack_version}" rev "stix" f"{rev}-controls.json")<line_sep>rx_mappings=pathlib.Path(dir_location "frameworks" f"ATT&CK-{attack_version}" rev "stix" f"{rev}-mappings.json")<line_sep>output_location=pathlib.Path(dir_location "frameworks" f"ATT&CK-{attack_version}" rev "stix" f"{rev}-enterprise-attack.json")<line_sep>script_location=f"{dir_location}/src/substitute.py"<line_sep>child_process=subprocess.Popen([sys.executable script_location "-controls" str(rx_controls) "-mappings" str(rx_mappings) "-domain" attack_domain "-version" attack_version "-output" str(output_location) "--allow-unmapped" ])<line_sep>child_process.wait(timeout=90)<assert_stmt>child_process.returncode<eq>0<block_end><def_stmt>test_make dir_location<block_start>"""Test the main make.py 
script"""<line_sep>script_location=f"{dir_location}/src/make.py"<line_sep>child_process=subprocess.Popen([sys.executable script_location ])<line_sep>child_process.wait(timeout=1080)<assert_stmt>child_process.returncode<eq>0<block_end>@pytest.mark.parametrize("attack_version" ["v8.2" "v9.0"])@pytest.mark.parametrize("rev" ["nist800-53-r4" "nist800-53-r5"])<def_stmt>test_parse_framework dir_location attack_version rev<block_start>"""Tests parse.py with both frameworks"""<line_sep>rx_input_controls=pathlib.Path(dir_location "frameworks" f"ATT&CK-{attack_version}" rev "input" f"{rev}-controls.tsv")<line_sep>rx_input_mappings=pathlib.Path(dir_location "frameworks" f"ATT&CK-{attack_version}" rev "input" f"{rev}-mappings.tsv")<line_sep>rx_output_controls=pathlib.Path(dir_location "frameworks" f"ATT&CK-{attack_version}" rev "stix" f"{rev}-controls.json")<line_sep>rx_output_mappings=pathlib.Path(dir_location "frameworks" f"ATT&CK-{attack_version}" rev "stix" f"{rev}-mappings.json")<line_sep>config_location=pathlib.Path(dir_location "frameworks" f"ATT&CK-{attack_version}" rev "input" "config.json")<line_sep>script_location=f"{dir_location}/frameworks/ATT&CK-{attack_version}/{rev}/parse.py"<line_sep>child_process=subprocess.Popen([sys.executable script_location "-input-controls" str(rx_input_controls) "-input-mappings" str(rx_input_mappings) "-output-controls" str(rx_output_controls) "-output-mappings" str(rx_output_mappings) "-config-location" str(config_location) ])<line_sep>child_process.wait(timeout=90)<assert_stmt>child_process.returncode<eq>0<block_end>
<import_stmt>numpy<as>np<import_stmt>scipy.io<as>sio<import_stmt>torch<import_stmt>torch.nn.init<import_from_stmt>misc.l2net.l2net_model L2Net<line_sep>eps=1e-10<def_stmt>check_ported l2net_model test_patch img_mean<block_start>test_patch=test_patch.transpose(3 2 0 1)-img_mean<line_sep>desc=l2net_model(torch.from_numpy(test_patch))<line_sep>print(desc)<line_sep><return>desc<block_end><if_stmt>__name__<eq>'__main__'<block_start>path_to_l2net_weights='descriptors/sfm-evaluation-benchmarking/third_party/l2net/matlab/L2Net-LIB+.mat'<line_sep>l2net_weights=sio.loadmat(path_to_l2net_weights)<line_sep>l2net_model=L2Net()<line_sep>l2net_model.eval()<line_sep>new_state_dict=l2net_model.state_dict().copy()<line_sep>conv_layers,bn_layers={} {}<line_sep>all_layer_weights=l2net_weights['net']['layers'][0][0][0]<line_sep>img_mean=l2net_weights['pixMean']<line_sep>conv_layers_to_track,bn_layers_to_track=[0 3 6 9 12 15 18] [1 4 7 10 13 16 19]<line_sep>conv_i,bn_i=0 0<for_stmt>layer all_layer_weights<block_start><if_stmt>'weights'<not><in>layer.dtype.names<block_start><continue><block_end>layer_name=layer[0][0][0][0]<line_sep>layer_value=layer['weights'][0][0][0]<if_stmt>layer_name<eq>'conv'<block_start>conv_layers[conv_layers_to_track[conv_i]]=layer_value<line_sep>conv_i<augadd>1<block_end><elif_stmt>layer_name<eq>'bnormPair'<block_start>bn_layers[bn_layers_to_track[bn_i]]=layer_value<line_sep>bn_i<augadd>1<block_end><block_end><for_stmt>key,value new_state_dict.items()<block_start>layer_number=int(key.split('.')[1])<if_stmt>layer_number<in>conv_layers.keys()<block_start><if_stmt>'weight'<in>key<block_start>new_state_dict[key]=torch.from_numpy(conv_layers[layer_number][0].transpose((3 2 0 1)))<block_end><elif_stmt>'bias'<in>key<block_start>new_state_dict[key]=torch.from_numpy(conv_layers[layer_number][1]).squeeze()<block_end><block_end><elif_stmt>layer_number<in>bn_layers.keys()<block_start><if_stmt>'running_mean'<in>key<block_start>new_state_dict[key]=torch.from_numpy(np.array([x[0]<for>x bn_layers[layer_number][2]])).squeeze()<block_end><elif_stmt>'running_var'<in>key<block_start>new_state_dict[key]=torch.from_numpy(np.array([x[1]<for>x bn_layers[layer_number][2]])<power>2-eps).squeeze()<block_end><elif_stmt>'weight'<in>key<block_start>new_state_dict[key]=torch.from_numpy(np.ones(value.size()[0])).squeeze()<block_end><block_end><else_stmt><block_start><continue><block_end><block_end>l2net_model.load_state_dict(new_state_dict)<line_sep>l2net_model.eval()<line_sep>torch.save(l2net_model.state_dict() 'l2net_ported_weights_lib+.pth')<line_sep># compare desc on test patch with matlab implementation # test_patch_batch = sio.loadmat('test_batch_img.mat')['testPatch'] # check_ported(l2net_model, test_patch_batch, img_mean) # # test_patch_one = sio.loadmat('test_one.mat')['testPatch'] # check_ported(l2net_model, np.expand_dims(np.expand_dims(test_patch_one, axis=2),axis=2), img_mean) <block_end>
# -*- coding: utf-8 -*- <class_stmt>Modifier(object)<block_start><def_stmt>__init__ self<block_start>self.is_disable=<false><line_sep>self.is_show_only=<false><line_sep>self.is_debug=<false><line_sep>self.is_transparent=<false><block_end><def_stmt>turn_on_disable self<block_start>self.is_disable=<true><block_end><def_stmt>turn_off_disable self<block_start>self.is_disable=<false><block_end><def_stmt>turn_on_show_only self<block_start>self.is_show_only=<true><block_end><def_stmt>turn_off_show_only self<block_start>self.is_show_only=<false><block_end><def_stmt>turn_on_debug self<block_start>self.is_debug=<true><block_end><def_stmt>turn_off_debug self<block_start>self.is_debug=<false><block_end><def_stmt>turn_on_transparent self<block_start>self.is_transparent=<true><block_end><def_stmt>turn_off_transparent self<block_start>self.is_transparent=<false><block_end><def_stmt>get_prefix self<block_start>prefix=''<if_stmt>self.is_disable<block_start>prefix<augadd>'*'<block_end><if_stmt>self.is_show_only<block_start>prefix<augadd>'!'<block_end><if_stmt>self.is_debug<block_start>prefix<augadd>'#'<block_end><if_stmt>self.is_transparent<block_start>prefix<augadd>'%'<block_end><return>prefix<block_end><block_end><class_stmt>ModifierMixin(object)<block_start><def_stmt>__init__ self<block_start>super(ModifierMixin self).__init__()<line_sep>self.mod=Modifier()<block_end><def_stmt>turn_on_disable self<block_start>self.mod.is_disable=<true><line_sep><return>self<block_end><def_stmt>turn_off_disable self<block_start>self.mod.is_disable=<false><line_sep><return>self<block_end><def_stmt>turn_on_show_only self<block_start>self.mod.is_show_only=<true><line_sep><return>self<block_end><def_stmt>turn_off_show_only self<block_start>self.mod.is_show_only=<false><line_sep><return>self<block_end><def_stmt>turn_on_debug self<block_start>self.mod.is_debug=<true><line_sep><return>self<block_end><def_stmt>turn_off_debug self<block_start>self.mod.is_debug=<false><line_sep><return>self<block_end><def_stmt>turn_on_transparent self<block_start>self.mod.is_transparent=<true><line_sep><return>self<block_end><def_stmt>turn_off_transparent self<block_start>self.mod.is_transparent=<false><line_sep><return>self<block_end># Shorthand <def_stmt>disable self<block_start><return>self.turn_on_disable()<block_end><def_stmt>show_only self<block_start><return>self.turn_on_show_only()<block_end><def_stmt>debug self<block_start><return>self.turn_on_debug()<block_end><def_stmt>transparent self<block_start><return>self.turn_on_transparent()<block_end><block_end>
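# --- Self-contained usage sketch of Modifier / ModifierMixin defined above.
# 'Cube' is a hypothetical subclass used only for illustration; the prefix characters
# (*, !, #, %) follow the OpenSCAD-style modifier convention the class appears to encode.
class Cube(ModifierMixin):
    pass

c = Cube().debug().transparent()   # shorthand methods return self, so they chain
print(c.mod.get_prefix())          # '#%'

m = Modifier()
m.turn_on_disable()
m.turn_on_show_only()
print(m.get_prefix())              # '*!'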
"""Event plugin initializer."""<import_from_stmt>. scan<line_sep>REQUIRED_OS="Windows"<line_sep>REQUIRED_ADMIN=<true><line_sep>entrypoint=scan.run<line_sep>
# This file is covered by the LICENSE file in the root of this project. <import_stmt>torch<import_from_stmt>torch.utils.data Dataset<import_stmt>torchvision.transforms<as>transforms<import_stmt>os<import_stmt>numpy<as>np<import_from_stmt>PIL Image<import_stmt>random<import_stmt>torchvision.transforms.functional<as>TF<import_stmt>cv2<line_sep>''' means(rgb): [0.47037394 0.44669544 0.40731883] stds(rgb): [0.27876515 0.27429348 0.28861644] Num of pixels: 20354514743 Frequency: [1.03595582e-01 8.69684146e-02 1.56773018e-03 5.82611153e-03 4.81058803e-03 3.16223407e-03 7.10428246e-03 7.05528129e-03 5.76380800e-03 2.61561927e-03 6.43652977e-04 1.06484818e-03 1.07875453e-03 6.98690299e-04 3.30846713e-03 1.65507630e-03 6.01311471e-03 4.48253614e-03 3.37169861e-03 1.84147500e-03 2.59677750e-03 4.60398424e-03 1.72642509e-03 3.25079452e-03 3.17922092e-03 9.28004241e-04 3.00187903e-03 1.02122941e-03 7.74191387e-04 3.01174387e-03 3.52895713e-04 3.00067384e-04 3.31869518e-04 1.49010479e-04 6.79291802e-04 1.38228842e-04 1.80938973e-04 5.82766927e-04 1.16591352e-03 5.55644934e-04 1.83246594e-03 9.64564533e-04 2.68603416e-03 3.53508157e-04 4.86584039e-04 3.04124273e-04 6.10763335e-03 2.51745687e-03 1.19416608e-03 3.49547734e-03 1.43915212e-03 1.98661498e-03 8.55161482e-04 1.22814719e-03 8.29490195e-03 2.09027995e-03 3.95652007e-03 6.19389573e-03 5.21590882e-03 2.07798941e-03 9.07128538e-03 2.41144264e-02 3.08866224e-03 3.29269545e-03 3.44996375e-03 2.17966680e-04 5.69893272e-04 1.33344903e-03 1.06328032e-03 9.01832455e-04 3.21914572e-03 5.66035602e-05 1.64377842e-03 3.49153060e-03 2.07557215e-03 1.33823711e-03 1.73024557e-03 3.61442810e-04 3.16915293e-03 3.26746183e-05 1.69843597e-04 2.24706580e-03 1.08037029e-03 1.15556594e-03 2.19738081e-03 2.83867548e-03 4.58330597e-03 6.13085488e-03 5.53305060e-03 1.95223391e-03 1.24932391e-03 2.50343202e-03 4.28674371e-03 1.36921250e-03 3.32965639e-03 1.77840698e-03 5.10465080e-04 2.04364749e-03 1.78148449e-02 2.76140555e-03 5.15718043e-03 2.26026582e-02 1.41155564e-03 9.53189813e-03 2.24532113e-02 2.74807151e-03 1.89481003e-02 1.06579298e-03 7.92184791e-04 7.43852368e-04 5.30637362e-03 2.23005552e-03 8.45400979e-03 6.19471526e-03 4.12920107e-03 1.70490166e-03 9.71786370e-03 6.47590623e-02 1.39815155e-02 8.92733677e-03 8.67340285e-02 8.37997595e-03 1.41617307e-02 1.35923816e-02 2.34834311e-02 7.09260706e-03 4.15174260e-02 1.33029928e-02 4.80344372e-03 7.12591456e-03 3.01482646e-02 4.35955532e-03 6.39422134e-02 6.29973913e-03] ******************************************************************************** Log strategy Weights: [3.30289772 3.44347075 4.45638856 4.38993873 4.40558454 4.43124634 4.3704214 4.37116607 4.39089505 4.43982977 4.47110532 4.46438403 4.4641625 4.47022578 4.42895632 4.45500307 4.38707112 4.4106653 4.42796692 4.45204959 4.4401263 4.40878283 4.45387203 4.42985916 4.43098019 4.46656527 4.43376055 4.46507904 4.46901983 4.43360579 4.47575828 4.47660484 4.47609518 4.47902746 4.47053574 4.47920049 4.47851516 4.47207879 4.4627746 4.47251257 4.45219224 4.46598228 4.43872198 4.47574847 4.47361754 4.47653982 4.3856233 4.44137513 4.46232492 4.42603155 4.45842983 4.44975287 4.46772733 4.46178419 4.35241406 4.44811407 4.41883936 4.38430288 4.39932503 4.44830829 4.34076057 4.12794364 4.43239948 4.42920318 4.42674297 4.4779212 4.47228467 4.4601095 4.464409 4.46698271 4.43035479 4.4805109 4.45518222 4.42609323 4.44834649 4.46003338 4.45381149 4.47562135 4.43113793 4.48089522 4.47869317 4.44563805 4.46413676 4.46293932 4.44642236 4.43632268 4.40910322 4.38526776 
4.3944411 4.45029668 4.46144729 4.44159602 4.41370389 4.45954104 4.42862471 4.45304841 4.47323538 4.4488511 4.21416693 4.43753689 4.40023077 4.14827356 4.45886822 4.33387961 4.15029549 4.4377465 4.19835921 4.46436897 4.46873253 4.46950434 4.39793066 4.44590653 4.35002018 4.38429034 4.41615226 4.45421316 4.33110842 3.65425719 4.26863963 4.34291598 3.44555095 4.3511337 4.26604345 4.27425755 4.13640191 4.37059881 3.90903173 4.27844617 4.40569505 4.37009275 4.04897801 4.41257335 3.66257514 4.38268395] Linear strategy Weights: [0.89640442 0.91303159 0.99843227 0.99417389 0.99518941 0.99683777 0.99289572 0.99294472 0.99423619 0.99738438 0.99935635 0.99893515 0.99892125 0.99930131 0.99669153 0.99834492 0.99398689 0.99551746 0.9966283 0.99815853 0.99740322 0.99539602 0.99827357 0.99674921 0.99682078 0.999072 0.99699812 0.99897877 0.99922581 0.99698826 0.9996471 0.99969993 0.99966813 0.99985099 0.99932071 0.99986177 0.99981906 0.99941723 0.99883409 0.99944436 0.99816753 0.99903544 0.99731397 0.99964649 0.99951342 0.99969588 0.99389237 0.99748254 0.99880583 0.99650452 0.99856085 0.99801339 0.99914484 0.99877185 0.9917051 0.99790972 0.99604348 0.9938061 0.99478409 0.99792201 0.99092871 0.97588557 0.99691134 0.9967073 0.99655004 0.99978203 0.99943011 0.99866655 0.99893672 0.99909817 0.99678085 0.9999434 0.99835622 0.99650847 0.99792443 0.99866176 0.99826975 0.99963856 0.99683085 0.99996733 0.99983016 0.99775293 0.99891963 0.99884443 0.99780262 0.99716132 0.99541669 0.99386915 0.99446695 0.99804777 0.99875068 0.99749657 0.99571326 0.99863079 0.99667034 0.99822159 0.99948953 0.99795635 0.98218516 0.99723859 0.99484282 0.97739734 0.99858844 0.9904681 0.97754679 0.99725193 0.9810519 0.99893421 0.99920782 0.99925615 0.99469363 0.99776994 0.99154599 0.99380528 0.9958708 0.9982951 0.99028214 0.93524094 0.98601848 0.99107266 0.91326597 0.99162002 0.98583827 0.98640762 0.97651657 0.99290739 0.95848257 0.98669701 0.99519656 0.99287409 0.96985174 0.99564044 0.93605779 0.99370026] Squared strategy Weights: [0.80354088 0.83362668 0.996867 0.98838172 0.99040197 0.99368553 0.98584191 0.98593921 0.98850561 0.9947756 0.99871311 0.99787144 0.99784365 0.99860311 0.99339401 0.99669259 0.98800993 0.99105502 0.99326797 0.99632044 0.99481319 0.99081323 0.99655013 0.99350898 0.99365167 0.99814485 0.99400525 0.99795858 0.99845222 0.99398558 0.99929433 0.99939996 0.99933637 0.999702 0.99864188 0.99972356 0.99963815 0.99883481 0.99766953 0.99888902 0.99633843 0.9980718 0.99463515 0.99929311 0.99902707 0.99939184 0.98782204 0.99497142 0.99761309 0.99302126 0.99712377 0.99603072 0.99829041 0.99754521 0.983479 0.99582381 0.99210261 0.98765057 0.98959539 0.99584834 0.98193972 0.95235265 0.99383222 0.99342545 0.99311197 0.99956411 0.99886054 0.99733488 0.99787457 0.99819715 0.99357207 0.9998868 0.99671515 0.99302913 0.99585316 0.99732532 0.9965425 0.99927725 0.99367174 0.99993465 0.99966034 0.99551092 0.99784043 0.9976902 0.99561007 0.99433071 0.99085439 0.98777588 0.98896451 0.99609934 0.99750291 0.9949994 0.99144489 0.99726345 0.99335177 0.99644635 0.99897933 0.99591688 0.96468768 0.99448481 0.98971224 0.95530556 0.99717888 0.98102706 0.95559772 0.99451141 0.96246283 0.99786955 0.99841626 0.99851285 0.98941541 0.99554486 0.98316345 0.98764894 0.99175865 0.9965931 0.98065871 0.87467561 0.97223245 0.98222502 0.83405473 0.98331027 0.97187709 0.97299999 0.95358461 0.98586509 0.91868884 0.97357098 0.99041619 0.98579895 0.94061239 0.9912999 0.87620418 0.98744021] 1/w strategy Weights: [9.65292034e+00 1.14984261e+01 6.37860798e+02 
1.71640773e+02 2.07874363e+02 3.16231125e+02 1.40759971e+02 1.41737592e+02 1.73496110e+02 3.82317177e+02 1.55360808e+03 9.39092189e+02 9.26986355e+02 1.43122881e+03 3.02253865e+02 6.04198100e+02 1.66302887e+02 2.23087497e+02 2.96585535e+02 5.43039993e+02 3.85091194e+02 2.17202705e+02 5.79228263e+02 3.07616159e+02 3.14541481e+02 1.07756967e+03 3.33123573e+02 9.79202324e+02 1.29165359e+03 3.32032445e+02 2.83361805e+03 3.33247373e+03 3.01314165e+03 6.71048707e+03 1.47209973e+03 7.23385690e+03 5.52641986e+03 1.71592243e+03 8.57689188e+02 1.79967807e+03 5.45709757e+02 1.03672652e+03 3.72294698e+02 2.82870902e+03 2.05510121e+03 3.28802141e+03 1.63729272e+02 3.97224691e+02 8.37397449e+02 2.86083142e+02 6.94848751e+02 5.03366266e+02 1.16935611e+03 8.14228025e+02 1.20555831e+02 4.78402530e+02 2.52746721e+02 1.61449018e+02 1.91720775e+02 4.81232091e+02 1.10237839e+02 4.14689352e+01 3.23763716e+02 3.03701627e+02 2.89857278e+02 4.58764672e+03 1.75468373e+03 7.49929301e+02 9.40476913e+02 1.10884112e+03 3.10640456e+02 1.76636127e+04 6.08350801e+02 2.86406522e+02 4.81792541e+02 7.47246149e+02 5.77949302e+02 2.76661288e+03 3.15540735e+02 3.05954315e+04 5.88742315e+03 4.45022815e+02 9.25600003e+02 8.65369349e+02 4.55085184e+02 3.52275730e+02 2.18182645e+02 1.63109124e+02 1.80731800e+02 5.12231075e+02 8.00426523e+02 3.99450034e+02 2.33276756e+02 7.30341490e+02 3.00330389e+02 5.62297827e+02 1.95895948e+03 4.89318785e+02 5.61329299e+01 3.62133109e+02 1.93904029e+02 4.42425642e+01 7.08433228e+02 1.04910789e+02 4.45370393e+01 3.63890226e+02 5.27757115e+01 9.38259711e+02 1.26231580e+03 1.34433471e+03 1.88452263e+02 4.48417318e+02 1.18286924e+02 1.61427660e+02 2.42177012e+02 5.86540654e+02 1.02903169e+02 1.54418518e+01 7.15229535e+01 1.12015364e+02 1.15294989e+01 1.19331942e+02 7.06127883e+01 7.35705703e+01 4.25831970e+01 1.40991681e+02 2.40862658e+01 7.51709983e+01 2.08183540e+02 1.40332667e+02 3.31693940e+01 2.29380667e+02 1.56391184e+01 1.58736480e+02] '''<line_sep>IMG_EXT=['.jpg']<line_sep>LBL_EXT=['.png']<line_sep>SCALES=[1.0]<class_stmt>ToLabel<block_start><def_stmt>__call__ self label<block_start>label=np.array(label)<line_sep><return>torch.from_numpy(label).long()<block_end><block_end><def_stmt>load_image file<block_start><return>Image.open(file)<block_end><def_stmt>load_label file<block_start><return>Image.open(file)<block_end><def_stmt>is_image filename<block_start><return>any(filename.endswith(ext)<for>ext IMG_EXT)<block_end><def_stmt>is_label filename<block_start><return>any(filename.endswith(ext)<for>ext LBL_EXT)<block_end><def_stmt>resize_and_fit img new_h new_w img_type# check img_type <block_start><assert_stmt>(img_type<is>"RGB"<or>img_type<is>"L")<line_sep># get current size w,h=img.size<line_sep># generate new img out_img=Image.new(img_type (new_w new_h))<line_sep># now do size magic curr_asp_ratio=h/w<line_sep>new_asp_ratio=new_h/new_w<line_sep># do resizing according to aspect ratio <if_stmt>curr_asp_ratio<g>new_asp_ratio# fit h to h <block_start>new_tmp_h=new_h<line_sep>new_tmp_w=int(w<times>new_h/h)<block_end><else_stmt># fit w to w <block_start>new_tmp_w=new_w<line_sep>new_tmp_h=int(h<times>new_w/w)<block_end># resize the original image <if_stmt>img_type<is>"RGB"<block_start>tmp_img=img.resize((new_tmp_w new_tmp_h) Image.BILINEAR)<block_end><else_stmt><block_start>tmp_img=img.resize((new_tmp_w new_tmp_h) Image.NEAREST)<block_end># put in padded image out_img.paste(tmp_img (int((new_w-new_tmp_w)<floordiv>2) 
int((new_h-new_tmp_h)<floordiv>2)))<line_sep><return>out_img<block_end><class_stmt>MS_COCO(Dataset)<block_start><def_stmt>__init__ self root subset h w means stds crop_h=<none> crop_w=<none><block_start>self.images_root=os.path.join(root subset+"2017")<line_sep>self.labels_root=os.path.join(root "annotations/panoptic_"+subset+"2017_remap")<line_sep>self.subset=subset<assert_stmt>self.subset<eq>'train'<or>self.subset<eq>'val'<line_sep>self.w=w<line_sep>self.h=h<line_sep>self.means=means<line_sep>self.stds=stds<if_stmt>self.subset<eq>'train'<block_start>self.crop_h=crop_h<line_sep>self.crop_w=crop_w<line_sep># check that parameters make sense <assert_stmt>(self.crop_h<le>self.h)<assert_stmt>(self.crop_w<le>self.w)<line_sep>self.resize_crop_img=transforms.Resize((self.crop_h self.crop_w) Image.BILINEAR)<line_sep>self.resize_crop_lbl=transforms.Resize((self.crop_h self.crop_w) Image.NEAREST)<block_end>print("Images from: " self.images_root)<line_sep>print("Labels from: " self.labels_root)<line_sep>self.filenames=[os.path.join(dp f)<for>dp,dn,fn os.walk(os.path.expanduser(self.images_root))<for>f fn<if>is_image(f)]<line_sep>self.filenames.sort()<line_sep>self.filenamesGt=[os.path.join(dp f)<for>dp,dn,fn os.walk(os.path.expanduser(self.labels_root))<for>f fn<if>is_label(f)]<line_sep>self.filenamesGt.sort()<assert_stmt>len(self.filenames)<eq>len(self.filenamesGt)<line_sep># transformations for images self.jitter=transforms.ColorJitter(brightness=0.05 contrast=0.05 saturation=0.05 hue=0.05)<line_sep>self.h_flip=TF.hflip<line_sep>self.crop_param=transforms.RandomCrop.get_params<line_sep>self.crop=TF.crop<line_sep># transformations for tensors self.norm=transforms.Normalize(mean=self.means std=self.stds)<line_sep>self.tensorize_img=transforms.ToTensor()<line_sep>self.tensorize_lbl=ToLabel()<block_end><def_stmt>__getitem__ self index<block_start>filename=self.filenames[index]<line_sep>filenameGt=self.filenamesGt[index]<with_stmt>open(filename 'rb')<as>f<block_start>image=load_image(f).convert('RGB')<block_end><with_stmt>open(filenameGt 'rb')<as>f<block_start>label=load_label(f).convert('L')<block_end># resize (resizing is different if we are in train or valid mode) # generate resizer <if_stmt>self.subset<eq>'train'<block_start>new_h=self.crop_h<line_sep>new_w=self.crop_w<block_end><else_stmt><block_start>new_h=self.h<line_sep>new_w=self.w<block_end>image=resize_and_fit(image new_h new_w "RGB")<line_sep>label=resize_and_fit(label new_h new_w "L")<line_sep># augment data and tensorize <if_stmt>self.subset<eq>'train'# crop randomly sized patches <block_start>scale=SCALES[random.randrange(len(SCALES))]<line_sep>size=(int(self.crop_h<times>scale) int(self.crop_w<times>scale))<line_sep>i,j,h,w=self.crop_param(image output_size=size)<line_sep>image=self.resize_crop_img(self.crop(image i j h w))<line_sep>label=self.resize_crop_lbl(self.crop(label i j h w))<line_sep># flip <if_stmt>random.random()<g>0.5<block_start>image=self.h_flip(image)<line_sep>label=self.h_flip(label)<block_end># jitter <if_stmt>random.random()<g>0.5<block_start>image=self.jitter(image)<block_end># show (set workers = 0) # cv2.imshow("train_img", np.array(image)[:, :, ::-1]) # cv2.imshow("train_lbl", LUT[np.array(label)].astype(np.float32) / 21.0) # cv2.waitKey(0) <block_end># if self.subset == 'val': # show (set workers = 0) # cv2.imshow("valid_img", np.array(image)[:, :, ::-1]) # cv2.waitKey(0) # tensorize image=self.tensorize_img(image)<line_sep>label=self.tensorize_lbl(label)<line_sep># normalize 
image=self.norm(image)<line_sep><return>image label<block_end><def_stmt>__len__ self<block_start><return>len(self.filenames)<block_end><block_end><class_stmt>Parser()# standard conv, BN, relu <block_start><def_stmt>__init__ self img_prop img_means img_stds classes train location=<none> batch_size=<none> crop_prop=<none> workers=2<block_start>super(Parser self).__init__()<line_sep>self.img_prop=img_prop<line_sep>self.img_means=img_means<line_sep>self.img_stds=img_stds<line_sep>self.classes=classes<line_sep>self.train=train<if_stmt>self.train# if I am training, get the dataset <block_start>self.location=location<line_sep>self.batch_size=batch_size<line_sep>self.crop_prop=crop_prop<line_sep>self.workers=workers<line_sep># Data loading code self.train_dataset=MS_COCO(root=self.location subset='train' h=self.img_prop["height"] w=self.img_prop["width"] means=self.img_means stds=self.img_stds crop_h=self.crop_prop["height"] crop_w=self.crop_prop["width"])<line_sep>self.trainloader=torch.utils.data.DataLoader(self.train_dataset batch_size=self.batch_size shuffle=<true> num_workers=self.workers pin_memory=<true> drop_last=<true>)<assert_stmt>len(self.trainloader)<g>0<line_sep>self.trainiter=iter(self.trainloader)<line_sep># calculate validation batch from train batch and image sizes factor_val_over_train=float(self.img_prop["height"]<times>self.img_prop["width"])/float(self.crop_prop["height"]<times>self.crop_prop["width"])<line_sep>self.val_batch_size=max(1 int(self.batch_size/factor_val_over_train))<line_sep># if gpus are available make val_batch_size at least the number of gpus <if_stmt>torch.cuda.is_available()<and>torch.cuda.device_count()<g>1<block_start>self.val_batch_size=max(self.val_batch_size torch.cuda.device_count())<block_end>print("Inference batch size: " self.val_batch_size)<line_sep>self.valid_dataset=MS_COCO(root=self.location subset='val' h=self.img_prop["height"] w=self.img_prop["width"] means=self.img_means stds=self.img_stds)<line_sep>self.validloader=torch.utils.data.DataLoader(self.valid_dataset batch_size=self.val_batch_size shuffle=<false> num_workers=self.workers pin_memory=<true> drop_last=<true>)<assert_stmt>len(self.validloader)<g>0<line_sep>self.validiter=iter(self.validloader)<block_end><block_end><def_stmt>get_train_batch self<block_start>images,labels=self.trainiter.next()<line_sep><return>images labels<block_end><def_stmt>get_train_set self<block_start><return>self.trainloader<block_end><def_stmt>get_valid_batch self<block_start>images,labels=self.validiter.next()<line_sep><return>images labels<block_end><def_stmt>get_valid_set self<block_start><return>self.validloader<block_end><def_stmt>get_train_size self<block_start><return>len(self.trainloader)<block_end><def_stmt>get_valid_size self<block_start><return>len(self.validloader)<block_end><def_stmt>get_img_size self<block_start>h=self.img_prop["height"]<line_sep>w=self.img_prop["width"]<line_sep>d=self.img_prop["depth"]<line_sep><return>h w d<block_end><def_stmt>get_n_classes self<block_start><return>len(self.classes)<block_end><def_stmt>get_class_string self idx<block_start><return>self.classes[idx]<block_end><def_stmt>get_means_stds self<block_start><return>self.img_means self.img_stds<block_end><block_end>
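# --- Quick check of the letterboxing performed by resize_and_fit defined above
# (assumes the function is in scope; only Pillow is needed). A 200x100 image fitted
# into 640x640 keeps its aspect ratio and is centred on a padded canvas.
from PIL import Image

img = Image.new("RGB", (200, 100), (255, 0, 0))
out = resize_and_fit(img, new_h=640, new_w=640, img_type="RGB")
print(out.size)   # (640, 640); the red content fills a centred 640x320 band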
# Copyright 2019-2020 Amazon.com, Inc. or its affiliates. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"). You # may not use this file except in compliance with the License. A copy of # the License is located at # # http://aws.amazon.com/apache2.0/ # # or in the "license" file accompanying this file. This file is # distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF # ANY KIND, either express or implied. See the License for the specific # language governing permissions and limitations under the License. <import_from_future_stmt> absolute_import<import_stmt>os<import_stmt>numpy<as>np<import_stmt>json<import_stmt>pytest<import_stmt>sagemaker<import_from_stmt>sagemaker.pytorch PyTorchModel<import_from_stmt>sagemaker.serializers IdentitySerializer<import_from_stmt>sagemaker.deserializers BytesDeserializer<import_from_stmt>...integration model_neuron_dir resnet_neuron_script resnet_neuron_input resnet_neuron_image_list<import_from_stmt>...integration.sagemaker.timeout timeout_and_delete_endpoint<import_from_stmt>.... invoke_pytorch_helper_function<line_sep>@pytest.mark.model("resnet")@pytest.mark.processor("neuron")@pytest.mark.neuron_test<def_stmt>test_neuron_hosting framework_version ecr_image instance_type sagemaker_regions<block_start>instance_type=instance_type<or>'ml.inf1.xlarge'<line_sep>model_dir=os.path.join(model_neuron_dir 'model-resnet.tar.gz')<line_sep>function_args={'framework_version':framework_version 'instance_type':instance_type 'model_dir':model_dir 'resnet_script':resnet_neuron_script 'resnet_neuron_input':resnet_neuron_input 'resnet_neuron_image_list':resnet_neuron_image_list }<line_sep>invoke_pytorch_helper_function(ecr_image sagemaker_regions _test_resnet_distributed function_args)<block_end><def_stmt>_test_resnet_distributed ecr_image sagemaker_session framework_version instance_type model_dir resnet_script resnet_neuron_input resnet_neuron_image_list accelerator_type=<none><block_start>endpoint_name=sagemaker.utils.unique_name_from_base("sagemaker-pytorch-serving")<line_sep>model_data=sagemaker_session.upload_data(path=model_dir key_prefix="sagemaker-pytorch-serving/models" )<line_sep>pytorch=PyTorchModel(model_data=model_data role='SageMakerRole' entry_point=resnet_script framework_version=framework_version image_uri=ecr_image sagemaker_session=sagemaker_session model_server_workers=4 env={"AWS_NEURON_VISIBLE_DEVICES":"ALL" "NEURONCORE_GROUP_SIZES":"1" "NEURON_RT_VISIBLE_CORES":"0" "NEURON_RT_LOG_LEVEL":"5" "NEURON_RTD_ADDRESS":"run"})<with_stmt>timeout_and_delete_endpoint(endpoint_name sagemaker_session minutes=30)<block_start>predictor=pytorch.deploy(initial_instance_count=1 instance_type=instance_type endpoint_name=endpoint_name serializer=IdentitySerializer() deserializer=BytesDeserializer() )<with_stmt>open(resnet_neuron_input "rb")<as>f<block_start>payload=f.read()<block_end>output=predictor.predict(data=payload)<line_sep>print(output)<line_sep>result=json.loads(output.decode())<line_sep>print(result)<line_sep># Load names for ImageNet classes object_categories={}<with_stmt>open(resnet_neuron_image_list "r")<as>f<block_start><for_stmt>line f<block_start>key,val=line.strip().split(":")<line_sep>object_categories[key]=val<block_end><block_end><assert_stmt>("cat"<in>object_categories[str(np.argmax(result))])<block_end><block_end>
<import_stmt>numpy<as>np<import_stmt>torch<import_stmt>torch.nn<as>nn<import_stmt>torch.nn.functional<as>F<import_from_stmt>isegm.utils misc<class_stmt>NormalizedFocalLossSigmoid(nn.Module)<block_start><def_stmt>__init__ self axis=-1 alpha=0.25 gamma=2 max_mult=-1 eps=1e-12 from_sigmoid=<false> detach_delimeter=<true> batch_axis=0 weight=<none> size_average=<true> ignore_label=-1<block_start>super(NormalizedFocalLossSigmoid self).__init__()<line_sep>self._axis=axis<line_sep>self._alpha=alpha<line_sep>self._gamma=gamma<line_sep>self._ignore_label=ignore_label<line_sep>self._weight=weight<if>weight<is><not><none><else>1.0<line_sep>self._batch_axis=batch_axis<line_sep>self._from_logits=from_sigmoid<line_sep>self._eps=eps<line_sep>self._size_average=size_average<line_sep>self._detach_delimeter=detach_delimeter<line_sep>self._max_mult=max_mult<line_sep>self._k_sum=0<line_sep>self._m_max=0<block_end><def_stmt>forward self pred label<block_start>one_hot=label<g>0.5<line_sep>sample_weight=label<ne>self._ignore_label<if_stmt><not>self._from_logits<block_start>pred=torch.sigmoid(pred)<block_end>alpha=torch.where(one_hot self._alpha<times>sample_weight (1-self._alpha)<times>sample_weight)<line_sep>pt=torch.where(sample_weight 1.0-torch.abs(label-pred) torch.ones_like(pred))<line_sep>beta=(1-pt)<power>self._gamma<line_sep>sw_sum=torch.sum(sample_weight dim=(-2 -1) keepdim=<true>)<line_sep>beta_sum=torch.sum(beta dim=(-2 -1) keepdim=<true>)<line_sep>mult=sw_sum/(beta_sum+self._eps)<if_stmt>self._detach_delimeter<block_start>mult=mult.detach()<block_end>beta=beta<times>mult<if_stmt>self._max_mult<g>0<block_start>beta=torch.clamp_max(beta self._max_mult)<block_end><with_stmt>torch.no_grad()<block_start>ignore_area=torch.sum(label<eq>self._ignore_label dim=tuple(range(1 label.dim()))).cpu().numpy()<line_sep>sample_mult=torch.mean(mult dim=tuple(range(1 mult.dim()))).cpu().numpy()<if_stmt>np.any(ignore_area<eq>0)<block_start>self._k_sum=0.9<times>self._k_sum+0.1<times>sample_mult[ignore_area<eq>0].mean()<line_sep>beta_pmax,_=torch.flatten(beta start_dim=1).max(dim=1)<line_sep>beta_pmax=beta_pmax.mean().item()<line_sep>self._m_max=0.8<times>self._m_max+0.2<times>beta_pmax<block_end><block_end>loss=-alpha<times>beta<times>torch.log(torch.min(pt+self._eps torch.ones(1 dtype=torch.float).to(pt.device)))<line_sep>loss=self._weight<times>(loss<times>sample_weight)<if_stmt>self._size_average<block_start>bsum=torch.sum(sample_weight dim=misc.get_dims_with_exclusion(sample_weight.dim() self._batch_axis))<line_sep>loss=torch.sum(loss dim=misc.get_dims_with_exclusion(loss.dim() self._batch_axis))/(bsum+self._eps)<block_end><else_stmt><block_start>loss=torch.sum(loss dim=misc.get_dims_with_exclusion(loss.dim() self._batch_axis))<block_end><return>loss<block_end><def_stmt>log_states self sw name global_step<block_start>sw.add_scalar(tag=name+'_k' value=self._k_sum global_step=global_step)<line_sep>sw.add_scalar(tag=name+'_m' value=self._m_max global_step=global_step)<block_end><block_end><class_stmt>FocalLoss(nn.Module)<block_start><def_stmt>__init__ self axis=-1 alpha=0.25 gamma=2 from_logits=<false> batch_axis=0 weight=<none> num_class=<none> eps=1e-9 size_average=<true> scale=1.0 ignore_label=-1<block_start>super(FocalLoss 
self).__init__()<line_sep>self._axis=axis<line_sep>self._alpha=alpha<line_sep>self._gamma=gamma<line_sep>self._ignore_label=ignore_label<line_sep>self._weight=weight<if>weight<is><not><none><else>1.0<line_sep>self._batch_axis=batch_axis<line_sep>self._scale=scale<line_sep>self._num_class=num_class<line_sep>self._from_logits=from_logits<line_sep>self._eps=eps<line_sep>self._size_average=size_average<block_end><def_stmt>forward self pred label sample_weight=<none><block_start>one_hot=label<g>0.5<line_sep>sample_weight=label<ne>self._ignore_label<if_stmt><not>self._from_logits<block_start>pred=torch.sigmoid(pred)<block_end>alpha=torch.where(one_hot self._alpha<times>sample_weight (1-self._alpha)<times>sample_weight)<line_sep>pt=torch.where(sample_weight 1.0-torch.abs(label-pred) torch.ones_like(pred))<line_sep>beta=(1-pt)<power>self._gamma<line_sep>loss=-alpha<times>beta<times>torch.log(torch.min(pt+self._eps torch.ones(1 dtype=torch.float).to(pt.device)))<line_sep>loss=self._weight<times>(loss<times>sample_weight)<if_stmt>self._size_average<block_start>tsum=torch.sum(sample_weight dim=misc.get_dims_with_exclusion(label.dim() self._batch_axis))<line_sep>loss=torch.sum(loss dim=misc.get_dims_with_exclusion(loss.dim() self._batch_axis))/(tsum+self._eps)<block_end><else_stmt><block_start>loss=torch.sum(loss dim=misc.get_dims_with_exclusion(loss.dim() self._batch_axis))<block_end><return>self._scale<times>loss<block_end><block_end><class_stmt>SoftIoU(nn.Module)<block_start><def_stmt>__init__ self from_sigmoid=<false> ignore_label=-1<block_start>super().__init__()<line_sep>self._from_sigmoid=from_sigmoid<line_sep>self._ignore_label=ignore_label<block_end><def_stmt>forward self pred label<block_start>label=label.view(pred.size())<line_sep>sample_weight=label<ne>self._ignore_label<if_stmt><not>self._from_sigmoid<block_start>pred=torch.sigmoid(pred)<block_end>loss=1.0-torch.sum(pred<times>label<times>sample_weight dim=(1 2 3))/(torch.sum(torch.max(pred label)<times>sample_weight dim=(1 2 3))+1e-8)<line_sep><return>loss<block_end><block_end><class_stmt>SigmoidBinaryCrossEntropyLoss(nn.Module)<block_start><def_stmt>__init__ self from_sigmoid=<false> weight=<none> batch_axis=0 ignore_label=-1<block_start>super(SigmoidBinaryCrossEntropyLoss self).__init__()<line_sep>self._from_sigmoid=from_sigmoid<line_sep>self._ignore_label=ignore_label<line_sep>self._weight=weight<if>weight<is><not><none><else>1.0<line_sep>self._batch_axis=batch_axis<block_end><def_stmt>forward self pred label<block_start>label=label.view(pred.size())<line_sep>sample_weight=label<ne>self._ignore_label<line_sep>label=torch.where(sample_weight label torch.zeros_like(label))<if_stmt><not>self._from_sigmoid<block_start>loss=torch.relu(pred)-pred<times>label+F.softplus(-torch.abs(pred))<block_end><else_stmt><block_start>eps=1e-12<line_sep>loss=-(torch.log(pred+eps)<times>label+torch.log(1.-pred+eps)<times>(1.-label))<block_end>loss=self._weight<times>(loss<times>sample_weight)<line_sep><return>torch.mean(loss dim=misc.get_dims_with_exclusion(loss.dim() self._batch_axis))<block_end><block_end>
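# --- Minimal smoke test for NormalizedFocalLossSigmoid defined above: random logits
# against a random binary mask. With size_average=True the loss is reduced over the
# spatial dimensions, leaving one value per sample in the batch.
import torch

loss_fn = NormalizedFocalLossSigmoid(alpha=0.5, gamma=2)
pred = torch.randn(4, 1, 64, 64)                   # raw logits
label = (torch.rand(4, 1, 64, 64) > 0.5).float()   # binary ground truth
loss = loss_fn(pred, label)
print(loss.shape)   # torch.Size([4])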
"""Internal implementation of `~certbot_dns_gehirn.dns_gehirn` plugin."""<line_sep>
# Lint as: python3 # Copyright 2020 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tests for rate_curve.py."""<import_from_stmt>absl.testing parameterized<import_stmt>tensorflow.compat.v2<as>tf<import_stmt>tf_quant_finance<as>tff<import_from_stmt>tensorflow.python.framework test_util# pylint: disable=g-direct-tensorflow-import volatility_surface=tff.experimental.pricing_platform.framework.market_data.volatility_surface<line_sep>dateslib=tff.datetime<line_sep>core=tff.experimental.pricing_platform.framework.core<line_sep>InterpolationMethod=core.interpolation_method.InterpolationMethod<line_sep># This function can't be moved to SetUp since that would break graph mode # execution <def_stmt>build_surface dim default_interp=<true><block_start>dtype=tf.float64<line_sep>year=dim<times>[[2021 2022 2023 2025 2050]]<line_sep>month=dim<times>[[2 2 2 2 2]]<line_sep>day=dim<times>[[8 8 8 8 8]]<line_sep>expiries=tff.datetime.dates_from_year_month_day(year month day)<line_sep>valuation_date=[(2020 6 24)]<line_sep>strikes=dim<times>[[[1500 1550 1510] [1500 1550 1510] [1500 1550 1510] [1500 1550 1510] [1500 1550 1510]]]<line_sep>volatilities=dim<times>[[[0.1 0.12 0.13] [0.15 0.2 0.15] [0.1 0.2 0.1] [0.1 0.2 0.1] [0.1 0.1 0.3]]]<line_sep>interpolator=<none><if_stmt><not>default_interp<block_start>expiry_times=tf.cast(tff.datetime.convert_to_date_tensor(valuation_date).days_until(expiries) dtype=dtype)/365.0<line_sep>interpolator_obj=tff.math.interpolation.interpolation_2d.Interpolation2D(expiry_times tf.convert_to_tensor(strikes dtype=dtype) volatilities)<line_sep>interpolator=interpolator_obj.interpolate<block_end><return>volatility_surface.VolatilitySurface(valuation_date expiries strikes volatilities interpolator=interpolator dtype=dtype)<block_end>@test_util.run_all_in_graph_and_eager_modes<class_stmt>VolatilitySurfaceTest(tf.test.TestCase parameterized.TestCase)<block_start><def_stmt>test_volatility_1d self<block_start>vol_surface=build_surface(1)<line_sep>expiry=tff.datetime.dates_from_tuples([(2020 6 16) (2021 6 1) (2025 1 1)])<line_sep>vols=vol_surface.volatility(strike=[[1525 1400 1570]] expiry_dates=expiry.expand_dims(axis=0))<line_sep>self.assertAllClose(self.evaluate(vols) [[0.14046875 0.11547945 0.1]] atol=1e-6)<block_end><def_stmt>test_volatility_2d self<block_start>vol_surface=build_surface(2)<line_sep>expiry=tff.datetime.dates_from_ordinals([[737592 737942 739252] [737592 737942 739252]])<line_sep>vols=vol_surface.volatility(strike=[[1525 1400 1570] [1525 1505 1570]] expiry_dates=expiry)<line_sep>self.assertAllClose(self.evaluate(vols) [[0.14046875 0.11547945 0.1] [0.14046875 0.12300392 0.1]] atol=1e-6)<block_end><def_stmt>test_volatility_2d_interpolation self<block_start>"""Test using externally specified interpolator."""<line_sep>vol_surface=build_surface(2 <false>)<line_sep>expiry=tff.datetime.dates_from_ordinals([[737592 737942 739252] [737592 737942 739252]])<line_sep>vols=vol_surface.volatility(strike=[[1525 1400 1570] [1525 1505 1570]] 
expiry_dates=expiry)<line_sep>self.assertAllClose(self.evaluate(vols) [[0.14046875 0.11547945 0.1] [0.14046875 0.12300392 0.1]] atol=1e-6)<block_end><def_stmt>test_volatility_2d_floats self<block_start>vol_surface=build_surface(2)<line_sep>expiry=tff.datetime.dates_from_ordinals([[737592 737942 739252] [737592 737942 739252]])<line_sep>valuation_date=tff.datetime.convert_to_date_tensor([(2020 6 24)])<line_sep>expiries=tf.cast(valuation_date.days_until(expiry) dtype=vol_surface._dtype)/365.0<line_sep>vols=vol_surface.volatility(strike=[[1525 1400 1570] [1525 1505 1570]] expiry_times=expiries)<line_sep>self.assertAllClose(self.evaluate(vols) [[0.14046875 0.11547945 0.1] [0.14046875 0.12300392 0.1]] atol=1e-6)<block_end><block_end><if_stmt>__name__<eq>'__main__'<block_start>tf.test.main()<block_end>
<import_stmt>pytest<import_stmt>sys<import_stmt>time<import_stmt>json<import_stmt>requests<import_from_stmt>requests.adapters HTTPAdapter<import_from_stmt>requests RequestException ReadTimeout<import_from_stmt>test_adapter ts_call_single<import_from_stmt>test_adapter resource_release<import_from_stmt>const mqtt_device_info<import_from_stmt>const mqtt_testid<import_from_stmt>com.huawei.iotplatform.constant.Constant Constant<import_from_stmt>com.huawei.iotplatform.utils.DictUtil DictUtil<import_from_stmt>com.huawei.iotplatform.client.invokeapi.Authentication Authentication<import_from_stmt>com.huawei.iotplatform.client.invokeapi.DeviceManagement DeviceManagement<import_from_stmt>com.huawei.iotplatform.client.invokeapiTest.DeviceManagementTest DeviceManagementTest<import_from_stmt>com.huawei.iotplatform.client.dto.AuthOutDTO AuthOutDTO<import_from_stmt>com.huawei.iotplatform.client.dto.RegDirectDeviceOutDTO RegDirectDeviceOutDTO<def_stmt>test_mqtt_al_init <block_start>fname=sys._getframe().f_code.co_name<line_sep>result=ts_call_single(mqtt_testid.TEST_MQTT_AL_INIT fname "192.168.1.103" "8883" "test" "test123" "YES" "CAVALID")<line_sep>print(result)<assert_stmt>(result)<assert_stmt>(result.test_id<eq>mqtt_testid.TEST_MQTT_AL_INIT)<assert_stmt>(result.ret_code<eq>0)<block_end><def_stmt>test_mqtt_al_install <block_start>fname=sys._getframe().f_code.co_name<line_sep>result=ts_call_single(mqtt_testid.TEST_MQTT_AL_INSTALL fname)<assert_stmt>(result)<assert_stmt>(result.test_id<eq>mqtt_testid.TEST_MQTT_AL_INSTALL)<assert_stmt>(result.ret_code<eq>0)<block_end><def_stmt>test_mqtt_al_uninstall <block_start>fname=sys._getframe().f_code.co_name<line_sep>result=ts_call_single(mqtt_testid.TEST_MQTT_AL_UNINSTALL fname)<assert_stmt>(result)<assert_stmt>(result.test_id<eq>mqtt_testid.TEST_MQTT_AL_UNINSTALL)<assert_stmt>(result.ret_code<eq>0)<block_end><def_stmt>test_mqtt_al_connect <block_start>fname=sys._getframe().f_code.co_name<line_sep>result=ts_call_single(mqtt_testid.TEST_MQTT_AL_CONNECT fname)<assert_stmt>(result)<assert_stmt>(result.test_id<eq>mqtt_testid.TEST_MQTT_AL_CONNECT)<assert_stmt>(result.ret_code<eq>0)<block_end><def_stmt>test_mqtt_al_disconnect <block_start>fname=sys._getframe().f_code.co_name<line_sep>result=ts_call_single(mqtt_testid.TEST_MQTT_AL_DISCONNECT fname)<assert_stmt>(result)<assert_stmt>(result.test_id<eq>mqtt_testid.TEST_MQTT_AL_DISCONNECT)<assert_stmt>(result.ret_code<eq>0)<block_end><def_stmt>test_mqtt_al_sub <block_start>fname=sys._getframe().f_code.co_name<line_sep>result=ts_call_single(mqtt_testid.TEST_MQTT_AL_SUBSCRIBLE fname "test")<assert_stmt>(result)<assert_stmt>(result.test_id<eq>mqtt_testid.TEST_MQTT_AL_SUBSCRIBLE)<assert_stmt>(result.ret_code<eq>0)<block_end><def_stmt>test_mqtt_al_unsub <block_start>fname=sys._getframe().f_code.co_name<line_sep>result=ts_call_single(mqtt_testid.TEST_MQTT_AL_UNSUBSCRIBLE fname "test")<assert_stmt>(result)<assert_stmt>(result.test_id<eq>mqtt_testid.TEST_MQTT_AL_UNSUBSCRIBLE)<assert_stmt>(result.ret_code<eq>0)<block_end><def_stmt>test_mqtt_al_pub <block_start>fname=sys._getframe().f_code.co_name<line_sep>result=ts_call_single(mqtt_testid.TEST_MQTT_AL_PBULISH fname "test22" "hello world")<assert_stmt>(result)<assert_stmt>(result.test_id<eq>mqtt_testid.TEST_MQTT_AL_PBULISH)<assert_stmt>(result.ret_code<eq>0)<block_end><def_stmt>test_mqtt_al_checkstatus <block_start>fname=sys._getframe().f_code.co_name<line_sep>result=ts_call_single(mqtt_testid.TEST_MQTT_AL_CHECKSTATUS 
fname)<assert_stmt>(result)<assert_stmt>(result.test_id<eq>mqtt_testid.TEST_MQTT_AL_CHECKSTATUS)<assert_stmt>(result.ret_code<eq>0)<block_end><if_stmt>__name__<eq>'__main__'<block_start>print("hello world")<line_sep>test_mqtt_al_init()<line_sep>test_mqtt_al_install()<line_sep>test_mqtt_al_connect()<line_sep>test_mqtt_al_sub()<line_sep>test_mqtt_al_unsub()<line_sep>test_mqtt_al_pub()<line_sep>test_mqtt_al_checkstatus()<line_sep>test_mqtt_al_disconnect()<line_sep>test_mqtt_al_uninstall()<block_end>
<import_from_stmt>pymedphys._imports numpy<as>np<def_stmt>running_mean x N<block_start>out=np.zeros_like(x dtype=np.float64)<line_sep>dim_len=x.shape[0]<for_stmt>i range(dim_len)<block_start><if_stmt>N%2<eq>0<block_start>a,b=i-(N-1)<floordiv>2 i+(N-1)<floordiv>2+2<block_end><else_stmt><block_start>a,b=i-(N-1)<floordiv>2 i+(N-1)<floordiv>2+1<block_end># cap indices to min and max indices a=max(0 a)<line_sep>b=min(dim_len b)<line_sep>out[i]=np.mean(x[a:b])<block_end><return>out<block_end>
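# Usage sketch for running_mean above (illustrative addition, not part of the original file):
# each output element averages a window of up to N samples centred on that index, with the
# window truncated at the array edges, so the output has the same length as the input.
# >>> x = np.arange(5, dtype=np.float64)
# >>> running_mean(x, 3)   # -> array([0.5, 1. , 2. , 3. , 3.5])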
print('first')<line_sep>print('second')<if_stmt><true><block_start>x=1<line_sep>y=2<block_end>print('hi')<line_sep>
<import_stmt>unittest<import_from_stmt>wifipumpkin3.core.common.platforms Linux<import_stmt>wifipumpkin3.core.utility.constants<as>C<import_from_stmt>wifipumpkin3.core.utility.collection SettingsINI<import_stmt>tempfile<import_stmt>requests<import_stmt>os<import_from_stmt>os path<import_from_stmt>zipfile ZipFile<class_stmt>TestDownloadCaptiveFlaskTemplates(unittest.TestCase)<block_start><def_stmt>setUp self<block_start>url="https://github.com/mh4x0f/extra-captiveflask/archive/master.zip"<line_sep>save_path=tempfile.gettempdir()+"/master.zip"<line_sep>chunk_size=128<line_sep>r=requests.get(url stream=<true>)<with_stmt>open(save_path "wb")<as>fd<block_start><for_stmt>chunk r.iter_content(chunk_size=chunk_size)<block_start>fd.write(chunk)<block_end><block_end>self.assertTrue(path.isfile(save_path))<block_end><def_stmt>test_unzip_file self<block_start>path_to_zip_file=tempfile.gettempdir()+"/master.zip"<with_stmt>ZipFile(path_to_zip_file "r")<as>zip_ref<block_start>zip_ref.extractall(tempfile.gettempdir())<block_end>extracted_filepath=tempfile.gettempdir()+"/extra-captiveflask-master"<line_sep>self.assertTrue(path.isdir(extracted_filepath))<block_end><block_end><if_stmt>__name__<eq>"__main__"<block_start>unittest.main()<block_end>
# -*- coding: utf-8 -*-
# @Time    : 2019/12/7 14:46
# @Author  : zhoujun
<import_stmt>numpy<as>np<import_stmt>cv2<import_stmt>os<import_stmt>random<import_from_stmt>tqdm tqdm<line_sep># calculate means and std
train_txt_path='./train_val_list.txt'<line_sep>CNum=10000# how many images to sample for the statistics
img_h,img_w=640 640<line_sep>imgs=np.zeros([img_w img_h 3 1])<line_sep>means,stdevs=[] []<with_stmt>open(train_txt_path 'r')<as>f<block_start>lines=f.readlines()<line_sep>random.shuffle(lines)# shuffle, so images are picked at random
<for_stmt>i tqdm(range(CNum))<block_start>img_path=lines[i].split('\t')[0]<line_sep>img=cv2.imread(img_path)<line_sep>img=cv2.resize(img (img_h img_w))<line_sep>img=img[: : : np.newaxis]<line_sep>imgs=np.concatenate((imgs img) axis=3)<block_end><block_end># print(i)
imgs=imgs.astype(np.float32)/255.<for_stmt>i tqdm(range(3))<block_start>pixels=imgs[: : i :].ravel()# flatten into a 1-D array
means.append(np.mean(pixels))<line_sep>stdevs.append(np.std(pixels))<block_end># cv2 reads images as BGR; PIL/Skimage read RGB, so they would need no conversion
means.reverse()# BGR --> RGB
stdevs.reverse()<line_sep>print("normMean = {}".format(means))<line_sep>print("normStd = {}".format(stdevs))<line_sep>print('transforms.Normalize(normMean = {}, normStd = {})'.format(means stdevs))<line_sep>
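# Usage sketch (illustrative addition): the printed means/stdevs are intended to be pasted into a
# torchvision normalisation transform, for example:
# >>> from torchvision import transforms
# >>> normalize = transforms.Normalize(mean=means, std=stdevs)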
# Generated by Django 1.11.7 on 2017-12-12 19:08 <import_from_stmt>django.db migrations models<import_stmt>django.db.models.deletion<class_stmt>Migration(migrations.Migration)<block_start>dependencies=[('scheduling' '0008_timedschedule_event_type') ]<line_sep>operations=[migrations.CreateModel(name='RandomTimedEvent' fields=[('id' models.AutoField(auto_created=<true> primary_key=<true> serialize=<false> verbose_name='ID')) ('order' models.IntegerField()) ('day' models.IntegerField()) ('time' models.TimeField()) ('window_length' models.PositiveIntegerField()) ('custom_content' models.ForeignKey(null=<true> on_delete=django.db.models.deletion.CASCADE to='scheduling.CustomContent')) ('email_content' models.ForeignKey(null=<true> on_delete=django.db.models.deletion.CASCADE to='scheduling.EmailContent')) ('ivr_survey_content' models.ForeignKey(null=<true> on_delete=django.db.models.deletion.CASCADE to='scheduling.IVRSurveyContent')) ('schedule' models.ForeignKey(on_delete=django.db.models.deletion.CASCADE to='scheduling.TimedSchedule')) ('sms_content' models.ForeignKey(null=<true> on_delete=django.db.models.deletion.CASCADE to='scheduling.SMSContent')) ('sms_survey_content' models.ForeignKey(null=<true> on_delete=django.db.models.deletion.CASCADE to='scheduling.SMSSurveyContent')) ] options={'abstract':<false> } ) ]<block_end>
# import kratos <import_stmt>KratosMultiphysics<as>Kratos<import_stmt>KratosMultiphysics.RANSApplication<as>KratosRANS<line_sep># import formulation interface <import_from_stmt>KratosMultiphysics.RANSApplication.formulations.rans_formulation RansFormulation<line_sep># import formulations <import_from_stmt>KratosMultiphysics.RANSApplication.formulations.incompressible_potential_flow IncompressiblePotentialFlowRansFormulation<import_from_stmt>KratosMultiphysics.RANSApplication.formulations.turbulence_models.k_omega_sst_rans_formulation KOmegaSSTRansFormulation<import_from_stmt>KratosMultiphysics.RANSApplication.formulations.fractional_step.fractional_step_velocity_pressure_rans_formulation FractionalStepVelocityPressureRansFormulation<class_stmt>FractionalStepKOmegaSSTRansFormulation(RansFormulation)<block_start><def_stmt>__init__ self model_part settings<block_start>super().__init__(model_part settings)<line_sep>default_settings=Kratos.Parameters(r''' { "formulation_name": "fractional_step_k_epsilon", "incompressible_potential_flow_initialization_settings": {}, "fractional_step_flow_solver_settings": {}, "k_omega_sst_solver_settings": {}, "max_iterations": 1 }''')<line_sep>settings.ValidateAndAssignDefaults(default_settings)<if_stmt>(<not>settings["incompressible_potential_flow_initialization_settings"].IsEquivalentTo(Kratos.Parameters("{}")))<block_start>self.incompressible_potential_flow_formulation=IncompressiblePotentialFlowRansFormulation(model_part settings["incompressible_potential_flow_initialization_settings"])<line_sep>self.AddRansFormulation(self.incompressible_potential_flow_formulation)<block_end>self.fractional_step_formulation=FractionalStepVelocityPressureRansFormulation(model_part settings["fractional_step_flow_solver_settings"])<line_sep>self.AddRansFormulation(self.fractional_step_formulation)<line_sep>self.k_omega_sst_formulation=KOmegaSSTRansFormulation(model_part settings["k_omega_sst_solver_settings"])<line_sep>self.AddRansFormulation(self.k_omega_sst_formulation)<line_sep>self.SetMaxCouplingIterations(settings["max_iterations"].GetInt())<block_end><def_stmt>SetConstants self settings<block_start>self.k_omega_sst_formulation.SetConstants(settings)<block_end><def_stmt>Initialize self<block_start>super().Initialize()<line_sep>nut_nodal_update_process=KratosRANS.RansNutNodalUpdateProcess(self.GetBaseModelPart().GetModel() self.GetBaseModelPart().Name self.k_omega_sst_formulation.echo_level)<line_sep>self.k_omega_sst_formulation.AddProcess(nut_nodal_update_process)<block_end><block_end>
<import_from_stmt>wouso.core.tests WousoTest<import_from_stmt>wouso.interface.top.models TopUser<class_stmt>TopTest(WousoTest)<block_start><def_stmt>test_challenges self<block_start>player=self._get_player()<line_sep>top_player=player.get_extension(TopUser)<line_sep>self.assertEqual(top_player.won_challenges 0)<block_end><block_end>
# Copyright 2019 <NAME>. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ========================================================================= """Tests the functions for combining signatures."""<import_stmt>gc<import_stmt>iisignature<import_stmt>pytest<import_stmt>random<import_stmt>torch<import_from_stmt>torch autograd<import_stmt>weakref<import_from_stmt>helpers helpers<as>h<import_from_stmt>helpers validation<as>v<import_from_stmt>helpers reimplementation<as>r<line_sep>tests=['signature_combine' 'multi_signature_combine']<line_sep>depends=[]<line_sep>signatory=v.validate_tests(tests depends)<line_sep># We have to use the iisignature implementation here, rather than our own, as else we end up with a dependency cycle # in the tests, between signatory.signature and signatory.signature_combine. <class_stmt>_IisignatureSignatureFunction(autograd.Function)<block_start>@staticmethod<def_stmt>forward ctx path depth<block_start>ctx.path=path.detach().cpu()<line_sep>ctx.depth=depth<line_sep>ctx.device=path.device<line_sep>ctx.dtype=path.dtype<line_sep><return>torch.tensor(iisignature.sig(ctx.path ctx.depth) device=ctx.device dtype=ctx.dtype)<block_end>@staticmethod<def_stmt>backward ctx grad<block_start><return>torch.tensor(iisignature.sigbackprop(grad.cpu() ctx.path ctx.depth) device=ctx.device dtype=ctx.dtype) <none><block_end><block_end><def_stmt>iisignature_signature path depth stream=<false> basepoint=<false> inverse=<false> scalar_term=<false><block_start>"""Duplicates signatory.signature's functionality using iisignature, for testing purposes."""<def_stmt>fn path depth<block_start>signature=_IisignatureSignatureFunction.apply(path depth)<if_stmt>scalar_term<block_start>out=torch.ones(signature.size(0) 1+signature.size(1) dtype=signature.dtype device=signature.device)<line_sep>out[: 1:]=signature<line_sep>signature=out<block_end><return>signature<block_end><return>r.iisignature_signature_or_logsignature(fn path depth stream basepoint inverse)<block_end><def_stmt>test_forward <block_start>"""Tests that the forward calculation for combing signatures produces the correct values."""<for_stmt>signature_combine,amount ((<true> 2) (<false> 1) (<false> 2) (<false> 3) (<false> 10))<block_start><for_stmt>signature_grad (<false> <true>)<block_start><for_stmt>device h.get_devices()<block_start><for_stmt>batch_size (1 2 5)<block_start>input_stream=2<for_stmt>input_channels (1 2 6)<block_start><for_stmt>depth (1 2 4 6)<block_start><for_stmt>inverse (<false> <true>)<block_start><for_stmt>scalar_term (<false> <true>)<block_start>_test_forward(signature_combine signature_grad amount device batch_size input_stream input_channels depth inverse scalar_term)<block_end><block_end><block_end><block_end><block_end><block_end><block_end><block_end><block_end><def_stmt>_test_forward signature_combine signature_grad amount device batch_size input_stream input_channels depth inverse scalar_term<block_start>paths=[]<for_stmt>_ range(amount)<block_start>paths.append(torch.rand(batch_size input_stream 
input_channels device=device dtype=torch.double))<block_end>signatures=[]<line_sep>basepoint=<false><for_stmt>path paths<block_start>signature=iisignature_signature(path depth basepoint=basepoint inverse=inverse scalar_term=scalar_term)<if_stmt>signature_grad<block_start>signature.requires_grad_()<block_end>signatures.append(signature)<line_sep>basepoint=path[: -1]<block_end><if_stmt>signature_combine<block_start>combined_signatures=signatory.signature_combine(signatures[0] signatures[1] input_channels depth inverse=inverse scalar_term=scalar_term)<block_end><else_stmt><block_start>combined_signatures=signatory.multi_signature_combine(signatures input_channels depth inverse=inverse scalar_term=scalar_term)<block_end>combined_paths=torch.cat(paths dim=1)<line_sep>true_combined_signatures=iisignature_signature(combined_paths depth inverse=inverse scalar_term=scalar_term)<line_sep>h.diff(combined_signatures true_combined_signatures)<if_stmt>signature_grad<block_start>ctx=combined_signatures.grad_fn<assert_stmt>type(ctx).__name__<eq>'_SignatureCombineFunctionBackward'<line_sep>ref=weakref.ref(ctx)<del_stmt>ctx<del_stmt>combined_signatures<line_sep>gc.collect()<assert_stmt>ref()<is><none><block_end><else_stmt><block_start><assert_stmt>combined_signatures.grad_fn<is><none><block_end><block_end><def_stmt>test_backward <block_start>"""Tests that the backwards calculation for combining signatures produces the correct values."""<for_stmt>signature_combine,amount ((<true> 2) (<false> 1) (<false> 2) (<false> 3) (<false> 10))<block_start><for_stmt>device h.get_devices()<block_start><for_stmt>batch_size,input_stream,input_channels h.random_sizes()<block_start><for_stmt>depth (1 2 4 6)<block_start><for_stmt>scalar_term (<false> <true>)<block_start>inverse=random.choice([<false> <true>])<line_sep>_test_backward(signature_combine amount device batch_size input_stream input_channels depth inverse scalar_term)<block_end><block_end><block_end><block_end><block_end><block_end><def_stmt>_test_backward signature_combine amount device batch_size input_stream input_channels depth inverse scalar_term<block_start>paths=[]<for_stmt>_ range(amount)<block_start>paths.append(torch.rand(batch_size input_stream input_channels device=device dtype=torch.double requires_grad=<true>))<block_end>signatures=[]<line_sep>basepoint=<false><for_stmt>path paths<block_start>signature=iisignature_signature(path depth basepoint=basepoint inverse=inverse scalar_term=scalar_term)<line_sep>signatures.append(signature)<line_sep>basepoint=path[: -1]<block_end># This is the test we'd like to run here, but it takes too long. # Furthermore we'd also prefer to only go backwards through the signature combine, not through the signature, but # we can't really do that with our faster alternative. 
# # if signature_combine: # def check_fn(*signatures): # return signatory.signature_combine(signatures[0], signatures[1], input_channels, depth, inverse=inverse) # else: # def check_fn(*signatures): # return signatory.multi_signature_combine(signatures, input_channels, depth, inverse=inverse) # try: # autograd.gradcheck(check_fn, tuple(signatures), atol=2e-05, rtol=0.002) # except RuntimeError: # pytest.fail() <if_stmt>signature_combine<block_start>combined_signatures=signatory.signature_combine(signatures[0] signatures[1] input_channels depth inverse=inverse scalar_term=scalar_term)<block_end><else_stmt><block_start>combined_signatures=signatory.multi_signature_combine(signatures input_channels depth inverse=inverse scalar_term=scalar_term)<block_end>grad=torch.rand_like(combined_signatures)<line_sep>combined_signatures.backward(grad)<line_sep>path_grads=[path.grad.clone()<for>path paths]<for_stmt>path paths<block_start>path.grad.zero_()<block_end>true_signature=iisignature_signature(torch.cat(paths dim=1) depth inverse=inverse scalar_term=scalar_term)<line_sep>true_signature.backward(grad)<for_stmt>path_grad,path zip(path_grads paths)<block_start>h.diff(path_grad path.grad)<block_end><block_end><def_stmt>test_no_adjustments <block_start>"""Tests that the calculations for combining signatures don't modify memory they're not supposed to."""<for_stmt>signature_combine,amount ((<true> 2) (<false> 1) (<false> 2) (<false> 3) (<false> 10))<block_start><for_stmt>signature_grad (<false> <true>)<block_start><for_stmt>device h.get_devices()<block_start><for_stmt>batch_size,input_stream,input_channels h.random_sizes()<block_start><for_stmt>depth (1 2 5)<block_start><for_stmt>inverse (<false> <true>)<block_start><for_stmt>scalar_term (<false> <true>)<block_start>_test_no_adjustments(signature_combine amount device batch_size input_stream input_channels depth inverse signature_grad scalar_term)<block_end><block_end><block_end><block_end><block_end><block_end><block_end><block_end><def_stmt>_test_no_adjustments signature_combine amount device batch_size input_stream input_channels depth inverse signature_grad scalar_term<block_start>paths=[]<for_stmt>_ range(amount)<block_start>paths.append(torch.rand(batch_size input_stream input_channels device=device dtype=torch.double))<block_end>signatures=[]<line_sep>signatures_clone=[]<line_sep>basepoint=<false><for_stmt>path paths<block_start>signature=iisignature_signature(path depth basepoint=basepoint inverse=inverse scalar_term=scalar_term)<line_sep>signatures_clone.append(signature.clone())<if_stmt>signature_grad<block_start>signature.requires_grad_()<block_end>signatures.append(signature)<line_sep>basepoint=path[: -1]<block_end><if_stmt>signature_combine<block_start>combined_signatures=signatory.signature_combine(signatures[0] signatures[1] input_channels depth inverse=inverse scalar_term=scalar_term)<block_end><else_stmt><block_start>combined_signatures=signatory.multi_signature_combine(signatures input_channels depth inverse=inverse scalar_term=scalar_term)<block_end><if_stmt>signature_grad<block_start>grad=torch.rand_like(combined_signatures)<line_sep>grad_clone=grad.clone()<line_sep>combined_signatures_clone=combined_signatures.clone()<line_sep>combined_signatures.backward(grad)<block_end><for_stmt>signature,signature_clone zip(signatures signatures_clone)<block_start>h.diff(signature signature_clone)<block_end><if_stmt>signature_grad<block_start>h.diff(grad grad_clone)<line_sep>h.diff(combined_signatures 
combined_signatures_clone)<block_end><block_end>@pytest.mark.skipif(<not>torch.cuda.is_available() reason='CUDA not available')<def_stmt>test_memory_leaks <block_start>"""Checks that there are no memory leaks."""<for_stmt>signature_combine,amount ((<true> 2) (<false> 1) (<false> 2) (<false> 3) (<false> 10))<block_start><for_stmt>signature_grad (<false> <true>)<block_start><for_stmt>batch_size,input_stream,input_channels h.random_sizes()<block_start><for_stmt>depth (1 2 5)<block_start><for_stmt>inverse (<false> <true>)<block_start><for_stmt>scalar_term (<false> <true>)<block_start>_test_memory_leaks(signature_combine amount batch_size input_stream input_channels depth inverse signature_grad scalar_term)<block_end><block_end><block_end><block_end><block_end><block_end><block_end><def_stmt>_test_memory_leaks signature_combine amount batch_size input_stream input_channels depth inverse signature_grad scalar_term<block_start><def_stmt>one_iteration <block_start>gc.collect()<line_sep>torch.cuda.synchronize()<line_sep>torch.cuda.reset_max_memory_allocated()<line_sep>paths=[]<for_stmt>_ range(amount)<block_start>paths.append(torch.rand(batch_size input_stream input_channels device='cuda' dtype=torch.double))<block_end>signatures=[]<line_sep>basepoint=<false><for_stmt>path paths<block_start>signature=iisignature_signature(path depth basepoint=basepoint inverse=inverse scalar_term=scalar_term)<if_stmt>signature_grad<block_start>signature.requires_grad_()<block_end>signatures.append(signature)<block_end><if_stmt>signature_combine<block_start>combined_signatures=signatory.signature_combine(signatures[0] signatures[1] input_channels depth inverse=inverse scalar_term=scalar_term)<block_end><else_stmt><block_start>combined_signatures=signatory.multi_signature_combine(signatures input_channels depth inverse=inverse scalar_term=scalar_term)<block_end><if_stmt>signature_grad<block_start>grad=torch.rand_like(combined_signatures)<line_sep>combined_signatures.backward(grad)<block_end>torch.cuda.synchronize()<line_sep><return>torch.cuda.max_memory_allocated()<block_end>memory_used=one_iteration()<for_stmt>repeat range(10)<block_start><assert_stmt>one_iteration()<le>memory_used<block_end><block_end>
<import_stmt>logging<import_stmt>boto3<import_from_stmt>botocore.exceptions ClientError<line_sep>lf=boto3.client('lakeformation')<line_sep>permissions=[]<line_sep>permissionResp=lf.list_permissions()<line_sep>permissions.extend(permissionResp['PrincipalResourcePermissions'])<while_stmt>'NextToken'<in>permissionResp<block_start>print(permissionResp)<line_sep>permissionResp=lf.list_permissions(NextToken=permissionResp['NextToken'])<line_sep>permissions.extend(permissionResp['PrincipalResourcePermissions'])<block_end>progress=0<for_stmt>grant permissions<block_start>print("\r"+str(progress)+"/"+str(len(permissions)) end='')<line_sep>progress<augadd>1<if_stmt>(grant['Principal']['DataLakePrincipalIdentifier']<eq>"IAM_ALLOWED_PRINCIPALS")<block_start>lf.revoke_permissions(Principal=grant["Principal"] Resource=grant["Resource"] Permissions=grant["Permissions"] PermissionsWithGrantOption=grant["PermissionsWithGrantOption"])<block_end><block_end>
# coding=utf-8 <import_stmt>unittest<import_from_stmt>hashlib sha512<import_stmt>os<import_stmt>os.path<import_stmt>io<import_from_stmt>base64 b64decode<import_from_stmt>contextlib contextmanager<import_from_stmt>datetime timedelta<import_from_stmt>tempfile mkdtemp mkstemp<import_from_stmt>shutil rmtree<import_from_stmt>subprocess check_call<import_from_stmt>json load<as>load_json_file<import_from_stmt>time time<as>time_now<import_from_stmt>flask_testing TestCase<as>_FlaskTestCase<import_stmt>pygit2<import_from_stmt>restfulgit.app_factory create_app<line_sep>RESTFULGIT_REPO=os.path.abspath(os.path.join(os.path.dirname(__file__) '..'))<line_sep>PARENT_DIR_OF_RESTFULGIT_REPO=os.path.abspath(os.path.join(RESTFULGIT_REPO '..'))<line_sep>CONFIG_FILE=os.path.join(RESTFULGIT_REPO 'example_config.py')<line_sep>TEST_SUBDIR=os.path.join(RESTFULGIT_REPO 'tests')<line_sep>FIXTURES_DIR=os.path.join(TEST_SUBDIR 'fixtures')<line_sep>GIT_MIRROR_DESCRIPTION_FILEPATH=os.path.join(RESTFULGIT_REPO 'description')<line_sep>NORMAL_CLONE_DESCRIPTION_FILEPATH=os.path.join(RESTFULGIT_REPO '.git' 'description')<line_sep>FIRST_COMMIT="07b9bf1540305153ceeb4519a50b588c35a35464"<line_sep>TREE_OF_FIRST_COMMIT="6ca22167185c31554aa6157306e68dfd612d6345"<line_sep>BLOB_FROM_FIRST_COMMIT="ae9d90706c632c26023ce599ac96cb152673da7c"<line_sep>TAG_FOR_FIRST_COMMIT="1dffc031c9beda43ff94c526cbc00a30d231c079"<line_sep>FIFTH_COMMIT="c04112733fe2db2cb2f179fca1a19365cf15fef5"<line_sep>EMPTY_COMMIT="c8ec343d7260ba9577045a05bccd931867644f28"<line_sep>IMPROBABLE_SHA="f"<times>40<def_stmt>delete_file_quietly filepath<block_start><try_stmt><block_start>os.remove(filepath)<block_end><except_stmt>EnvironmentError<as>err<block_start><pass><block_end><block_end><class_stmt>_RestfulGitTestCase(_FlaskTestCase)<block_start><def_stmt>create_app self<block_start>app=create_app()<line_sep>app.config.from_pyfile(CONFIG_FILE)<line_sep>app.config['RESTFULGIT_REPO_BASE_PATH']=PARENT_DIR_OF_RESTFULGIT_REPO<line_sep><return>app<block_end><def_stmt>assertJsonError self resp<block_start>json=resp.json<line_sep>self.assertIsInstance(json dict)<line_sep>self.assertIsInstance(json.get('error') str)<block_end><def_stmt>assertJson400 self resp<block_start>self.assert400(resp)<line_sep>self.assertJsonError(resp)<block_end><def_stmt>assertJson404 self resp<block_start>self.assert404(resp)<line_sep>self.assertJsonError(resp)<block_end><def_stmt>assertContentTypeIsDiff self resp<block_start>self.assertEqual(resp.headers.get_all('Content-Type') ['text/x-diff; charset=utf-8'])<block_end>@contextmanager<def_stmt>config_override self key val<block_start>orig_val=self.app.config[key]<line_sep>self.app.config[key]=val<try_stmt><block_start><yield><block_end><finally_stmt><block_start>self.app.config[key]=orig_val<block_end><block_end><def_stmt>get_fixture_path self filename<block_start><return>os.path.join(FIXTURES_DIR filename)<block_end><def_stmt>_get_fixture_bytes self filename<block_start>filepath=self.get_fixture_path(filename)<with_stmt>open(filepath 'rb')<as>fixture_file<block_start>content=fixture_file.read()<line_sep><return>content<block_end><block_end><def_stmt>assertBytesEqualFixture self text fixture<block_start>self.assertEqual(text self._get_fixture_bytes(fixture))<block_end>@contextmanager<def_stmt>temporary_file self suffix=''<block_start>file_descriptor,filepath=mkstemp(suffix=suffix)<line_sep>file_obj=os.fdopen(file_descriptor 'wb')<try_stmt><block_start><yield>file_obj 
filepath<block_end><finally_stmt><block_start><if_stmt><not>file_obj.closed<block_start>file_obj.close()<block_end>delete_file_quietly(filepath)<block_end><block_end>@contextmanager<def_stmt>temporary_directory self suffix=''<block_start>temp_dir=mkdtemp(suffix=suffix)<try_stmt><block_start><yield>temp_dir<block_end><finally_stmt><block_start>rmtree(temp_dir)<block_end><block_end><def_stmt>make_nested_dir self extant_parent new_child<block_start>new_dir=os.path.join(extant_parent new_child)<line_sep>os.mkdir(new_dir)<line_sep><return>new_dir<block_end>_MINUTE=60<line_sep>@property<def_stmt>_author self<block_start>sig=pygit2.Signature('Alien Celebrity' '<EMAIL>' time=self._time offset=0)<line_sep>self._time<augadd>self._MINUTE<line_sep><return>sig<block_end><def_stmt>_tree self repo name<block_start>blob_oid=repo.create_blob(name)<line_sep>tree_builder=repo.TreeBuilder()<line_sep>tree_builder.insert(name blob_oid pygit2.GIT_FILEMODE_BLOB)<line_sep>tree_oid=tree_builder.write()<line_sep><return>tree_oid<block_end><def_stmt>_commit self repo name parents=() with_branch=<false><block_start>ref_name=<none><line_sep>commit_oid=repo.create_commit(ref_name self._author self._author name self._tree(repo name) list(parents))<if_stmt>with_branch<block_start>repo.create_branch(name repo[commit_oid])<block_end><return>commit_oid<block_end>@property@contextmanager<def_stmt>_empty_repo self<block_start><with_stmt>self.temporary_directory(suffix='.restfulgit')<as>temp_repos_dir<block_start>self.app.config['RESTFULGIT_REPO_BASE_PATH']=temp_repos_dir<line_sep>repo_dir=os.path.join(temp_repos_dir 'example')<line_sep>os.mkdir(repo_dir)<line_sep>repo=pygit2.init_repository(repo_dir <false>)<line_sep><yield>repo<block_end><block_end>@property@contextmanager<def_stmt>_base_repo_and_commit self<block_start>self._time=0<with_stmt>self._empty_repo<as>repo# first commit A <block_start>a=self._commit(repo "A" with_branch=<true>)<line_sep><yield>repo a<block_end><block_end>@contextmanager<def_stmt>_example_repo self b_before_e=<true><block_start>""" Sets up an example repo with the following commits: [A]--B--C--D--[I aka J] \--E--F--G--/ \---[H] [X]s denote commits that are branch tips """<with_stmt>self._base_repo_and_commit<as>pair<block_start>repo,a=pair<def_stmt>make_bcd <block_start>b=self._commit(repo "B" [a])<line_sep>c=self._commit(repo "C" [b])<line_sep>d=self._commit(repo "D" [c])<line_sep><return>b c d<block_end><def_stmt>make_efg <block_start>e=self._commit(repo "E" [a])<line_sep>f=self._commit(repo "F" [e])<line_sep>g=self._commit(repo "G" [f])<line_sep><return>e f g<block_end><if_stmt>b_before_e<block_start>b,c,d=make_bcd()<line_sep>e,f,g=make_efg()<block_end><else_stmt><block_start>e,f,g=make_efg()<line_sep>b,c,d=make_bcd()<block_end># H branch h=self._commit(repo "H" [e] with_branch=<true>)<line_sep># I branch, from D & G i=self._commit(repo "I" [d g] with_branch=<true>)<line_sep><yield>dict(locals())<block_end><block_end><block_end><class_stmt>RepoKeyTestCase(_RestfulGitTestCase)<block_start><def_stmt>test_nonexistent_directory self<block_start>resp=self.client.get('/repos/this-directory-does-not-exist/git/commits/')<line_sep>self.assertJson404(resp)<block_end><def_stmt>test_directory_is_not_git_repo self<block_start>self.app.config['RESTFULGIT_REPO_BASE_PATH']=RESTFULGIT_REPO<line_sep>resp=self.client.get('/repos/test/git/commits/')<line_sep>self.assertJson404(resp)<block_end><def_stmt>test_dot_dot_disallowed 
self<block_start>self.app.config['RESTFULGIT_REPO_BASE_PATH']=TEST_SUBDIR<line_sep>resp=self.client.get('/repos/../git/commits/')<line_sep>self.assertJson404(resp)<block_end><def_stmt>test_list_repos self<block_start>resp=self.client.get('/repos/')<line_sep>self.assert200(resp)<line_sep>result=resp.json<line_sep>self.assertIsInstance(result list)<line_sep>repo_list=[repo['name']<for>repo result]<line_sep>self.assertIn('restfulgit' repo_list)<for_stmt>repo result<block_start><if_stmt>repo['name']<eq>'restfulgit'<block_start>self.assertEqual(repo {"name":'restfulgit' "full_name":'restfulgit' "description":<none> "url":'http://localhost/repos/restfulgit/' "branches_url":"http://localhost/repos/restfulgit/branches{/branch}" "tags_url":"http://localhost/repos/restfulgit/tags/" "blobs_url":"http://localhost/repos/restfulgit/git/blobs{/sha}" "git_tags_url":"http://localhost/repos/restfulgit/git/tags{/sha}" "git_refs_url":"http://localhost/repos/restfulgit/git/refs{/sha}" "trees_url":"http://localhost/repos/restfulgit/git/trees{/sha}" # "compare_url": "http://localhost/repos/restfulgit/compare/{base}...{head}", # "contributors_url": "http://localhost/repos/restfulgit/contributors", # "contents_url": "http://localhost/repos/restfulgit/contents/{+path}", "commits_url":"http://localhost/repos/restfulgit/commits{/sha}" "git_commits_url":"http://localhost/repos/restfulgit/git/commits{/sha}" # "size": N (in what units?) # "updated_at": "some timestamp" })<block_end><block_end><block_end><def_stmt>test_deepdir_repos self<block_start><with_stmt>self.temporary_directory(suffix='.restfulgit')<as>temp_repos_dir<block_start>self.app.config['RESTFULGIT_REPO_BASE_PATH']=temp_repos_dir<line_sep>pygit2.init_repository(os.path.join(temp_repos_dir 'onedir/bare.git') bare=<true>)<line_sep>pygit2.init_repository(os.path.join(temp_repos_dir 'second/more/nested/repo'))<line_sep>resp=self.client.get('/repos/')<line_sep>repo_names={repo['name']<for>repo resp.json}<line_sep>self.assertEquals(repo_names {'onedir/bare.git' 'second/more/nested/repo'})<block_end><block_end><block_end><class_stmt>SHAConverterTestCase(_RestfulGitTestCase)<block_start><def_stmt>test_empty_sha_rejected self<block_start>resp=self.client.get('/repos/restfulgit/git/trees/')<line_sep>self.assertJson404(resp)<block_end><def_stmt>test_too_long_sha_rejected self<block_start>resp=self.client.get('/repos/restfulgit/git/trees/{}0/'.format(TREE_OF_FIRST_COMMIT))<line_sep>self.assertJson404(resp)<block_end><def_stmt>test_malformed_sha_rejected self<block_start>resp=self.client.get('/repos/restfulgit/git/trees/0123456789abcdefghijklmnopqrstuvwxyzABCD/')<line_sep>self.assertJson404(resp)<block_end><def_stmt>test_full_sha_accepted self<block_start>resp=self.client.get('/repos/restfulgit/git/trees/{}/'.format(TREE_OF_FIRST_COMMIT))<line_sep>self.assert200(resp)<block_end><def_stmt>test_partial_sha_accepted self<block_start>resp=self.client.get('/repos/restfulgit/git/trees/{}/'.format(TREE_OF_FIRST_COMMIT[:35]))<line_sep>self.assert200(resp)<block_end><block_end><class_stmt>CommitsTestCase(_RestfulGitTestCase)<block_start>"""Tests the "commits" endpoint."""<def_stmt>test_empty_repo self<block_start><with_stmt>self._empty_repo<block_start>resp=self.client.get('/repos/example/git/commits/')<line_sep>self.assert200(resp)<line_sep>self.assertEqual(resp.json [])<block_end><block_end><def_stmt>test_nonexistent_start_sha 
self<block_start>resp=self.client.get('/repos/restfulgit/git/commits/?start_sha=1234567890abcdef')<line_sep>self.assertJson404(resp)<block_end><def_stmt>test_non_commit_start_sha self<block_start>resp=self.client.get('/repos/restfulgit/git/commits/?start_sha={}'.format(TREE_OF_FIRST_COMMIT))<line_sep>self.assertJson400(resp)<block_end><def_stmt>test_malformed_start_sha self<block_start>resp=self.client.get('/repos/restfulgit/git/commits/?start_sha=thisIsNotHexHash')<line_sep>self.assertJson400(resp)<block_end><def_stmt>test_start_sha_works_basic self<block_start>resp=self.client.get('/repos/restfulgit/git/commits?start_sha={}'.format(FIRST_COMMIT) follow_redirects=<true>)<line_sep>self.assert200(resp)<block_end><def_stmt>test_nonexistent_ref_name self<block_start>resp=self.client.get('/repos/restfulgit/git/commits/?ref_name=doesNotExist')<line_sep>self.assertJson404(resp)<block_end><def_stmt>test_ref_name_works self<block_start>resp=self.client.get('/repos/restfulgit/git/commits?ref_name=master' follow_redirects=<true>)<line_sep>self.assert200(resp)<line_sep># FIXME: should be more thorough <block_end><def_stmt>test_non_integer_limit_rejected self<block_start>resp=self.client.get('/repos/restfulgit/git/commits/?limit=abc123')<line_sep>self.assertJson400(resp)<block_end><def_stmt>test_negative_limit_rejected self<block_start>resp=self.client.get('/repos/restfulgit/git/commits/?limit=-1')<line_sep>self.assertJson400(resp)<block_end><def_stmt>test_limit_works_basic self<block_start>resp=self.client.get('/repos/restfulgit/git/commits?limit=3' follow_redirects=<true>)<line_sep>self.assert200(resp)<block_end><def_stmt>test_limit_and_start_sha_work_full self<block_start>resp=self.client.get('/repos/restfulgit/git/commits?limit=3&start_sha={}'.format(FIFTH_COMMIT) follow_redirects=<true>)<line_sep>self.assert200(resp)<line_sep>self.assertEqual(resp.json [{'author':{'date':'2013-02-27T03:14:13Z' 'email':'<EMAIL>' 'name':'<NAME>'} 'committer':{'date':'2013-02-27T03:14:13Z' 'email':'<EMAIL>' 'name':'<NAME>'} 'message':'add file mode' 'parents':[{'sha':'326d80cd68ec3413fe6eaca99c52c59ca428a0d0' 'url':'http://localhost/repos/restfulgit/git/commits/326d80cd68ec3413fe6eaca99c52c59ca428a0d0/'}] 'sha':'c04112733fe2db2cb2f179fca1a19365cf15fef5' 'tree':{'sha':'3fdeafb3d2f69a4f7d8bb499b81f836aa10b06eb' 'url':'http://localhost/repos/restfulgit/git/trees/3fdeafb3d2f69a4f7d8bb499b81f836aa10b06eb/'} 'url':'http://localhost/repos/restfulgit/git/commits/c04112733fe2db2cb2f179fca1a19365cf15fef5/'} {'author':{'date':'2013-02-26T09:15:35Z' 'email':'<EMAIL>' 'name':'<NAME>'} 'committer':{'date':'2013-02-26T09:15:35Z' 'email':'<EMAIL>' 'name':'<NAME>'} 'message':'Now using a jsonify decorator which returns the correct content-type' 'parents':[{'sha':'1f51b91ac383806df9d322ae67bbad3364f50811' 'url':'http://localhost/repos/restfulgit/git/commits/1f51b91ac383806df9d322ae67bbad3364f50811/'}] 'sha':'326d80cd68ec3413fe6eaca99c52c59ca428a0d0' 'tree':{'sha':'3f4b1282d80af3f8a51000993968897330635e4f' 'url':'http://localhost/repos/restfulgit/git/trees/3f4b1282d80af3f8a51000993968897330635e4f/'} 'url':'http://localhost/repos/restfulgit/git/commits/326d80cd68ec3413fe6eaca99c52c59ca428a0d0/'} {'author':{'date':'2013-02-25T12:35:29Z' 'email':'<EMAIL>' 'name':'<NAME>'} 'committer':{'date':'2013-02-25T12:35:29Z' 'email':'<EMAIL>' 'name':'<NAME>'} 'message':'Support submodule in tree-listings' 'parents':[{'sha':'ff6405b71273b5c2c50d5c33d5cf962af5390542' 
'url':'http://localhost/repos/restfulgit/git/commits/ff6405b71273b5c2c50d5c33d5cf962af5390542/'}] 'sha':'1f51b91ac383806df9d322ae67bbad3364f50811' 'tree':{'sha':'1404e1766a3269f5a73b3d2ec8c81b7ea3ad6e09' 'url':'http://localhost/repos/restfulgit/git/trees/1404e1766a3269f5a73b3d2ec8c81b7ea3ad6e09/'} 'url':'http://localhost/repos/restfulgit/git/commits/1f51b91ac383806df9d322ae67bbad3364f50811/'}])<block_end>#FIXME: test combos <block_end><class_stmt>MergeBaseTestCase(_RestfulGitTestCase)# NOTE: RestfulGit extension <block_start>_INITIAL_COMMIT_JSON={'author':{'date':'2013-02-24T13:25:46Z' 'email':'<EMAIL>' 'name':'<NAME>'} 'committer':{'date':'2013-02-24T13:25:46Z' 'email':'<EMAIL>' 'name':'<NAME>'} 'message':'Initial support for read-only REST api for Git plumbing' 'parents':[] 'sha':'07b9bf1540305153ceeb4519a50b588c35a35464' 'tree':{'sha':'6ca22167185c31554aa6157306e68dfd612d6345' 'url':'http://localhost/repos/restfulgit/git/trees/6ca22167185c31554aa6157306e68dfd612d6345/'} 'url':'http://localhost/repos/restfulgit/git/commits/07b9bf1540305153ceeb4519a50b588c35a35464/'}<def_stmt>_make_another_initial_commit self<block_start>repo=pygit2.Repository(RESTFULGIT_REPO)<line_sep>blob_oid=repo.create_blob("First post!")<line_sep>tree_builder=repo.TreeBuilder()<line_sep>tree_builder.insert("FirstPost.txt" blob_oid pygit2.GIT_FILEMODE_BLOB)<line_sep>tree_oid=tree_builder.write()<line_sep>author=pygit2.Signature('Alien Celebrity' '<EMAIL>' time=int(time_now()) offset=0)<line_sep>ref_name=<none><line_sep>parents=[]<line_sep>evil_twin_genesis_commit_oid=repo.create_commit(ref_name author author "Other initial commit" tree_oid parents)<line_sep><return>evil_twin_genesis_commit_oid<block_end><def_stmt>test_nonexistent_sha_404s self<block_start>resp=self.client.get('/repos/restfulgit/git/commits/{0}/merge-base/{0}/'.format(IMPROBABLE_SHA))<line_sep>self.assertJson404(resp)<block_end><def_stmt>test_unrelateds_is_200_but_null self<block_start>other_unrelated_initial_commit_oid=self._make_another_initial_commit()<line_sep>resp=self.client.get('/repos/restfulgit/git/commits/{}/merge-base/{}/'.format(FIRST_COMMIT str(other_unrelated_initial_commit_oid)))<line_sep>self.assert200(resp)<line_sep>self.assertEqual(resp.json <none>)<block_end><def_stmt>test_left self<block_start>resp=self.client.get('/repos/restfulgit/git/commits/{}/merge-base/{}/'.format(FIRST_COMMIT FIFTH_COMMIT))<line_sep>self.assert200(resp)<line_sep>self.assertEqual(resp.json self._INITIAL_COMMIT_JSON)<block_end><def_stmt>test_right self<block_start>resp=self.client.get('/repos/restfulgit/git/commits/{}/merge-base/{}/'.format(FIFTH_COMMIT FIRST_COMMIT))<line_sep>self.assert200(resp)<line_sep>self.assertEqual(resp.json self._INITIAL_COMMIT_JSON)<block_end><def_stmt>test_branch_siblings self<block_start><with_stmt>self._example_repo()<as>commits<block_start>d=str(commits['d'])<line_sep>g=str(commits['g'])<line_sep>resp=self.client.get('/repos/example/git/commits/{}/merge-base/{}/'.format(d g))<block_end>self.assert200(resp)<line_sep>self.assertEqual(resp.json {'author':{'date':'1970-01-01T00:00:00Z' 'email':'<EMAIL>' 'name':'<NAME>'} 'committer':{'date':'1970-01-01T00:01:00Z' 'email':'<EMAIL>' 'name':'<NAME>'} 'message':'A' 'parents':[] 'sha':'c655dffe0fed2a78dc5f38c1bc8e5628e2605017' 'tree':{'sha':'617601c79811cbbae338512798318b4e5b70c9ac' 'url':'http://localhost/repos/example/git/trees/617601c79811cbbae338512798318b4e5b70c9ac/'} 
'url':'http://localhost/repos/example/git/commits/c655dffe0fed2a78dc5f38c1bc8e5628e2605017/'})<block_end><def_stmt>test_same_commit_twice_results_in_same self<block_start>resp=self.client.get('/repos/restfulgit/git/commits/{0}/merge-base/{0}/'.format(FIFTH_COMMIT))<line_sep>self.assert200(resp)<line_sep>self.assertEqual(resp.json {'author':{'date':'2013-02-27T03:14:13Z' 'email':'<EMAIL>' 'name':'<NAME>'} 'committer':{'date':'2013-02-27T03:14:13Z' 'email':'<EMAIL>' 'name':'<NAME>'} 'message':'add file mode' 'parents':[{'sha':'326d80cd68ec3413fe6eaca99c52c59ca428a0d0' 'url':'http://localhost/repos/restfulgit/git/commits/326d80cd68ec3413fe6eaca99c52c59ca428a0d0/'}] 'sha':'c04112733fe2db2cb2f179fca1a19365cf15fef5' 'tree':{'sha':'3fdeafb3d2f69a4f7d8bb499b81f836aa10b06eb' 'url':'http://localhost/repos/restfulgit/git/trees/3fdeafb3d2f69a4f7d8bb499b81f836aa10b06eb/'} 'url':'http://localhost/repos/restfulgit/git/commits/c04112733fe2db2cb2f179fca1a19365cf15fef5/'})<block_end><block_end><class_stmt>SimpleSHATestCase(_RestfulGitTestCase)<block_start>_INITIAL_COMMIT_TREE_JSON={"sha":"6ca22167185c31554aa6157306e68dfd612d6345" "url":"http://localhost/repos/restfulgit/git/trees/6ca22167185c31554aa6157306e68dfd612d6345/" "tree":[{"mode":"100644" "type":"blob" "sha":"ae9d90706c632c26023ce599ac96cb152673da7c" "path":"api.py" "size":5543 "url":"http://localhost/repos/restfulgit/git/blobs/ae9d90706c632c26023ce599ac96cb152673da7c/"}]}<def_stmt>test_get_commit_with_non_commit_sha self<block_start>resp=self.client.get('/repos/restfulgit/git/commits/{}/'.format(BLOB_FROM_FIRST_COMMIT))<line_sep>self.assertJson404(resp)<block_end><def_stmt>test_get_tree_with_blob_sha self<block_start>resp=self.client.get('/repos/restfulgit/git/trees/{}/'.format(BLOB_FROM_FIRST_COMMIT))<line_sep>self.assertJson404(resp)<block_end><def_stmt>test_get_tree_with_commit_sha self<block_start>resp=self.client.get('/repos/restfulgit/git/trees/{}/'.format(FIRST_COMMIT))<line_sep>self.assert200(resp)<line_sep>self.assertEqual(resp.json self._INITIAL_COMMIT_TREE_JSON)<block_end><def_stmt>test_get_tree_with_tag_sha self<block_start>resp=self.client.get('/repos/restfulgit/git/trees/{}/'.format(TAG_FOR_FIRST_COMMIT))<line_sep>self.assert200(resp)<line_sep>self.assertEqual(resp.json self._INITIAL_COMMIT_TREE_JSON)<block_end><def_stmt>test_get_blob_with_non_blob_sha self<block_start>resp=self.client.get('/repos/restfulgit/git/blobs/{}/'.format(FIRST_COMMIT))<line_sep>self.assertJson404(resp)<block_end><def_stmt>test_get_tag_with_non_tag_sha self<block_start>resp=self.client.get('/repos/restfulgit/git/tags/{}/'.format(BLOB_FROM_FIRST_COMMIT))<line_sep>self.assertJson404(resp)<block_end><def_stmt>test_get_commit_with_nonexistent_sha self<block_start>resp=self.client.get('/repos/restfulgit/git/commits/{}/'.format(IMPROBABLE_SHA))<line_sep>self.assertJson404(resp)<block_end><def_stmt>test_get_tree_with_nonexistent_sha self<block_start>resp=self.client.get('/repos/restfulgit/git/trees/{}/'.format(IMPROBABLE_SHA))<line_sep>self.assertJson404(resp)<block_end><def_stmt>test_get_blob_with_nonexistent_sha self<block_start>resp=self.client.get('/repos/restfulgit/git/blobs/{}/'.format(IMPROBABLE_SHA))<line_sep>self.assertJson404(resp)<block_end><def_stmt>test_get_tag_with_nonexistent_sha self<block_start>resp=self.client.get('/repos/restfulgit/git/tags/{}/'.format(IMPROBABLE_SHA))<line_sep>self.assertJson404(resp)<block_end><def_stmt>test_get_git_commit_works self# From 
https://api.github.com/repos/hulu/restfulgit/git/commits/07b9bf1540305153ceeb4519a50b588c35a35464 with necessary adjustments <block_start>resp=self.client.get('/repos/restfulgit/git/commits/{}/'.format(FIRST_COMMIT))<line_sep>self.assert200(resp)<line_sep>self.assertEqual(resp.json {"sha":"07b9bf1540305153ceeb4519a50b588c35a35464" "url":"http://localhost/repos/restfulgit/git/commits/07b9bf1540305153ceeb4519a50b588c35a35464/" "author":{"name":"<NAME>" "email":"<EMAIL>" "date":"2013-02-24T13:25:46Z"} "committer":{"name":"<NAME>" "email":"<EMAIL>" "date":"2013-02-24T13:25:46Z"} "tree":{"sha":"6ca22167185c31554aa6157306e68dfd612d6345" "url":"http://localhost/repos/restfulgit/git/trees/6ca22167185c31554aa6157306e68dfd612d6345/"} "message":"Initial support for read-only REST api for Git plumbing" "parents":[]})<block_end><def_stmt>test_get_empty_git_commit_works self# From https://api.github.com/repos/hulu/restfulgit/git/commits/c8ec343d7260ba9577045a05bccd931867644f28 with necessary adjustments <block_start>resp=self.client.get('/repos/restfulgit/git/commits/{}/'.format(EMPTY_COMMIT))<line_sep>self.assert200(resp)<line_sep>self.assertEqual(resp.json {"sha":"c8ec343d7260ba9577045a05bccd931867644f28" "url":"http://localhost/repos/restfulgit/git/commits/c8ec343d7260ba9577045a05bccd931867644f28/" "author":{"name":"<NAME>" "email":"<EMAIL>" "date":"2015-01-07T20:15:08Z"} "committer":{"name":"<NAME>" "email":"<EMAIL>" "date":"2015-01-07T20:15:08Z"} "tree":{"sha":"9268fd675df04e7c09bceddaf9dfc38fb78787d2" "url":"http://localhost/repos/restfulgit/git/trees/9268fd675df04e7c09bceddaf9dfc38fb78787d2/"} "message":"Merge pull request #96 from hulu/empty-commit\n\nAdd deliberately empty commit for testing purposes" "parents":[{"sha":"4fb38539d25983c9b9b99588901a1025658d05d4" "url":"http://localhost/repos/restfulgit/git/commits/4fb38539d25983c9b9b99588901a1025658d05d4/" } {"sha":"6f4fa9af844f69137bfee3c247feec0fb03a3913" "url":"http://localhost/repos/restfulgit/git/commits/6f4fa9af844f69137bfee3c247feec0fb03a3913/" }]})<block_end><def_stmt>test_get_tree_works self# From https://api.github.com/repos/hulu/restfulgit/git/trees/6ca22167185c31554aa6157306e68dfd612d6345 with necessary adjustments <block_start>resp=self.client.get('/repos/restfulgit/git/trees/{}/'.format(TREE_OF_FIRST_COMMIT))<line_sep>self.assert200(resp)<line_sep>self.assertEqual(resp.json self._INITIAL_COMMIT_TREE_JSON)<block_end><def_stmt>test_get_nested_tree_works self# From https://api.github.com/repos/hulu/restfulgit/git/trees/fc0fddc986c93f8444d754c7ec93c8b87f3d7c7e with necessary adjustments <block_start>resp=self.client.get('/repos/restfulgit/git/trees/fc0fddc986c93f8444d754c7ec93c8b87f3d7c7e/')<line_sep>self.assert200(resp)<line_sep>self.assertEqual(resp.json {"sha":"fc0fddc986c93f8444d754c7ec93c8b87f3d7c7e" "url":"http://localhost/repos/restfulgit/git/trees/fc0fddc986c93f8444d754c7ec93c8b87f3d7c7e/" "tree":[{"mode":"100644" "type":"blob" "sha":"b5d2ce6a7246f37aaa41e7ce3403b5acd6369914" "path":".coveragerc" "size":65 "url":"http://localhost/repos/restfulgit/git/blobs/b5d2ce6a7246f37aaa41e7ce3403b5acd6369914/"} {"mode":"100644" "type":"blob" "sha":"cae6643e19e7a8198a26a449f556db6d1909aec8" "path":".gitignore" "size":22 "url":"http://localhost/repos/restfulgit/git/blobs/cae6643e19e7a8198a26a449f556db6d1909aec8/"} {"mode":"100644" "type":"blob" "sha":"f93712aaf5fcc4c0d44dc472d86abad40fdb0ec3" "path":".pep8" "size":19 "url":"http://localhost/repos/restfulgit/git/blobs/f93712aaf5fcc4c0d44dc472d86abad40fdb0ec3/"} {"mode":"100644" "type":"blob" 
"sha":"14e6bf5b229127a5495d9c176f50e3ef1922f0f2" "path":".travis.yml" "size":985 "url":"http://localhost/repos/restfulgit/git/blobs/14e6bf5b229127a5495d9c176f50e3ef1922f0f2/"} {"mode":"100644" "type":"blob" "sha":"bb27aa0a502f73c19837b96d1bd514ba95e0d404" "path":"LICENSE.md" "size":1056 "url":"http://localhost/repos/restfulgit/git/blobs/bb27aa0a502f73c19837b96d1bd514ba95e0d404/"} {"mode":"100644" "type":"blob" "sha":"342f0ffead9243f5a3514505b83b918e61247ae2" "path":"README.md" "size":5655 "url":"http://localhost/repos/restfulgit/git/blobs/342f0ffead9243f5a3514505b83b918e61247ae2/"} {"mode":"100644" "type":"blob" "sha":"20ff5b895391daa7335cc55be7e3a4da601982da" "path":"config.conf" "size":398 "url":"http://localhost/repos/restfulgit/git/blobs/20ff5b895391daa7335cc55be7e3a4da601982da/"} {"mode":"100644" "type":"blob" "sha":"3e4025298468787af1123191bdddfb72df19061a" "path":"pylint.rc" "size":8529 "url":"http://localhost/repos/restfulgit/git/blobs/3e4025298468787af1123191bdddfb72df19061a/"} {"mode":"100644" "type":"blob" "sha":"77b71e4967983b090aef88ba358724ef4703b01b" "path":"requirements.txt" "size":29 "url":"http://localhost/repos/restfulgit/git/blobs/77b71e4967983b090aef88ba358724ef4703b01b/"} {"mode":"040000" "type":"tree" "sha":"dd8a3571820936595e553c9ba9f776a5c77b1a53" "path":"restfulgit" "url":"http://localhost/repos/restfulgit/git/trees/dd8a3571820936595e553c9ba9f776a5c77b1a53/"} {"mode":"040000" "type":"tree" "sha":"bdcb3627ba5b29da20f01d9c4571b0ebc6a8b2bd" "path":"tests" "url":"http://localhost/repos/restfulgit/git/trees/bdcb3627ba5b29da20f01d9c4571b0ebc6a8b2bd/"}]})<block_end><def_stmt>test_get_recursive_tree_works self# From https://api.github.com/repos/hulu/restfulgit/git/trees/fc36ceb418b0b9e945ffd3706dd8544dd988500a?recursive=1 with necessary adjustments <block_start>resp=self.client.get('/repos/restfulgit/git/trees/fc36ceb418b0b9e945ffd3706dd8544dd988500a/?recursive=1')<line_sep>self.assert200(resp)<line_sep>self.assertEqual(resp.json {"sha":"fc36ceb418b0b9e945ffd3706dd8544dd988500a" "url":"http://localhost/repos/restfulgit/git/trees/fc36ceb418b0b9e945ffd3706dd8544dd988500a/" "tree":[{"mode":"100644" "type":"blob" "sha":"b5d2ce6a7246f37aaa41e7ce3403b5acd6369914" "path":".coveragerc" "size":65 "url":"http://localhost/repos/restfulgit/git/blobs/b5d2ce6a7246f37aaa41e7ce3403b5acd6369914/"} {"mode":"100644" "type":"blob" "sha":"cae6643e19e7a8198a26a449f556db6d1909aec8" "path":".gitignore" "size":22 "url":"http://localhost/repos/restfulgit/git/blobs/cae6643e19e7a8198a26a449f556db6d1909aec8/"} {"mode":"100644" "type":"blob" "sha":"f93712aaf5fcc4c0d44dc472d86abad40fdb0ec3" "path":".pep8" "size":19 "url":"http://localhost/repos/restfulgit/git/blobs/f93712aaf5fcc4c0d44dc472d86abad40fdb0ec3/"} {"mode":"100644" "type":"blob" "sha":"b3e1e0f2b569fef46e7413cadb6778504c19c87f" "path":".travis.yml" "size":1008 "url":"http://localhost/repos/restfulgit/git/blobs/b3e1e0f2b569fef46e7413cadb6778504c19c87f/"} {"mode":"100644" "type":"blob" "sha":"bb27aa0a502f73c19837b96d1bd514ba95e0d404" "path":"LICENSE.md" "size":1056 "url":"http://localhost/repos/restfulgit/git/blobs/bb27aa0a502f73c19837b96d1bd514ba95e0d404/"} {"mode":"100644" "type":"blob" "sha":"ee655c4baa251fad0a67dd74b2c390b4a4f9ac53" "path":"README.md" "size":7855 "url":"http://localhost/repos/restfulgit/git/blobs/ee655c4baa251fad0a67dd74b2c390b4a4f9ac53/"} {"mode":"100644" "type":"blob" "sha":"7186d8fab5c4bb492cbcfe1383b2270651e13c2e" "path":"example_config.py" "size":489 
"url":"http://localhost/repos/restfulgit/git/blobs/7186d8fab5c4bb492cbcfe1383b2270651e13c2e/"} {"mode":"100644" "type":"blob" "sha":"abb1a23bc0fad8f7fe1dc5996a8e4c7c4cb9903e" "path":"pylint.rc" "size":8517 "url":"http://localhost/repos/restfulgit/git/blobs/abb1a23bc0fad8f7fe1dc5996a8e4c7c4cb9903e/"} {"mode":"100644" "type":"blob" "sha":"77b71e4967983b090aef88ba358724ef4703b01b" "path":"requirements.txt" "size":29 "url":"http://localhost/repos/restfulgit/git/blobs/77b71e4967983b090aef88ba358724ef4703b01b/"} {"mode":"040000" "type":"tree" "sha":"c0dcf8f58a3c5bf42f07e880d5e442ef124c9370" "path":"restfulgit" "url":"http://localhost/repos/restfulgit/git/trees/c0dcf8f58a3c5bf42f07e880d5e442ef124c9370/"} {"mode":"100644" "type":"blob" "sha":"7fe178c5687eae1e2c04d9d21b6a429c93a28e6a" "path":"restfulgit/__init__.py" "size":15986 "url":"http://localhost/repos/restfulgit/git/blobs/7fe178c5687eae1e2c04d9d21b6a429c93a28e6a/"} {"mode":"100644" "type":"blob" "sha":"e067d7f361bd3b0f227ba1914c227ebf9539f59d" "path":"restfulgit/__main__.py" "size":110 "url":"http://localhost/repos/restfulgit/git/blobs/e067d7f361bd3b0f227ba1914c227ebf9539f59d/"} {"mode":"040000" "type":"tree" "sha":"803c8592dd96cb0a6fc041ebb6af71fbf1f7551c" "path":"tests" "url":"http://localhost/repos/restfulgit/git/trees/803c8592dd96cb0a6fc041ebb6af71fbf1f7551c/"} {"mode":"100644" "type":"blob" "sha":"2d500fea50b6c1a38d972c1a22b5cb5b5673167a" "path":"tests/test_restfulgit.py" "size":26725 "url":"http://localhost/repos/restfulgit/git/blobs/2d500fea50b6c1a38d972c1a22b5cb5b5673167a/"}]})<block_end><def_stmt>test_get_blob_works self# From https://api.github.com/repos/hulu/restfulgit/git/blobs/ae9d90706c632c26023ce599ac96cb152673da7c with necessary adjustments <block_start>resp=self.client.get('/repos/restfulgit/git/blobs/{}/'.format(BLOB_FROM_FIRST_COMMIT))<line_sep>self.assert200(resp)<line_sep>json=resp.json<line_sep>self.assertIsInstance(json dict)<line_sep>self.assertIn("content" json)<line_sep>self.assertEqual(sha512(json["content"].encode()).hexdigest() '1c846bb4d44c08073c487316a7dc02d97d825aecf50546caf9bf10277c01d17e19860d5f86de877268dd969bd081c7595991c325e0ab492374b956e3a6c9967f')<del_stmt>json["content"]<line_sep>self.assertEqual(json {"url":"http://localhost/repos/restfulgit/git/blobs/ae9d90706c632c26023ce599ac96cb152673da7c/" "sha":"ae9d90706c632c26023ce599ac96cb152673da7c" "encoding":"utf-8" # NOTE: RestfulGit extension "size":5543})<block_end><def_stmt>test_get_binary_blob_works self# From https://api.github.com/repos/hulu/restfulgit/git/blobs/79fbf74e9d9f752c901c956e958845a308c44283 with necessary adjustments <block_start>resp=self.client.get('/repos/restfulgit/git/blobs/79fbf74e9d9f752c901c956e958845a308c44283/')<line_sep>self.assert200(resp)<line_sep>json=resp.json<line_sep>self.assertIsInstance(json dict)<line_sep>self.assertIn('content' json)<line_sep>content=json['content']<del_stmt>json['content']<line_sep>self.assertBytesEqualFixture(b64decode(content) 'example.png')<line_sep>self.assertEqual(json {"sha":"79fbf74e9d9f752c901c956e958845a308c44283" "size":1185 "url":"http://localhost/repos/restfulgit/git/blobs/79fbf74e9d9f752c901c956e958845a308c44283/" "encoding":"base64"})<block_end><def_stmt>test_get_tag_works self# From https://api.github.com/repos/hulu/restfulgit/git/tags/1dffc031c9beda43ff94c526cbc00a30d231c079 with necessary adjustments <block_start>resp=self.client.get('/repos/restfulgit/git/tags/{}/'.format(TAG_FOR_FIRST_COMMIT))<line_sep>self.assert200(resp)<line_sep>self.assertEqual(resp.json 
{"sha":"1dffc031c9beda43ff94c526cbc00a30d231c079" "url":"http://localhost/repos/restfulgit/git/tags/1dffc031c9beda43ff94c526cbc00a30d231c079/" "tagger":{"name":"<NAME>" "email":"<EMAIL>" "date":"2013-09-28T01:14:09Z"} "object":{"sha":"07b9bf1540305153ceeb4519a50b588c35a35464" "type":"commit" "url":"http://localhost/repos/restfulgit/git/commits/07b9bf1540305153ceeb4519a50b588c35a35464/"} "tag":"initial" "message":"initial commit\n"})<block_end><def_stmt>test_get_repos_tag_works self# NOTE: RestfulGit extension <block_start>resp=self.client.get('/repos/restfulgit/tags/initial/')<line_sep>self.assert200(resp)<line_sep>self.assertEqual(resp.json {'commit':{'author':{'date':'2013-02-24T13:25:46Z' 'email':'<EMAIL>' 'name':'<NAME>'} 'commit':{'author':{'date':'2013-02-24T13:25:46Z' 'email':'<EMAIL>' 'name':'<NAME>'} 'committer':{'date':'2013-02-24T13:25:46Z' 'email':'<EMAIL>' 'name':'<NAME>'} 'message':'Initial support for read-only REST api for Git plumbing' 'parents':[] 'sha':'07b9bf1540305153ceeb4519a50b588c35a35464' 'tree':{'sha':'6ca22167185c31554aa6157306e68dfd612d6345' 'url':'http://localhost/repos/restfulgit/git/trees/6ca22167185c31554aa6157306e68dfd612d6345/'} 'url':'http://localhost/repos/restfulgit/git/commits/07b9bf1540305153ceeb4519a50b588c35a35464/'} 'committer':{'date':'2013-02-24T13:25:46Z' 'email':'<EMAIL>' 'name':'<NAME>'} 'parents':[] 'sha':'07b9bf1540305153ceeb4519a50b588c35a35464' 'url':'http://localhost/repos/restfulgit/commits/07b9bf1540305153ceeb4519a50b588c35a35464/'} 'name':'initial' 'tag':{'message':'initial commit\n' 'object':{'sha':'07b9bf1540305153ceeb4519a50b588c35a35464' 'type':'commit' 'url':'http://localhost/repos/restfulgit/git/commits/07b9bf1540305153ceeb4519a50b588c35a35464/'} 'sha':'1dffc031c9beda43ff94c526cbc00a30d231c079' 'tag':'initial' 'tagger':{'date':'2013-09-28T01:14:09Z' 'email':'<EMAIL>' 'name':'<NAME>'} 'url':'http://localhost/repos/restfulgit/git/tags/1dffc031c9beda43ff94c526cbc00a30d231c079/'} 'url':'http://localhost/repos/restfulgit/tags/initial/'})<block_end><def_stmt>test_get_repos_tag_with_nonexistent_tag self# NOTE: RestfulGit extension <block_start>resp=self.client.get('/repos/restfulgit/tags/this-tag-does-not-exist/')<line_sep>self.assertJson404(resp)<block_end><def_stmt>test_get_repo_tags_works self# From https://api.github.com/repos/hulu/restfulgit/tags with necessary adjustments <block_start>reference_tag={"name":"initial" "commit":{"sha":"07b9bf1540305153ceeb4519a50b588c35a35464" "url":"http://localhost/repos/restfulgit/commits/07b9bf1540305153ceeb4519a50b588c35a35464/"} "url":"http://localhost/repos/restfulgit/tags/initial/" # NOTE: RestfulGit extension }<line_sep>resp=self.client.get('/repos/restfulgit/tags/')<line_sep>self.assert200(resp)<line_sep>json=resp.json<line_sep>self.assertIsInstance(json list)<for_stmt>tag json<block_start>self.assertIsInstance(tag dict)<line_sep>self.assertIn('name' tag)<block_end>initial_tags=[tag<for>tag json<if>tag['name']<eq>'initial']<line_sep>self.assertEqual(len(initial_tags) 1)<line_sep>initial_tag=initial_tags[0]<line_sep>self.assertEqual(reference_tag initial_tag)<block_end><def_stmt>test_get_repo_tags_with_nonexistent_repo self<block_start>resp=self.client.get('/repos/this-repo-does-not-exist/tags/')<line_sep>self.assertJson404(resp)<block_end><def_stmt>test_get_repo_tags_with_empty_repo self<block_start><with_stmt>self._empty_repo<block_start>resp=self.client.get('/repos/example/tags/')<line_sep>self.assert200(resp)<line_sep>self.assertEqual(resp.json 
[])<block_end><block_end><def_stmt>test_get_repo_branches_works self# From https://api.github.com/repos/hulu/restfulgit/branches with necessary adjustments <block_start>reference_branch={"name":"ambiguous" "commit":{"sha":"1f51b91ac383806df9d322ae67bbad3364f50811" "url":"http://localhost/repos/restfulgit/commits/1f51b91ac383806df9d322ae67bbad3364f50811/"}}<line_sep>resp=self.client.get('/repos/restfulgit/branches/')<line_sep>self.assert200(resp)<line_sep>json=resp.json<line_sep>self.assertIsInstance(json list)<for_stmt>branch json<block_start>self.assertIsInstance(branch dict)<line_sep>self.assertIn('name' branch)<block_end>ambiguous_branches=[branch<for>branch json<if>branch['name']<eq>'ambiguous']<line_sep>self.assertEqual(len(ambiguous_branches) 1)<line_sep>ambiguous_branch=ambiguous_branches[0]<line_sep>self.assertEqual(reference_branch ambiguous_branch)<block_end><def_stmt>test_get_repo_branches_with_nonexistent_repo self<block_start>resp=self.client.get('/repos/this-repo-does-not-exist/branches/')<line_sep>self.assertJson404(resp)<block_end><def_stmt>test_get_repo_branches_with_empty_repo self<block_start><with_stmt>self._empty_repo<block_start>resp=self.client.get('/repos/example/branches/')<line_sep>self.assert200(resp)<line_sep>self.assertEqual(resp.json [])<block_end><block_end><def_stmt>test_get_repo_branch_works self# From https://api.github.com/repos/hulu/restfulgit/branches/ambiguous with necessary adjustments <block_start>reference={"name":"ambiguous" "commit":{"sha":"1f51b91ac383806df9d322ae67bbad3364f50811" "commit":{"author":{"name":"<NAME>" "email":"<EMAIL>" "date":"2013-02-25T12:35:29Z"} "committer":{"name":"<NAME>" "email":"<EMAIL>" "date":"2013-02-25T12:35:29Z"} "message":"Support submodule in tree-listings" "tree":{"sha":"1404e1766a3269f5a73b3d2ec8c81b7ea3ad6e09" "url":"http://localhost/repos/restfulgit/git/trees/1404e1766a3269f5a73b3d2ec8c81b7ea3ad6e09/"} "url":"http://localhost/repos/restfulgit/git/commits/1f51b91ac383806df9d322ae67bbad3364f50811/" "sha":"1f51b91ac383806df9d322ae67bbad3364f50811" # NOTE: RestfulGit extension "parents":[# NOTE: RestfulGit extension {"sha":"ff6405b71273b5c2c50d5c33d5cf962af5390542" "url":"http://localhost/repos/restfulgit/commits/ff6405b71273b5c2c50d5c33d5cf962af5390542/" }]} "url":"http://localhost/repos/restfulgit/commits/1f51b91ac383806df9d322ae67bbad3364f50811/" "author":{"name":"<NAME>" "email":"<EMAIL>" "date":"2013-02-25T12:35:29Z"} "committer":{"name":"<NAME>" "email":"<EMAIL>" "date":"2013-02-25T12:35:29Z"} "parents":[{"sha":"ff6405b71273b5c2c50d5c33d5cf962af5390542" "url":"http://localhost/repos/restfulgit/commits/ff6405b71273b5c2c50d5c33d5cf962af5390542/" }]} "_links":{"self":"http://localhost/repos/restfulgit/branches/ambiguous/" } 'url':'http://localhost/repos/restfulgit/branches/ambiguous/'}<line_sep>resp=self.client.get('/repos/restfulgit/branches/ambiguous/')<line_sep>self.assert200(resp)<line_sep>json=resp.json<line_sep>self.assertEqual(reference json)<block_end><def_stmt>test_get_repo_branch_with_nonexistent_branch self<block_start>resp=self.client.get('/repos/restfulgit/branches/this-branch-does-not-exist/')<line_sep>self.assertJson404(resp)<block_end><def_stmt>test_get_merged_branches_inclusion self<block_start>resp=self.client.get('/repos/restfulgit/branches/master/merged/')<line_sep>self.assert200(resp)<line_sep>json=resp.json<line_sep>self.assertIsInstance(json list)<for_stmt>item json<block_start>self.assertIsInstance(item dict)<line_sep>self.assertIn('name' item)<block_end>branch_names={item['name']<for>item 
json}<line_sep>self.assertIn('ambiguous' branch_names)<block_end><def_stmt>test_get_merged_branches_format self<block_start>resp=self.client.get('/repos/restfulgit/branches/master/merged/')<line_sep>self.assert200(resp)<line_sep>json=resp.json<line_sep>self.assertIsInstance(json list)<for_stmt>item json<block_start>self.assertIsInstance(item dict)<line_sep>self.assertIn('name' item)<block_end>name_to_branch={item['name']:item<for>item json}<line_sep>reference={"name":"ambiguous" "commit":{"sha":"1f51b91ac383806df9d322ae67bbad3364f50811" "url":"http://localhost/repos/restfulgit/commits/1f51b91ac383806df9d322ae67bbad3364f50811/" }}<line_sep>self.assertEqual(reference name_to_branch.get('ambiguous'))<block_end><def_stmt>test_get_merged_branches_exclusion self<block_start>resp=self.client.get('/repos/restfulgit/branches/ambiguous/merged/')<line_sep>self.assert200(resp)<line_sep>branches={branch['name']<for>branch resp.json}<line_sep>self.assertNotIn('master' branches)<block_end><def_stmt>test_get_merged_branches_with_nonexistent_branch self<block_start>resp=self.client.get('/repos/restfulgit/branches/this-branch-does-not-exist/merged/')<line_sep>self.assertJson404(resp)<block_end><def_stmt>test_get_repo_commit_works self# From https://api.github.com/repos/hulu/restfulgit/commits/d408fc2428bc6444cabd7f7b46edbe70b6992b16 with necessary adjustments <block_start>reference={"sha":"d408fc2428bc6444cabd7f7b46edbe70b6992b16" "commit":{"author":{"name":"<NAME>" "email":"<EMAIL>" "date":"2013-04-21T11:20:14Z"} "committer":{"name":"<NAME>" "email":"<EMAIL>" "date":"2013-04-21T11:20:14Z"} "message":"Added requirements.txt + more README" "tree":{"sha":"e49e456564f8d852f430c1d0028a9d6560e3f3e9" "url":"http://localhost/repos/restfulgit/git/trees/e49e456564f8d852f430c1d0028a9d6560e3f3e9/"} "url":"http://localhost/repos/restfulgit/git/commits/d408fc2428bc6444cabd7f7b46edbe70b6992b16/" "sha":"d408fc2428bc6444cabd7f7b46edbe70b6992b16" # NOTE: RestfulGit extension "parents":[# NOTE: RestfulGit extension {"sha":"c92de24597eff312bbdd5a70059665a2e3000590" "url":"http://localhost/repos/restfulgit/commits/c92de24597eff312bbdd5a70059665a2e3000590/" }] } "url":"http://localhost/repos/restfulgit/commits/d408fc2428bc6444cabd7f7b46edbe70b6992b16/" "author":{"name":"<NAME>" "email":"<EMAIL>" "date":"2013-04-21T11:20:14Z"} "committer":{"name":"<NAME>" "email":"<EMAIL>" "date":"2013-04-21T11:20:14Z"} "parents":[{"sha":"c92de24597eff312bbdd5a70059665a2e3000590" "url":"http://localhost/repos/restfulgit/commits/c92de24597eff312bbdd5a70059665a2e3000590/" }] "stats":{"total":10 "additions":10 "deletions":0} "files":[{"sha":"c65dc8c22cc3dc5d37a1c39e5a9f336f1dd6fe34" "filename":"README.md" "old_filename":"README.md" # NOTE: RestfulGit extension "status":"modified" "additions":5 "deletions":0 "changes":5 "raw_url":"http://localhost/repos/restfulgit/raw/d408fc2428bc6444cabd7f7b46edbe70b6992b16/README.md" "contents_url":"http://localhost/repos/restfulgit/contents/README.md?ref=d408fc2428bc6444cabd7f7b46edbe70b6992b16" "patch":"@@ -4,6 +4,11 @@ REST API for Git data\n Provides a read-only restful interface for accessing data from Git repositories (local to the server).\n Modeled off the GitHub Git DB API for compatibility (see http://developer.github.com/v3/git/).\n \n+Requires: flask, pygit2 (>= 0.18.1), libgit2 (>= 0.18).\n+Must modify: REPO_BASE (root path for repositories, note only repositories immediately under this path are currently supported).\n+\n+api.py is a valid WSGI application.\n+\n --\n \n All of these routes return JSON 
unless otherwise specified."} {"sha":"da23f6c1cf961369faa90c8c4f4c242a09205ce6" "filename":"requirements.txt" "old_filename":"requirements.txt" # NOTE: RestfulGit extension "status":"added" "additions":5 "deletions":0 "changes":5 "raw_url":"http://localhost/repos/restfulgit/raw/d408fc2428bc6444cabd7f7b46edbe70b6992b16/requirements.txt" "contents_url":"http://localhost/repos/restfulgit/contents/requirements.txt?ref=d408fc2428bc6444cabd7f7b46edbe70b6992b16" "patch":"@@ -0,0 +1,5 @@\n+Flask==0.9\n+Jinja2==2.6\n+Werkzeug==0.8.3\n+pygit2==0.18.1\n+wsgiref==0.1.2"}]}<line_sep>resp=self.client.get('/repos/restfulgit/commits/d408fc2428bc6444cabd7f7b46edbe70b6992b16/')<line_sep>self.assert200(resp)<line_sep>self.assertEqual(reference resp.json)<block_end><def_stmt>test_get_empty_repo_commit self# From https://api.github.com/repos/hulu/restfulgit/commits/c8ec343d7260ba9577045a05bccd931867644f28 with necessary adjustments <block_start>reference={"sha":"c8ec343d7260ba9577045a05bccd931867644f28" "commit":{"author":{"name":"<NAME>" "email":"<EMAIL>" "date":"2015-01-07T20:15:08Z"} "committer":{"name":"<NAME>" "email":"<EMAIL>" "date":"2015-01-07T20:15:08Z"} "message":"Merge pull request #96 from hulu/empty-commit\n\nAdd deliberately empty commit for testing purposes" "parents":[{"sha":"4fb38539d25983c9b9b99588901a1025658d05d4" "url":"http://localhost/repos/restfulgit/commits/4fb38539d25983c9b9b99588901a1025658d05d4/"} {"sha":"6f4fa9af844f69137bfee3c247feec0fb03a3913" "url":"http://localhost/repos/restfulgit/commits/6f4fa9af844f69137bfee3c247feec0fb03a3913/"}] "sha":"c8ec343d7260ba9577045a05bccd931867644f28" "tree":{"sha":"9268fd675df04e7c09bceddaf9dfc38fb78787d2" "url":"http://localhost/repos/restfulgit/git/trees/9268fd675df04e7c09bceddaf9dfc38fb78787d2/"} "url":"http://localhost/repos/restfulgit/git/commits/c8ec343d7260ba9577045a05bccd931867644f28/" } "url":"http://localhost/repos/restfulgit/commits/c8ec343d7260ba9577045a05bccd931867644f28/" "author":{"name":"<NAME>" "email":"<EMAIL>" "date":"2015-01-07T20:15:08Z"} "committer":{"name":"<NAME>" "email":"<EMAIL>" "date":"2015-01-07T20:15:08Z"} "parents":[{"sha":"4fb38539d25983c9b9b99588901a1025658d05d4" "url":"http://localhost/repos/restfulgit/commits/4fb38539d25983c9b9b99588901a1025658d05d4/" } {"sha":"6f4fa9af844f69137bfee3c247feec0fb03a3913" "url":"http://localhost/repos/restfulgit/commits/6f4fa9af844f69137bfee3c247feec0fb03a3913/" }] "stats":{"total":0 "additions":0 "deletions":0} "files":[]}<line_sep>resp=self.client.get('/repos/restfulgit/commits/{}/'.format(EMPTY_COMMIT))<line_sep>self.assert200(resp)<line_sep>self.assertEqual(reference resp.json)<block_end><def_stmt>test_get_repo_commit_involving_file_rename_works self<block_start>self.maxDiff=<none><line_sep># From https://api.github.com/repos/hulu/restfulgit/commits/d3ebb7b3eec6ce13fbe77025c8b0e0240031379e with necessary adjustments reference={"sha":"d3ebb7b3eec6ce13fbe77025c8b0e0240031379e" "commit":{"author":{"name":"<NAME>" "email":"<EMAIL>" "date":"2014-06-27T22:39:07Z"} "committer":{"name":"<NAME>" "email":"<EMAIL>" "date":"2014-06-27T22:39:07Z"} "message":"somewhat arbitrarily rename one of the test fixtures" 'sha':'d3ebb7b3eec6ce13fbe77025c8b0e0240031379e' 'parents':[{'sha':'e8617a0c479f44e0b677481c2223995b5a8fa623' 'url':'http://localhost/repos/restfulgit/commits/e8617a0c479f44e0b677481c2223995b5a8fa623/'}] "tree":{"sha":"fffee3c6675060068f95c1c61ca5fa4db8595c0e" "url":"http://localhost/repos/restfulgit/git/trees/fffee3c6675060068f95c1c61ca5fa4db8595c0e/"} 
"url":"http://localhost/repos/restfulgit/git/commits/d3ebb7b3eec6ce13fbe77025c8b0e0240031379e/" } "url":"http://localhost/repos/restfulgit/commits/d3ebb7b3eec6ce13fbe77025c8b0e0240031379e/" "author":{"name":"<NAME>" "email":"<EMAIL>" "date":"2014-06-27T22:39:07Z"} "committer":{'date':'2014-06-27T22:39:07Z' 'email':'<EMAIL>' 'name':'<NAME>'} "parents":[{"sha":"e8617a0c479f44e0b677481c2223995b5a8fa623" "url":"http://localhost/repos/restfulgit/commits/e8617a0c479f44e0b677481c2223995b5a8fa623/" }] "stats":{"total":2 "additions":1 "deletions":1} "files":[{"sha":"45a751524f43f703d5e776d48a1c495ae9e34b3e" "filename":"tests/fixtures/initial-c04112733fe2db2cb2f179fca1a19365cf15fef5-context-1.diff" 'old_filename':'tests/fixtures/initial_c04112733fe2db2cb2f179fca1a19365cf15fef5_context_1.diff' # NOTE: RestfulGit extension "status":"renamed" "additions":0 "deletions":0 "changes":0 "raw_url":"http://localhost/repos/restfulgit/raw/d3ebb7b3eec6ce13fbe77025c8b0e0240031379e/tests/fixtures/initial-c04112733fe2db2cb2f179fca1a19365cf15fef5-context-1.diff" "contents_url":"http://localhost/repos/restfulgit/contents/tests/fixtures/initial-c04112733fe2db2cb2f179fca1a19365cf15fef5-context-1.diff?ref=d3ebb7b3eec6ce13fbe77025c8b0e0240031379e"} {"sha":"d6d92aa58b97f090596c2b5afe30ac40e4f8e0b3" "filename":"tests/test_restfulgit.py" "old_filename":"tests/test_restfulgit.py" # NOTE: RestfulGit extension "status":"modified" "additions":1 "deletions":1 "changes":2 "raw_url":"http://localhost/repos/restfulgit/raw/d3ebb7b3eec6ce13fbe77025c8b0e0240031379e/tests/test_restfulgit.py" "contents_url":"http://localhost/repos/restfulgit/contents/tests/test_restfulgit.py?ref=d3ebb7b3eec6ce13fbe77025c8b0e0240031379e" "patch":"@@ -2274,7 +2274,7 @@ class CompareTestCase(_RestfulGitTestCase):\n resp = self.client.get('/repos/restfulgit/compare/{}...{}.diff?context=1'.format('initial', FIFTH_COMMIT))\n self.assert200(resp)\n self.assertContentTypeIsDiff(resp)\n- self.assertBytesEqualFixture(resp.get_data(), 'initial_c04112733fe2db2cb2f179fca1a19365cf15fef5_context_1.diff')\n+ self.assertBytesEqualFixture(resp.get_data(), 'initial-c04112733fe2db2cb2f179fca1a19365cf15fef5-context-1.diff')\n \n \n class ContributorsTestCase(_RestfulGitTestCase):"}]}<line_sep>resp=self.client.get('/repos/restfulgit/commits/d3ebb7b3eec6ce13fbe77025c8b0e0240031379e/')<line_sep>self.assert200(resp)<line_sep>self.assertEqual(reference resp.json)<block_end><def_stmt>test_get_repo_commit_with_nonexistent_sha self<block_start>resp=self.client.get('/repos/restfulgit/commits/{}/'.format(IMPROBABLE_SHA))<line_sep>self.assertJson404(resp)<block_end><def_stmt>test_get_diff_works self<block_start>resp=self.client.get('/repos/restfulgit/commit/d408fc2428bc6444cabd7f7b46edbe70b6992b16.diff')<line_sep>self.assert200(resp)<line_sep>self.assertContentTypeIsDiff(resp)<line_sep>self.assertBytesEqualFixture(resp.get_data() 'd408fc2428bc6444cabd7f7b46edbe70b6992b16.diff')<block_end><def_stmt>test_get_diff_of_empty_commit self<block_start>resp=self.client.get('/repos/restfulgit/commit/{}.diff'.format(EMPTY_COMMIT))<line_sep>self.assert200(resp)<line_sep>self.assertContentTypeIsDiff(resp)<line_sep># Verified against https://github.com/hulu/restfulgit/commit/c8ec343d7260ba9577045a05bccd931867644f28.diff self.assertEqual(resp.get_data() b'')<block_end><def_stmt>test_get_diff_with_parentless_commit self# NOTE: RestfulGit extension; GitHub gives a 404 in this case 
<block_start>resp=self.client.get('/repos/restfulgit/commit/07b9bf1540305153ceeb4519a50b588c35a35464.diff')<line_sep>self.assert200(resp)<line_sep>self.assertContentTypeIsDiff(resp)<line_sep>self.assertBytesEqualFixture(resp.get_data() '07b9bf1540305153ceeb4519a50b588c35a35464.diff')<block_end><def_stmt>test_get_diff_with_nonexistent_sha self<block_start>resp=self.client.get('/repos/restfulgit/commit/{}.diff'.format(IMPROBABLE_SHA))<line_sep>self.assertJson404(resp)<block_end><def_stmt>test_get_diff_involving_binary_file self# From https://github.com/hulu/restfulgit/commit/88edac1a3a55c04646ccc963fdada0e194ed5926.diff <block_start>resp=self.client.get('/repos/restfulgit/commit/88edac1a3a55c04646ccc963fdada0e194ed5926.diff')<line_sep>self.assert200(resp)<line_sep>self.assertContentTypeIsDiff(resp)<line_sep>self.assertBytesEqualFixture(resp.get_data() '88edac1a3a55c04646ccc963fdada0e194ed5926.diff')<block_end><def_stmt>test_get_diff_with_merge_commit self<block_start><pass><block_end><block_end><class_stmt>RefsTestCase(_RestfulGitTestCase)<block_start><def_stmt>test_get_refs_works self# From https://api.github.com/repos/hulu/restfulgit/git/refs with necessary adjustments <block_start>reference_initial_tag_ref={"ref":"refs/tags/initial" "url":"http://localhost/repos/restfulgit/git/refs/tags/initial" "object":{"sha":"1dffc031c9beda43ff94c526cbc00a30d231c079" "type":"tag" "url":"http://localhost/repos/restfulgit/git/tags/1dffc031c9beda43ff94c526cbc00a30d231c079/"}}<line_sep>reference_ambiguous_branch_ref={"ref":"refs/heads/ambiguous" "url":"http://localhost/repos/restfulgit/git/refs/heads/ambiguous" "object":{"sha":"1f51b91ac383806df9d322ae67bbad3364f50811" "type":"commit" "url":"http://localhost/repos/restfulgit/git/commits/1f51b91ac383806df9d322ae67bbad3364f50811/"}}<line_sep>resp=self.client.get('/repos/restfulgit/git/refs/')<line_sep>self.assert200(resp)<line_sep>ref_list=resp.json<line_sep>self.assertIsInstance(ref_list list)<line_sep>self.assertIn(reference_initial_tag_ref ref_list)<line_sep>self.assertIn(reference_ambiguous_branch_ref ref_list)<block_end><def_stmt>test_empty_repo self<block_start><with_stmt>self._empty_repo<block_start>resp=self.client.get('/repos/example/git/refs/')<line_sep>self.assert200(resp)<line_sep>self.assertEqual(resp.json [])<block_end><block_end><def_stmt>test_invalid_ref_path self<block_start>resp=self.client.get('/repos/restfulgit/git/refs/this_ref/path_does/not_exist')<line_sep>self.assert200(resp)<line_sep>self.assertEqual([] resp.json)<block_end><def_stmt>test_valid_specific_ref_path self# From https://api.github.com/repos/hulu/restfulgit/git/refs/tags/initial with necessary adjustments <block_start>resp=self.client.get('/repos/restfulgit/git/refs/tags/initial')<line_sep>self.assert200(resp)<line_sep>self.assertEqual(resp.json {"url":"http://localhost/repos/restfulgit/git/refs/tags/initial" "object":{"url":"http://localhost/repos/restfulgit/git/tags/1dffc031c9beda43ff94c526cbc00a30d231c079/" "sha":"1dffc031c9beda43ff94c526cbc00a30d231c079" "type":"tag"} "ref":"refs/tags/initial"})<block_end><block_end><class_stmt>RawFileTestCase(_RestfulGitTestCase)<block_start><def_stmt>test_nonexistent_branch self<block_start>resp=self.client.get('/repos/restfulgit/raw/this-branch-does-not-exist/LICENSE.md')<line_sep>self.assertJson404(resp)<block_end><def_stmt>test_nonexistent_file_path self<block_start>resp=self.client.get('/repos/restfulgit/raw/master/this_path/does_not/exist.txt')<line_sep>self.assertJson404(resp)<block_end><def_stmt>test_mime_type_logic self# 
FIXME: implement <block_start><pass><block_end><def_stmt>test_tags_trump_branches self# branch "ambiguous" = commit 1f51b91 # api.py's SHA-512 = e948e8d0b0d0703d972279382a002c90040ff19d636e96927262d63e1f1429526539ea781744d2f3a65a5938b59e0c5f57adadc26f797480efcfc6f7dcff3d81 # tag "ambiguous" = commit ff6405b # api.py's SHA-512 = a50e02753d282c0e35630bbbc16a525ea4e0b2e2af668135b603c8e1467c7269bcbe9075886baf3f08ce195a7eab1e0b8179080af08a2c0f3eda3b9518650fa1 <block_start>resp=self.client.get("/repos/restfulgit/raw/ambiguous/api.py")<line_sep>self.assert200(resp)<line_sep>self.assertEqual('a50e02753d282c0e35630bbbc16a525ea4e0b2e2af668135b603c8e1467c7269bcbe9075886baf3f08ce195a7eab1e0b8179080af08a2c0f3eda3b9518650fa1' sha512(resp.data).hexdigest())<block_end><def_stmt>test_sha_works self<block_start>resp=self.client.get('/repos/restfulgit/raw/326d80cd68ec3413fe6eaca99c52c59ca428a0d0/api.py')<line_sep>self.assert200(resp)<line_sep>self.assertEqual('0229e0a11f6a3c8c9b84c50ecbd54d476edf5c0767137e37526d1961210530aa6bd93f67a70bd4ea1998d65cdbe74c7fd8b90482ef5cbdf244cc697e3135e497' sha512(resp.data).hexdigest())<block_end><def_stmt>test_tag_works self<block_start>resp=self.client.get('/repos/restfulgit/raw/initial/api.py')<line_sep>self.assert200(resp)<line_sep>self.assertEqual('1c846bb4d44c08073c487316a7dc02d97d825aecf50546caf9bf10277c01d17e19860d5f86de877268dd969bd081c7595991c325e0ab492374b956e3a6c9967f' sha512(resp.data).hexdigest())<block_end><def_stmt>test_branch_works self<block_start>resp=self.client.get('/repos/restfulgit/raw/master/LICENSE.md')<line_sep>self.assert200(resp)<line_sep>self.assertEqual('7201955547d83fb4e740adf52d95c3044591ec8b60e4a136f5486a05d1dfaac2bd44d4546830cf0f32d05b40ce5928d0b3f71e0b2628488ea0db1427a6dd2988' sha512(resp.data).hexdigest())<block_end><block_end><class_stmt>RepositoryInfoCase(_RestfulGitTestCase)<block_start><def_stmt>test_no_description_file self<block_start>delete_file_quietly(NORMAL_CLONE_DESCRIPTION_FILEPATH)<line_sep>delete_file_quietly(GIT_MIRROR_DESCRIPTION_FILEPATH)<line_sep>resp=self.client.get('/repos/restfulgit/')<line_sep>self.assert200(resp)<line_sep>self.assertEqual(resp.json {'blobs_url':'http://localhost/repos/restfulgit/git/blobs{/sha}' 'branches_url':'http://localhost/repos/restfulgit/branches{/branch}' 'commits_url':'http://localhost/repos/restfulgit/commits{/sha}' 'description':<none> 'full_name':'restfulgit' 'git_commits_url':'http://localhost/repos/restfulgit/git/commits{/sha}' 'git_refs_url':'http://localhost/repos/restfulgit/git/refs{/sha}' 'git_tags_url':'http://localhost/repos/restfulgit/git/tags{/sha}' 'name':'restfulgit' 'tags_url':'http://localhost/repos/restfulgit/tags/' 'trees_url':'http://localhost/repos/restfulgit/git/trees{/sha}' 'url':'http://localhost/repos/restfulgit/' })<block_end><def_stmt>test_default_description_file self<block_start><with_stmt>io.open(NORMAL_CLONE_DESCRIPTION_FILEPATH mode='wt' encoding='utf-8')<as>description_file<block_start>description_file.write("Unnamed repository; edit this file 'description' to name the repository.\n")<block_end><try_stmt><block_start>resp=self.client.get('/repos/restfulgit/')<line_sep>self.assert200(resp)<line_sep>self.assertEqual(resp.json {'blobs_url':'http://localhost/repos/restfulgit/git/blobs{/sha}' 'branches_url':'http://localhost/repos/restfulgit/branches{/branch}' 'commits_url':'http://localhost/repos/restfulgit/commits{/sha}' 'description':<none> 'full_name':'restfulgit' 'git_commits_url':'http://localhost/repos/restfulgit/git/commits{/sha}' 
'git_refs_url':'http://localhost/repos/restfulgit/git/refs{/sha}' 'git_tags_url':'http://localhost/repos/restfulgit/git/tags{/sha}' 'name':'restfulgit' 'tags_url':'http://localhost/repos/restfulgit/tags/' 'trees_url':'http://localhost/repos/restfulgit/git/trees{/sha}' 'url':'http://localhost/repos/restfulgit/' })<block_end><finally_stmt><block_start>delete_file_quietly(NORMAL_CLONE_DESCRIPTION_FILEPATH)<block_end><block_end><def_stmt>test_dot_dot_disallowed self<block_start>self.app.config['RESTFULGIT_REPO_BASE_PATH']=TEST_SUBDIR<line_sep>resp=self.client.get('/repos/../')<line_sep>self.assertJson404(resp)<block_end><def_stmt>test_nonexistent_repo self<block_start>self.app.config['RESTFULGIT_REPO_BASE_PATH']=RESTFULGIT_REPO<line_sep>resp=self.client.get('/repos/test/')<line_sep>self.assertJson404(resp)<block_end><def_stmt>test_works_normal_clone self<block_start>description="REST API for Git data\n"<with_stmt>io.open(NORMAL_CLONE_DESCRIPTION_FILEPATH mode='wt' encoding='utf-8')<as>description_file<block_start>description_file.write(description)<block_end><try_stmt><block_start>resp=self.client.get('/repos/restfulgit/')<line_sep>self.assertEqual(resp.json {'blobs_url':'http://localhost/repos/restfulgit/git/blobs{/sha}' 'branches_url':'http://localhost/repos/restfulgit/branches{/branch}' 'commits_url':'http://localhost/repos/restfulgit/commits{/sha}' 'description':description 'full_name':'restfulgit' 'git_commits_url':'http://localhost/repos/restfulgit/git/commits{/sha}' 'git_refs_url':'http://localhost/repos/restfulgit/git/refs{/sha}' 'git_tags_url':'http://localhost/repos/restfulgit/git/tags{/sha}' 'name':'restfulgit' 'tags_url':'http://localhost/repos/restfulgit/tags/' 'trees_url':'http://localhost/repos/restfulgit/git/trees{/sha}' 'url':'http://localhost/repos/restfulgit/' })<block_end><finally_stmt><block_start>delete_file_quietly(NORMAL_CLONE_DESCRIPTION_FILEPATH)<block_end><block_end><def_stmt>test_works_git_mirror self<block_start>description="REST API for Git data\n"<with_stmt>io.open(GIT_MIRROR_DESCRIPTION_FILEPATH mode='wt' encoding='utf-8')<as>description_file<block_start>description_file.write(description)<block_end><try_stmt><block_start>resp=self.client.get('/repos/restfulgit/')<line_sep>self.assertEqual(resp.json {'blobs_url':'http://localhost/repos/restfulgit/git/blobs{/sha}' 'branches_url':'http://localhost/repos/restfulgit/branches{/branch}' 'commits_url':'http://localhost/repos/restfulgit/commits{/sha}' 'description':description 'full_name':'restfulgit' 'git_commits_url':'http://localhost/repos/restfulgit/git/commits{/sha}' 'git_refs_url':'http://localhost/repos/restfulgit/git/refs{/sha}' 'git_tags_url':'http://localhost/repos/restfulgit/git/tags{/sha}' 'name':'restfulgit' 'tags_url':'http://localhost/repos/restfulgit/tags/' 'trees_url':'http://localhost/repos/restfulgit/git/trees{/sha}' 'url':'http://localhost/repos/restfulgit/' })<block_end><finally_stmt><block_start>delete_file_quietly(GIT_MIRROR_DESCRIPTION_FILEPATH)<block_end><block_end><block_end><class_stmt>CorsTestCase(_RestfulGitTestCase)<block_start>@property@contextmanager<def_stmt>cors_enabled self<block_start><with_stmt>self.config_override('RESTFULGIT_ENABLE_CORS' <true>)<block_start><yield><block_end><block_end>@property<def_stmt>arbitrary_response self<block_start>resp=self.client.get('/repos/restfulgit/raw/master/LICENSE.md')<line_sep>self.assert200(resp)<line_sep><return>resp<block_end><def_stmt>assert_header_equal self header 
value<block_start>resp=self.arbitrary_response<line_sep>headers=resp.headers<line_sep>self.assertIn(header headers)<if_stmt>header<eq>'Access-Control-Allow-Methods'<block_start>expected_methods=set(value.split(', '))<line_sep>actual_methods=set(headers[header].split(', '))<line_sep>self.assertEqual(actual_methods expected_methods)<block_end><else_stmt><block_start>self.assertEqual(headers[header] value)<block_end><block_end><def_stmt>assert_cors_enabled_for self resp<block_start>self.assertIn('Access-Control-Allow-Methods' resp.headers)<line_sep>self.assertIn('Access-Control-Allow-Origin' resp.headers)<line_sep>self.assertIn('Access-Control-Allow-Credentials' resp.headers)<block_end><def_stmt>assert_cors_disabled_for self resp<block_start><for_stmt>header list(resp.headers.keys())<block_start>self.assertFalse(header.lower().startswith('access-control') msg="CORS-related header present")<block_end><block_end><def_stmt>test_disabled_really_disables self<block_start><with_stmt>self.config_override('RESTFULGIT_ENABLE_CORS' <false>)<block_start>self.assert_cors_disabled_for(self.arbitrary_response)<block_end><block_end><def_stmt>test_enabled_really_enables self<block_start><with_stmt>self.config_override('RESTFULGIT_ENABLE_CORS' <true>)<block_start>self.assert_cors_enabled_for(self.arbitrary_response)<block_end><block_end><def_stmt>test_disabled_disables_preflight self<block_start><with_stmt>self.config_override('RESTFULGIT_ENABLE_CORS' <false>)<block_start>resp=self.client.options('/repos/restfulgit/raw/master/LICENSE.md')<line_sep>self.assert200(resp)<line_sep>self.assert_cors_disabled_for(resp)<block_end><block_end><def_stmt>test_enabled_enables_preflight self<block_start><with_stmt>self.config_override('RESTFULGIT_ENABLE_CORS' <true>)<block_start>resp=self.client.options('/repos/restfulgit/raw/master/LICENSE.md')<line_sep>self.assert200(resp)<line_sep>self.assert_cors_enabled_for(resp)<block_end><block_end><def_stmt>test_specific_allowed_origin_honored self<block_start>origin='https://foo.bar.baz:90'<with_stmt>self.cors_enabled<block_start><with_stmt>self.config_override('RESTFULGIT_CORS_ALLOWED_ORIGIN' origin)<block_start>self.assert_header_equal('Access-Control-Allow-Origin' origin)<block_end><block_end><block_end><def_stmt>test_star_allowed_origin_honored self<block_start><with_stmt>self.cors_enabled<block_start><with_stmt>self.config_override('RESTFULGIT_CORS_ALLOWED_ORIGIN' '*')<block_start>self.assert_header_equal('Access-Control-Allow-Origin' '*')<block_end><block_end><block_end><def_stmt>test_max_age_honored self<block_start>max_age=timedelta(minutes=427)<with_stmt>self.cors_enabled<block_start><with_stmt>self.config_override('RESTFULGIT_CORS_MAX_AGE' max_age)<block_start>self.assert_header_equal('Access-Control-Max-Age' str(int(max_age.total_seconds())))<block_end><block_end><block_end><def_stmt>test_enabled_allow_credentials_honored self<block_start><with_stmt>self.cors_enabled<block_start><with_stmt>self.config_override('RESTFULGIT_CORS_ALLOW_CREDENTIALS' <true>)<block_start>self.assert_header_equal('Access-Control-Allow-Credentials' 'true')<block_end><block_end><block_end><def_stmt>test_disabled_allow_credentials_honored self<block_start><with_stmt>self.cors_enabled<block_start><with_stmt>self.config_override('RESTFULGIT_CORS_ALLOW_CREDENTIALS' <false>)<block_start>self.assert_header_equal('Access-Control-Allow-Credentials' 'false')<block_end><block_end><block_end><def_stmt>test_allowed_headers_honored 
self<block_start><with_stmt>self.cors_enabled<block_start><with_stmt>self.config_override('RESTFULGIT_CORS_ALLOWED_HEADERS' ['X-Foo' 'X-Bar'])<block_start>self.assert_header_equal('Access-Control-Allow-Headers' "X-Foo, X-Bar")<block_end><block_end><block_end><def_stmt>test_allowed_methods self<block_start><with_stmt>self.cors_enabled<block_start>self.assert_header_equal('Access-Control-Allow-Methods' 'HEAD, OPTIONS, GET')<block_end><block_end><block_end><class_stmt>ArchiveDownloadTestCase(_RestfulGitTestCase)<block_start><def_stmt>run_command_quietly self args<block_start><with_stmt>open(os.devnull 'wb')<as>blackhole<block_start>check_call(args stdout=blackhole)<block_end><block_end><def_stmt>_only_subdirectory_in self directory<block_start>names=os.listdir(directory)<line_sep>self.assertEqual(len(names) 1)<line_sep>subdir=os.path.join(directory names[0])<line_sep>self.assertTrue(os.path.isdir(subdir))<line_sep><return>subdir<block_end><def_stmt>assertFilesEqual self filepath_one filepath_two including_permissions=<false><block_start><if_stmt>including_permissions<block_start>self.assertEqualPermissions(filepath_one filepath_two)<block_end><with_stmt>open(filepath_one 'rb')<as>file_one open(filepath_two 'rb')<as>file_two<block_start>self.assertEqual(file_one.read() file_two.read())<block_end><block_end><def_stmt>assertEqualPermissions self path_one path_two<block_start>stat_one=os.stat(path_one)<line_sep>stat_two=os.stat(path_two)<line_sep>self.assertEqual(stat_one.st_mode stat_two.st_mode)<line_sep>self.assertEqual(stat_one.st_uid stat_two.st_uid)<line_sep>self.assertEqual(stat_one.st_gid stat_two.st_gid)<block_end><def_stmt>assertDirectoriesEqual self dir_one dir_two including_permissions=<false><block_start><for_stmt>dirpath_one,dirnames_one,filenames_one os.walk(dir_one)<block_start>dirnames_one=frozenset(dirnames_one)<line_sep>filenames_one=frozenset(filenames_one)<line_sep>dirpath_two=dirpath_one.replace(dir_one dir_two 1)<line_sep>self.assertTrue(os.path.isdir(dirpath_two))<line_sep>children_two=os.listdir(dirpath_two)<line_sep>dirnames_two=frozenset(name<for>name children_two<if>os.path.isdir(os.path.join(dirpath_two name)))<line_sep>filenames_two=frozenset(name<for>name children_two<if>os.path.isfile(os.path.join(dirpath_two name)))<if_stmt>including_permissions<block_start>self.assertEqualPermissions(dirpath_one dirpath_two)<block_end>self.assertEqual(dirnames_one dirnames_two)<line_sep>self.assertEqual(filenames_one filenames_two)<for_stmt>filename filenames_one<block_start>filepath_one=os.path.join(dirpath_one filename)<line_sep>filepath_two=os.path.join(dirpath_two filename)<line_sep>self.assertFilesEqual(filepath_one filepath_two including_permissions=including_permissions)<block_end><block_end><block_end><def_stmt>assertIsAttachment self resp<block_start>self.assertTrue(resp.headers.get('Content-Disposition' '').startswith('attachment;'))<block_end><def_stmt>test_zipball_contents self<block_start>commit='<PASSWORD>'# 1st commit in the repo that has multiple levels of subdirectories <with_stmt>self.temporary_directory(suffix='.restfulgit')<as>temp_dir<block_start>actual_dir=self.make_nested_dir(temp_dir 'actual')<line_sep>reference_dir=self.make_nested_dir(temp_dir 'reference')<line_sep>self.run_command_quietly(['unzip' self.get_fixture_path('{}.zip'.format(commit)) '-d' 
reference_dir])<with_stmt>self.temporary_file(suffix='restfulgit_actual_zipball.zip')<as>pair<block_start>actual_zip_file,actual_zip_filepath=pair<with_stmt>actual_zip_file<block_start>resp=self.client.get('/repos/restfulgit/zipball/{}/'.format(commit))<line_sep>self.assert200(resp)<line_sep>actual_zip_file.write(resp.data)<block_end>self.run_command_quietly(['unzip' actual_zip_filepath '-d' actual_dir])<block_end>reference_wrapper_dir=self._only_subdirectory_in(reference_dir)<line_sep>actual_wrapper_dir=self._only_subdirectory_in(actual_dir)<line_sep>self.assertDirectoriesEqual(reference_wrapper_dir actual_wrapper_dir)<block_end><block_end><def_stmt>test_zipball_headers self<block_start>resp=self.client.get('/repos/restfulgit/zipball/7da1a61e2f566cf3094c2fea4b18b111d2638a8f/')<line_sep>self.assertIsAttachment(resp)<line_sep>self.assertTrue(resp.headers.get('Content-Disposition' '').endswith('filename=restfulgit-7da1a61e2f566cf3094c2fea4b18b111d2638a8f.zip'))<line_sep>self.assertEqual(resp.headers.get('Content-Type') 'application/zip')<line_sep>self.assertIn('max-age=0' resp.headers.get('Cache-Control' ''))<block_end><def_stmt>test_zipball_on_nonexistent_repo self<block_start>resp=self.client.get('/repos/this-repo-does-not-exist/zipball/master/')<line_sep>self.assertJson404(resp)<block_end><def_stmt>test_zipball_on_nonexistent_ref self<block_start>resp=self.client.get('/repos/restfulgit/zipball/{}/'.format(IMPROBABLE_SHA))<line_sep>self.assertJson404(resp)<block_end><def_stmt>test_tarball_contents self<block_start>commit='<PASSWORD>'# 1st commit in the repo that has multiple levels of subdirectories <with_stmt>self.temporary_directory(suffix='.restfulgit')<as>temp_dir<block_start>actual_dir=self.make_nested_dir(temp_dir 'actual')<line_sep>reference_dir=self.make_nested_dir(temp_dir 'reference')<line_sep>self.run_command_quietly(['tar' 'xf' self.get_fixture_path('{}.tar.gz'.format(commit)) '-C' reference_dir])<with_stmt>self.temporary_file(suffix='restfulgit_actual_tarball.tar.gz')<as>pair<block_start>actual_tar_file,actual_tar_filepath=pair<with_stmt>actual_tar_file<block_start>resp=self.client.get('/repos/restfulgit/tarball/{}/'.format(commit))<line_sep>self.assert200(resp)<line_sep>actual_tar_file.write(resp.data)<block_end>self.run_command_quietly(['tar' 'xf' actual_tar_filepath '-C' actual_dir])<block_end>reference_wrapper_dir=self._only_subdirectory_in(reference_dir)<line_sep>actual_wrapper_dir=self._only_subdirectory_in(actual_dir)<line_sep>self.assertDirectoriesEqual(reference_wrapper_dir actual_wrapper_dir including_permissions=<true>)<block_end><block_end><def_stmt>test_tarball_headers self<block_start>resp=self.client.get('/repos/restfulgit/tarball/7da1a61e2f566cf3094c2fea4b18b111d2638a8f/')<line_sep>self.assertIsAttachment(resp)<line_sep>self.assertTrue(resp.headers.get('Content-Disposition' '').endswith('filename=restfulgit-7da1a61e2f566cf3094c2fea4b18b111d2638a8f.tar.gz'))<line_sep>self.assertIn(resp.headers.get('Content-Type') {'application/x-gzip' 'application/x-tar'})<line_sep>self.assertIn('max-age=0' resp.headers.get('Cache-Control' ''))<block_end><def_stmt>test_tarball_on_nonexistent_repo self<block_start>resp=self.client.get('/repos/this-repo-does-not-exist/tarball/master/')<line_sep>self.assertJson404(resp)<block_end><def_stmt>test_tarball_on_nonexistent_ref self<block_start>resp=self.client.get('/repos/restfulgit/tarball/{}/'.format(IMPROBABLE_SHA))<line_sep>self.assertJson404(resp)<block_end><block_end><class_stmt>BlameTestCase(_RestfulGitTestCase)# NOTE: This API is 
a RestfulGit extension <block_start><def_stmt>test_nonexistent_repo self<block_start>resp=self.client.get('/repos/this-repo-does-not-exist/blame/master/README')<line_sep>self.assertJson404(resp)<block_end><def_stmt>test_nonexistent_ref self<block_start>resp=self.client.get('/repos/restfulgit/blame/this-branch-does-not-exist/README')<line_sep>self.assertJson404(resp)<block_end><def_stmt>test_nonexistent_file self<block_start>resp=self.client.get('/repos/restfulgit/blame/master/this-file-does-not-exist')<line_sep>self.assertJson404(resp)<block_end><def_stmt>test_directory_with_trailing_slash self<block_start>resp=self.client.get('/repos/restfulgit/blame/da55cbf2f13c2ec019bf02f080bc47cc4f83318c/restfulgit/')<line_sep>self.assertJson400(resp)<block_end><def_stmt>test_directory_without_trailing_slash self<block_start>resp=self.client.get('/repos/restfulgit/blame/da55cbf2f13c2ec019bf02f080bc47cc4f83318c/restfulgit')<line_sep>self.assertJson400(resp)<block_end><def_stmt>test_first_line_out_of_bounds self# relevant file is 1027 lines long <block_start>resp=self.client.get('/repos/restfulgit/blame/da55cbf2f13c2ec019bf02f080bc47cc4f83318c/restfulgit/__init__.py?firstLine=1028')<line_sep>self.assertJson400(resp)<block_end><def_stmt>test_last_line_out_of_bounds self# relevant file is 1027 lines long <block_start>resp=self.client.get('/repos/restfulgit/blame/da55cbf2f13c2ec019bf02f080bc47cc4f83318c/restfulgit/__init__.py?lastLine=1028')<line_sep>self.assertJson400(resp)<block_end><def_stmt>test_malformed_line_range self<block_start>resp=self.client.get('/repos/restfulgit/blame/da55cbf2f13c2ec019bf02f080bc47cc4f83318c/restfulgit/__init__.py?firstLine=2&lastLine=1')<line_sep>self.assertJson400(resp)<block_end><def_stmt>test_zero_first_line self<block_start>resp=self.client.get('/repos/restfulgit/blame/da55cbf2f13c2ec019bf02f080bc47cc4f83318c/restfulgit/__init__.py?firstLine=0')<line_sep>self.assertJson400(resp)<block_end><def_stmt>test_zero_last_line self<block_start>resp=self.client.get('/repos/restfulgit/blame/da55cbf2f13c2ec019bf02f080bc47cc4f83318c/restfulgit/__init__.py?lastLine=0')<line_sep>self.assertJson400(resp)<block_end><def_stmt>test_non_integer_first_line self<block_start>resp=self.client.get('/repos/restfulgit/blame/da55cbf2f13c2ec019bf02f080bc47cc4f83318c/restfulgit/__init__.py?firstLine=abc')<line_sep>self.assertJson400(resp)<block_end><def_stmt>test_non_integer_last_line self<block_start>resp=self.client.get('/repos/restfulgit/blame/da55cbf2f13c2ec019bf02f080bc47cc4f83318c/restfulgit/__init__.py?lastLine=abc')<line_sep>self.assertJson400(resp)<block_end><def_stmt>test_basic_works self<block_start>resp=self.client.get('/repos/restfulgit/blame/da55cbf2f13c2ec019bf02f080bc47cc4f83318c/restfulgit/__init__.py')<line_sep>self.assert200(resp)<with_stmt>io.open(self.get_fixture_path('da55cbf2f13c2ec019bf02f080bc47cc4f83318c-__init__.py-blame.json') mode='rt' encoding='utf-8')<as>reference_file<block_start>reference=load_json_file(reference_file)<block_end>self.assertEqual(reference resp.json)<block_end><def_stmt>test_first_line_only self# relevant file is 1027 lines long <block_start>resp=self.client.get('/repos/restfulgit/blame/da55cbf2f13c2ec019bf02f080bc47cc4f83318c/restfulgit/__init__.py?firstLine=1025')<line_sep>self.assert200(resp)<line_sep>self.assertEqual(resp.json {"commits":{"090750eec2fe5f120ad1010fc2204d06fc3ca91e":{"committer":{"date":"2013-05-20T19:12:03Z" "name":"<NAME>" "email":"<EMAIL>"} "author":{"date":"2013-05-20T19:12:03Z" "name":"<NAME>" "email":"<EMAIL>"} 
"url":"http://localhost/repos/restfulgit/git/commits/090750eec2fe5f120ad1010fc2204d06fc3ca91e/" "tree":{"url":"http://localhost/repos/restfulgit/git/trees/288a19807d25403221c3f5260f4c172ec820b621/" "sha":"288a19807d25403221c3f5260f4c172ec820b621"} "sha":"090750eec2fe5f120ad1010fc2204d06fc3ca91e" "parents":[{"url":"http://localhost/repos/restfulgit/git/commits/cff4955ef40cfce35efe282e196c840619c518f2/" "sha":"cff4955ef40cfce35efe282e196c840619c518f2"}] "message":"PEP-8 minor cleanup"} "ebaa594a5b689d1cb552e15753bcd109f60b0a10":{"committer":{"date":"2013-10-06T23:44:52Z" "name":"<NAME>" "email":"<EMAIL>"} "author":{"date":"2013-10-05T04:15:22Z" "name":"<NAME>" "email":"<EMAIL>"} "url":"http://localhost/repos/restfulgit/git/commits/ebaa594a5b689d1cb552e15753bcd109f60b0a10/" "tree":{"url":"http://localhost/repos/restfulgit/git/trees/16507999f5b925211a48e3c97b242577b14bfc71/" "sha":"16507999f5b925211a48e3c97b242577b14bfc71"} "sha":"ebaa594a5b689d1cb552e15753bcd109f60b0a10" "parents":[{"url":"http://localhost/repos/restfulgit/git/commits/caccc35a6f5d8e9b9a7e23d4a2ad60f4b4155739/" "sha":"caccc35a6f5d8e9b9a7e23d4a2ad60f4b4155739"}] "message":"use a blueprint to enhance embedability/reuseability/modularity; fixes #25\n\nURL converter registration inspired by http://blog.adrianschreyer.eu/post/adding-custom-url-map-converters-to-flask-blueprint-objects"}} "lines":[{"commit":"<KEY>" "line":"app.register_blueprint(restfulgit)" "origPath":"gitapi.py" "lineNum":1025} {"commit":"<KEY>" "line":"" "origPath":"gitapi.py" "lineNum":1026} {"commit":"<PASSWORD>" "line":"application = app" "origPath":"api.py" "lineNum":1027}]})<block_end><def_stmt>test_last_line_only self<block_start>resp=self.client.get('/repos/restfulgit/blame/da55cbf2f13c2ec019bf02f080bc47cc4f83318c/restfulgit/__init__.py?lastLine=2')<line_sep>self.assert200(resp)<line_sep>self.assertEqual(resp.json {'commits':{'34f85950f3fcc662338593bbd43ad3bebc8cbf22':{'author':{'date':'2013-09-24T04:42:40Z' 'email':'<EMAIL>' 'name':'<NAME>'} 'committer':{'date':'2013-09-24T04:42:40Z' 'email':'<EMAIL>' 'name':'<NAME>'} 'message':'add PEP-263 encoding declaration' 'parents':[{'sha':'fadadc122ac7357816d6d57515c36ed8dddfadb5' 'url':'http://localhost/repos/restfulgit/git/commits/fadadc122ac7357816d6d57515c36ed8dddfadb5/'}] 'sha':'34f85950f3fcc662338593bbd43ad3bebc8cbf22' 'tree':{'sha':'029c2787239825668f3619eb02bf5a336720f5e9' 'url':'http://localhost/repos/restfulgit/git/trees/029c2787239825668f3619eb02bf5a336720f5e9/'} 'url':'http://localhost/repos/restfulgit/git/commits/34f85950f3fcc662338593bbd43ad3bebc8cbf22/'} 'ffefa5a12812d65ba4f55adeaa5bbd8131ea0c69':{'author':{'date':'2013-09-26T07:46:16Z' 'email':'<EMAIL>' 'name':'<NAME>'} 'committer':{'date':'2013-09-26T07:46:16Z' 'email':'<EMAIL>' 'name':'<NAME>'} 'message':'improve config loading error reporting & squelch last W0702' 'parents':[{'sha':'1f6787c238ef12413dca5305b8254c26c299718f' 'url':'http://localhost/repos/restfulgit/git/commits/1f6787c238ef12413dca5305b8254c26c299718f/'}] 'sha':'ffefa5a12812d65ba4f55adeaa5bbd8131ea0c69' 'tree':{'sha':'60859aa5e7ef3ba15006bd33f6ace219a3049ea5' 'url':'http://localhost/repos/restfulgit/git/trees/60859aa5e7ef3ba15006bd33f6ace219a3049ea5/'} 'url':'http://localhost/repos/restfulgit/git/commits/ffefa5a12812d65ba4f55adeaa5bbd8131ea0c69/'}} 'lines':[{'commit':'<KEY>' 'line':'# coding=utf-8' 'lineNum':1 'origPath':'gitapi.py'} {'commit':'ffefa5a12812d65ba4f55adeaa5bbd8131ea0c69' 'line':'from __future__ import print_function' 'lineNum':2 
'origPath':'gitapi.py'}]})<block_end><def_stmt>test_first_line_just_within_bounds self# relevant file is 1027 lines long <block_start>resp=self.client.get('/repos/restfulgit/blame/da55cbf2f13c2ec019bf02f080bc47cc4f83318c/restfulgit/__init__.py?firstLine=1027')<line_sep>self.assert200(resp)<line_sep>self.assertEqual(resp.json {'commits':{'090750eec2fe5f120ad1010fc2204d06fc3ca91e':{'author':{'date':'2013-05-20T19:12:03Z' 'email':'<EMAIL>' 'name':'<NAME>'} 'committer':{'date':'2013-05-20T19:12:03Z' 'email':'<EMAIL>' 'name':'<NAME>'} 'message':'PEP-8 minor cleanup' 'parents':[{'sha':'cff4955ef40cfce35efe282e196c840619c518f2' 'url':'http://localhost/repos/restfulgit/git/commits/cff4955ef40cfce35efe282e196c840619c518f2/'}] 'sha':'090750eec2fe5f120ad1010fc2204d06fc3ca91e' 'tree':{'sha':'288a19807d25403221c3f5260f4c172ec820b621' 'url':'http://localhost/repos/restfulgit/git/trees/288a19807d25403221c3f5260f4c172ec820b621/'} 'url':'http://localhost/repos/restfulgit/git/commits/090750eec2fe5f120ad1010fc2204d06fc3ca91e/'}} 'lines':[{'commit':'090750<PASSWORD>2fe5f120ad1010fc2204d06fc3ca91e' 'line':'application = app' 'lineNum':1027 'origPath':'api.py'}]})<block_end><def_stmt>test_last_line_just_within_bounds self# relevant file is 1027 lines long <block_start>resp=self.client.get('/repos/restfulgit/blame/da55cbf2f13c2ec019bf02f080bc47cc4f83318c/restfulgit/__init__.py?lastLine=1027&firstLine=1026')<line_sep>self.assert200(resp)<line_sep>self.assertEqual(resp.json {'commits':{'090750eec2fe5f120ad1010fc2204d06fc3ca91e':{'author':{'date':'2013-05-20T19:12:03Z' 'email':'<EMAIL>' 'name':'<NAME>'} 'committer':{'date':'2013-05-20T19:12:03Z' 'email':'<EMAIL>' 'name':'<NAME>'} 'message':'PEP-8 minor cleanup' 'parents':[{'sha':'cff4955ef40cfce35efe282e196c840619c518f2' 'url':'http://localhost/repos/restfulgit/git/commits/cff4955ef40cfce35efe282e196c840619c518f2/'}] 'sha':'090750eec2fe5f120ad1010fc2204d06fc3ca91e' 'tree':{'sha':'288a19807d25403221c3f5260f4c172ec820b621' 'url':'http://localhost/repos/restfulgit/git/trees/288a19807d25403221c3f5260f4c172ec820b621/'} 'url':'http://localhost/repos/restfulgit/git/commits/090750eec2fe5f120ad1010fc2204d06fc3ca91e/'} 'ebaa594a5b689d1cb552e15753bcd109f60b0a10':{'author':{'date':'2013-10-05T04:15:22Z' 'email':'<EMAIL>' 'name':'<NAME>'} 'committer':{'date':'2013-10-06T23:44:52Z' 'email':'<EMAIL>' 'name':'<NAME>'} 'message':'use a blueprint to enhance embedability/reuseability/modularity; fixes #25\n\nURL converter registration inspired by http://blog.adrianschreyer.eu/post/adding-custom-url-map-converters-to-flask-blueprint-objects' 'parents':[{'sha':'caccc35a6f5d8e9b9a7e23d4a2ad60f4b4155739' 'url':'http://localhost/repos/restfulgit/git/commits/caccc35a6f5d8e9b9a7e23d4a2ad60f4b4155739/'}] 'sha':'ebaa594a5b689d1cb552e15753bcd109f60b0a10' 'tree':{'sha':'16507999f5b925211a48e3c97b242577b14bfc71' 'url':'http://localhost/repos/restfulgit/git/trees/16507999f5b925211a48e3c97b242577b14bfc71/'} 'url':'http://localhost/repos/restfulgit/git/commits/ebaa594a5b689d1cb552e15753bcd109f60b0a10/'}} 'lines':[{'commit':'ebaa594a5b689d1cb552e15753bcd109f60b0a10' 'line':'' 'lineNum':1026 'origPath':'gitapi.py'} {'commit':'090750eec2fe5f120ad1010fc2204d06fc3ca91e' 'line':'application = app' 'lineNum':1027 'origPath':'api.py'} ]})<block_end><def_stmt>test_first_and_last_line_works self<block_start>resp=self.client.get('/repos/restfulgit/blame/da55cbf2f13c2ec019bf02f080bc47cc4f83318c/restfulgit/__init__.py?firstLine=4&lastLine=6')<line_sep>self.assert200(resp)<line_sep>self.assertEqual(resp.json 
{'commits':{'13e9ff41ba4704d6ca91988f9216adeeee8c79b5':{'author':{'date':'2013-12-23T04:16:14Z' 'email':'<EMAIL>' 'name':'<NAME>'} 'committer':{'date':'2013-12-30T20:01:35Z' 'email':'<EMAIL>' 'name':'<NAME>'} 'message':'implement tarball & zipball downloads; fixes #62\n\nReference zipball from https://github.com/hulu/restfulgit/zipball/7da1a61e2f566cf3094c2fea4b18b111d2638a8f\nReference tarball from https://github.com/hulu/restfulgit/tarball/7da1a61e2f566cf3094c2fea4b18b111d2638a8f' 'parents':[{'sha':'129458e24667a9c32db4cb1a0549e3554bff0965' 'url':'http://localhost/repos/restfulgit/git/commits/129458e24667a9c32db4cb1a0549e3554bff0965/'}] 'sha':'13e9ff41ba4704d6ca91988f9216adeeee8c79b5' 'tree':{'sha':'a611bc827047055a6b8e9cbf7ee2827767b27328' 'url':'http://localhost/repos/restfulgit/git/trees/a611bc827047055a6b8e9cbf7ee2827767b27328/'} 'url':'http://localhost/repos/restfulgit/git/commits/13e9ff41ba4704d6ca91988f9216adeeee8c79b5/'} 'a8e4af2d7f30492bfef34ccb1c2c167df54512ba':{'author':{'date':'2013-12-10T03:32:32Z' 'email':'<EMAIL>' 'name':'<NAME>'} 'committer':{'date':'2013-12-10T03:59:40Z' 'email':'<EMAIL>' 'name':'<NAME>'} 'message':'use JSON error pages; fixes #39' 'parents':[{'sha':'493431d90a21109290e4a8ab8978e523ec957531' 'url':'http://localhost/repos/restfulgit/git/commits/493431d90a21109290e4a8ab8978e523ec957531/'}] 'sha':'a8e4af2d7f30492bfef34ccb1c2c167df54512ba' 'tree':{'sha':'b08d1b792ecba9ebb06bc8f2dad5d0877a9a42ec' 'url':'http://localhost/repos/restfulgit/git/trees/b08d1b792ecba9ebb06bc8f2dad5d0877a9a42ec/'} 'url':'http://localhost/repos/restfulgit/git/commits/a8e4af2d7f30492bfef34ccb1c2c167df54512ba/'} 'ba3f032dbd2ead6a6610f3bf3b66f05cb628f579':{'author':{'date':'2013-09-12T04:26:31Z' 'email':'<EMAIL>' 'name':'<NAME>'} 'committer':{'date':'2013-09-12T06:16:37Z' 'email':'<EMAIL>' 'name':'<NAME>'} 'message':'use a custom Werkzeug converter for commit SHAs; fixes #1' 'parents':[{'sha':'98b873f9d87b110a48628b8493de2cb0383eb391' 'url':'http://localhost/repos/restfulgit/git/commits/98b873f9d87b110a48628b8493de2cb0383eb391/'}] 'sha':'<KEY>' 'tree':{'sha':'a6fb2a953ab675c8da0f3776faa160101ac301f9' 'url':'http://localhost/repos/restfulgit/git/trees/a6fb2a953ab675c8da0f3776faa160101ac301f9/'} 'url':'http://localhost/repos/restfulgit/git/commits/ba3f032dbd2ead6a6610f3bf3b66f05cb628f579/'}} 'lines':[{'commit':'<PASSWORD>' 'line':'from flask import Flask, url_for, request, Response, current_app, Blueprint, safe_join, send_from_directory, make_response, send_file' 'lineNum':4 'origPath':'restfulgit/__init__.py'} {'commit':'<KEY>' 'line':'from werkzeug.exceptions import NotFound, BadRequest, HTTPException, default_exceptions' 'lineNum':5 'origPath':'restfulgit/__init__.py'} {'commit':'<KEY>' 'line':'from werkzeug.routing import BaseConverter' 'lineNum':6 'origPath':'gitapi.py'}]})<block_end><def_stmt>test_single_line_works self<block_start>resp=self.client.get('/repos/restfulgit/blame/da55cbf2f13c2ec019bf02f080bc47cc4f83318c/restfulgit/__init__.py?firstLine=1027&lastLine=1027')<line_sep>self.assert200(resp)<line_sep>self.assertEqual(resp.json {'commits':{'090750eec2fe5f120ad1010fc2204d06fc3ca91e':{'author':{'date':'2013-05-20T19:12:03Z' 'email':'<EMAIL>' 'name':'<NAME>'} 'committer':{'date':'2013-05-20T19:12:03Z' 'email':'<EMAIL>' 'name':'<NAME>'} 'message':'PEP-8 minor cleanup' 'parents':[{'sha':'cff4955ef40cfce35efe282e196c840619c518f2' 'url':'http://localhost/repos/restfulgit/git/commits/cff4955ef40cfce35efe282e196c840619c518f2/'}] 'sha':'090750eec2fe5f120ad1010fc2204d06fc3ca91e' 
'tree':{'sha':'288a19807d25403221c3f5260f4c172ec820b621' 'url':'http://localhost/repos/restfulgit/git/trees/288a19807d25403221c3f5260f4c172ec820b621/'} 'url':'http://localhost/repos/restfulgit/git/commits/090750eec2fe5f120ad1010fc2204d06fc3ca91e/'}} 'lines':[{'commit':'090750eec2fe5f120ad1010fc2204d06fc3ca91e' 'line':'application = app' 'lineNum':1027 'origPath':'api.py'}]})<block_end><def_stmt>test_oldest_with_nonexistent_ref self<block_start>resp=self.client.get('/repos/restfulgit/blame/da55cbf2f13c2ec019bf02f080bc47cc4f83318c/restfulgit/__init__.py?oldest={}'.format(IMPROBABLE_SHA))<line_sep>self.assertJson404(resp)<block_end><def_stmt>test_oldest_works self<block_start>resp=self.client.get('/repos/restfulgit/blame/da55cbf2f13c2ec019bf02f080bc47cc4f83318c/restfulgit/__init__.py?oldest=129458e24667a9c32db4cb1a0549e3554bff0965')<line_sep>self.assert200(resp)<line_sep>json=resp.json<line_sep>relevant_commits={'129458e24667a9c32db4cb1a0549e3554bff0965' '13e9ff41ba4704d6ca91988f9216adeeee8c79b5'}<line_sep>self.assertEqual(relevant_commits set(json['commits'].keys()))<line_sep>self.assertEqual(relevant_commits {line['commit']<for>line json['lines']})<block_end><block_end><class_stmt>RepoContentsTestCase(_RestfulGitTestCase)<block_start><def_stmt>test_nonexistent_repo self<block_start>resp=self.client.get('/repos/this-repo-does-not-exist/contents/README.md')<line_sep>self.assertJson404(resp)<block_end><def_stmt>test_nonexistent_ref self<block_start>resp=self.client.get('/repos/restfulgit/contents/README.md?ref=this-branch-does-not-exist')<line_sep>self.assertJson404(resp)<block_end><def_stmt>test_ref_is_optional self<block_start>resp=self.client.get('/repos/restfulgit/contents/README.md')<line_sep>self.assert200(resp)<block_end><def_stmt>test_extant_file self<block_start>resp=self.client.get('/repos/restfulgit/contents/tests/fixtures/d408fc2428bc6444cabd7f7b46edbe70b6992b16.diff?ref=7da1a61e2f566cf3094c2fea4b18b111d2638a8f')<line_sep>self.assert200(resp)<line_sep>json=resp.json<line_sep>content=json.pop('content')<line_sep>self.assertEqual(sha512(content.encode()).hexdigest() '1966b04df26b4b9168d9c294d12ff23794fc36ba7bd7e96997541f5f31814f0d2f640dd6f0c0fe719a74815439154890df467ec5b9c4322d785902b18917fecc')<line_sep># From https://api.github.com/repos/hulu/restfulgit/contents/tests/fixtures/d408fc2428bc6444cabd7f7b46edbe70b6992b16.diff?ref=7da1a61e2f566cf3094c2fea4b18b111d2638a8f with necessary adjustments self.assertEqual(json {"name":"d408fc2428bc6444cabd7f7b46edbe70b6992b16.diff" "path":"tests/fixtures/d408fc2428bc6444cabd7f7b46edbe70b6992b16.diff" "sha":"40c739b1166f47c791e87f747f0061739b49af0e" "size":853 "url":"http://localhost/repos/restfulgit/contents/tests/fixtures/d408fc2428bc6444cabd7f7b46edbe70b6992b16.diff?ref=7da1a61e2f566cf3094c2fea4b18b111d2638a8f" "git_url":"http://localhost/repos/restfulgit/git/blobs/40c739b1166f47c791e87f747f0061739b49af0e/" "type":"file" "encoding":"utf-8" "_links":{"self":"http://localhost/repos/restfulgit/contents/tests/fixtures/d408fc2428bc6444cabd7f7b46edbe70b6992b16.diff?ref=7da1a61e2f566cf3094c2fea4b18b111d2638a8f" "git":"http://localhost/repos/restfulgit/git/blobs/40c739b1166f47c791e87f747f0061739b49af0e/" }})<block_end><def_stmt>test_nonexistent_file self<block_start>resp=self.client.get('/repos/restfulgit/contents/this-file-does-not-exist')<line_sep>self.assertJson404(resp)<block_end><def_stmt>test_extant_directory_without_trailing_slash self# From 
https://api.github.com/repos/hulu/restfulgit/contents/restfulgit?ref=7da1a61e2f566cf3094c2fea4b18b111d2638a8f with necessary adjustments <block_start>resp=self.client.get('/repos/restfulgit/contents/restfulgit?ref=7da1a61e2f566cf3094c2fea4b18b111d2638a8f')<line_sep>self.assert200(resp)<line_sep>self.assertEqual(resp.json [{"name":"__init__.py" "path":"restfulgit/__init__.py" "sha":"db36c03e5649e6e6d23fd431deff3a52ec1faaba" "size":24099 "url":"http://localhost/repos/restfulgit/contents/restfulgit/__init__.py?ref=7da1a61e2f566cf3094c2fea4b18b111d2638a8f" "git_url":"http://localhost/repos/restfulgit/git/blobs/db36c03e5649e6e6d23fd431deff3a52ec1faaba/" "type":"file" "_links":{"self":"http://localhost/repos/restfulgit/contents/restfulgit/__init__.py?ref=7da1a61e2f566cf3094c2fea4b18b111d2638a8f" "git":"http://localhost/repos/restfulgit/git/blobs/db36c03e5649e6e6d23fd431deff3a52ec1faaba/" }} {"name":"__main__.py" "path":"restfulgit/__main__.py" "sha":"e067d7f361bd3b0f227ba1914c227ebf9539f59d" "size":110 "url":"http://localhost/repos/restfulgit/contents/restfulgit/__main__.py?ref=7da1a61e2f566cf3094c2fea4b18b111d2638a8f" "git_url":"http://localhost/repos/restfulgit/git/blobs/e067d7f361bd3b0f227ba1914c227ebf9539f59d/" "type":"file" "_links":{"self":"http://localhost/repos/restfulgit/contents/restfulgit/__main__.py?ref=7da1a61e2f566cf3094c2fea4b18b111d2638a8f" "git":"http://localhost/repos/restfulgit/git/blobs/e067d7f361bd3b0f227ba1914c227ebf9539f59d/" }}])<block_end><def_stmt>test_extant_directory_with_trailing_slash self# From https://api.github.com/repos/hulu/restfulgit/contents/restfulgit?ref=7da1a61e2f566cf3094c2fea4b18b111d2638a8f with necessary adjustments <block_start>resp=self.client.get('/repos/restfulgit/contents/restfulgit/?ref=7da1a61e2f566cf3094c2fea4b18b111d2638a8f')<line_sep>self.assert200(resp)<line_sep>self.assertEqual(resp.json [{"name":"__init__.py" "path":"restfulgit/__init__.py" "sha":"db36c03e5649e6e6d23fd431deff3a52ec1faaba" "size":24099 "url":"http://localhost/repos/restfulgit/contents/restfulgit/__init__.py?ref=7da1a61e2f566cf3094c2fea4b18b111d2638a8f" "git_url":"http://localhost/repos/restfulgit/git/blobs/db36c03e5649e6e6d23fd431deff3a52ec1faaba/" "type":"file" "_links":{"self":"http://localhost/repos/restfulgit/contents/restfulgit/__init__.py?ref=7da1a61e2f566cf3094c2fea4b18b111d2638a8f" "git":"http://localhost/repos/restfulgit/git/blobs/db36c03e5649e6e6d23fd431deff3a52ec1faaba/" }} {"name":"__main__.py" "path":"restfulgit/__main__.py" "sha":"e067d7f361bd3b0f227ba1914c227ebf9539f59d" "size":110 "url":"http://localhost/repos/restfulgit/contents/restfulgit/__main__.py?ref=7da1a61e2f566cf3094c2fea4b18b111d2638a8f" "git_url":"http://localhost/repos/restfulgit/git/blobs/e067d7f361bd3b0f227ba1914c227ebf9539f59d/" "type":"file" "_links":{"self":"http://localhost/repos/restfulgit/contents/restfulgit/__main__.py?ref=7da1a61e2f566cf3094c2fea4b18b111d2638a8f" "git":"http://localhost/repos/restfulgit/git/blobs/e067d7f361bd3b0f227ba1914c227ebf9539f59d/" }}])<block_end><def_stmt>test_root_directory self<block_start>resp=self.client.get('/repos/restfulgit/contents/?ref=initial')<line_sep>self.assert200(resp)<line_sep>self.assertEqual(resp.json [{'name':'api.py' 'url':'http://localhost/repos/restfulgit/contents/api.py?ref=initial' 'sha':'ae9d90706c632c26023ce599ac96cb152673da7c' '_links':{'self':'http://localhost/repos/restfulgit/contents/api.py?ref=initial' 'git':'http://localhost/repos/restfulgit/git/blobs/ae9d90706c632c26023ce599ac96cb152673da7c/'} 
'git_url':'http://localhost/repos/restfulgit/git/blobs/ae9d90706c632c26023ce599ac96cb152673da7c/' 'path':'api.py' 'type':'file' 'size':5543}])<block_end><def_stmt>test_directory_with_subdirectories self# From https://api.github.com/repos/hulu/restfulgit/contents/tests?ref=7da1a61e2f566cf3094c2fea4b18b111d2638a8f with necessary adjustments <block_start>resp=self.client.get('/repos/restfulgit/contents/tests?ref=7da1a61e2f566cf3094c2fea4b18b111d2638a8f')<line_sep>self.assert200(resp)<line_sep>self.assertEqual(resp.json [{"name":"fixtures" "path":"tests/fixtures" "sha":"7a62b2e0c7e25dc66d110380844c477abf13b91f" "size":0 "url":"http://localhost/repos/restfulgit/contents/tests/fixtures?ref=7da1a61e2f566cf3094c2fea4b18b111d2638a8f" "git_url":"http://localhost/repos/restfulgit/git/trees/7a62b2e0c7e25dc66d110380844c477abf13b91f/" "type":"dir" "_links":{"self":"http://localhost/repos/restfulgit/contents/tests/fixtures?ref=7da1a61e2f566cf3094c2fea4b18b111d2638a8f" "git":"http://localhost/repos/restfulgit/git/trees/7a62b2e0c7e25dc66d110380844c477abf13b91f/" }} {"name":"test_restfulgit.py" "path":"tests/test_restfulgit.py" "sha":"3da8fd332d44b67ecd9910f5392c73cb62a76a4d" "size":47069 "url":"http://localhost/repos/restfulgit/contents/tests/test_restfulgit.py?ref=7da1a61e2f566cf3094c2fea4b18b111d2638a8f" "git_url":"http://localhost/repos/restfulgit/git/blobs/3da8fd332d44b67ecd9910f5392c73cb62a76a4d/" "type":"file" "_links":{"self":"http://localhost/repos/restfulgit/contents/tests/test_restfulgit.py?ref=7da1a61e2f566cf3094c2fea4b18b111d2638a8f" "git":"http://localhost/repos/restfulgit/git/blobs/3da8fd332d44b67ecd9910f5392c73cb62a76a4d/" }}])<block_end><def_stmt>test_nonexistent_directory self<block_start>resp=self.client.get('/repos/restfulgit/contents/this-directory-does-not-exist/')<line_sep>self.assertJson404(resp)<block_end><def_stmt>test_symlink self# FIXME: implement <block_start><pass><block_end><def_stmt>test_submodule self# FIXME: implement <block_start><pass><block_end><block_end><class_stmt>CompareTestCase(_RestfulGitTestCase)<block_start><def_stmt>test_works self<block_start>resp=self.client.get('/repos/restfulgit/compare/{}...{}.diff'.format('initial' FIFTH_COMMIT))<line_sep>self.assert200(resp)<line_sep>self.assertContentTypeIsDiff(resp)<line_sep>self.assertBytesEqualFixture(resp.get_data() 'initial_c04112733fe2db2cb2f179fca1a19365cf15fef5.diff')<block_end><def_stmt>test_empty_diff self<block_start>resp=self.client.get('/repos/restfulgit/compare/initial...initial.diff')<line_sep>self.assert200(resp)<line_sep>self.assertContentTypeIsDiff(resp)<line_sep>self.assertEqual(resp.get_data() b'')<block_end># From https://github.com/hulu/restfulgit/compare/initial...initial.diff <def_stmt>test_nonexistent_refspec_404 self<block_start>resp=self.client.get('/repos/restfulgit/compare/initial...this-branch-does-not-exist.diff')<line_sep>self.assertJson404(resp)<block_end><def_stmt>test_empty_left_refspec_rejected self<block_start>resp=self.client.get('/repos/restfulgit/compare/...initial.diff')<line_sep>self.assertJson404(resp)<block_end><def_stmt>test_right_empty_refspec_rejected self<block_start>resp=self.client.get('/repos/restfulgit/compare/initial....diff')<line_sep>self.assertJson404(resp)<block_end><def_stmt>test_branch_names_with_dots self<block_start><pass><block_end><def_stmt>test_non_integer_context_rejected self# NOTE: `context` is a RestfulGit extension <block_start>resp=self.client.get('/repos/restfulgit/compare/{}...{}.diff?context=abcdef'.format('initial' 
FIFTH_COMMIT))<line_sep>self.assert400(resp)<block_end><def_stmt>test_negative_context_rejected self# NOTE: `context` is a RestfulGit extension <block_start>resp=self.client.get('/repos/restfulgit/compare/{}...{}.diff?context=-1'.format('initial' FIFTH_COMMIT))<line_sep>self.assert400(resp)<block_end><def_stmt>test_context_is_honored self# NOTE: `context` is a RestfulGit extension <block_start>resp=self.client.get('/repos/restfulgit/compare/{}...{}.diff?context=1'.format('initial' FIFTH_COMMIT))<line_sep>self.assert200(resp)<line_sep>self.assertContentTypeIsDiff(resp)<line_sep>self.assertBytesEqualFixture(resp.get_data() 'initial-c04112733fe2db2cb2f179fca1a19365cf15fef5-context-1.diff')<block_end><block_end><class_stmt>ContributorsTestCase(_RestfulGitTestCase)<block_start><def_stmt>test_nonexistent_repo self<block_start>resp=self.client.get('/repos/this-repo-does-not-exist/contributors/')<line_sep>self.assert404(resp)<block_end><def_stmt>test_empty_repo self<block_start><with_stmt>self._empty_repo<block_start>resp=self.client.get('/repos/example/contributors/')<line_sep>self.assert200(resp)<line_sep>self.assertEqual(resp.json [])<block_end><block_end><def_stmt>test_results_well_formed self<block_start>resp=self.client.get('/repos/restfulgit/contributors/')<line_sep>self.assert200(resp)<line_sep>contributors=resp.json<for_stmt>contributor contributors<block_start>self.assertIsInstance(contributor dict)<line_sep>self.assertIsInstance(contributor.get('name') str)<line_sep>self.assertIsInstance(contributor.get('email') str)<line_sep>count=contributor.get('contributions')<line_sep>self.assertIsInstance(count int)<line_sep>self.assertGreater(count 0)<block_end>counts=[contributor['contributions']<for>contributor contributors]<line_sep>sorted_counts=sorted(counts reverse=<true>)<line_sep>self.assertEqual(sorted_counts counts)<block_end><block_end><class_stmt>CommitsUniqueToBranchTestCase(_RestfulGitTestCase)# NOTE: This API is a RestfulGit extension <block_start><def_stmt>test_invalid_sort_404s self<block_start><with_stmt>self._base_repo_and_commit<block_start>resp=self.client.get('/repos/example/branches/A/unique-commits/sorted/astrological/')<line_sep>self.assertJson404(resp)<block_end><block_end><def_stmt>test_first_commit self<block_start><with_stmt>self._base_repo_and_commit<block_start>resp=self.client.get('/repos/example/branches/A/unique-commits/sorted/topological/')<line_sep>self.assert200(resp)<line_sep>self.assertEqual(resp.json {'commits':[{'author':{'date':'1970-01-01T00:00:00Z' 'email':'<EMAIL>' 'name':'<NAME>'} 'commit':{'author':{'date':'1970-01-01T00:00:00Z' 'email':'<EMAIL>' 'name':'<NAME>'} 'committer':{'date':'1970-01-01T00:01:00Z' 'email':'<EMAIL>' 'name':'Alien Celebrity'} 'message':'A' 'parents':[] 'sha':'c655dffe0fed2a78dc5f38c1bc8e5628e2605017' 'tree':{'sha':'617601c79811cbbae338512798318b4e5b70c9ac' 'url':'http://localhost/repos/example/git/trees/617601c79811cbbae338512798318b4e5b70c9ac/'} 'url':'http://localhost/repos/example/git/commits/c655dffe0fed2a78dc5f38c1bc8e5628e2605017/'} 'committer':{'date':'1970-01-01T00:01:00Z' 'email':'<EMAIL>' 'name':'<NAME>'} 'parents':[] 'sha':'c655dffe0fed2a78dc5f38c1bc8e5628e2605017' 'url':'http://localhost/repos/example/commits/c655dffe0fed2a78dc5f38c1bc8e5628e2605017/'}]})<block_end><block_end><def_stmt>test_only_branch self<block_start><with_stmt>self._base_repo_and_commit<as>pair<block_start>repo,a=pair<line_sep>b=self._commit(repo "B" [a])<line_sep>repo.create_branch("A" repo[b] <true>)# overwrite A 
resp=self.client.get('/repos/example/branches/A/unique-commits/sorted/topological/')<line_sep>self.assert200(resp)<line_sep>self.assertEqual(resp.json {'commits':[{'author':{'date':'1970-01-01T00:00:00Z' 'email':'<EMAIL>' 'name':'<NAME>'} 'commit':{'author':{'date':'1970-01-01T00:00:00Z' 'email':'<EMAIL>' 'name':'<NAME>'} 'committer':{'date':'1970-01-01T00:01:00Z' 'email':'<EMAIL>' 'name':'<NAME>'} 'message':'A' 'parents':[] 'sha':'c655dffe0fed2a78dc5f38c1bc8e5628e2605017' 'tree':{'sha':'617601c79811cbbae338512798318b4e5b70c9ac' 'url':'http://localhost/repos/example/git/trees/617601c79811cbbae338512798318b4e5b70c9ac/'} 'url':'http://localhost/repos/example/git/commits/c655dffe0fed2a78dc5f38c1bc8e5628e2605017/'} 'committer':{'date':'1970-01-01T00:01:00Z' 'email':'<EMAIL>' 'name':'<NAME>'} 'parents':[] 'sha':'c655dffe0fed2a78dc5f38c1bc8e5628e2605017' 'url':'http://localhost/repos/example/commits/c655dffe0fed2a78dc5f38c1bc8e5628e2605017/'} {'author':{'date':'1970-01-01T00:02:00Z' 'email':'<EMAIL>' 'name':'<NAME>'} 'commit':{'author':{'date':'1970-01-01T00:02:00Z' 'email':'<EMAIL>' 'name':'<NAME>'} 'committer':{'date':'1970-01-01T00:03:00Z' 'email':'<EMAIL>' 'name':'<NAME>'} 'message':'B' 'parents':[{'sha':'c655dffe0fed2a78dc5f38c1bc8e5628e2605017' 'url':'http://localhost/repos/example/commits/c655dffe0fed2a78dc5f38c1bc8e5628e2605017/'}] 'sha':'e11c39e288519302f75f281b8b9a5ab585f678db' 'tree':{'sha':'1a321342ee655cb18be26a1a9632bb9629fb3642' 'url':'http://localhost/repos/example/git/trees/1a321342ee655cb18be26a1a9632bb9629fb3642/'} 'url':'http://localhost/repos/example/git/commits/e11c39e288519302f75f281b8b9a5ab585f678db/'} 'committer':{'date':'1970-01-01T00:03:00Z' 'email':'<EMAIL>' 'name':'<NAME>'} 'parents':[{'sha':'c655dffe0fed2a78dc5f38c1bc8e5628e2605017' 'url':'http://localhost/repos/example/commits/c655dffe0fed2a78dc5f38c1bc8e5628e2605017/'}] 'sha':'e11c39e288519302f75f281b8b9a5ab585f678db' 'url':'http://localhost/repos/example/commits/e11c39e288519302f75f281b8b9a5ab585f678db/'}]})<block_end><block_end><def_stmt>test_duplicate_is_empty self<block_start><with_stmt>self._example_repo()<as>commits<block_start>repo=commits['repo']<line_sep># J branch = I branch repo.create_branch("J" repo[commits['i']])<line_sep>resp=self.client.get('/repos/example/branches/J/unique-commits/sorted/topological/')<line_sep>self.assert200(resp)<line_sep>self.assertEqual(resp.json {'commits':[]})<block_end><block_end><def_stmt>test_inclusion_exclusion_topological_ordering self# B,C,D,,F,G,I <block_start><with_stmt>self._example_repo()<as>commits<block_start>resp=self.client.get('/repos/example/branches/I/unique-commits/sorted/topological/')<line_sep>self.assert200(resp)<line_sep>self.assertEqual(resp.json {'commits':[{'author':{'date':'1970-01-01T00:02:00Z' 'email':'<EMAIL>' 'name':'<NAME>'} 'commit':{'author':{'date':'1970-01-01T00:02:00Z' 'email':'<EMAIL>' 'name':'<NAME>'} 'committer':{'date':'1970-01-01T00:03:00Z' 'email':'<EMAIL>' 'name':'<NAME>'} 'message':'B' 'parents':[{'sha':'c655dffe0fed2a78dc5f38c1bc8e5628e2605017' 'url':'http://localhost/repos/example/commits/c655dffe0fed2a78dc5f38c1bc8e5628e2605017/'}] 'sha':'e11c39e288519302f75f281b8b9a5ab585f678db' 'tree':{'sha':'1a321342ee655cb18be26a1a9632bb9629fb3642' 'url':'http://localhost/repos/example/git/trees/1a321342ee655cb18be26a1a9632bb9629fb3642/'} 'url':'http://localhost/repos/example/git/commits/e11c39e288519302f75f281b8b9a5ab585f678db/'} 'committer':{'date':'1970-01-01T00:03:00Z' 'email':'<EMAIL>' 'name':'<NAME>'} 
'parents':[{'sha':'c655dffe0fed2a78dc5f38c1bc8e5628e2605017' 'url':'http://localhost/repos/example/commits/c655dffe0fed2a78dc5f38c1bc8e5628e2605017/'}] 'sha':'e11c39e288519302f75f281b8b9a5ab585f678db' 'url':'http://localhost/repos/example/commits/e11c39e288519302f75f281b8b9a5ab585f678db/'} {'author':{'date':'1970-01-01T00:04:00Z' 'email':'<EMAIL>' 'name':'<NAME>'} 'commit':{'author':{'date':'1970-01-01T00:04:00Z' 'email':'<EMAIL>' 'name':'<NAME>'} 'committer':{'date':'1970-01-01T00:05:00Z' 'email':'<EMAIL>' 'name':'<NAME>'} 'message':'C' 'parents':[{'sha':'e11c39e288519302f75f281b8b9a5ab585f678db' 'url':'http://localhost/repos/example/commits/e11c39e288519302f75f281b8b9a5ab585f678db/'}] 'sha':'d982d1115b558a8abe447f8bf46cc3ab8761e19f' 'tree':{'sha':'adaf4189f869749deba4ed69005ece57a4c2f19c' 'url':'http://localhost/repos/example/git/trees/adaf4189f869749deba4ed69005ece57a4c2f19c/'} 'url':'http://localhost/repos/example/git/commits/d982d1115b558a8abe447f8bf46cc3ab8761e19f/'} 'committer':{'date':'1970-01-01T00:05:00Z' 'email':'<EMAIL>' 'name':'Alien Celebrity'} 'parents':[{'sha':'e11c39e288519302f75f281b8b9a5ab585f678db' 'url':'http://localhost/repos/example/commits/e11c39e288519302f75f281b8b9a5ab585f678db/'}] 'sha':'d982d1115b558a8abe447f8bf46cc3ab8761e19f' 'url':'http://localhost/repos/example/commits/d982d1115b558a8abe447f8bf46cc3ab8761e19f/'} {'author':{'date':'1970-01-01T00:06:00Z' 'email':'<EMAIL>' 'name':'Alien Celebrity'} 'commit':{'author':{'date':'1970-01-01T00:06:00Z' 'email':'<EMAIL>' 'name':'Alien Celebrity'} 'committer':{'date':'1970-01-01T00:07:00Z' 'email':'<EMAIL>' 'name':'<NAME>'} 'message':'D' 'parents':[{'sha':'d982d1115b558a8abe447f8bf46cc3ab8761e19f' 'url':'http://localhost/repos/example/commits/d982d1115b558a8abe447f8bf46cc3ab8761e19f/'}] 'sha':'24c57d21498c2fc3b44153713308510d86142fe6' 'tree':{'sha':'c1397c050b9d4341e85d04f4b311a1cf382d7961' 'url':'http://localhost/repos/example/git/trees/c1397c050b9d4341e85d04f4b311a1cf382d7961/'} 'url':'http://localhost/repos/example/git/commits/24c57d21498c2fc3b44153713308510d86142fe6/'} 'committer':{'date':'1970-01-01T00:07:00Z' 'email':'<EMAIL>' 'name':'<NAME>'} 'parents':[{'sha':'d982d1115b558a8abe447f8bf46cc3ab8761e19f' 'url':'http://localhost/repos/example/commits/d982d1115b558a8abe447f8bf46cc3ab8761e19f/'}] 'sha':'24c57d21498c2fc3b44153713308510d86142fe6' 'url':'http://localhost/repos/example/commits/24c57d21498c2fc3b44153713308510d86142fe6/'} {'author':{'date':'1970-01-01T00:10:00Z' 'email':'<EMAIL>' 'name':'Ali<NAME>elebrity'} 'commit':{'author':{'date':'1970-01-01T00:10:00Z' 'email':'<EMAIL>' 'name':'Alien Celebrity'} 'committer':{'date':'1970-01-01T00:11:00Z' 'email':'<EMAIL>' 'name':'<NAME>'} 'message':'F' 'parents':[{'sha':'3423be4854bc3316c12f3a9c699e6cd2209fd8ea' 'url':'http://localhost/repos/example/commits/3423be4854bc3316c12f3a9c699e6cd2209fd8ea/'}] 'sha':'1d040035bfb8936bd760ff226cb5c9f2c2b817a3' 'tree':{'sha':'ab220b156431b575f3cb3607644d05954d5e859a' 'url':'http://localhost/repos/example/git/trees/ab220b156431b575f3cb3607644d05954d5e859a/'} 'url':'http://localhost/repos/example/git/commits/1d040035bfb8936bd760ff226cb5c9f2c2b817a3/'} 'committer':{'date':'1970-01-01T00:11:00Z' 'email':'<EMAIL>' 'name':'<NAME>'} 'parents':[{'sha':'3423be4854bc3316c12f3a9c699e6cd2209fd8ea' 'url':'http://localhost/repos/example/commits/3423be4854bc3316c12f3a9c699e6cd2209fd8ea/'}] 'sha':'1d040035bfb8936bd760ff226cb5c9f2c2b817a3' 'url':'http://localhost/repos/example/commits/1d040035bfb8936bd760ff226cb5c9f2c2b817a3/'} 
{'author':{'date':'1970-01-01T00:12:00Z' 'email':'<EMAIL>' 'name':'<NAME>'} 'commit':{'author':{'date':'1970-01-01T00:12:00Z' 'email':'<EMAIL>' 'name':'<NAME>'} 'committer':{'date':'1970-01-01T00:13:00Z' 'email':'<EMAIL>' 'name':'<NAME>'} 'message':'G' 'parents':[{'sha':'1d040035bfb8936bd760ff226cb5c9f2c2b817a3' 'url':'http://localhost/repos/example/commits/1d040035bfb8936bd760ff226cb5c9f2c2b817a3/'}] 'sha':'cb46532dc9a103d31a0183b5d8a5a08c09f4b94d' 'tree':{'sha':'95146dee2fed3d5783f625fe4e48202dae4606ef' 'url':'http://localhost/repos/example/git/trees/95146dee2fed3d5783f625fe4e48202dae4606ef/'} 'url':'http://localhost/repos/example/git/commits/cb46532dc9a103d31a0183b5d8a5a08c09f4b94d/'} 'committer':{'date':'1970-01-01T00:13:00Z' 'email':'<EMAIL>' 'name':'<NAME>'} 'parents':[{'sha':'1d040035bfb8936bd760ff226cb5c9f2c2b817a3' 'url':'http://localhost/repos/example/commits/1d040035bfb8936bd760ff226cb5c9f2c2b817a3/'}] 'sha':'cb46532dc9a103d31a0183b5d8a5a08c09f4b94d' 'url':'http://localhost/repos/example/commits/cb46532dc9a103d31a0183b5d8a5a08c09f4b94d/'} {'author':{'date':'1970-01-01T00:16:00Z' 'email':'<EMAIL>' 'name':'<NAME>brity'} 'commit':{'author':{'date':'1970-01-01T00:16:00Z' 'email':'<EMAIL>' 'name':'<NAME>'} 'committer':{'date':'1970-01-01T00:17:00Z' 'email':'<EMAIL>' 'name':'<NAME>'} 'message':'I' 'parents':[{'sha':'24c57d21498c2fc3b44153713308510d86142fe6' 'url':'http://localhost/repos/example/commits/24c57d21498c2fc3b44153713308510d86142fe6/'} {'sha':'cb46532dc9a103d31a0183b5d8a5a08c09f4b94d' 'url':'http://localhost/repos/example/commits/cb46532dc9a103d31a0183b5d8a5a08c09f4b94d/'}] 'sha':'1622ddc3695d1263c629bdbb0bcdf235510ee068' 'tree':{'sha':'0ed66f14f8548241624bcbd1d39d3d06f277a9b4' 'url':'http://localhost/repos/example/git/trees/0ed66f14f8548241624bcbd1d39d3d06f277a9b4/'} 'url':'http://localhost/repos/example/git/commits/1622ddc3695d1263c629bdbb0bcdf235510ee068/'} 'committer':{'date':'1970-01-01T00:17:00Z' 'email':'<EMAIL>' 'name':'<NAME>'} 'parents':[{'sha':'24c57d21498c2fc3b44153713308510d86142fe6' 'url':'http://localhost/repos/example/commits/24c57d21498c2fc3b44153713308510d86142fe6/'} {'sha':'cb46532dc9a103d31a0183b5d8a5a08c09f4b94d' 'url':'http://localhost/repos/example/commits/cb46532dc9a103d31a0183b5d8a5a08c09f4b94d/'}] 'sha':'1622ddc3695d1263c629bdbb0bcdf235510ee068' 'url':'http://localhost/repos/example/commits/1622ddc3695d1263c629bdbb0bcdf235510ee068/'}]})<block_end><block_end><def_stmt>test_inclusion_exclusion_chronological_ordering self<block_start><with_stmt>self._example_repo(b_before_e=<false>)<as>commits<block_start>resp=self.client.get('/repos/example/branches/I/unique-commits/sorted/chronological/')<line_sep>self.assert200(resp)<line_sep>self.assertEqual(resp.json {'commits':[{'author':{'date':'1970-01-01T00:04:00Z' 'email':'<EMAIL>' 'name':'<NAME>'} 'commit':{'author':{'date':'1970-01-01T00:04:00Z' 'email':'<EMAIL>' 'name':'<NAME>'} 'committer':{'date':'1970-01-01T00:05:00Z' 'email':'<EMAIL>' 'name':'<NAME>'} 'message':'F' 'parents':[{'sha':'2d5ea1e6bf086d0ce420180f892dbf9e08d9835e' 'url':'http://localhost/repos/example/commits/2d5ea1e6bf086d0ce420180f892dbf9e08d9835e/'}] 'sha':'6ccf9dc00992617fa4206ff67ffed2dcb895135c' 'tree':{'sha':'ab220b156431b575f3cb3607644d05954d5e859a' 'url':'http://localhost/repos/example/git/trees/ab220b156431b575f3cb3607644d05954d5e859a/'} 'url':'http://localhost/repos/example/git/commits/6ccf9dc00992617fa4206ff67ffed2dcb895135c/'} 'committer':{'date':'1970-01-01T00:05:00Z' 'email':'<EMAIL>' 'name':'<NAME>'} 
'parents':[{'sha':'2d5ea1e6bf086d0ce420180f892dbf9e08d9835e' 'url':'http://localhost/repos/example/commits/2d5ea1e6bf086d0ce420180f892dbf9e08d9835e/'}] 'sha':'6ccf9dc00992617fa4206ff67ffed2dcb895135c' 'url':'http://localhost/repos/example/commits/6ccf9dc00992617fa4206ff67ffed2dcb895135c/'} {'author':{'date':'1970-01-01T00:06:00Z' 'email':'<EMAIL>' 'name':'<NAME>'} 'commit':{'author':{'date':'1970-01-01T00:06:00Z' 'email':'<EMAIL>' 'name':'<NAME>'} 'committer':{'date':'1970-01-01T00:07:00Z' 'email':'<EMAIL>' 'name':'<NAME>'} 'message':'G' 'parents':[{'sha':'6ccf9dc00992617fa4206ff67ffed2dcb895135c' 'url':'http://localhost/repos/example/commits/6ccf9dc00992617fa4206ff67ffed2dcb895135c/'}] 'sha':'aea62655228b0b0d33f6226cf038607cfc3db8bd' 'tree':{'sha':'95146dee2fed3d5783f625fe4e48202dae4606ef' 'url':'http://localhost/repos/example/git/trees/95146dee2fed3d5783f625fe4e48202dae4606ef/'} 'url':'http://localhost/repos/example/git/commits/aea62655228b0b0d33f6226cf038607cfc3db8bd/'} 'committer':{'date':'1970-01-01T00:07:00Z' 'email':'<EMAIL>' 'name':'<NAME>'} 'parents':[{'sha':'6ccf9dc00992617fa4206ff67ffed2dcb895135c' 'url':'http://localhost/repos/example/commits/6ccf9dc00992617fa4206ff67ffed2dcb895135c/'}] 'sha':'aea62655228b0b0d33f6226cf038607cfc3db8bd' 'url':'http://localhost/repos/example/commits/aea62655228b0b0d33f6226cf038607cfc3db8bd/'} {'author':{'date':'1970-01-01T00:08:00Z' 'email':'<EMAIL>' 'name':'<NAME>'} 'commit':{'author':{'date':'1970-01-01T00:08:00Z' 'email':'<EMAIL>' 'name':'<NAME>'} 'committer':{'date':'1970-01-01T00:09:00Z' 'email':'<EMAIL>' 'name':'<NAME>'} 'message':'B' 'parents':[{'sha':'c655dffe0fed2a78dc5f38c1bc8e5628e2605017' 'url':'http://localhost/repos/example/commits/c655dffe0fed2a78dc5f38c1bc8e5628e2605017/'}] 'sha':'8e994dcc81dd6d4aee6d627f946ef326363360f3' 'tree':{'sha':'1a321342ee655cb18be26a1a9632bb9629fb3642' 'url':'http://localhost/repos/example/git/trees/1a321342ee655cb18be26a1a9632bb9629fb3642/'} 'url':'http://localhost/repos/example/git/commits/8e994dcc81dd6d4aee6d627f946ef326363360f3/'} 'committer':{'date':'1970-01-01T00:09:00Z' 'email':'<EMAIL>' 'name':'<NAME>'} 'parents':[{'sha':'c655dffe0fed2a78dc5f38c1bc8e5628e2605017' 'url':'http://localhost/repos/example/commits/c655dffe0fed2a78dc5f38c1bc8e5628e2605017/'}] 'sha':'8e994dcc81dd6d4aee6d627f946ef326363360f3' 'url':'http://localhost/repos/example/commits/8e994dcc81dd6d4aee6d627f946ef326363360f3/'} {'author':{'date':'1970-01-01T00:10:00Z' 'email':'<EMAIL>' 'name':'<NAME>'} 'commit':{'author':{'date':'1970-01-01T00:10:00Z' 'email':'<EMAIL>' 'name':'<NAME>'} 'committer':{'date':'1970-01-01T00:11:00Z' 'email':'<EMAIL>' 'name':'<NAME>'} 'message':'C' 'parents':[{'sha':'8e994dcc81dd6d4aee6d627f946ef326363360f3' 'url':'http://localhost/repos/example/commits/8e994dcc81dd6d4aee6d627f946ef326363360f3/'}] 'sha':'729ef1b328ac57d209a608c89734043128e3be3a' 'tree':{'sha':'adaf4189f869749deba4ed69005ece57a4c2f19c' 'url':'http://localhost/repos/example/git/trees/adaf4189f869749deba4ed69005ece57a4c2f19c/'} 'url':'http://localhost/repos/example/git/commits/729ef1b328ac57d209a608c89734043128e3be3a/'} 'committer':{'date':'1970-01-01T00:11:00Z' 'email':'<EMAIL>' 'name':'<NAME>'} 'parents':[{'sha':'8e994dcc81dd6d4aee6d627f946ef326363360f3' 'url':'http://localhost/repos/example/commits/8e994dcc81dd6d4aee6d627f946ef326363360f3/'}] 'sha':'729ef1b328ac57d209a608c89734043128e3be3a' 'url':'http://localhost/repos/example/commits/729ef1b328ac57d209a608c89734043128e3be3a/'} {'author':{'date':'1970-01-01T00:12:00Z' 'email':'<EMAIL>' 
'name':'<NAME>'} 'commit':{'author':{'date':'1970-01-01T00:12:00Z' 'email':'<EMAIL>' 'name':'<NAME>'} 'committer':{'date':'1970-01-01T00:13:00Z' 'email':'<EMAIL>' 'name':'<NAME>'} 'message':'D' 'parents':[{'sha':'729ef1b328ac57d209a608c89734043128e3be3a' 'url':'http://localhost/repos/example/commits/729ef1b328ac57d209a608c89734043128e3be3a/'}] 'sha':'842548f32658e940fc054f8c328c7639fc5c9053' 'tree':{'sha':'c1397c050b9d4341e85d04f4b311a1cf382d7961' 'url':'http://localhost/repos/example/git/trees/c1397c050b9d4341e85d04f4b311a1cf382d7961/'} 'url':'http://localhost/repos/example/git/commits/842548f32658e940fc054f8c328c7639fc5c9053/'} 'committer':{'date':'1970-01-01T00:13:00Z' 'email':'<EMAIL>' 'name':'<NAME>'} 'parents':[{'sha':'729ef1b328ac57d209a608c89734043128e3be3a' 'url':'http://localhost/repos/example/commits/729ef1b328ac57d209a608c89734043128e3be3a/'}] 'sha':'842548f32658e940fc054f8c328c7639fc5c9053' 'url':'http://localhost/repos/example/commits/842548f32658e940fc054f8c328c7639fc5c9053/'} {'author':{'date':'1970-01-01T00:16:00Z' 'email':'<EMAIL>' 'name':'<NAME>'} 'commit':{'author':{'date':'1970-01-01T00:16:00Z' 'email':'<EMAIL>' 'name':'<NAME>'} 'committer':{'date':'1970-01-01T00:17:00Z' 'email':'<EMAIL>' 'name':'<NAME>'} 'message':'I' 'parents':[{'sha':'842548f32658e940fc054f8c328c7639fc5c9053' 'url':'http://localhost/repos/example/commits/842548f32658e940fc054f8c328c7639fc5c9053/'} {'sha':'aea62655228b0b0d33f6226cf038607cfc3db8bd' 'url':'http://localhost/repos/example/commits/aea62655228b0b0d33f6226cf038607cfc3db8bd/'}] 'sha':'02d221a9d6f0619a77cbf1be4ac8a27057c2b4da' 'tree':{'sha':'0ed66f14f8548241624bcbd1d39d3d06f277a9b4' 'url':'http://localhost/repos/example/git/trees/0ed66f14f8548241624bcbd1d39d3d06f277a9b4/'} 'url':'http://localhost/repos/example/git/commits/02d221a9d6f0619a77cbf1be4ac8a27057c2b4da/'} 'committer':{'date':'1970-01-01T00:17:00Z' 'email':'<EMAIL>' 'name':'<NAME>'} 'parents':[{'sha':'842548f32658e940fc054f8c328c7639fc5c9053' 'url':'http://localhost/repos/example/commits/842548f32658e940fc054f8c328c7639fc5c9053/'} {'sha':'aea62655228b0b0d33f6226cf038607cfc3db8bd' 'url':'http://localhost/repos/example/commits/aea62655228b0b0d33f6226cf038607cfc3db8bd/'}] 'sha':'02d221a9d6f0619a77cbf1be4ac8a27057c2b4da' 'url':'http://localhost/repos/example/commits/02d221a9d6f0619a77cbf1be4ac8a27057c2b4da/'}]})<block_end><block_end><block_end><if_stmt>__name__<eq>'__main__'<block_start>unittest.main()<block_end>
# from __future__ import print_function <import_stmt>numpy<as>np<import_from_stmt>PIL Image<import_stmt>inspect re<import_stmt>numpy<as>np<import_stmt>torch<import_stmt>os<import_stmt>collections<import_from_stmt>torch.optim lr_scheduler<import_stmt>torch.nn.init<as>init<line_sep># Converts a Tensor into a Numpy array # |imtype|: the desired type of the converted numpy array <def_stmt>tensor2im image_tensor imtype=np.uint8<block_start>image_numpy=image_tensor[0].cpu().float().numpy()<line_sep>image_numpy=(np.transpose(image_numpy (1 2 0))+1)/2.0<times>255.0<line_sep>image_numpy=np.maximum(image_numpy 0)<line_sep>image_numpy=np.minimum(image_numpy 255)<line_sep><return>image_numpy.astype(imtype)<block_end><def_stmt>atten2im image_tensor imtype=np.uint8<block_start>image_tensor=image_tensor[0]<line_sep>image_tensor=torch.cat((image_tensor image_tensor image_tensor) 0)<line_sep>image_numpy=image_tensor.cpu().float().numpy()<line_sep>image_numpy=(np.transpose(image_numpy (1 2 0)))<times>255.0<line_sep>image_numpy=image_numpy/(image_numpy.max()/255.0)<line_sep><return>image_numpy.astype(imtype)<block_end><def_stmt>latent2im image_tensor imtype=np.uint8# image_tensor = (image_tensor - torch.min(image_tensor))/(torch.max(image_tensor)-torch.min(image_tensor)) <block_start>image_numpy=image_tensor[0].cpu().float().numpy()<line_sep>image_numpy=(np.transpose(image_numpy (1 2 0)))<times>255.0<line_sep>image_numpy=np.maximum(image_numpy 0)<line_sep>image_numpy=np.minimum(image_numpy 255)<line_sep><return>image_numpy.astype(imtype)<block_end><def_stmt>max2im image_1 image_2 imtype=np.uint8<block_start>image_1=image_1[0].cpu().float().numpy()<line_sep>image_2=image_2[0].cpu().float().numpy()<line_sep>image_1=(np.transpose(image_1 (1 2 0))+1)/2.0<times>255.0<line_sep>image_2=(np.transpose(image_2 (1 2 0)))<times>255.0<line_sep>output=np.maximum(image_1 image_2)<line_sep>output=np.maximum(output 0)<line_sep>output=np.minimum(output 255)<line_sep><return>output.astype(imtype)<block_end><def_stmt>variable2im image_tensor imtype=np.uint8<block_start>image_numpy=image_tensor[0].data.cpu().float().numpy()<line_sep>image_numpy=(np.transpose(image_numpy (1 2 0))+1)/2.0<times>255.0<line_sep><return>image_numpy.astype(imtype)<block_end><def_stmt>diagnose_network net name='network'<block_start>mean=0.0<line_sep>count=0<for_stmt>param net.parameters()<block_start><if_stmt>param.grad<is><not><none><block_start>mean<augadd>torch.mean(torch.abs(param.grad.data))<line_sep>count<augadd>1<block_end><block_end><if_stmt>count<g>0<block_start>mean=mean/count<block_end>print(name)<line_sep>print(mean)<block_end><def_stmt>save_image image_numpy image_path<block_start>image_pil=Image.fromarray(image_numpy)<line_sep>image_pil.save(image_path)<block_end><def_stmt>info object spacing=10 collapse=1<block_start>"""Print methods and doc strings. 
Takes module, class, list, dictionary, or string."""<line_sep>methodList=[e<for>e dir(object)<if>isinstance(getattr(object e) collections.Callable)]<line_sep>processFunc=collapse<and>(<lambda>s:" ".join(s.split()))<or>(<lambda>s:s)<line_sep>print("\n".join(["%s %s"%(method.ljust(spacing) processFunc(str(getattr(object method).__doc__)))<for>method methodList]))<block_end><def_stmt>varname p<block_start><for_stmt>line inspect.getframeinfo(inspect.currentframe().f_back)[3]<block_start>m=re.search(r'\bvarname\s*\(\s*([A-Za-z_][A-Za-z0-9_]*)\s*\)' line)<if_stmt>m<block_start><return>m.group(1)<block_end><block_end><block_end><def_stmt>print_numpy x val=<true> shp=<false><block_start>x=x.astype(np.float64)<if_stmt>shp<block_start>print('shape,' x.shape)<block_end><if_stmt>val<block_start>x=x.flatten()<line_sep>print('mean = %3.3f, min = %3.3f, max = %3.3f, median = %3.3f, std=%3.3f'%(np.mean(x) np.min(x) np.max(x) np.median(x) np.std(x)))<block_end><block_end><def_stmt>mkdirs paths<block_start><if_stmt>isinstance(paths list)<and><not>isinstance(paths str)<block_start><for_stmt>path paths<block_start>mkdir(path)<block_end><block_end><else_stmt><block_start>mkdir(paths)<block_end><block_end><def_stmt>mkdir path<block_start><if_stmt><not>os.path.exists(path)<block_start>os.makedirs(path)<block_end><block_end><def_stmt>get_model_list dirname key<block_start><if_stmt>os.path.exists(dirname)<is><false><block_start><return><none><block_end>gen_models=[os.path.join(dirname f)<for>f os.listdir(dirname)<if>os.path.isfile(os.path.join(dirname f))<and>key<in>f<and>".pt"<in>f]<if_stmt>gen_models<is><none><block_start><return><none><block_end>gen_models.sort()<line_sep>last_model_name=gen_models[-1]<line_sep><return>last_model_name<block_end><def_stmt>load_vgg16 model_dir<block_start>""" Use the model from https://github.com/abhiskk/fast-neural-style/blob/master/neural_style/utils.py """<if_stmt><not>os.path.exists(model_dir)<block_start>os.mkdir(model_dir)<block_end><if_stmt><not>os.path.exists(os.path.join(model_dir 'vgg16.weight'))<block_start><if_stmt><not>os.path.exists(os.path.join(model_dir 'vgg16.t7'))<block_start>os.system('wget https://www.dropbox.com/s/76l3rt4kyi3s8x7/vgg16.t7?dl=1 -O '+os.path.join(model_dir 'vgg16.t7'))<block_end>vgglua=load_lua(os.path.join(model_dir 'vgg16.t7'))<line_sep>vgg=Vgg16()<for_stmt>(src dst) zip(vgglua.parameters()[0] vgg.parameters())<block_start>dst.data[:]=src<block_end>torch.save(vgg.state_dict() os.path.join(model_dir 'vgg16.weight'))<block_end>vgg=Vgg16()<line_sep>vgg.load_state_dict(torch.load(os.path.join(model_dir 'vgg16.weight')))<line_sep><return>vgg<block_end><def_stmt>vgg_preprocess batch<block_start>tensortype=type(batch.data)<line_sep>(r g b)=torch.chunk(batch 3 dim=1)<line_sep>batch=torch.cat((b g r) dim=1)# convert RGB to BGR batch=(batch+1)<times>255<times>0.5# [-1, 1] -> [0, 255] mean=tensortype(batch.data.size())<line_sep>mean[: 0 : :]=103.939<line_sep>mean[: 1 : :]=116.779<line_sep>mean[: 2 : :]=123.680<line_sep>batch=batch.sub(Variable(mean))# subtract mean <return>batch<block_end><def_stmt>get_scheduler optimizer hyperparameters iterations=-1<block_start><if_stmt>'lr_policy'<not><in>hyperparameters<or>hyperparameters['lr_policy']<eq>'constant'<block_start>scheduler=<none># constant scheduler <block_end><elif_stmt>hyperparameters['lr_policy']<eq>'step'<block_start>scheduler=lr_scheduler.StepLR(optimizer step_size=hyperparameters['step_size'] gamma=hyperparameters['gamma'] 
last_epoch=iterations)<block_end><else_stmt><block_start><raise>NotImplementedError('learning rate policy [%s] is not implemented'%hyperparameters['lr_policy'])<block_end><return>scheduler<block_end><import_stmt>math<def_stmt>weights_init init_type='gaussian'<block_start><def_stmt>init_fun m<block_start>classname=m.__class__.__name__<if_stmt>(classname.find('Conv')<eq>0<or>classname.find('Linear')<eq>0)<and>hasattr(m 'weight')# print m.__class__.__name__ 
<block_start><if_stmt>init_type<eq>'gaussian'<block_start>init.normal(m.weight.data 0.0 0.02)<block_end><elif_stmt>init_type<eq>'xavier'<block_start>init.xavier_normal(m.weight.data gain=math.sqrt(2))<block_end><elif_stmt>init_type<eq>'kaiming'<block_start>init.kaiming_normal(m.weight.data a=0 mode='fan_in')<block_end><elif_stmt>init_type<eq>'orthogonal'<block_start>init.orthogonal(m.weight.data gain=math.sqrt(2))<block_end><elif_stmt>init_type<eq>'default'<block_start><pass><block_end><else_stmt><block_start><assert_stmt>0 "Unsupported initialization: {}".format(init_type)<block_end><if_stmt>hasattr(m 'bias')<and>m.bias<is><not><none><block_start>init.constant(m.bias.data 0.0)<block_end><block_end><block_end><return>init_fun<block_end>
# Generated by Django 3.1.4 on 2021-01-02 18:09 <import_from_stmt>django.db migrations<def_stmt>drop_old_contenttypes apps schema_editor<block_start>apps.get_model('contenttypes.ContentType').objects.filter(app_label='onetime').delete()<block_end><class_stmt>Migration(migrations.Migration)<block_start>dependencies=[('app' '0001_CourseMailLog') ]<line_sep>operations=[migrations.RunPython(drop_old_contenttypes) migrations.RunSQL('DROP TABLE IF EXISTS onetime_token;') ]<block_end>
<import_from_stmt>conans ConanFile CMake<class_stmt>DI(ConanFile)<block_start>name="DI"<line_sep>version="latest"<line_sep>url="https://github.com/boost-ext/di"<line_sep>license="Boost"<line_sep>description="[Boost::ext].DI - C++14 Dependency Injection Library"<line_sep>settings="os" "compiler" "arch" "build_type"<line_sep>exports_sources="include/*"<line_sep>no_copy_source=<true><def_stmt>package self<block_start>self.copy("*.hpp")<block_end><block_end>
# This code is part of Qiskit. # # (C) Copyright IBM 2020. # # This code is licensed under the Apache License, Version 2.0. You may # obtain a copy of this license in the LICENSE.txt file in the root directory # of this source tree or at http://www.apache.org/licenses/LICENSE-2.0. # # Any modifications or derivative works of this code must retain this # copyright notice, and modified files need to carry a notice indicating # that they have been altered from the originals. """Tests for core modules of pulse drawer."""<import_from_stmt>qiskit pulse<import_from_stmt>qiskit.test QiskitTestCase<import_from_stmt>qiskit.visualization.pulse_v2 layouts device_info<class_stmt>TestChannelArrangement(QiskitTestCase)<block_start>"""Tests for channel mapping functions."""<def_stmt>setUp self<arrow><none><block_start>super().setUp()<line_sep>self.channels=[pulse.DriveChannel(0) pulse.DriveChannel(1) pulse.DriveChannel(2) pulse.MeasureChannel(1) pulse.MeasureChannel(2) pulse.AcquireChannel(1) pulse.AcquireChannel(2) pulse.ControlChannel(0) pulse.ControlChannel(2) pulse.ControlChannel(5) ]<line_sep>self.formatter={"control.show_acquire_channel":<true>}<line_sep>self.device=device_info.OpenPulseBackendInfo(name="test" dt=1 channel_frequency_map={pulse.DriveChannel(0):5.0e9 pulse.DriveChannel(1):5.1e9 pulse.DriveChannel(2):5.2e9 pulse.MeasureChannel(1):7.0e9 pulse.MeasureChannel(1):7.1e9 pulse.MeasureChannel(2):7.2e9 pulse.ControlChannel(0):5.0e9 pulse.ControlChannel(1):5.1e9 pulse.ControlChannel(2):5.2e9 pulse.ControlChannel(3):5.3e9 pulse.ControlChannel(4):5.4e9 pulse.ControlChannel(5):5.5e9 } qubit_channel_map={0:[pulse.DriveChannel(0) pulse.MeasureChannel(0) pulse.AcquireChannel(0) pulse.ControlChannel(0) ] 1:[pulse.DriveChannel(1) pulse.MeasureChannel(1) pulse.AcquireChannel(1) pulse.ControlChannel(1) ] 2:[pulse.DriveChannel(2) pulse.MeasureChannel(2) pulse.AcquireChannel(2) pulse.ControlChannel(2) pulse.ControlChannel(3) pulse.ControlChannel(4) ] 3:[pulse.DriveChannel(3) pulse.MeasureChannel(3) pulse.AcquireChannel(3) pulse.ControlChannel(5) ] } )<block_end><def_stmt>test_channel_type_grouped_sort self<block_start>"""Test channel_type_grouped_sort."""<line_sep>out_layout=layouts.channel_type_grouped_sort(self.channels formatter=self.formatter device=self.device)<line_sep>ref_channels=[[pulse.DriveChannel(0)] [pulse.DriveChannel(1)] [pulse.DriveChannel(2)] [pulse.ControlChannel(0)] [pulse.ControlChannel(2)] [pulse.ControlChannel(5)] [pulse.MeasureChannel(1)] [pulse.MeasureChannel(2)] [pulse.AcquireChannel(1)] [pulse.AcquireChannel(2)] ]<line_sep>ref_names=["D0" "D1" "D2" "U0" "U2" "U5" "M1" "M2" "A1" "A2"]<line_sep>ref=list(zip(ref_names ref_channels))<line_sep>self.assertListEqual(list(out_layout) ref)<block_end><def_stmt>test_channel_index_sort self<block_start>"""Test channel_index_grouped_sort."""<line_sep>out_layout=layouts.channel_index_grouped_sort(self.channels formatter=self.formatter device=self.device)<line_sep>ref_channels=[[pulse.DriveChannel(0)] [pulse.ControlChannel(0)] [pulse.DriveChannel(1)] [pulse.MeasureChannel(1)] [pulse.AcquireChannel(1)] [pulse.DriveChannel(2)] [pulse.ControlChannel(2)] [pulse.MeasureChannel(2)] [pulse.AcquireChannel(2)] [pulse.ControlChannel(5)] ]<line_sep>ref_names=["D0" "U0" "D1" "M1" "A1" "D2" "U2" "M2" "A2" "U5"]<line_sep>ref=list(zip(ref_names ref_channels))<line_sep>self.assertListEqual(list(out_layout) ref)<block_end><def_stmt>test_channel_index_sort_grouped_control self<block_start>"""Test 
channel_index_grouped_sort_u."""<line_sep>out_layout=layouts.channel_index_grouped_sort_u(self.channels formatter=self.formatter device=self.device)<line_sep>ref_channels=[[pulse.DriveChannel(0)] [pulse.DriveChannel(1)] [pulse.MeasureChannel(1)] [pulse.AcquireChannel(1)] [pulse.DriveChannel(2)] [pulse.MeasureChannel(2)] [pulse.AcquireChannel(2)] [pulse.ControlChannel(0)] [pulse.ControlChannel(2)] [pulse.ControlChannel(5)] ]<line_sep>ref_names=["D0" "D1" "M1" "A1" "D2" "M2" "A2" "U0" "U2" "U5"]<line_sep>ref=list(zip(ref_names ref_channels))<line_sep>self.assertListEqual(list(out_layout) ref)<block_end><def_stmt>test_channel_qubit_index_sort self<block_start>"""Test qubit_index_sort."""<line_sep>out_layout=layouts.qubit_index_sort(self.channels formatter=self.formatter device=self.device)<line_sep>ref_channels=[[pulse.DriveChannel(0) pulse.ControlChannel(0)] [pulse.DriveChannel(1) pulse.MeasureChannel(1)] [pulse.DriveChannel(2) pulse.MeasureChannel(2) pulse.ControlChannel(2)] [pulse.ControlChannel(5)] ]<line_sep>ref_names=["Q0" "Q1" "Q2" "Q3"]<line_sep>ref=list(zip(ref_names ref_channels))<line_sep>self.assertListEqual(list(out_layout) ref)<block_end><block_end><class_stmt>TestHorizontalAxis(QiskitTestCase)<block_start>"""Tests for horizontal axis mapping functions."""<def_stmt>test_time_map_in_ns self<block_start>"""Test for time_map_in_ns."""<line_sep>time_window=(0 1000)<line_sep>breaks=[(100 200)]<line_sep>dt=1e-9<line_sep>haxis=layouts.time_map_in_ns(time_window=time_window axis_breaks=breaks dt=dt)<line_sep>self.assertListEqual(list(haxis.window) [0 900])<line_sep>self.assertListEqual(list(haxis.axis_break_pos) [100])<line_sep>ref_axis_map={0.0:"0" 180.0:"280" 360.0:"460" 540.0:"640" 720.0:"820" 900.0:"1000" }<line_sep>self.assertDictEqual(haxis.axis_map ref_axis_map)<line_sep>self.assertEqual(haxis.label "Time (ns)")<block_end><def_stmt>test_time_map_in_without_dt self<block_start>"""Test for time_map_in_ns when dt is not provided."""<line_sep>time_window=(0 1000)<line_sep>breaks=[(100 200)]<line_sep>dt=<none><line_sep>haxis=layouts.time_map_in_ns(time_window=time_window axis_breaks=breaks dt=dt)<line_sep>self.assertListEqual(list(haxis.window) [0 900])<line_sep>self.assertListEqual(list(haxis.axis_break_pos) [100])<line_sep>ref_axis_map={0.0:"0" 180.0:"280" 360.0:"460" 540.0:"640" 720.0:"820" 900.0:"1000" }<line_sep>self.assertDictEqual(haxis.axis_map ref_axis_map)<line_sep>self.assertEqual(haxis.label "System cycle time (dt)")<block_end><block_end><class_stmt>TestFigureTitle(QiskitTestCase)<block_start>"""Tests for figure title generation."""<def_stmt>setUp self<arrow><none><block_start>super().setUp()<line_sep>self.device=device_info.OpenPulseBackendInfo(name="test_backend" dt=1e-9)<line_sep>self.prog=pulse.Schedule(name="test_sched")<line_sep>self.prog.insert(0 pulse.Play(pulse.Constant(100 0.1) pulse.DriveChannel(0)) inplace=<true>)<block_end><def_stmt>detail_title self<block_start>"""Test detail_title layout function."""<line_sep>ref_title="Name: test_sched, Duration: 100.0 ns, Backend: test_backend"<line_sep>out=layouts.detail_title(self.prog self.device)<line_sep>self.assertEqual(out ref_title)<block_end><def_stmt>empty_title self<block_start>"""Test empty_title layout function."""<line_sep>ref_title=""<line_sep>out=layouts.detail_title(self.prog self.device)<line_sep>self.assertEqual(out ref_title)<block_end><block_end>
""" Fuzz tests an object after the default construction to make sure it does not crash lldb. """<import_stmt>lldb<def_stmt>fuzz_obj obj<block_start>obj.GetStartAddress()<line_sep>obj.GetEndAddress()<line_sep>obj.GetFileSpec()<line_sep>obj.GetLine()<line_sep>obj.GetColumn()<line_sep>obj.GetDescription(lldb.SBStream())<block_end>
<import_from_stmt>pydantic BaseModel validator<import_from_stmt>app.excpetions.ParamsException ParamsError<class_stmt>UserDto(BaseModel)<block_start>name:str<line_sep>password:str<line_sep>username:str<line_sep>email:str<line_sep>@validator('name' 'password' 'username' 'email')<def_stmt>field_not_empty cls v<block_start><if_stmt>isinstance(v str)<and>len(v.strip())<eq>0<block_start><raise>ParamsError("cannot be empty")<block_end><return>v<block_end><block_end><class_stmt>UserForm(BaseModel)<block_start>username:str<line_sep>password:str<line_sep>@validator('password' 'username')<def_stmt>name_not_empty cls v<block_start><if_stmt>isinstance(v str)<and>len(v.strip())<eq>0<block_start><raise>ParamsError("cannot be empty")<block_end><return>v<block_end><block_end>
# -*- coding: utf-8 -*- <import_from_stmt>admin_interface.compat gettext_lazy<as>_<import_from_stmt>admin_interface.models Theme<import_from_stmt>django.contrib admin<class_stmt>ThemeAdmin(admin.ModelAdmin)<block_start>list_display=('name' 'active' )<line_sep>list_editable=('active' )<line_sep>list_per_page=100<line_sep>show_full_result_count=<false><line_sep>fieldsets=((<none> {'classes':('wide' ) 'fields':('name' 'active' )}) (_('Environment') {'classes':('wide' ) 'fields':('env_name' 'env_color' 'env_visible_in_header' 'env_visible_in_favicon' )}) (_('Language chooser') {'classes':('wide' ) 'fields':('language_chooser_active' 'language_chooser_display' )}) (_('Logo') {'classes':('wide' ) 'fields':('logo' 'logo_max_width' 'logo_max_height' 'logo_color' 'logo_visible' )}) (_('Favicon') {'classes':('wide' ) 'fields':('favicon' )}) (_('Title') {'classes':('wide' ) 'fields':('title' 'title_color' 'title_visible' )}) (_('Header') {'classes':('wide' ) 'fields':('css_header_background_color' 'css_header_text_color' 'css_header_link_color' 'css_header_link_hover_color' )}) (_('Breadcrumbs / Module headers') {'classes':('wide' ) 'fields':('css_module_background_color' 'css_module_background_selected_color' 'css_module_text_color' 'css_module_link_color' 'css_module_link_selected_color' 'css_module_link_hover_color' 'css_module_rounded_corners' )}) (_('Generic Links') {'classes':('wide' ) 'fields':('css_generic_link_color' 'css_generic_link_hover_color' )}) (_('Save Buttons') {'classes':('wide' ) 'fields':('css_save_button_background_color' 'css_save_button_background_hover_color' 'css_save_button_text_color' )}) (_('Delete Buttons') {'classes':('wide' ) 'fields':('css_delete_button_background_color' 'css_delete_button_background_hover_color' 'css_delete_button_text_color' )}) (_('Related Modal') {'classes':('wide' ) 'fields':('related_modal_active' 'related_modal_background_color' 'related_modal_background_opacity' 'related_modal_rounded_corners' 'related_modal_close_button_visible' )}) (_('Form Controls') {'classes':('wide' ) 'fields':('form_submit_sticky' 'form_pagination_sticky' )}) (_('List Filter') {'classes':('wide' ) 'fields':('list_filter_dropdown' 'list_filter_sticky' )}) (_('Recent Actions') {'classes':('wide' ) 'fields':('recent_actions_visible' )}) )<line_sep>save_on_top=<true><block_end>admin.site.register(Theme ThemeAdmin)<line_sep>
""" Test stop hook functionality """<import_stmt>lldb<import_stmt>lldbsuite.test.lldbutil<as>lldbutil<import_from_stmt>lldbsuite.test.lldbtest *<import_from_stmt>lldbsuite.test.decorators *<class_stmt>TestStopHooks(TestBase)<block_start>mydir=TestBase.compute_mydir(__file__)<line_sep># If your test case doesn't stress debug info, the # set this to true. That way it won't be run once for # each debug info format. NO_DEBUG_INFO_TESTCASE=<true><def_stmt>setUp self<block_start>TestBase.setUp(self)<line_sep>self.build()<line_sep>self.main_source_file=lldb.SBFileSpec("main.c")<line_sep>full_path=os.path.join(self.getSourceDir() "main.c")<line_sep>self.main_start_line=line_number(full_path "main()")<block_end><def_stmt>test_bad_handler self<block_start>"""Test that we give a good error message when the handler is bad"""<line_sep>self.script_setup()<line_sep>result=lldb.SBCommandReturnObject()<line_sep># First try the wrong number of args handler: command="target stop-hook add -P stop_hook.bad_handle_stop"<line_sep>self.interp.HandleCommand(command result)<line_sep>self.assertFalse(result.Succeeded() "Set the target stop hook")<line_sep>self.assertIn("Wrong number of args" result.GetError() "Got the wrong number of args error")<line_sep># Next the no handler at all handler: command="target stop-hook add -P stop_hook.no_handle_stop"<line_sep>self.interp.HandleCommand(command result)<line_sep>self.assertFalse(result.Succeeded() "Set the target stop hook")<line_sep>self.assertIn('Class "stop_hook.no_handle_stop" is missing the required handle_stop callback' result.GetError() "Got the right error")<block_end><def_stmt>test_stop_hooks_scripted self<block_start>"""Test that a scripted stop hook works with no specifiers"""<line_sep>self.stop_hooks_scripted(5)<block_end><def_stmt>test_stop_hooks_scripted_right_func self<block_start>"""Test that a scripted stop hook fires when there is a function match"""<line_sep>self.stop_hooks_scripted(5 "-n step_out_of_me")<block_end><def_stmt>test_stop_hooks_scripted_wrong_func self<block_start>"""Test that a scripted stop hook doesn't fire when the function does not match"""<line_sep>self.stop_hooks_scripted(0 "-n main")<block_end><def_stmt>test_stop_hooks_scripted_right_lines self<block_start>"""Test that a scripted stop hook fires when there is a function match"""<line_sep>self.stop_hooks_scripted(5 "-f main.c -l 1 -e %d"%(self.main_start_line))<block_end><def_stmt>test_stop_hooks_scripted_wrong_lines self<block_start>"""Test that a scripted stop hook doesn't fire when the function does not match"""<line_sep>self.stop_hooks_scripted(0 "-f main.c -l %d -e 100"%(self.main_start_line))<block_end><def_stmt>test_stop_hooks_scripted_auto_continue self<block_start>"""Test that the --auto-continue flag works"""<line_sep>self.do_test_auto_continue(<false>)<block_end><def_stmt>test_stop_hooks_scripted_return_false self<block_start>"""Test that the returning False from a stop hook works"""<line_sep>self.do_test_auto_continue(<true>)<block_end><def_stmt>do_test_auto_continue self return_true<block_start>"""Test that auto-continue works."""<line_sep># We set auto-continue to 1 but the stop hook only applies to step_out_of_me, # so we should end up stopped in main, having run the expression only once. 
self.script_setup()<line_sep>result=lldb.SBCommandReturnObject()<if_stmt>return_true<block_start>command="target stop-hook add -P stop_hook.stop_handler -k increment -v 5 -k return_false -v 1 -n step_out_of_me"<block_end><else_stmt><block_start>command="target stop-hook add -G 1 -P stop_hook.stop_handler -k increment -v 5 -n step_out_of_me"<block_end>self.interp.HandleCommand(command result)<line_sep>self.assertTrue(result.Succeeded "Set the target stop hook")<line_sep># First run to main. If we go straight to the first stop hook hit, # run_to_source_breakpoint will fail because we aren't at original breakpoint (target process thread bkpt)=lldbutil.run_to_source_breakpoint(self "Stop here first" self.main_source_file)<line_sep># Now set the breakpoint on step_out_of_me, and make sure we run the # expression, then continue back to main. bkpt=target.BreakpointCreateBySourceRegex("Set a breakpoint here and step out" self.main_source_file)<line_sep>self.assertNotEqual(bkpt.GetNumLocations() 0 "Got breakpoints in step_out_of_me")<line_sep>process.Continue()<line_sep>var=target.FindFirstGlobalVariable("g_var")<line_sep>self.assertTrue(var.IsValid())<line_sep>self.assertEqual(var.GetValueAsUnsigned() 6 "Updated g_var")<line_sep>func_name=process.GetSelectedThread().frames[0].GetFunctionName()<line_sep>self.assertEqual("main" func_name "Didn't stop at the expected function.")<block_end><def_stmt>script_setup self<block_start>self.interp=self.dbg.GetCommandInterpreter()<line_sep>result=lldb.SBCommandReturnObject()<line_sep># Bring in our script file: script_name=os.path.join(self.getSourceDir() "stop_hook.py")<line_sep>command="command script import "+script_name<line_sep>self.interp.HandleCommand(command result)<line_sep>self.assertTrue(result.Succeeded() "com scr imp failed: %s"%(result.GetError()))<line_sep># set a breakpoint at the end of main to catch our auto-continue tests. # Do it in the dummy target so it will get copied to our target even when # we don't have a chance to stop. dummy_target=self.dbg.GetDummyTarget()<line_sep>dummy_target.BreakpointCreateBySourceRegex("return result" self.main_source_file)<block_end><def_stmt>stop_hooks_scripted self g_var_value specifier=<none><block_start>self.script_setup()<line_sep>result=lldb.SBCommandReturnObject()<line_sep>command="target stop-hook add -P stop_hook.stop_handler -k increment -v 5 "<if_stmt>specifier<block_start>command<augadd>specifier<block_end>self.interp.HandleCommand(command result)<line_sep>self.assertTrue(result.Succeeded "Set the target stop hook")<line_sep>(target process thread bkpt)=lldbutil.run_to_source_breakpoint(self "Set a breakpoint here" self.main_source_file)<line_sep># At this point we've hit our stop hook so we should have run our expression, # which increments g_var by the amount specified by the increment key's value. <while_stmt>process.GetState()<eq>lldb.eStateRunning<block_start><continue><block_end>var=target.FindFirstGlobalVariable("g_var")<line_sep>self.assertTrue(var.IsValid())<line_sep>self.assertEqual(var.GetValueAsUnsigned() g_var_value "Updated g_var")<block_end><block_end>
<import_from_stmt>mido.messages.specs SPEC_BY_STATUS<import_from_stmt>mido.messages.encode encode_message<import_from_stmt>mido.messages.decode decode_message<def_stmt>test_encode_decode_all <block_start>"""Encode and then decode all messages on all channels. Each data byte is different so that the test will fail if the bytes are swapped during encoding or decoding. """<line_sep>data_bytes=[1 2 3]<for_stmt>status_byte,spec SPEC_BY_STATUS.items()<block_start><if_stmt>status_byte<eq>0xf0<block_start>msg_bytes=[0xf0]+data_bytes+[0xf7]<block_end><else_stmt><block_start>msg_bytes=[status_byte]+data_bytes[:spec['length']-1]<block_end><assert_stmt>encode_message(decode_message(msg_bytes))<eq>msg_bytes<block_end><block_end>
<import_stmt>sqlite3<import_from_stmt>seldom.db_operation.base_db SQLBase<class_stmt>SQLiteDB(SQLBase)<block_start><def_stmt>__init__ self db_path<block_start>""" Connect to the sqlite database """<line_sep>self.connection=sqlite3.connect(db_path)<line_sep>self.cursor=self.connection.cursor()<block_end><def_stmt>close self<block_start>""" Close the database connection """<line_sep>self.connection.close()<block_end><def_stmt>execute_sql self sql<block_start>""" Execute SQL """<line_sep>self.cursor.execute(sql)<line_sep>self.connection.commit()<block_end><def_stmt>insert_data self table data<block_start>""" insert sql statement """<for_stmt>key data<block_start>data[key]="'"+str(data[key])+"'"<block_end>key=','.join(data.keys())<line_sep>value=','.join(data.values())<line_sep>sql="""INSERT INTO {t} ({k}) VALUES ({v})""".format(t=table k=key v=value)<line_sep>self.execute_sql(sql)<block_end><def_stmt>query_sql self sql<block_start>""" Query SQL return: query data """<line_sep>data_list=[]<line_sep>rows=self.cursor.execute(sql)<for_stmt>row rows<block_start>data_list.append(row)<block_end><return>data_list<block_end><def_stmt>select_data self table where=<none><block_start>""" select sql statement """<line_sep>sql="""select * from {} """.format(table)<if_stmt>where<is><not><none><block_start>sql<augadd>'where {};'.format(self.dict_to_str_and(where))<block_end><return>self.query_sql(sql)<block_end><def_stmt>update_data self table data where<block_start>""" update sql statement """<line_sep>sql="""update {} set """.format(table)<line_sep>sql<augadd>self.dict_to_str(data)<if_stmt>where<block_start>sql<augadd>' where {};'.format(self.dict_to_str_and(where))<block_end>self.execute_sql(sql)<block_end><def_stmt>delete_data self table where=<none><block_start>""" delete table data """<line_sep>sql="""delete from {}""".format(table)<if_stmt>where<is><not><none><block_start>sql<augadd>' where {};'.format(self.dict_to_str_and(where))<block_end>self.execute_sql(sql)<block_end><def_stmt>init_table self table_data<block_start>""" init table data """<for_stmt>table,data_list table_data.items()<block_start>self.delete_data(table)<for_stmt>data data_list<block_start>self.insert_data(table data)<block_end><block_end>self.close()<block_end><block_end>
<import_stmt>pytest<import_from_stmt>sqlalchemy Column Integer MetaData Table <import_from_stmt>sqlalchemy.sql column text <import_from_stmt>galaxy.model.database_utils create_database sqlalchemy_engine <import_from_stmt>galaxy.model.view.utils CreateView View <import_from_stmt>.common drop_database replace_database_in_url skip_if_not_mysql_uri skip_if_not_postgres_uri <line_sep>@pytest.fixture<def_stmt>view # A View class we would add to galaxy.model.view <block_start><class_stmt>TestView(View)<block_start>name='testview'<line_sep>__view__=text('SELECT id, foo FROM testfoo').columns(column('id' Integer) column('foo' Integer))<line_sep>pkeys={'id'}<line_sep>View._make_table(name __view__ pkeys)<block_end><return>TestView<block_end>@skip_if_not_postgres_uri<def_stmt>test_postgres_create_view database_name postgres_url view<block_start>metadata=MetaData()<line_sep>make_table(metadata)# table from which the view will select url=replace_database_in_url(postgres_url database_name)<line_sep>query=f"SELECT 1 FROM information_schema.views WHERE table_name = '{view.name}'"<line_sep>create_database(postgres_url database_name)<line_sep>run_view_test(url metadata view query)<line_sep>drop_database(postgres_url database_name)<block_end><def_stmt>test_sqlite_create_view sqlite_memory_url view<block_start>metadata=MetaData()<line_sep>make_table(metadata)# table from which the view will select url=sqlite_memory_url<line_sep>query=f"SELECT 1 FROM sqlite_master WHERE type='view' AND name='{view.name}'"<line_sep>run_view_test(url metadata view query)<block_end>@skip_if_not_mysql_uri<def_stmt>test_mysql_create_view database_name mysql_url view<block_start>metadata=MetaData()<line_sep>make_table(metadata)# table from which the view will select url=replace_database_in_url(mysql_url database_name)<line_sep>query=f"SELECT 1 FROM information_schema.views WHERE table_name = '{view.name}'"<line_sep>create_database(mysql_url database_name)<line_sep>run_view_test(url metadata view query)<line_sep>drop_database(mysql_url database_name)<block_end><def_stmt>make_table metadata<block_start>users=Table('testfoo' metadata Column('id' Integer primary_key=<true>) Column('foo' Integer) Column('bar' Integer))<line_sep><return>users<block_end><def_stmt>run_view_test url metadata view query<block_start><with_stmt>sqlalchemy_engine(url)<as>engine<block_start><with_stmt>engine.connect()<as>conn<block_start>metadata.create_all(conn)# create table in database conn.execute(CreateView(view.name view.__view__))# create view in database result=conn.execute(query).fetchall()<assert_stmt>len(result)<eq>1<block_end><block_end><block_end># assert that view exists in database
<import_stmt>time<import_stmt>unittest<import_from_stmt>sleekxmpp.test SleekTest<class_stmt>TestStreamChatStates(SleekTest)<block_start><def_stmt>tearDown self<block_start>self.stream_close()<block_end><def_stmt>testChatStates self<block_start>self.stream_start(mode='client' plugins=['xep_0030' 'xep_0085'])<line_sep>results=[]<def_stmt>handle_state msg<block_start>results.append(msg['chat_state'])<block_end>self.xmpp.add_event_handler('chatstate_active' handle_state)<line_sep>self.xmpp.add_event_handler('chatstate_inactive' handle_state)<line_sep>self.xmpp.add_event_handler('chatstate_paused' handle_state)<line_sep>self.xmpp.add_event_handler('chatstate_gone' handle_state)<line_sep>self.xmpp.add_event_handler('chatstate_composing' handle_state)<line_sep>self.recv(""" <message> <active xmlns="http://jabber.org/protocol/chatstates" /> </message> """)<line_sep>self.recv(""" <message> <inactive xmlns="http://jabber.org/protocol/chatstates" /> </message> """)<line_sep>self.recv(""" <message> <paused xmlns="http://jabber.org/protocol/chatstates" /> </message> """)<line_sep>self.recv(""" <message> <composing xmlns="http://jabber.org/protocol/chatstates" /> </message> """)<line_sep>self.recv(""" <message> <gone xmlns="http://jabber.org/protocol/chatstates" /> </message> """)<line_sep># Give event queue time to process time.sleep(0.3)<line_sep>expected=['active' 'inactive' 'paused' 'composing' 'gone']<line_sep>self.failUnless(results<eq>expected "Chat state event not handled: %s"%results)<block_end><block_end>suite=unittest.TestLoader().loadTestsFromTestCase(TestStreamChatStates)<line_sep>
# 
# Unit tests for myparser.py 
# 
<import_stmt>myparser<import_stmt>unittest<class_stmt>TestMyParser(unittest.TestCase)<block_start><def_stmt>test_emails self<block_start>word='domain.com'<line_sep>results='@domain.com***a@domain***banotherdomain.com***<EMAIL>***<EMAIL>***'<line_sep>p=myparser.parser(results word)<line_sep>emails=sorted(p.emails())<line_sep>self.assertEqual(emails ['<EMAIL>' '<EMAIL>'])<block_end><block_end><if_stmt>__name__<eq>'__main__'<block_start>unittest.main()<block_end>