from .explanation import Explanation
from .parsing import Rule, Grammar, Parse, SemanticParser
from .filter_bank import FilterBank
from .utils import ExplanationIO, link_explanation_candidates
from .babbler import Babbler, BabbleStream
# # This file is part of Bakefile (http://bakefile.org) # # Copyright (C) 2008-2013 <NAME> # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to # deal in the Software without restriction, including without limitation the # rights to use, copy, modify, merge, publish, distribute, sublicense, and/or # sell copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING # FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS # IN THE SOFTWARE. # """ This module contains the very core of Bakefile -- the interpreter, :class:`bkl.interpreter.Interpreter`, and its supporting classes. """<import_stmt>logging<import_stmt>bkl.parser<import_stmt>bkl.model<import_stmt>bkl.api<import_stmt>bkl.expr<import_stmt>passes<import_from_stmt>builder Builder<import_from_stmt>bkl.error Error warning<import_from_stmt>bkl.parser parse_file<line_sep>logger=logging.getLogger("bkl.interpreter")<class_stmt>Interpreter(object)<block_start>""" The interpreter is responsible for doing everything necessary to "translate" input ``.bkl`` files into generated native makefiles. This includes building a project model from the input, checking it for correctness, optimizing it and creating outputs for all enabled toolsets. :class:`Interpreter` provides both high-level interface for single-call usage (see :meth:`process`) and other methods with finer granularity that allows you to inspect individual steps (most useful for the test suite). .. attribute:: model Model of the project, as :class:`bkl.model.Project`. It's state always reflects current state of processing. .. attribute:: toolsets_to_use Set of toolsets to generate for. This list may contain only a subset of toolsets the bakefile is written for and may even contain toolsets not specified in the bakefile. If :const:`None` (the default), then the toolsets listed in the bakefile are used. """<def_stmt>__init__ self<block_start>self.model=bkl.model.Project()<line_sep>self.toolsets_to_use=<none><block_end><def_stmt>limit_toolsets self toolsets<block_start>"""Sets :attr:`toolsets_to_use`."""<line_sep>self.toolsets_to_use=set(toolsets)<block_end><def_stmt>process self ast<block_start>""" Interprets input file and generates the outputs. :param ast: AST of the input file, as returned by :func:`bkl.parser.parse_file`. Processing is done in several phases: 1. Basic model is built (see :class:`bkl.interpreter.builder.Builder`). No optimizations or checks are performed at this point. 2. Several generic optimization and checking passes are run on the model. Among other things, types correctness and other constraints are checked, variables are substituted and evaluated. 3. The model is split into several copies, one per output toolset. 4. Further optimization passes are done. 5. Output files are generated. Step 1 is done by :meth:`add_module`. 
Steps 2-4 are done by :meth:`finalize` and step 5 is implemented in :meth:`generate`. """<line_sep>self.add_module(ast self.model)<line_sep>self.finalize()<line_sep>self.generate()<block_end><def_stmt>process_file self filename<block_start>"""Like :meth:`process()`, but takes filename as its argument."""<line_sep>self.process(parse_file(filename))<block_end><def_stmt>add_module self ast parent<block_start>""" Adds parsed AST to the model, without doing any optimizations. May be called more than once, with different parsed files. :param ast: AST of the input file, as returned by :func:`bkl.parser.parse_file`. """<line_sep>logger.info("processing %s" ast.filename)<line_sep>submodules=[]<line_sep>b=Builder(on_submodule=<lambda>fn pos:submodules.append((fn pos)))<line_sep>module=b.create_model(ast parent)<while_stmt>submodules<block_start>sub_filename,sub_pos=submodules[0]<line_sep>submodules.pop(0)<try_stmt><block_start>sub_ast=parse_file(sub_filename)<block_end><except_stmt>IOError<as>e<block_start><if_stmt>e.filename<block_start>msg="%s: %s"%(e.strerror e.filename)<block_end><else_stmt><block_start>msg=e.strerror<block_end><raise>Error(msg pos=sub_pos)<block_end>self.add_module(sub_ast module)<block_end><block_end><def_stmt>_call_custom_steps self model func<block_start><for_stmt>step bkl.api.CustomStep.all()<block_start>logger.debug("invoking custom step %s.%s()" step.name func)<line_sep>getattr(step func)(model)<block_end><block_end><def_stmt>finalize self<block_start>""" Finalizes the model, i.e. checks it for validity, optimizes, creates per-toolset models etc. """<line_sep>logger.debug("finalizing the model")<line_sep># call any custom steps first: self._call_custom_steps(self.model "finalize")<line_sep># then apply standard processing: passes.detect_potential_problems(self.model)<line_sep>passes.normalize_and_validate_bool_subexpressions(self.model)<line_sep>passes.normalize_vars(self.model)<line_sep>passes.validate_vars(self.model)<line_sep>passes.normalize_paths_in_model(self.model toolset=<none>)<line_sep>passes.simplify_exprs(self.model)<block_end><def_stmt>finalize_for_toolset self toolset_model toolset<block_start>""" Finalizes after "toolset" variable was set. """<line_sep>passes.remove_disabled_model_parts(toolset_model toolset)<line_sep># TODO: do this in finalize() instead passes.make_variables_for_missing_props(toolset_model toolset)<line_sep>passes.eliminate_superfluous_conditionals(toolset_model)<line_sep># This is done second time here (in addition to finalize()) to deal # with paths added by make_variables_for_missing_props() and paths with # @builddir (which is toolset specific and couldn't be resolved # earlier). Ideally we wouldn't do it, but hopefully it's not all that # inefficient, as no real work is done for paths that are already # normalized: passes.normalize_paths_in_model(toolset_model toolset)<block_end><def_stmt>make_toolset_specific_model self toolset skip_making_copy=<false><block_start>""" Returns toolset-specific model, i.e. one that works only with *toolset*, has the ``toolset`` property set to it. The caller still needs to call finalize_for_toolset() on it. """<if_stmt>skip_making_copy<block_start>model=self.model<block_end><else_stmt><block_start>model=self.model.clone()<block_end># don't use Variable.from_property(), because it's read-only model.add_variable(bkl.model.Variable.from_property(model.get_prop("toolset") bkl.expr.LiteralExpr(toolset)))<line_sep><return>model<block_end><def_stmt>generate self<block_start>""" Generates output files. 
"""<line_sep># collect all requested toolsets: toolsets=set()<for_stmt>module self.model.modules<block_start>module_toolsets=module.get_variable("toolsets")<if_stmt>module_toolsets<block_start>toolsets.update(module_toolsets.value.as_py())<block_end><block_end><if_stmt>self.toolsets_to_use<block_start><for_stmt>t self.toolsets_to_use<block_start><if_stmt>t<not><in>toolsets<block_start><try_stmt><block_start>bkl.api.Toolset.get(t)<block_end><except_stmt>KeyError<block_start><raise>Error("unknown toolset \"%s\" given on command line"%t)<block_end>warning("toolset \"%s\" is not supported by the project, there may be issues" t)<line_sep># Add the forced toolset to all submodules: <for_stmt>module self.model.modules<block_start>module_toolsets=module.get_variable("toolsets")<if_stmt>module_toolsets<block_start>module_toolsets.value.items.append(bkl.expr.LiteralExpr(t))<block_end><block_end><block_end><block_end>toolsets=self.toolsets_to_use<block_end>toolsets=list(toolsets)<line_sep>logger.debug("toolsets to generate for: %s" toolsets)<if_stmt><not>toolsets<block_start><raise>Error("nothing to generate, \"toolsets\" property is empty")<block_end># call any custom steps first: self._call_custom_steps(self.model "generate")<line_sep># and generate the outputs (notice that we can avoid making a # (expensive!) deepcopy of the model for one of the toolsets and can # reuse the current model): <for_stmt>toolset toolsets[:-1]<block_start>self.generate_for_toolset(toolset)<block_end>self.generate_for_toolset(toolsets[-1] skip_making_copy=<true>)<block_end><def_stmt>generate_for_toolset self toolset skip_making_copy=<false><block_start>""" Generates output for given *toolset*. """<line_sep>logger.debug("****** preparing model for toolset %s ******" toolset)<line_sep>model=self.make_toolset_specific_model(toolset skip_making_copy)<line_sep>self.finalize_for_toolset(model toolset)<line_sep>logger.debug("****** generating for toolset %s ********" toolset)<line_sep>bkl.api.Toolset.get(toolset).generate(model)<block_end><block_end>
REST_FRAMEWORK__DEFAULT_AUTHENTICATION_CLASSES = (
    "rest_framework.authentication.SessionAuthentication",
    "pulpcore.app.authentication.PulpRemoteUserAuthentication",
    "foo_bar1",
)
import unittest

from pyatlas import identifier_converters


class IdentifierConvertersTest(unittest.TestCase):
    def setUp(self):
        pass

    def test_osm_conversion(self):
        atlas_id = 222222000000
        osm_id = 222222
        self.assertEqual(osm_id, identifier_converters.get_osm_identifier(atlas_id))

        atlas_id = 123001002
        osm_id = 123
        self.assertEqual(osm_id, identifier_converters.get_osm_identifier(atlas_id))

        atlas_id = 3101220
        osm_id = 3
        self.assertEqual(osm_id, identifier_converters.get_osm_identifier(atlas_id))

        atlas_id = -222222000001
        osm_id = 222222
        self.assertEqual(osm_id, identifier_converters.get_osm_identifier(atlas_id))

    def test_country_code_conversion(self):
        atlas_id = 222222000000
        country_code = 0
        self.assertEqual(country_code, identifier_converters.get_country_code(atlas_id))

        atlas_id = 123001002
        country_code = 1
        self.assertEqual(country_code, identifier_converters.get_country_code(atlas_id))

        atlas_id = 3101220
        country_code = 101
        self.assertEqual(country_code, identifier_converters.get_country_code(atlas_id))

        atlas_id = -222222002001
        country_code = 2
        self.assertEqual(country_code, identifier_converters.get_country_code(atlas_id))

    def test_way_section_conversion(self):
        atlas_id = 222222000000
        way_section = 0
        self.assertEqual(way_section, identifier_converters.get_way_section_index(atlas_id))

        atlas_id = 123001002
        way_section = 2
        self.assertEqual(way_section, identifier_converters.get_way_section_index(atlas_id))

        atlas_id = 3101220
        way_section = 220
        self.assertEqual(way_section, identifier_converters.get_way_section_index(atlas_id))

        atlas_id = -222222002001
        way_section = 1
        self.assertEqual(way_section, identifier_converters.get_way_section_index(atlas_id))
# from widcardw <import_from_stmt>manimlib.imports *<class_stmt>Test6(Scene)<block_start>CONFIG={"camera_config":{"background_color":"#ffffff"}}<def_stmt>construct self<block_start>circle0=Circle(radius=1.5 stroke_color="#559944" plot_depth=-2)<line_sep>doto=Dot(ORIGIN color="#000000")<line_sep>texto=TexMobject("O" color="#000000" background_stroke_color="#ffffff" background_stroke_width=6).next_to(doto RIGHT+DOWN buff=SMALL_BUFF)<line_sep>self.play(ShowCreation(circle0))<line_sep>self.play(Write(doto) Write(texto))<line_sep>dota=Dot(np.array([3.2 0 0]) color="#000000" plot_depth=1)<line_sep>texta=TexMobject("A" color="#000000").next_to(dota RIGHT+DOWN buff=SMALL_BUFF)<line_sep>self.play(Write(dota) Write(texta))<line_sep>t=ValueTracker(2)<line_sep>dotb=Dot(color="#bb3333" plot_depth=1).add_updater(<lambda>b:b.move_to(np.array([1.5<times>np.cos(t.get_value()) 1.5<times>np.sin(t.get_value()) 0])))<line_sep>textb=TexMobject("B" color="#000000" background_stroke_color="#ffffff" background_stroke_width=6).add_updater(<lambda>b:b.next_to(dotb UP+LEFT buff=SMALL_BUFF))<line_sep>self.play(Write(dotb) Write(textb))<line_sep>self.wait(0.2)<line_sep>l_ab=DashedLine(color="#bb7755" stroke_width=1.5 plot_depth=0).add_updater(<lambda>l:l.put_start_and_end_on(dota.get_center() dotb.get_center()))<line_sep>self.play(ShowCreation(l_ab))<line_sep>self.wait(0.2)<line_sep>self.play(t.increment_value 1 rate_func=smooth)<line_sep>self.play(t.increment_value -3 rate_func=smooth)<line_sep>l_b=Line(LEFT RIGHT).add_updater(<lambda>l:l.become(Line(color="#55aaee" plot_depth=0).rotate(l_ab.get_angle()+PI/2 about_point=l_ab.get_start()).move_to(l_ab.get_end()).scale(20)))<line_sep>dotc=Dot(stroke_opacity=0 fill_opacity=0).add_updater(<lambda>d:d.move_to(l_b.get_start()))<line_sep>self.play(ShowCreation(l_b))<line_sep>self.add(dotc)<line_sep>anglea=Angle(dota dotb dotc).add_updater(<lambda>a:a.become(Angle(dota dotb dotc color="#E65A4C")))<line_sep>self.play(ShowCreation(anglea))<for_stmt>i range(50)<block_start>self.play(t.increment_value TAU/50 rate_func=linear run_time=0.12)<line_sep>l_b.clear_updaters()<line_sep>l_b.plot_depth=-1<line_sep>l_bc=l_b.copy().set_stroke(width=1.5 color="#00aaff")<line_sep>self.add(l_bc)<line_sep>l_b.add_updater(<lambda>l:l.become(Line(color="#55aaee" plot_depth=0).rotate(l_ab.get_angle()+PI/2 about_point=l_ab.get_start()).move_to(l_ab.get_end()).scale(20)))<line_sep>self.add(l_b)<block_end>anglea.clear_updaters()<line_sep>l_b.clear_updaters()<line_sep>self.play(FadeOut(anglea) FadeOut(l_b))<line_sep>self.wait(3)<block_end><block_end><class_stmt>Test7(Scene)<block_start>CONFIG={"camera_config":{"background_color":"#ffffff"}}<def_stmt>construct self<block_start>t=ValueTracker(0)<line_sep>doto=Dot(DOWN<times>0.6 color="#000000" background_stroke_color="#ffffff" background_stroke_width=3 plot_depth=2).scale(0.5)<line_sep>dotp=Dot(np.array([0 -2.7 0]) color="#000000" background_stroke_color="#ffffff" background_stroke_width=3 plot_depth=2).scale(0.5)<line_sep>dota=Dot(color="#000000" background_stroke_color="#ffffff" background_stroke_width=3 plot_depth=2).scale(0.5).add_updater(<lambda>d:d.move_to(np.array([doto.get_center()[0]+np.cos(t.get_value()) doto.get_center()[1]+np.sin(t.get_value()) 0])))<line_sep>cira=Circle().add_updater(<lambda>c:c.become(Circle(radius=get_line_long(dotp.get_center() dota.get_center()) color="#559944").move_to(dota.get_center())))<line_sep>texto=TexMobject("O" color="#000000" background_stroke_color="#ffffff" background_stroke_width=6).scale(0.7).next_to(doto 
DOWN+RIGHT buff=SMALL_BUFF)<line_sep>textp=TexMobject("P" color="#000000" background_stroke_color="#ffffff" background_stroke_width=6).scale(0.7).next_to(dotp DOWN+LEFT buff=SMALL_BUFF)<line_sep>texta=TexMobject("A" color="#000000" background_stroke_color="#ffffff" background_stroke_width=6).scale(0.7).add_updater(<lambda>a:a.next_to(dota DOWN+LEFT buff=SMALL_BUFF))<line_sep>ciro=Circle(radius=1 color="#bb7755").move_to(doto.get_center())<line_sep>dotpc=Dot(color="#000000").scale(0.5).move_to(dotp.get_center())<line_sep>l_pa=DashedLine(color="#55bb33" stroke_width=1.5).add_updater(<lambda>l:l.put_start_and_end_on(dota.get_center() dotpc.get_center()))<line_sep>self.play(ShowCreation(ciro) Write(doto) Write(texto))<line_sep>self.play(Write(dotp) Write(textp))<line_sep>self.wait(0.3)<line_sep>self.play(Write(dota) Write(texta))<line_sep>self.add(dotpc)<line_sep>self.play(ShowCreation(l_pa))<line_sep>path=TracedPath(dotpc.get_center stroke_color="#559944" stroke_width=3)<line_sep>self.add(path)<line_sep>self.play(Rotating(dotpc about_point=dota.get_center()) run_time=1.8 rate_func=smooth)<line_sep># self.play(ShowCreation(cira)) l_pa.clear_updaters()<line_sep>self.remove(dotpc path)<line_sep>self.play(FadeOut(l_pa) FadeIn(cira))<line_sep>self.play(t.increment_value -PI/2)<line_sep>self.wait(0.3)<for_stmt>i range(40)<block_start>self.play(t.increment_value TAU/40 rate_func=linear run_time=0.2)<line_sep>cira.clear_updaters()<line_sep>ciracpy=cira.copy().set_color("#9944bb").set_stroke(width=1.5)<line_sep>self.add(ciracpy)<line_sep>cira.add_updater(<lambda>c:c.become(Circle(radius=get_line_long(dotp.get_center() dota.get_center()) color="#559944").move_to(dota.get_center())))<line_sep>self.add(cira)<block_end>#attention: get_line_long is defined by Shy_Vector #if it does not work, you can turn to "get_norm(...)" cira.clear_updaters()<line_sep>self.play(FadeOut(cira))<line_sep>self.wait(2.5)<block_end><block_end><class_stmt>Test8(Scene)<block_start>CONFIG={"camera_config":{"background_color":"#ffffff"}}<def_stmt>construct self<block_start>doto=Dot(color="#000000" background_stroke_color="#ffffff" background_stroke_width=3 plot_depth=2).scale(0.7)<line_sep>dota=Dot(LEFT<times>1.8 color="#000000" background_stroke_color="#ffffff" background_stroke_width=3 plot_depth=2).scale(0.7)<line_sep>dotb=Dot(RIGHT<times>1.8 color="#000000" background_stroke_color="#ffffff" background_stroke_width=3 plot_depth=2).scale(0.7)<line_sep>texto=TexMobject("O" color="#000000" background_stroke_color="#ffffff" background_stroke_width=6 plot_depth=2).scale(0.7).next_to(doto RIGHT+DOWN buff=SMALL_BUFF)<line_sep>texta=TexMobject("A" color="#000000" background_stroke_color="#ffffff" background_stroke_width=6 plot_depth=2).scale(0.7).next_to(dota LEFT buff=SMALL_BUFF)<line_sep>textb=TexMobject("B" color="#000000" background_stroke_color="#ffffff" background_stroke_width=6 plot_depth=2).scale(0.7).next_to(dotb RIGHT buff=SMALL_BUFF)<line_sep>ciro=Circle(radius=1.8 color="#559944")<line_sep>l_ab=Line(LEFT<times>1.8 RIGHT<times>1.8 color="#4488dd")<line_sep>self.play(ShowCreation(ciro) Write(doto) Write(texto))<line_sep>self.play(ShowCreation(l_ab) *[Write(obj)<for>obj [dota dotb texta textb]])<line_sep>self.wait(0.3)<line_sep>t=ValueTracker(1)<line_sep>dotp=Dot(color="#000000" background_stroke_color="#ffffff" background_stroke_width=3 plot_depth=2).scale(0.7).add_updater(<lambda>d:d.move_to(np.array([1.8<times>np.cos(t.get_value()) 1.8<times>np.sin(t.get_value()) 0])))<line_sep>textp=TexMobject("P" color="#000000" 
background_stroke_color="#ffffff" background_stroke_width=6 plot_depth=2).scale(0.7).add_updater(<lambda>p:p.next_to(dotp UP+RIGHT buff=SMALL_BUFF))<line_sep>self.play(Write(dotp) Write(textp))<line_sep>self.wait(0.2)<line_sep>cirp=Circle(radius=2).add_updater(<lambda>c:c.become(Circle(radius=abs(dotp.get_center()[1]) color="#dd7766").move_to(dotp.get_center())))<line_sep>self.play(ShowCreation(cirp))<line_sep>self.play(t.increment_value 1)<line_sep>self.play(t.increment_value -2)<line_sep>self.wait(0.2)<for_stmt>i range(40)<block_start>self.play(t.increment_value TAU/40 rate_func=linear run_time=0.2)<line_sep>cirp.clear_updaters()<line_sep>cirpc=cirp.copy().set_stroke(width=1.5 color="#715582")<line_sep>self.add(cirpc)<line_sep>cirp.add_updater(<lambda>c:c.become(Circle(radius=abs(dotp.get_center()[1]) color="#dd7766").move_to(dotp.get_center())))<line_sep>self.add(cirp)<block_end>cirp.clear_updaters()<line_sep>textp.clear_updaters()<line_sep>dotp.clear_updaters()<line_sep>self.wait()<line_sep>self.play(*[FadeOut(obj)<for>obj [doto dota dotb texta textb textp textp dotp l_ab ciro texto]])<line_sep>self.wait(2)<block_end><block_end>''' to be completed... class Test5(Scene): CONFIG = {"camera_config": {"background_color": "#ffffff"}} def construct(self): dotb = Dot(LEFT*2, color="#000000", background_stroke_color="#ffffff", background_stroke_width=3, plot_depth=2) dotc = Dot(RIGHT*2, color="#000000", background_stroke_color="#ffffff", background_stroke_width=3, plot_depth=2) dota = Dot(LEFT*2+UP*1.3, color="#000000", background_stroke_color="#ffffff", background_stroke_width=3, plot_depth=2) texta = TexMobject("A", color="#000000", background_stroke_color="#ffffff", background_stroke_width=6, plot_depth=2).next_to(dota, UP+LEFT, buff=SMALL_BUFF) textb = TexMobject("B", color="#000000", background_stroke_color="#ffffff", background_stroke_width=6, plot_depth=2).next_to(dotb, LEFT+DOWN, buff=SMALL_BUFF) textc = TexMobject("C", color="#000000", background_stroke_color="#ffffff", background_stroke_width=6, plot_depth=2).next_to(dotc, RIGHT+DOWN, buff=SMALL_BUFF) l_ab = Line(color="#559944")\ .put_start_and_end_on(dota.get_center(), dotb.get_center()) l_bc = Line(color="#559944")\ .put_start_and_end_on(dotc.get_center(), dotb.get_center()) self.play(*[ShowCreation(obj) for obj in [l_ab, l_bc, dota, dotb, dotc]]) self.play(*[Write(obj) for obj in [texta, textb, textc]]) self.wait(0.3) t = ValueTracker(0) def p_pos(t): return np.array([0, 0, 0]) dotp = Dot(color="#000000", background_stroke_color="#ffffff", background_stroke_width=3, plot_depth=2)\ .add_updater(lambda d: d.move_to())'''<line_sep>
from __future__ import absolute_import

from .assertpy import (
    assert_that,
    assert_warn,
    soft_assertions,
    fail,
    soft_fail,
    add_extension,
    remove_extension,
    WarningLoggingAdapter,
    __version__,
)
from .file import contents_of
import json
import logging.config
from pathlib import Path

import gym

from rl_agents.configuration import Configurable

logging_config = {
    "version": 1,
    "disable_existing_loggers": False,
    "formatters": {
        "standard": {"format": "[%(levelname)s] %(message)s "},
        "detailed": {"format": "[%(name)s:%(levelname)s] %(message)s "},
    },
    "handlers": {
        "default": {
            "level": "INFO",
            "formatter": "standard",
            "class": "logging.StreamHandler",
        }
    },
    "loggers": {
        "": {"handlers": ["default"], "level": "DEBUG", "propagate": True}
    },
}


def configure(config={}, gym_level=gym.logger.INFO):
    """
    Configure logging.

    Update the default configuration by a configuration file.
    Also configure the gym logger.

    :param config: logging configuration, or path to a configuration file
    :param gym_level: desired level for gym logger
    """
    if config:
        if isinstance(config, str):
            with Path(config).open() as f:
                config = json.load(f)
        Configurable.rec_update(logging_config, config)
    logging.config.dictConfig(logging_config)
    gym.logger.set_level(gym_level)


def add_file_handler(file_path):
    """
    Add a file handler to the root logger.

    :param Path file_path: log file path
    """
    configure({
        "handlers": {
            file_path.name: {
                "class": "logging.FileHandler",
                "filename": file_path,
                "level": "DEBUG",
                "formatter": "detailed",
                "mode": "w",
            }
        },
        "loggers": {
            "": {"handlers": [file_path.name, *logging_config["handlers"]]}
        },
    })
# Copyright 2017-2021 The GPflow Contributors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This module is deprecated, and is only provided for backwards compatibility.
It will be removed in GPflow 2.3.
"""
from deprecated import deprecated

from . import misc, traversal

__all__ = []


def _create_module_redirects(m):
    for name in m.__all__:
        func = getattr(m, name)
        assert callable(func), "all names exported by misc and traversal should be functions"
        deprecated_func = deprecated(
            reason="The gpflow.utilities.utilities module is deprecated and will "
            f"be removed in GPflow 2.3; use gpflow.utilities.{name} instead."
        )(func)
        globals()[name] = deprecated_func
        __all__.append(name)


_create_module_redirects(misc)
_create_module_redirects(traversal)
del _create_module_redirects, misc, traversal
import distutils.command


def mod2test():
    return dir(distutils)
# encoding: utf-8
from __future__ import print_function

import term
import humanfriendly


class RequestStorage(object):
    """ Stores statistics about single request """

    def __init__(self):
        self.queryset_stats = []

    def add_queryset_storage_instance(self, queryset_storage):
        self.queryset_stats.append(queryset_storage)

    @property
    def total_wasted_memory(self):
        wasted_memory = 0
        for qs_storage in self.queryset_stats:
            wasted_memory += qs_storage.total_wasted_memory
        return wasted_memory

    # Stats print methods

    def print_stats(self):
        """ Display statistics of current request """
        if not self.queryset_stats:
            return

        term.writeLine("\n\t ERASERHEAD STATS \n", term.bold, term.reverse)
        for queryset_storage in self.queryset_stats:
            queryset_storage.print_stats()
        print()
        term.write("\t TOTAL WASTED MEMORY: ", term.bold, term.reverse)
        term.write(" {}".format(humanfriendly.format_size(self.total_wasted_memory)), term.red)
        print()
# -*- coding: utf-8 -*- """ Created on 2017-11-1 @author: cheng.li """<import_stmt>unittest<import_stmt>numpy<as>np<import_from_stmt>alphamind.portfolio.optimizers LPOptimizer<import_from_stmt>alphamind.portfolio.optimizers QuadraticOptimizer<import_from_stmt>alphamind.portfolio.optimizers TargetVolOptimizer<class_stmt>TestOptimizers(unittest.TestCase)<block_start><def_stmt>test_lpoptimizer self<block_start>er=np.array([-1. -2.])<line_sep>lower_bound=np.array([0. 0.2])<line_sep>upper_bound=np.array([1. 0.8])<line_sep>optimizer=LPOptimizer(objective=-er cons_matrix=np.array([[1. 1. 1. 1.]]) lbound=lower_bound ubound=upper_bound)<line_sep>self.assertAlmostEqual(optimizer.feval() 1.2)<line_sep>np.testing.assert_array_almost_equal(optimizer.x_value() [0.8 0.2])<block_end><def_stmt>test_qpoptimizer self<block_start>er=np.array([0.01 0.02 0.03])<line_sep>cov=np.array([[0.02 0.01 0.02] [0.01 0.02 0.03] [0.02 0.03 0.02]])<line_sep>ids_var=np.diag([0.01 0.02 0.03])<line_sep>cov<augadd>ids_var<line_sep>lbound=np.array([0. 0. 0.])<line_sep>ubound=np.array([0.4 0.4 0.5])<line_sep>cons=np.array([[1. 1. 1.] [1. 0. 1.]])<line_sep>clbound=np.array([1. 0.3])<line_sep>cubound=np.array([1. 0.7])<line_sep>cons_matrix=np.concatenate([cons clbound.reshape((-1 1)) cubound.reshape((-1 1))] axis=1)<line_sep>optimizer=QuadraticOptimizer(objective=-er cov=cov lbound=lbound ubound=ubound cons_matrix=cons_matrix)<line_sep># check against matlab result np.testing.assert_array_almost_equal(optimizer.x_value() [0.2 0.3 0.5] 4)<block_end><def_stmt>test_qpoptimizer_with_factor_model self<block_start>er=np.array([0.1 0.2 0.3])<line_sep>lbound=np.array([0.0 0.0 0.0])<line_sep>ubound=np.array([1.0 1.0 1.0])<line_sep>factor_var=np.array([[0.5 -0.3] [-0.3 0.7]])<line_sep>factor_load=np.array([[0.8 0.2] [0.5 0.5] [0.2 0.8]])<line_sep>idsync=np.array([0.1 0.3 0.2])<line_sep>cons=np.array([[1. 1. 1.]])<line_sep>clbound=np.array([1.])<line_sep>cubound=np.array([1.])<line_sep>cons_matrix=np.concatenate([cons clbound.reshape((-1 1)) cubound.reshape((-1 1))] axis=1)<line_sep>optimizer=QuadraticOptimizer(objective=-er lbound=lbound ubound=ubound factor_cov=factor_var factor_load=factor_load factor_special=idsync cons_matrix=cons_matrix)<line_sep># check against cvxpy result np.testing.assert_array_almost_equal(optimizer.x_value() [0.2866857 0.21416417 0.49915014] 4)<block_end><def_stmt>test_qpoptimizer_with_identity_matrix self<block_start>er=np.array([-0.02 0.01 0.03])<line_sep>cov=np.diag([1. 1. 1.])<line_sep>optimizer=QuadraticOptimizer(objective=-er cov=cov)<line_sep>np.testing.assert_array_almost_equal(optimizer.x_value() [-0.02 0.01 0.03] 4)<block_end><def_stmt>test_target_vol_optimizer_without_cons self<block_start>er=np.array([0.1 0.2 0.3])<line_sep>cov=np.array([[0.05 0.01 0.02] [0.01 0.06 0.03] [0.02 0.03 0.07]])<line_sep>lbound=np.array([-0.3 -0.3 -0.3])<line_sep>ubound=np.array([0.5 0.5 0.5])<line_sep>target_vol=0.1<line_sep>optimizer=TargetVolOptimizer(objective=-er cov=cov lbound=lbound ubound=ubound target_vol=target_vol)<line_sep># check against known good result np.testing.assert_array_almost_equal(optimizer.x_value() [.0231776 0.1274768 0.30130881] 4)<block_end><def_stmt>test_target_vol_optimizer_with_cons self<block_start>er=np.array([0.1 0.2 0.3])<line_sep>cov=np.array([[0.05 0.01 0.02] [0.01 0.06 0.03] [0.02 0.03 0.07]])<line_sep>lbound=np.array([-0.3 -0.3 -0.3])<line_sep>ubound=np.array([0.5 0.5 0.5])<line_sep>cons=np.array([[1. 1. 
1.]])<line_sep>clbound=np.array([0.])<line_sep>cubound=np.array([0.])<line_sep>cons_matrix=np.concatenate([cons clbound.reshape((-1 1)) cubound.reshape((-1 1))] axis=1)<line_sep>target_vol=0.1<line_sep>optimizer=TargetVolOptimizer(objective=-er cov=cov lbound=lbound ubound=ubound target_vol=target_vol cons_matrix=cons_matrix)<line_sep># check against known good result np.testing.assert_array_almost_equal(optimizer.x_value() [-0.3 -0.10919033 0.40919033] 4)<block_end><def_stmt>test_target_vol_optimizer_with_factor_model self<block_start>er=np.array([0.1 0.2 0.3])<line_sep>lbound=np.array([0.0 0.0 0.0])<line_sep>ubound=np.array([1.0 1.0 1.0])<line_sep>factor_var=np.array([[0.5 -0.3] [-0.3 0.7]])<line_sep>factor_load=np.array([[0.8 0.2] [0.5 0.5] [0.2 0.8]])<line_sep>idsync=np.array([0.1 0.3 0.2])<line_sep>cons=np.array([[1. 1. 1.]])<line_sep>clbound=np.array([1.])<line_sep>cubound=np.array([1.])<line_sep>target_vol=0.5<line_sep>cons_matrix=np.concatenate([cons clbound.reshape((-1 1)) cubound.reshape((-1 1))] axis=1)<line_sep>optimizer=TargetVolOptimizer(objective=-er factor_cov=factor_var factor_load=factor_load factor_special=idsync lbound=lbound ubound=ubound target_vol=target_vol cons_matrix=cons_matrix)<line_sep># check against cvxpy result np.testing.assert_array_almost_equal(optimizer.x_value() [0.26595552 0.21675092 0.51729356] 4)<block_end><def_stmt>test_target_vol_with_cons_and_ieq self<block_start>er=np.array([0.1 0.2 0.3])<line_sep>cov=np.array([[0.05 0.01 0.02] [0.01 0.06 0.03] [0.02 0.03 0.07]])<line_sep>lbound=np.array([-0.3 -0.3 -0.3])<line_sep>ubound=np.array([0.5 0.5 0.5])<line_sep>cons=np.array([[1. 1. 1.]])<line_sep>clbound=np.array([0.])<line_sep>cubound=np.array([0.])<line_sep>target_vol=0.1<line_sep>cons_matrix=np.concatenate([cons clbound.reshape((-1 1)) cubound.reshape((-1 1))] axis=1)<line_sep>optimizer=TargetVolOptimizer(objective=-er cov=cov lbound=lbound ubound=ubound target_vol=target_vol cons_matrix=cons_matrix)<line_sep># check against known good result np.testing.assert_array_almost_equal(optimizer.x_value() [-0.3 -0.10919033 0.40919033] 4)<block_end><block_end><if_stmt>__name__<eq>'__main__'<block_start>unittest.main()<block_end>
import os
import sys
import argparse

import h5py
import numpy as np

parser = argparse.ArgumentParser()
parser.add_argument('--root', help='path to root directory')
args = parser.parse_args()

root = args.root

fname = os.path.join(root, 'metadata/train.txt')
flist = [os.path.join(root, 'h5', line.strip()) for line in open(fname, 'r')]

fname = os.path.join(root, 'metadata', 'classes.txt')
classes = [line.strip() for line in open(fname, 'r')]
num_classes = len(classes)

sizes = np.zeros(num_classes)
total = np.zeros(num_classes)

for fname in flist:
    print('> Processing {}...'.format(fname))
    fin = h5py.File(fname)
    coords = fin['coords'][:]
    points = fin['points'][:]
    labels = fin['labels'][:]
    labels = labels.reshape(-1, 2)
    num_points = labels.shape[0]
    for i in range(num_classes):
        indices = (labels[:, 0] == i)
        size = np.sum(indices)
        sizes[i] += size
        if size == 0:
            continue
        total[i] += num_points

freq = sizes / total
weight = np.median(freq) / freq

fname = os.path.join(root, 'metadata', 'weight.txt')
print('> Saving statistics to {}...'.format(fname))
np.savetxt(fname, weight, fmt='%f')
# Copyright 2017 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. # pylint: disable=import-error,print-statement,relative-import,protected-access """Unit tests for name_style_converter.py."""<import_stmt>unittest<import_from_stmt>name_style_converter NameStyleConverter<import_from_stmt>name_style_converter tokenize_name<class_stmt>SmartTokenizerTest(unittest.TestCase)<block_start><def_stmt>test_simple_cases self<block_start>self.assertEqual(tokenize_name('foo') ['foo'])<line_sep>self.assertEqual(tokenize_name('fooBar') ['foo' 'Bar'])<line_sep>self.assertEqual(tokenize_name('fooBarBaz') ['foo' 'Bar' 'Baz'])<line_sep>self.assertEqual(tokenize_name('Baz') ['Baz'])<line_sep>self.assertEqual(tokenize_name('') [])<line_sep>self.assertEqual(tokenize_name('FOO') ['FOO'])<line_sep>self.assertEqual(tokenize_name('foo2') ['foo' '2'])<block_end><def_stmt>test_tricky_cases self<block_start>self.assertEqual(tokenize_name('XMLHttpRequest') ['XML' 'Http' 'Request'])<line_sep>self.assertEqual(tokenize_name('HTMLElement') ['HTML' 'Element'])<line_sep>self.assertEqual(tokenize_name('WebGLRenderingContext') ['WebGL' 'Rendering' 'Context'])<line_sep>self.assertEqual(tokenize_name('CanvasRenderingContext2D') ['Canvas' 'Rendering' 'Context' '2D'])<line_sep>self.assertEqual(tokenize_name('CanvasRenderingContext2DAPITest') ['Canvas' 'Rendering' 'Context' '2D' 'API' 'Test'])<line_sep>self.assertEqual(tokenize_name('SVGSVGElement') ['SVG' 'SVG' 'Element'])<line_sep>self.assertEqual(tokenize_name('CanvasRenderingContext2D') ['Canvas' 'Rendering' 'Context' '2D'])<line_sep>self.assertEqual(tokenize_name('CSSURLImageValue') ['CSS' 'URL' 'Image' 'Value'])<line_sep>self.assertEqual(tokenize_name('CSSPropertyAPID') ['CSS' 'Property' 'API' 'D'])<line_sep>self.assertEqual(tokenize_name('AXARIAGridCell') ['AX' 'ARIA' 'Grid' 'Cell'])<line_sep>self.assertEqual(tokenize_name('CDATASection') ['CDATA' 'Section'])<line_sep>self.assertEqual(tokenize_name('ASCIICType') ['ASCII' 'CType'])<line_sep>self.assertEqual(tokenize_name('CString') ['CString'])<line_sep>self.assertEqual(tokenize_name('HTMLDListElement') ['HTML' 'DList' 'Element'])<line_sep>self.assertEqual(tokenize_name('HTMLOListElement') ['HTML' 'OList' 'Element'])<line_sep>self.assertEqual(tokenize_name('HTMLIFrameElement') ['HTML' 'IFrame' 'Element'])<line_sep>self.assertEqual(tokenize_name('HTMLPlugInElement') ['HTML' 'PlugIn' 'Element'])<line_sep># No special handling for OptGroup, FieldSet, and TextArea. 
self.assertEqual(tokenize_name('HTMLOptGroupElement') ['HTML' 'Opt' 'Group' 'Element'])<line_sep>self.assertEqual(tokenize_name('HTMLFieldSetElement') ['HTML' 'Field' 'Set' 'Element'])<line_sep>self.assertEqual(tokenize_name('HTMLTextAreaElement') ['HTML' 'Text' 'Area' 'Element'])<line_sep>self.assertEqual(tokenize_name('Path2D') ['Path' '2D'])<line_sep>self.assertEqual(tokenize_name('Point2D') ['Point' '2D'])<line_sep>self.assertEqual(tokenize_name('CanvasRenderingContext2DState') ['Canvas' 'Rendering' 'Context' '2D' 'State'])<line_sep>self.assertEqual(tokenize_name('Accelerated2dCanvas') ['Accelerated' '2d' 'Canvas'])<line_sep>self.assertEqual(tokenize_name('RTCDTMFSender') ['RTC' 'DTMF' 'Sender'])<line_sep>self.assertEqual(tokenize_name('WebGLCompressedTextureS3TCsRGB') ['WebGL' 'Compressed' 'Texture' 'S3TC' 'sRGB'])<line_sep>self.assertEqual(tokenize_name('WebGL2CompressedTextureETC1') ['WebGL2' 'Compressed' 'Texture' 'ETC1'])<line_sep>self.assertEqual(tokenize_name('EXTsRGB') ['EXT' 'sRGB'])<line_sep># 'PVRTC' contains a special token 'RTC', but it should be a # single token. self.assertEqual(tokenize_name('WebGLCompressedTexturePVRTC') ['WebGL' 'Compressed' 'Texture' 'PVRTC'])<line_sep>self.assertEqual(tokenize_name('SVGFEBlendElement') ['SVG' 'FE' 'Blend' 'Element'])<line_sep>self.assertEqual(tokenize_name('SVGMPathElement') ['SVG' 'MPath' 'Element'])<line_sep>self.assertEqual(tokenize_name('SVGTSpanElement') ['SVG' 'TSpan' 'Element'])<line_sep>self.assertEqual(tokenize_name('SVGURIReference') ['SVG' 'URI' 'Reference'])<line_sep>self.assertEqual(tokenize_name('UTF16TextIterator') ['UTF16' 'Text' 'Iterator'])<line_sep>self.assertEqual(tokenize_name('UTF8Decoder') ['UTF8' 'Decoder'])<line_sep>self.assertEqual(tokenize_name('Uint8Array') ['Uint8' 'Array'])<line_sep>self.assertEqual(tokenize_name('DOMWindowBase64') ['DOM' 'Window' 'Base64'])<line_sep>self.assertEqual(tokenize_name('TextCodecLatin1') ['Text' 'Codec' 'Latin1'])<line_sep>self.assertEqual(tokenize_name('V8BindingForCore') ['V8' 'Binding' 'For' 'Core'])<line_sep>self.assertEqual(tokenize_name('V8DOMRect') ['V8' 'DOM' 'Rect'])<line_sep>self.assertEqual(tokenize_name('String16MojomTraits') ['String16' 'Mojom' 'Traits'])<line_sep>self.assertEqual(tokenize_name('V0InsertionPoint') ['V0' 'Insertion' 'Point'])<line_sep>self.assertEqual(tokenize_name('ShadowDOMV0Test') ['Shadow' 'DOM' 'V0' 'Test'])<line_sep>self.assertEqual(tokenize_name('ElementShadowV0') ['Element' 'Shadow' 'V0'])<line_sep>self.assertEqual(tokenize_name('StubChromeClientForSPv2') ['Stub' 'Chrome' 'Client' 'For' 'SPv2'])<line_sep>self.assertEqual(tokenize_name('SQLiteAuthorizer') ['SQLite' 'Authorizer'])<line_sep>self.assertEqual(tokenize_name('XPathEvaluator') ['XPath' 'Evaluator'])<line_sep>self.assertEqual(tokenize_name('IsXHTMLDocument') ['Is' 'XHTML' 'Document'])<line_sep>self.assertEqual(tokenize_name('isHTMLDocument') ['is' 'HTML' 'Document'])<line_sep>self.assertEqual(tokenize_name('matrix3d') ['matrix' '3d'])<block_end><def_stmt>test_ignoring_characters self<block_start>self.assertEqual(tokenize_name('Animation.idl') ['Animation' 'idl'])<line_sep>self.assertEqual(tokenize_name('-webkit-appearance') ['webkit' 'appearance'])<line_sep>self.assertEqual(tokenize_name(' foo_bar!#"$') ['foo' 'bar'])<block_end><block_end><class_stmt>NameStyleConverterTest(unittest.TestCase)<block_start><def_stmt>test_snake_case self<block_start>converter=NameStyleConverter('HTMLElement')<line_sep>self.assertEqual(converter.to_snake_case() 
'html_element')<block_end><def_stmt>test_upper_camel_case self<block_start>converter=NameStyleConverter('someSuperThing')<line_sep>self.assertEqual(converter.to_upper_camel_case() 'SomeSuperThing')<line_sep>converter=NameStyleConverter('SVGElement')<line_sep>self.assertEqual(converter.to_upper_camel_case() 'SVGElement')<line_sep>converter=NameStyleConverter('cssExternalScannerPreload')<line_sep>self.assertEqual(converter.to_upper_camel_case() 'CSSExternalScannerPreload')<line_sep>converter=NameStyleConverter('xpathExpression')<line_sep>self.assertEqual(converter.to_upper_camel_case() 'XPathExpression')<line_sep>converter=NameStyleConverter('feDropShadow')<line_sep>self.assertEqual(converter.to_upper_camel_case() 'FEDropShadow')<block_end><def_stmt>test_lower_camel_case self<block_start>converter=NameStyleConverter('someSuperThing')<line_sep>self.assertEqual(converter.to_lower_camel_case() 'someSuperThing')<line_sep>converter=NameStyleConverter('SVGElement')<line_sep>self.assertEqual(converter.to_lower_camel_case() 'svgElement')<line_sep>converter=NameStyleConverter('documentURI')<line_sep>self.assertEqual(converter.to_lower_camel_case() 'documentURI')<line_sep>converter=NameStyleConverter('-webkit-margin-start')<line_sep>self.assertEqual(converter.to_lower_camel_case() 'webkitMarginStart')<line_sep>converter=NameStyleConverter('Accelerated2dCanvas')<line_sep>self.assertEqual(converter.to_lower_camel_case() 'accelerated2dCanvas')<block_end><def_stmt>test_macro_case self<block_start>converter=NameStyleConverter('WebGLBaz2D')<line_sep>self.assertEqual(converter.to_macro_case() 'WEBGL_BAZ_2D')<block_end><def_stmt>test_all_cases self<block_start>converter=NameStyleConverter('SVGScriptElement')<line_sep>self.assertEqual(converter.to_all_cases() {'snake_case':'svg_script_element' 'upper_camel_case':'SVGScriptElement' 'macro_case':'SVG_SCRIPT_ELEMENT' })<block_end><block_end>
# Copyright 2017,2018,2019,2020,2021 Sony Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function

import pytest
import numpy as np

import nnabla as nn
import nnabla.functions as F


@pytest.mark.parametrize("m", [1, 2, 3])
def test_cuda_large_blocks(cuda_test_opts, m):
    if cuda_test_opts.disable_test_large_blocks:
        pytest.skip('`--disable-test-large-blocks` is passed')
    CUDA_THREAD_PER_BLOCK = 512
    CUDA_MAX_BLOCKS = 65536
    size = CUDA_MAX_BLOCKS * CUDA_THREAD_PER_BLOCK * m + 3
    print("Variable size:", size)
    x = np.zeros((size,), np.float32)
    v = nn.Variable(x.shape)
    v.d = x
    from nnabla.ext_utils import get_extension_context
    with nn.context_scope(get_extension_context('cuda')):
        y = F.relu(v)
        y.forward()
# BSD 3-Clause License; see https://github.com/scikit-hep/awkward-1.0/blob/main/LICENSE

from __future__ import absolute_import

import pytest  # noqa: F401
import numpy as np  # noqa: F401
import awkward as ak  # noqa: F401


def test():
    for itype in ["i8", "u8", "i32", "u32", "i64"]:
        form = ak.forms.ListOffsetForm(itype, ak.forms.EmptyForm())
        assert form.offsets == itype
# coding: utf-8 <import_from_stmt>functools partial<import_stmt>sublime<import_from_stmt>sublime_plugin WindowCommand TextCommand EventListener<import_from_stmt>.util find_view_by_settings noop get_setting<import_from_stmt>.cmd GitCmd<import_from_stmt>.helpers GitStatusHelper<import_from_stmt>.status GIT_WORKING_DIR_CLEAN<line_sep>GIT_COMMIT_VIEW_TITLE="COMMIT_EDITMSG"<line_sep>GIT_COMMIT_VIEW_SYNTAX='Packages/SublimeGit/syntax/SublimeGit Commit Message.tmLanguage'<line_sep>GIT_NOTHING_STAGED=u'No changes added to commit. Use s on files/sections in the status view to stage changes.'<line_sep>GIT_COMMIT_TEMPLATE=u"""{old_msg} # Please enter the commit message for your changes. Lines starting # with '#' will be ignored, and an empty message aborts the commit. {status}"""<line_sep>GIT_AMEND_PUSHED=(u"It is discouraged to rewrite history which has already been pushed. "<concat>u"Are you sure you want to amend the commit?")<line_sep>CUT_LINE=u"------------------------ >8 ------------------------\n"<line_sep>CUT_EXPLANATION=u"# Do not touch the line above.\n# Everything below will be removed.\n"<class_stmt>GitCommit(object)<block_start>windows={}<block_end><class_stmt>GitCommitWindowCmd(GitCmd GitStatusHelper)<block_start>@property<def_stmt>is_verbose self<block_start><return>get_setting('git_commit_verbose' <false>)<block_end><def_stmt>get_commit_template self repo add=<false> amend=<false><block_start>cmd=['commit' '--dry-run' '--status' '--all'<if>add<else><none> '--amend'<if>amend<else><none> '--verbose'<if>self.is_verbose<else><none>]<line_sep>exit,stdout,stderr=self.git(cmd cwd=repo)<line_sep>stderr=stderr.strip()<if_stmt>stderr<block_start><for_stmt>line stderr.splitlines()<block_start>stdout<augadd>"# %s\n"%line<block_end><block_end>old_msg=''<if_stmt>amend<block_start>old_msg=self.git_lines(['rev-list' '--format=%B' '--max-count=1' 'HEAD'] cwd=repo)<line_sep>old_msg="%s\n"%"\n".join(old_msg[1:])<block_end><if_stmt>self.is_verbose<and>CUT_LINE<not><in>stdout<block_start>comments=[]<line_sep>other=[]<for_stmt>line stdout.splitlines()<block_start><if_stmt>line.startswith('#')<block_start>comments.append(line)<block_end><else_stmt><block_start>other.append(line)<block_end><block_end>status="\n".join(comments)<line_sep>status<augadd>"\n# %s"%CUT_LINE<line_sep>status<augadd>CUT_EXPLANATION<line_sep>status<augadd>"\n".join(other)<block_end><else_stmt><block_start>status=stdout<block_end><return>GIT_COMMIT_TEMPLATE.format(status=status old_msg=old_msg)<block_end><def_stmt>show_commit_panel self content<block_start>panel=self.window.get_output_panel('git-commit')<line_sep>panel.run_command('git_panel_write' {'content':content})<line_sep>self.window.run_command('show_panel' {'panel':'output.git-commit'})<block_end><block_end><class_stmt>GitCommitCommand(WindowCommand GitCommitWindowCmd)<block_start>""" Documentation coming soon. 
"""<def_stmt>run self add=<false><block_start>repo=self.get_repo()<if_stmt><not>repo<block_start><return><block_end>staged=self.has_staged_changes(repo)<line_sep>dirty=self.has_unstaged_changes(repo)<if_stmt><not>add<and><not>staged<block_start><return>sublime.error_message(GIT_NOTHING_STAGED)<block_end><elif_stmt>add<and>(<not>staged<and><not>dirty)<block_start><return>sublime.error_message(GIT_WORKING_DIR_CLEAN)<block_end>view=find_view_by_settings(self.window git_view='commit' git_repo=repo)<if_stmt><not>view<block_start>view=self.window.new_file()<line_sep>view.set_name(GIT_COMMIT_VIEW_TITLE)<line_sep>view.set_syntax_file(GIT_COMMIT_VIEW_SYNTAX)<line_sep>view.set_scratch(<true>)<line_sep>view.settings().set('git_view' 'commit')<line_sep>view.settings().set('git_repo' repo)<block_end>GitCommit.windows[view.id()]=(self.window add <false>)<line_sep>self.window.focus_view(view)<line_sep>template=self.get_commit_template(repo add=add)<line_sep>view.run_command('git_commit_template' {'template':template})<block_end><block_end><class_stmt>GitCommitAmendCommand(GitCommitWindowCmd WindowCommand)<block_start>""" Documentation coming soon. """<def_stmt>run self<block_start>repo=self.get_repo()<if_stmt><not>repo<block_start><return><block_end>unpushed=self.git_exit_code(['diff' '--exit-code' '--quiet' '@{upstream}..'] cwd=repo)<if_stmt>unpushed<eq>0<block_start><if_stmt><not>sublime.ok_cancel_dialog(GIT_AMEND_PUSHED 'Amend commit')<block_start><return><block_end><block_end>view=find_view_by_settings(self.window git_view='commit' git_repo=repo)<if_stmt><not>view<block_start>view=self.window.new_file()<line_sep>view.set_name(GIT_COMMIT_VIEW_TITLE)<line_sep>view.set_syntax_file(GIT_COMMIT_VIEW_SYNTAX)<line_sep>view.set_scratch(<true>)<line_sep>view.settings().set('git_view' 'commit')<line_sep>view.settings().set('git_repo' repo)<block_end>GitCommit.windows[view.id()]=(self.window <false> <true>)<line_sep>self.window.focus_view(view)<line_sep>template=self.get_commit_template(repo amend=<true>)<line_sep>view.run_command('git_commit_template' {'template':template})<block_end><block_end><class_stmt>GitCommitTemplateCommand(TextCommand)<block_start><def_stmt>is_visible self<block_start><return><false><block_end><def_stmt>run self edit template=''<block_start><if_stmt>self.view.size()<g>0<block_start>self.view.erase(edit sublime.Region(0 self.view.size()))<block_end>self.view.insert(edit 0 template)<line_sep>self.view.sel().clear()<line_sep>self.view.sel().add(sublime.Region(0))<block_end><block_end><class_stmt>GitCommitEventListener(EventListener)<block_start>_lpop=<false><def_stmt>mark_pedantic self view<block_start><if_stmt>view.settings().get('git_view')<eq>'commit'<or>view.file_name()<eq>'COMMIT_EDITMSG'# Header lines should be a max of 50 chars <block_start>view.erase_regions('git-commit.header')<line_sep>firstline=view.line(view.text_point(0 0))<if_stmt>firstline.end()<g>50<and><not>view.substr(firstline).startswith('#')<block_start>view.add_regions('git-commit.header' [sublime.Region(50 firstline.end())] 'invalid' 'dot')<block_end># The second line should be empty view.erase_regions('git-commit.line2')<line_sep>secondline=view.line(view.text_point(1 0))<if_stmt>secondline.end()-secondline.begin()<g>0<and><not>view.substr(secondline).startswith('#')<block_start>view.add_regions('git-commit.line2' [secondline] 'invalid' 'dot')<block_end># Other lines should be at most 72 chars view.erase_regions('git-commit.others')<for_stmt>l view.lines(sublime.Region(view.text_point(2 0) 
view.size()))<block_start><if_stmt>view.substr(l).startswith('#')<block_start><break><block_end><if_stmt>l.end()-l.begin()<g>72<block_start>view.add_regions('git-commit.others' [sublime.Region(l.begin()+72 l.end())] 'invalid' 'dot')<block_end><block_end><block_end><block_end><def_stmt>on_activated self view<block_start><if_stmt>sublime.version()<l>'3000'<and>get_setting('git_commit_pedantic')<is><true><block_start>self.mark_pedantic(view)<block_end><block_end><def_stmt>on_modified self view<block_start><if_stmt>sublime.version()<l>'3000'<and>get_setting('git_commit_pedantic')<is><true><block_start>self.mark_pedantic(view)<block_end><block_end><def_stmt>on_modified_async self view<block_start><if_stmt>get_setting('git_commit_pedantic')<is><true><block_start>self.mark_pedantic(view)<block_end><block_end><def_stmt>on_activated_async self view<block_start><if_stmt>get_setting('git_commit_pedantic')<is><true><block_start>self.mark_pedantic(view)<block_end><block_end><def_stmt>on_close self view<block_start><if_stmt>view.settings().get('git_view')<eq>'commit'<and>view.id()<in>GitCommit.windows<block_start>message=view.substr(sublime.Region(0 view.size()))<line_sep>window,add,amend=GitCommit.windows[view.id()]<line_sep>repo=view.settings().get('git_repo')<line_sep>window.run_command('git_commit_perform' {'message':message 'add':add 'amend':amend 'repo':repo})<block_end><block_end><block_end><class_stmt>GitCommitPerformCommand(WindowCommand GitCommitWindowCmd)<block_start><def_stmt>run self repo message add=<false> amend=<false><block_start>cmd=['commit' '--cleanup=strip' '--all'<if>add<else><none> '--amend'<if>amend<else><none> '--verbose'<if>self.is_verbose<else><none> '-F' '-']<line_sep>exit,stdout,stderr=self.git(cmd stdin=message cwd=repo)<line_sep>self.show_commit_panel(stdout<if>exit<eq>0<else>stderr)<line_sep>self.window.run_command('git_status' {'refresh_only':<true>})<block_end><def_stmt>is_visible self<block_start><return><false><block_end><block_end><class_stmt>GitCommitSaveCommand(TextCommand)<block_start><def_stmt>is_visible self<block_start><return><false><block_end><def_stmt>run self edit<block_start><if_stmt>self.view.settings().get('git_view')<eq>'commit'<and>self.view.id()<in>GitCommit.windows<block_start><return><block_end>self.view.run_command('save')<block_end><block_end><class_stmt>GitQuickCommitCommand(WindowCommand GitCommitWindowCmd)<block_start>""" Quickly commit changes with a one-line commit message. If there are any staged changes, only those changes will be added. If there are no staged changes, any changed files that git know about will be added in the commit. If the working directory is clean, an error will be shown indicating it. After entering the commit message, press enter to commit, or esc to cancel. An empty commit message will also result in the commit being cancelled. 
"""<def_stmt>run self<block_start>repo=self.get_repo()<if_stmt><not>repo<block_start><return><block_end>staged=self.has_staged_changes(repo)<line_sep>dirty=self.has_unstaged_changes(repo)<if_stmt><not>staged<and><not>dirty<block_start>sublime.error_message(GIT_WORKING_DIR_CLEAN.capitalize())<line_sep><return><block_end>self.window.show_input_panel("Commit message:" '' partial(self.on_commit_message repo) noop noop)<block_end><def_stmt>on_commit_message self repo msg=<none><block_start><if_stmt><not>msg<block_start>msg=''<block_end>cmd=['commit' '-F' '-']<if>self.has_staged_changes(repo)<else>['commit' '-a' '-F' '-']<line_sep>stdout=self.git_string(cmd stdin=msg cwd=repo)<line_sep>self.show_commit_panel(stdout)<line_sep>self.window.run_command('git_status' {'refresh_only':<true>})<block_end><block_end><class_stmt>GitQuickCommitCurrentFileCommand(TextCommand GitCmd GitStatusHelper)<block_start>""" Documentation coming soon. """<def_stmt>run self edit<block_start>filename=self.view.file_name()<if_stmt><not>filename<block_start>sublime.error_message("Cannot commit a file which has not been saved.")<line_sep><return><block_end>repo=self.get_repo()<if_stmt><not>repo<block_start><return><block_end><if_stmt><not>self.file_in_git(repo filename)<block_start><if_stmt>sublime.ok_cancel_dialog("The file %s is not tracked by git. Do you want to add it?"%filename "Add file")<block_start>exit,stdout,stderr=self.git(['add' '--force' '--' filename] cwd=repo)<if_stmt>exit<eq>0<block_start>sublime.status_message('Added %s'%filename)<block_end><else_stmt><block_start>sublime.error_message('git error: %s'%stderr)<block_end><block_end><else_stmt><block_start><return><block_end><block_end>self.view.window().show_input_panel("Commit message:" '' partial(self.on_commit_message repo filename) noop noop)<block_end><def_stmt>on_commit_message self repo filename msg=<none><block_start><if_stmt><not>msg<block_start>msg=''<block_end># run command cmd=['commit' '-F' '-' '--only' '--' filename]<line_sep>stdout=self.git_string(cmd stdin=msg cwd=repo)<line_sep># show output panel panel=self.view.window().get_output_panel('git-commit')<line_sep>panel.run_command('git_panel_write' {'content':stdout})<line_sep>self.view.window().run_command('show_panel' {'panel':'output.git-commit'})<line_sep># update status if necessary self.view.window().run_command('git_status' {'refresh_only':<true>})<block_end><block_end>
<import_from_stmt>pythonjsonlogger jsonlogger<import_from_stmt>datetime datetime<import_stmt>logging<import_from_stmt>logging Logger<import_from_stmt>logging.config dictConfig<import_from_stmt>seedwork.utils.functional SimpleLazyObject<import_from_stmt>seedwork.infrastructure.request_context request_context<class_stmt>RequestContextFilter(logging.Filter)<block_start>""" "Provides correlation id parameter for the logger"""<def_stmt>__init__ self name:str request_context<arrow><none><block_start>super().__init__(name=name)<line_sep>self.request_context=request_context<block_end><def_stmt>filter self record<block_start>record.correlation_id=self.request_context.correlation_id.get()<line_sep><return><true><block_end><block_end><class_stmt>ElkJsonFormatter(jsonlogger.JsonFormatter)<block_start>""" ELK stack-compatibile formatter """<def_stmt>add_fields self log_record record message_dict<block_start>super(ElkJsonFormatter self).add_fields(log_record record message_dict)<line_sep>log_record["@timestamp"]=datetime.now().isoformat()<line_sep>log_record["level"]=record.levelname<line_sep>log_record["logger"]=record.name<block_end><block_end><class_stmt>LoggerFactory<block_start>_configured=<false><line_sep>@classmethod<def_stmt>configure cls logger_name="app" log_filename="./logs.json" request_context=request_context <block_start>cls.logger_name=logger_name<line_sep>cls.log_filename=log_filename<line_sep>cls.request_context=request_context<line_sep>cls._configured=<true><block_end>@classmethod<def_stmt>create_logger cls<block_start>""" Returns a logger instance, based on a configuration options """<if_stmt><not>cls._configured<block_start>cls.configure()<block_end>logging_config={"version":1 "disable_existing_loggers":<false> "formatters":{"default":{# exact format is not important, this is the minimum information "format":"%(asctime)s %(name)-12s %(levelname)-8s %(correlation_id)s %(message)s" } "colored":{"()":"colorlog.ColoredFormatter" "format":"%(log_color)s%(asctime)s %(name)-12s %(levelname)-8s %(correlation_id)s %(message)s" "log_colors":{"DEBUG":"white" "INFO":"green" "WARNING":"yellow" "ERROR":"red" "CRITICAL":"red,bold" } } "colored_db":{"()":"colorlog.ColoredFormatter" "format":"%(log_color)s%(asctime)s %(name)-12s %(levelname)-8s %(correlation_id)s %(message)s" "log_colors":{"DEBUG":"purple" "INFO":"green" "WARNING":"yellow" "ERROR":"red" "CRITICAL":"red,bold" } } "json_formatter":{"()":"seedwork.infrastructure.logging.ElkJsonFormatter" } } "handlers":{# console logs to stderr "console":{"class":"logging.StreamHandler" "formatter":"default" } "colored_console":{"class":"colorlog.StreamHandler" "formatter":"colored" } "colored_console_db":{"class":"colorlog.StreamHandler" "formatter":"colored_db" } "file_handler":{"class":"logging.handlers.RotatingFileHandler" "filename":cls.log_filename "formatter":"json_formatter" }<if>cls.log_filename<else><none> # Add Handler for Sentry for `warning` and above # 'sentry': { # 'level': 'WARNING', # 'class': 'raven.contrib.django.raven_compat.handlers.SentryHandler', # }, } "loggers":{cls.logger_name:{"level":"DEBUG" "handlers":["colored_console" "file_handler"] # , 'sentry'], } # Prevent noisy modules from logging to Sentry "noisy_module":{"level":"ERROR" "handlers":["console"] "propagate":<false> } } }<line_sep>dictConfig(logging_config)<line_sep>logger=logging.getLogger(name=cls.logger_name)<line_sep>logger.addFilter(RequestContextFilter(name=cls.logger_name request_context=cls.request_context))<line_sep><return>logger<block_end><block_end>""" We 
make the logger globally available, but to keep it configurable the logger is evaluated lazily. Use `LoggerFactory.configure()` to configure the logger prior to its first use. """<line_sep>logger=SimpleLazyObject(LoggerFactory.create_logger)<line_sep>
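# --- Editor's usage sketch (not part of the original module) ---
# Illustrates how the LoggerFactory above is meant to be driven: configure it once,
# then let the lazily evaluated `logger` build itself on first access. The import path
# matches the "json_formatter" reference above; the log file name is an assumption.
from seedwork.infrastructure.logging import LoggerFactory, logger

LoggerFactory.configure(logger_name="app", log_filename="./logs.json")
logger.info("application started")  # first access triggers LoggerFactory.create_logger()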
"""Definition of string used in wiffi json telegrams."""<line_sep># units of measurement WIFFI_UOM_TEMP_CELSIUS="gradC"<line_sep>WIFFI_UOM_DEGREE="grad"<line_sep>WIFFI_UOM_PERCENT="%"<line_sep>WIFFI_UOM_MILLI_BAR="mb"<line_sep>WIFFI_UOM_LUX="lux"<line_sep>
<import_from_stmt>uasyncio StreamReader<class_stmt>MockSock<block_start><def_stmt>__init__ self data_list<block_start>self.data=data_list<block_end><def_stmt>read self sz<block_start><try_stmt><block_start><return>self.data.pop(0)<block_end><except_stmt>IndexError<block_start><return>b""<block_end><block_end><block_end>mock=MockSock([b"123" b"234" b"5" b"a" b"b" b"c" b"d" b"e" ])<def_stmt>func <block_start>sr=StreamReader(mock)<assert_stmt><await>sr.readexactly(3)<eq>b"123"<assert_stmt><await>sr.readexactly(4)<eq>b"2345"<assert_stmt><await>sr.readexactly(5)<eq>b"abcde"<line_sep># This isn't how it should be, but the current behavior <assert_stmt><await>sr.readexactly(10)<eq>b""<block_end><for_stmt>i func()<block_start><pass><block_end>
''' Example dangerous usage of urllib[2] opener functions The urllib and urllib2 opener functions and object can open http, ftp, and file urls. Often, the ability to open file urls is overlooked leading to code that can unexpectedly open files on the local server. This could be used by an attacker to leak information about the server. '''<import_stmt>urllib<import_stmt>urllib2<line_sep># Python 3 <import_stmt>urllib.request<line_sep># Six <import_stmt>six<def_stmt>test_urlopen # urllib <block_start>url=urllib.quote('file:///bin/ls')<line_sep>urllib.urlopen(url 'blah' 32)<line_sep>urllib.urlretrieve('file:///bin/ls' '/bin/ls2')<line_sep>opener=urllib.URLopener()<line_sep>opener.open('file:///bin/ls')<line_sep>opener.retrieve('file:///bin/ls')<line_sep>opener=urllib.FancyURLopener()<line_sep>opener.open('file:///bin/ls')<line_sep>opener.retrieve('file:///bin/ls')<line_sep># urllib2 handler=urllib2.HTTPBasicAuthHandler()<line_sep>handler.add_password(realm='test' uri='http://mysite.com' user='bob')<line_sep>opener=urllib2.build_opener(handler)<line_sep>urllib2.install_opener(opener)<line_sep>urllib2.urlopen('file:///bin/ls')<line_sep>urllib2.Request('file:///bin/ls')<line_sep># Python 3 urllib.request.urlopen('file:///bin/ls')<line_sep>urllib.request.urlretrieve('file:///bin/ls' '/bin/ls2')<line_sep>opener=urllib.request.URLopener()<line_sep>opener.open('file:///bin/ls')<line_sep>opener.retrieve('file:///bin/ls')<line_sep>opener=urllib.request.FancyURLopener()<line_sep>opener.open('file:///bin/ls')<line_sep>opener.retrieve('file:///bin/ls')<line_sep># Six six.moves.urllib.request.urlopen('file:///bin/ls')<line_sep>six.moves.urllib.request.urlretrieve('file:///bin/ls' '/bin/ls2')<line_sep>opener=six.moves.urllib.request.URLopener()<line_sep>opener.open('file:///bin/ls')<line_sep>opener.retrieve('file:///bin/ls')<line_sep>opener=six.moves.urllib.request.FancyURLopener()<line_sep>opener.open('file:///bin/ls')<line_sep>opener.retrieve('file:///bin/ls')<block_end>
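# --- Editor's sketch (not part of the original test fixture) ---
# A common guard against the problem described in the module docstring is to allowlist
# URL schemes before opening anything. `fetch_http_only` is a hypothetical helper name;
# only standard-library calls are used.
from urllib.parse import urlparse
from urllib.request import urlopen


def fetch_http_only(url):
    """Open a URL only if it uses http(s), refusing file:// and ftp:// schemes."""
    scheme = urlparse(url).scheme
    if scheme not in ("http", "https"):
        raise ValueError("refusing to open URL with scheme %r" % scheme)
    return urlopen(url)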
<import_stmt>os<import_stmt>configparser<import_stmt>yaml<import_stmt>ast<import_from_stmt>pathlib Path<line_sep>HERE=Path(__file__).parent.absolute()<line_sep>print(HERE)<line_sep>config_dir=HERE/'config/config.ini.model'<line_sep>config=configparser.ConfigParser()<line_sep>config.read(config_dir)<line_sep>ACCESS_TOKEN_EXPIRE_MINUTES=config.get('security' 'access_token_expire_minutes')<line_sep>JWT_ALGORITHM=config.get('security' 'jwt_algorithm')<line_sep>OAUTH_REDIRECT_PATH=config.get('github' 'oauth_redirect_path')<line_sep>REDIRECT_URI=config.get('github' 'redirect_uri')<line_sep>CLIENT_ID=config.get('github' 'client_id')<line_sep>CLIENT_SECRET=config.get('github' 'client_secret')<line_sep>HOST_PATH=config.get('global' 'host_path')<line_sep>WEB_PORT=config.get('port' "fastapi")<line_sep># DB_URL = os.getenv('DB_URL', config.get('database', 'db_url')) db_host=config.get('database' 'host')<line_sep>db_username=config.get('database' 'username')<line_sep>db_pwd=config.get('database' 'password')<line_sep>db_port=config.get('database' 'port')<line_sep>db=config.get('database' 'db')<line_sep>charset=config.get('database' 'charset')<line_sep>DB_URL=f'mysql+pymysql://{db_username}:{db_pwd}@{db_host}:{db_port}/{db}?charset={charset}'<line_sep>print(DB_URL)<line_sep>REDIS_URL=os.getenv('REDIS_URL' config.get('redis' 'redis_url'))<line_sep>DEBUG=os.getenv('DEBUG' config.get('global' 'debug')).lower()<in>('true' 'y' 'yes' '1')<line_sep>WTF_CSRF_SECRET_KEY=123<line_sep>AUTH_LOGIN_ENDPOINT='index.login'<line_sep>MEMCACHED_HOST=os.getenv('MEMCACHED_HOST' config.get('memcached' 'memcached_host'))<line_sep>MEMCACHED_PORT=config.get('memcached' 'memcached_port')<line_sep>oauth_redirect_path='/oauth'<line_sep>redirect_uri='http://127.0.0.1:8000/oauth'<line_sep>client_id="098a2e6da880878e05da"<line_sep>client_secret="<KEY>"<line_sep>REACT_PROMPT='Like this article? Remember to leave me a comment or subscribe!'<line_sep>PLOAD_FOLDER=HERE/'static/upload'<line_sep>AUTHOR='zhikai'<line_sep>SITE_TITLE='Zhikai-Yang Space'<line_sep>PER_PAGE=10<line_sep>GOOGLE_ANALYTICS=''<line_sep>SENTRY_DSN=''<line_sep>REQUEST_TIMEOUT=15<line_sep>SHOW_PAGEVIEW=<true><line_sep>PERMALINK_TYPE='slug'# options: id, slug, title # [(Endpoint, Name, IconName, Color), ...] 
# SITE_NAV_MENUS = [('blog.index', 'Home'), ('blog.topics', 'Topics'), # ('blog.archives', 'Archives'), ('blog.tags', 'Tags'), # ('index.search', 'Search'), ('/page/aboutme', 'About Me'), # ('index.feed', 'RSS', 'rss', '#fc6423')] SITE_NAV_MENUS=[('blog.index' 'Home') ('blog.activities' 'Activities') ('blog.tags' 'Tags') ('index.search' 'Search') ('blog.archives' 'Archives') ('/post/aboutme' 'About Me')]<line_sep>BEIAN_ID=''<line_sep>JWT_SECRET=config.get('security' 'jwt_secret')<line_sep>EXPIRATION_DELTA=60<times>60<line_sep>WTF_CSRF_ENABLED=<false><line_sep>MAIL_SERVER='smtp.qq.com'<line_sep>MAIL_PORT=465<line_sep>MAIL_USERNAME=''<line_sep>MAIL_PASSWORD=''<line_sep>BLOG_URL='https://example.com'<line_sep>UPLOAD_FOLDER=HERE/'static/upload'<line_sep># Redis sentinel REDIS_SENTINEL_SERVICE_HOST=<none><line_sep>REDIS_SENTINEL_SERVICE_PORT=26379<line_sep>SHOW_AUTHOR=<true><class_stmt>AttrDict(dict)<block_start><def_stmt>__init__ self *args **kwargs<block_start>super().__init__(*args **kwargs)<line_sep>self.__dict__=self<block_end><block_end><try_stmt><block_start><with_stmt>open(HERE/'config.yaml')<as>f<block_start>yaml_content=f.read()<line_sep>partials=AttrDict(yaml.load(yaml_content)).partials<block_end>USE_YAML=<true><block_end><except_stmt>FileNotFoundError<block_start>USE_YAML=<false><line_sep>partials={}<block_end><try_stmt><block_start><import_from_stmt>local_settings *# noqa <block_end><except_stmt>ImportError<block_start><pass><block_end>K_POST=1001<line_sep>K_COMMENT=1002<line_sep>ONE_MINUTE=60<line_sep>ONE_HOUR=ONE_MINUTE<times>60<line_sep>ONE_DAY=ONE_HOUR<times>24<line_sep>K_STATUS=1003<line_sep>K_ACTIVITY=1004<line_sep>CDN_DOMAIN=''<line_sep>USE_FFMPEG=<false><line_sep>STATIC_FILE_TYPES=('jpg' 'png' 'webp' 'gif' 'mp4' 'css' 'js')<line_sep>
<import_from_stmt>.accuracy Accuracy<import_from_stmt>.builder build_metric<line_sep>
# This sample tests the type checker's handling of ParamSpec # and Concatenate as described in PEP 612. <import_from_stmt>typing Callable Concatenate ParamSpec TypeVar<line_sep>P=ParamSpec("P")<line_sep>R=TypeVar("R")<class_stmt>Request<block_start><ellipsis><block_end><def_stmt>with_request f:Callable[Concatenate[Request P] R]<arrow>Callable[P R]<block_start><def_stmt>inner *args:P.args **kwargs:P.kwargs<arrow>R<block_start><return>f(Request() *args **kwargs)<block_end><return>inner<block_end>@with_request<def_stmt>takes_int_str request:Request x:int y:str<arrow>int# use request <block_start><return>x+7<block_end>takes_int_str(1 "A")<line_sep># This should generate an error because the first arg # is the incorrect type. takes_int_str("B" "A")<line_sep># This should generate an error because there are too # many parameters. takes_int_str(1 "A" 2)<line_sep># This should generate an error because a ParamSpec can appear # only within the last type arg for Concatenate <def_stmt>decorator1 f:Callable[Concatenate[P P] int]<arrow>Callable[P int]<block_start><ellipsis><block_end># This should generate an error because the last type arg # for Concatenate should be a ParamSpec. <def_stmt>decorator2 f:Callable[Concatenate[int int] int]<arrow>Callable[P int]<block_start><ellipsis><block_end># This should generate an error because Concatenate is missing # its type arguments. <def_stmt>decorator3 f:Callable[Concatenate int]<arrow>Callable[P int]<block_start><ellipsis><block_end><def_stmt>decorator4 func:Callable[P <none>]<arrow>Callable[Concatenate[int P] <none>]<block_start><def_stmt>wrapper x:int / *args:P.args **kwargs:P.kwargs<arrow><none><block_start><ellipsis><block_end><return>wrapper<block_end><def_stmt>func1 func:Callable[Concatenate[int P] <none>]<arrow>Callable[P <none>]<block_start><ellipsis><block_end><def_stmt>func2 a:int b:str c:str<arrow><none><block_start><ellipsis><block_end><def_stmt>func3 a:int / b:str c:str<arrow><none><block_start><ellipsis><block_end><def_stmt>func4 a:int b:str / c:str<arrow><none><block_start><ellipsis><block_end>v1=func1(func2)<line_sep>reveal_type(v1 expected_text="(b: str, c: str) -> None")<line_sep>v2=func1(func3)<line_sep>reveal_type(v2 expected_text="(b: str, c: str) -> None")<line_sep>v3=func1(func4)<line_sep>reveal_type(v3 expected_text="(b: str, /, c: str) -> None")<def_stmt>func5 __fn:Callable[P R] *args:P.args **kwargs:P.kwargs<arrow>R<block_start><ellipsis><block_end><def_stmt>func6 name:str *args:str<block_start><ellipsis><block_end>v5=func5(func6 "a" "b" "c")<line_sep># This should generate an error because 1 isn't assignable to str. v6=func5(func6 "a" "b" "c" 1)<def_stmt>func7 name:str **kwargs:str<block_start><ellipsis><block_end>v7=func5(func7 "a" b="b" c="c")<line_sep># This should generate an error because 1 isn't assignable to str. v8=func5(func7 "a" b="b" c=1)<line_sep>
# -*- coding: utf-8 -*- # Generated by Django 1.11.15 on 2018-11-04 18:42 <import_from_future_stmt> unicode_literals<import_from_stmt>django.db migrations models<class_stmt>Migration(migrations.Migration)<block_start>dependencies=[("core" "0014_auto_20180511_2122")]<line_sep>operations=[migrations.AddField(model_name="job" name="ad_interested" field=models.BooleanField(default=<false> verbose_name="Interessado em ficar em destaque no PyJobs?" ) )]<block_end>
# Generated by Django 3.0.7 on 2021-02-05 09:15 <import_from_stmt>django.db migrations models<import_stmt>django.db.models.deletion<import_stmt>django.utils.timezone<class_stmt>Migration(migrations.Migration)<block_start>dependencies=[('panel' '0003_auto_20210205_0955') ]<line_sep>operations=[migrations.CreateModel(name='Log' fields=[('id' models.AutoField(auto_created=<true> primary_key=<true> serialize=<false> verbose_name='ID')) ('status' models.CharField(max_length=4)) ('time' models.DateTimeField(default=django.utils.timezone.now)) ('monitor_object' models.ForeignKey(on_delete=django.db.models.deletion.CASCADE to='panel.MonitorObject')) ] ) ]<block_end>
""" pkg.init """<line_sep>
<import_stmt>os<import_stmt>fnmatch<def_stmt>finder path ext<block_start>"""Returns files from path by extension"""<line_sep>l=[]<if_stmt><not>ext.startswith('*.')<block_start>ext='*.{0}'.format(ext)<block_end><for_stmt>path,dirs,files os.walk(os.path.abspath(path))<block_start><for_stmt>f fnmatch.filter(files ext)<block_start>l.append(os.path.join(path f))<block_end><block_end><return>l<block_end>
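# --- Editor's usage example for the `finder` helper defined above ---
# The directory and extension are placeholders; the call assumes `finder` is in scope.
if __name__ == "__main__":
    for path in finder(".", "py"):  # equivalent to passing "*.py"
        print(path)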
"""This module implements the Coordinate entity"""<line_sep># Copyright (C) 2021-2022 Intel Corporation # SPDX-License-Identifier: Apache-2.0 # <import_from_stmt>typing Tuple<class_stmt>Coordinate<block_start>""" Represents a 2D-coordinate with an x-position and a y-position. NB most coordinates are normalized (between 0.0 and 1.0) :param x: x-coordinate :param y: y-coordinate """<line_sep>__slots__=["x" "y"]<def_stmt>__init__ self x:float y:float<block_start>self.x=x<line_sep>self.y=y<block_end><def_stmt>__repr__ self<block_start><return>f"Coordinate(x={self.x}, y={self.y})"<block_end><def_stmt>__eq__ self other<block_start><return>self.x<eq>other.x<and>self.y<eq>other.y<block_end><def_stmt>__hash__ self<block_start><return>hash(str(self))<block_end><def_stmt>as_tuple self<arrow>Tuple[float float]<block_start>""" Convert the coordinates to a pair (x,y) """<line_sep><return>self.x self.y<block_end><def_stmt>as_int_tuple self<arrow>Tuple[int int]<block_start>""" Convert the coordinates to a pair of integer coordinates (x,y) """<line_sep><return>int(self.x) int(self.y)<block_end><block_end>
# encoding: utf-8 # author: BrikerMan # contact: <EMAIL> # blog: https://eliyar.biz # file: __init__.py # time: 7:39 PM <import_from_stmt>typing Dict Any<import_from_stmt>tensorflow keras<import_from_stmt>.conditional_random_field KConditionalRandomField<import_from_stmt>.behdanau_attention BahdanauAttention# type: ignore L=keras.layers<line_sep>L.BahdanauAttention=BahdanauAttention<line_sep>L.KConditionalRandomField=KConditionalRandomField<def_stmt>resigter_custom_layers custom_objects:Dict[str Any]<arrow>Dict[str Any]<block_start>custom_objects['KConditionalRandomField']=KConditionalRandomField<line_sep>custom_objects['BahdanauAttention']=BahdanauAttention<line_sep><return>custom_objects<block_end><if_stmt>__name__<eq>"__main__"<block_start><pass><block_end>
<class_stmt>Solution# Maximum Size Subarray Sum Equals k <block_start><def_stmt>maxSubArrayLen self nums:List[int] k:int<arrow>int<block_start>hm={0:-1}<line_sep>ps=0<line_sep>rtn=0<for_stmt>i range(len(nums))<block_start>ps<augadd>nums[i]<if_stmt>ps<not><in>hm<block_start>hm[ps]=i<block_end><if_stmt>ps-k<in>hm<block_start>rtn=max(rtn i-hm[ps-k])<block_end><block_end><return>rtn<block_end><def_stmt>maxSubArrayLen self nums:List[int] k:int<arrow>int<block_start>hm={0:-1}<line_sep>ps=rtn=0<for_stmt>i,n enumerate(nums)<block_start>ps<augadd>n<if_stmt>ps<not><in>hm<block_start>hm[ps]=i<block_end><if_stmt>ps-k<in>hm<block_start>rtn=max(rtn i-hm[ps-k])<block_end><block_end><return>rtn<block_end><block_end>
<import_from_stmt>..fil_PH Provider<as>FilPhProvider<class_stmt>Provider(FilPhProvider)<block_start>"""Implement lorem provider for ``tl_PH`` locale. There is no difference from the |FilPhLoremProvider|. .. |FilPhLoremProvider| replace:: :meth:`FilPhLoremProvider <faker.providers.lorem.fil_PH.Provider>` """<line_sep><pass><block_end>
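# --- Editor's sketch of how the provider above is normally exercised ---
# Standard Faker usage is assumed: creating a Faker with the ``tl_PH`` locale picks up
# this lorem provider automatically.
from faker import Faker

fake = Faker("tl_PH")
print(fake.sentence())
print(fake.paragraph(nb_sentences=2))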
<import_from_stmt>.retinanet build_retinanet<line_sep>
"""TrackML scoring metric"""<line_sep>__authors__=['<NAME>' '<NAME>' '<NAME>' '<NAME>']<import_stmt>numpy<import_stmt>pandas<def_stmt>_analyze_tracks truth submission<block_start>"""Compute the majority particle, hit counts, and weight for each track. Parameters ---------- truth : pandas.DataFrame Truth information. Must have hit_id, particle_id, and weight columns. submission : pandas.DataFrame Proposed hit/track association. Must have hit_id and track_id columns. Returns ------- pandas.DataFrame Contains track_id, nhits, major_particle_id, major_particle_nhits, major_nhits, and major_weight columns. """<line_sep># true number of hits for each particle_id particles_nhits=truth['particle_id'].value_counts(sort=<false>)<line_sep>total_weight=truth['weight'].sum()<line_sep># combined event with minimal reconstructed and truth information event=pandas.merge(truth[['hit_id' 'particle_id' 'weight']] submission[['hit_id' 'track_id']] on=['hit_id'] how='left' validate='one_to_one')<line_sep>event.drop('hit_id' axis=1 inplace=<true>)<line_sep>event.sort_values(by=['track_id' 'particle_id'] inplace=<true>)<line_sep># ASSUMPTIONs: 0 <= track_id, 0 <= particle_id tracks=[]<line_sep># running sum for the reconstructed track we are currently in rec_track_id=-1<line_sep>rec_nhits=0<line_sep># running sum for the particle we are currently in (in this track_id) cur_particle_id=-1<line_sep>cur_nhits=0<line_sep>cur_weight=0<line_sep># majority particle with most hits up to now (in this track_id) maj_particle_id=-1<line_sep>maj_nhits=0<line_sep>maj_weight=0<for_stmt>hit event.itertuples(index=<false>)# we reached the next track so we need to finish the current one <block_start><if_stmt>(rec_track_id<ne>-1)<and>(rec_track_id<ne>hit.track_id)# could be that the current particle is the majority one <block_start><if_stmt>maj_nhits<l>cur_nhits<block_start>maj_particle_id=cur_particle_id<line_sep>maj_nhits=cur_nhits<line_sep>maj_weight=cur_weight<block_end># store values for this track tracks.append((rec_track_id rec_nhits maj_particle_id particles_nhits[maj_particle_id] maj_nhits maj_weight/total_weight))<block_end># setup running values for next track (or first) <if_stmt>rec_track_id<ne>hit.track_id<block_start>rec_track_id=hit.track_id<line_sep>rec_nhits=1<line_sep>cur_particle_id=hit.particle_id<line_sep>cur_nhits=1<line_sep>cur_weight=hit.weight<line_sep>maj_particle_id=-1<line_sep>maj_nhits=0<line_sep>maj_weights=0<line_sep><continue><block_end># hit is part of the current reconstructed track rec_nhits<augadd>1<line_sep># reached new particle within the same reconstructed track <if_stmt>cur_particle_id<ne>hit.particle_id# check if last particle has more hits than the majority one # if yes, set the last particle as the new majority particle <block_start><if_stmt>maj_nhits<l>cur_nhits<block_start>maj_particle_id=cur_particle_id<line_sep>maj_nhits=cur_nhits<line_sep>maj_weight=cur_weight<block_end># reset runnig values for current particle cur_particle_id=hit.particle_id<line_sep>cur_nhits=1<line_sep>cur_weight=hit.weight<block_end># hit belongs to the same particle within the same reconstructed track <else_stmt><block_start>cur_nhits<augadd>1<line_sep>cur_weight<augadd>hit.weight<block_end><block_end># last track is not handled inside the loop <if_stmt>maj_nhits<l>cur_nhits<block_start>maj_particle_id=cur_particle_id<line_sep>maj_nhits=cur_nhits<line_sep>maj_weight=cur_weight<block_end># store values for the last track tracks.append((rec_track_id rec_nhits maj_particle_id particles_nhits[maj_particle_id] 
maj_nhits maj_weight/total_weight))<line_sep>cols=['track_id' 'nhits' 'major_particle_id' 'major_particle_nhits' 'major_nhits' 'major_weight']<line_sep><return>pandas.DataFrame.from_records(tracks columns=cols)<block_end><def_stmt>score_event truth submission<block_start>"""Compute the TrackML event score for a single event. Parameters ---------- truth : pandas.DataFrame Truth information. Must have hit_id, particle_id, and weight columns. submission : pandas.DataFrame Proposed hit/track association. Must have hit_id and track_id columns. """<line_sep>tracks=_analyze_tracks(truth submission)<line_sep>purity_rec=numpy.true_divide(tracks['major_nhits'] tracks['nhits'])<line_sep>purity_maj=numpy.true_divide(tracks['major_nhits'] tracks['major_particle_nhits'])<line_sep>good_track=(0.5<l>purity_rec)&(0.5<l>purity_maj)<line_sep><return>tracks['major_weight'][good_track].sum()<block_end>
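# --- Editor's sketch of the input shapes expected by score_event above ---
# The hit/track ids and weights are made up and only show the required columns;
# the call assumes the functions defined above are in scope.
import pandas

truth = pandas.DataFrame({
    "hit_id":      [1, 2, 3, 4],
    "particle_id": [7, 7, 7, 9],
    "weight":      [0.25, 0.25, 0.25, 0.25],
})
submission = pandas.DataFrame({
    "hit_id":   [1, 2, 3, 4],
    "track_id": [1, 1, 1, 2],
})
print(score_event(truth, submission))  # 1.0 -- every track is pure and matches its majority particle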
<import_from_stmt>spacy.lang.zh Chinese<import_from_stmt>spacy.tokens Token<line_sep>nlp=Chinese()<line_sep># Register the token extension attribute "is_country", with a default value of False Token.set_extension("is_country" default=<false>)<line_sep># Process the text and set the is_country attribute of the token "新加坡" (Singapore) to True doc=nlp("我住在新加坡。")<line_sep>doc[3]._.is_country=<true><line_sep># Print the token text and the is_country attribute for every token print([(token.text token._.is_country)<for>token doc])<line_sep>
<import_from_stmt>disco.job SimpleJob<class_stmt>SimpleJob(SimpleJob)<block_start><def_stmt>map self worker task **jobargs<block_start>worker.output(task partition=<none>).file.append('hello world!')<block_end><def_stmt>reduce self worker task **jobargs<block_start>worker.output(task partition=<none>).file.append('goodbye world!')<block_end><block_end>
CACHE_EXCLUDED_SHIPPING_KEY="webhook_exclude_shipping_id_"<line_sep>CACHE_EXCLUDED_SHIPPING_TIME=60<times>3<line_sep>EXCLUDED_SHIPPING_REQUEST_TIMEOUT=2<line_sep>
<import_from_stmt>.module *<import_from_stmt>.util *<line_sep>
queuemanager=<none><line_sep>localfiles=<none><line_sep>
""" Note: This is an extension of House Robber. After robbing those houses on that street, the thief has found himself a new place for his thievery so that he will not get too much attention. This time, all houses at this place are arranged in a circle. That means the first house is the neighbor of the last one. Meanwhile, the security system for these houses remain the same as for those in the previous street. Given a list of non-negative integers representing the amount of money of each house, determine the maximum amount of money you can rob tonight without alerting the police. """<class_stmt>Solution(object)<block_start><def_stmt>rob self nums<block_start>""" :type nums: List[int] :rtype: int """<line_sep>n=len(nums)<if_stmt>n<eq>0<block_start><return>0<block_end><elif_stmt>n<eq>1<block_start><return>nums[0]<block_end><return>max(self.rob_aux(nums 0) self.rob_aux(nums 1))<block_end><def_stmt>rob_aux self nums left<block_start>n=len(nums)-1<line_sep>t=[0<for>i range(n+1)]<if_stmt>n<eq>0<block_start><return>t[n]<block_end>t[1]=nums[left]<if_stmt>n<le>1<block_start><return>t[n]<block_end>t[2]=max(nums[left:left+2])<for_stmt>i range(3 n+1)<block_start>t[i]=max(t[i-2]+nums[left+i-1] t[i-1])<block_end><return>t[n]<block_end><block_end>a1=[1]<line_sep>a2=[4 1 6 10 5 13 2 7]<line_sep>s=Solution()<line_sep>print(s.rob(a1))<line_sep>print(s.rob(a2))<line_sep>
# -*- coding: utf-8 -*- """ rest module. ========= Provides: 1. Asynchronous execution of JSON services How to use the documentation ---------------------------- Documentation is available in one form: docstrings provided with the code Copyright (c) 2016, <NAME>. MIT, see LICENSE for more details. """<import_from_stmt>. term_rest<line_sep>term_rest<line_sep>
<import_stmt>os<import_stmt>pandas<as>pd<import_from_stmt>os2d.utils.logger extract_value_from_os2d_binary_log mAP_percent_to_points<if_stmt>__name__<eq>"__main__"<block_start>config_path=os.path.dirname(os.path.abspath(__file__))<line_sep>config_job_name="exp2"<line_sep>log_path=os.path.abspath(os.path.join(config_path ".." "output/exp2"))<def_stmt>get_result job_type # "v1" or "v2" sub_index backbone_arch init_model_nickname random_seed <block_start>job_name=f"{config_job_name}.{sub_index}.{job_type}_seed{random_seed}"<line_sep>log_folder=job_name+"_"+backbone_arch+"_init_"+init_model_nickname<line_sep>log_folder=os.path.join(log_path log_folder)<line_sep>data_file=os.path.join(log_folder "train_log.pkl")<line_sep><return>mAP_percent_to_points(extract_value_from_os2d_binary_log(data_file "[email protected]_grozi-val-new-cl" reduce="max")) mAP_percent_to_points(extract_value_from_os2d_binary_log(data_file "[email protected]_grozi-val-new-cl" reduce="first"))<block_end>table=pd.DataFrame(columns=["arch" "init" "v1-train" "v2-init" "v2-train"])<line_sep>random_seed=0<for_stmt>i,arch,init zip(range(10) ["ResNet50"]<times>5+["ResNet101"]<times>5 ["fromScratch" "imageNetPth" "imageNetCaffe2" "imageNetCaffe2GroupNorm" "cocoMaskrcnnFpn" "imageNetPth" "imageNetCaffe2" "buildingsCirtorch" "cocoMaskrcnnFpn" "pascalWeakalign"])<block_start>val_train_v1,val_init_v1=get_result("v1" i arch init random_seed)<line_sep>val_train_v2,val_init_v2=get_result("v2" i arch init random_seed)<line_sep>table=table.append({"arch":arch "init":init "v1-train":val_train_v1 "v2-init":val_init_v2 "v2-train":val_train_v2} ignore_index=<true>)<block_end>print(table sep='\n')<block_end>
<import_from_stmt>flask Flask<import_from_stmt>flask jsonify<import_from_stmt>flask request<import_from_stmt>flask_cors CORS<import_from_stmt>raven.contrib.flask Sentry<import_from_stmt>orion.context Context<import_from_stmt>orion.handlers handler_classes<def_stmt>init_app app<block_start>""" Statefully initialize the Flask application. This involves creating a sever-side application context and adding route definitions for all endpoint handlers. :param app: Uninitialized Flask application instance. :return: Server-side application context. """<line_sep>ctx=Context(app)<line_sep>CORS(app supports_credentials=<true> origins=[ctx.config.get_value('frontend_url')])<line_sep>sentry_dsn=ctx.config.get_value('sentry_dsn')<if_stmt>sentry_dsn<block_start>Sentry(dsn=sentry_dsn).init_app(app)<block_end><def_stmt>map_handler_func HandlerClass<block_start>""" Create all necessary params for adding this route to the Flask server. :param HandlerClass: Handler class to prepare. :return: A tuple of (path, name, view_func, methods) for this handler. """<def_stmt>handler_wrapper *args **kwargs# Provide an abstraction for supplying the handler with request JSON. <block_start>data=request.get_json(force=<true> silent=<true>)<or>{}<line_sep>handler=HandlerClass(ctx data)<line_sep>resp_json,status=handler.run(*args **kwargs)<line_sep><return>jsonify(resp_json) status<block_end><return>HandlerClass.path HandlerClass.__name__ handler_wrapper HandlerClass.methods<block_end><for_stmt>rule,endpoint,view_func,methods map(map_handler_func handler_classes)<block_start>app.add_url_rule(rule=rule endpoint=endpoint view_func=view_func methods=methods )<block_end><return>ctx<block_end><def_stmt>create_app <block_start>""" Create a fully initialized Flask application instance for this server. :return: The initialized Flask application instance. """<line_sep>app=Flask('orion')<line_sep>ctx=init_app(app)<line_sep>app.ctx=ctx<line_sep><return>app<block_end>
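# --- Editor's sketch of serving the factory above during development ---
# The import path of create_app and the host/port values are assumptions; a production
# deployment would typically sit behind a WSGI server instead of app.run().
from orion.app import create_app  # assumed module path for the factory defined above

app = create_app()

if __name__ == "__main__":
    app.run(host="127.0.0.1", port=5000)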
<import_from_stmt>typing Optional<import_stmt>graphene<import_from_stmt>django.core.exceptions ValidationError<import_from_stmt>....giftcard.utils order_has_gift_card_lines<import_from_stmt>....order FulfillmentLineData<import_from_stmt>....order models<as>order_models<import_from_stmt>....order.error_codes OrderErrorCode<import_from_stmt>....order.fetch OrderLineInfo<import_from_stmt>....payment.models TransactionItem<import_from_stmt>...core.mutations BaseMutation<import_from_stmt>..types FulfillmentLine OrderLine<class_stmt>FulfillmentRefundAndReturnProductBase(BaseMutation)<block_start><class_stmt>Meta<block_start>abstract=<true><block_end>@classmethod<def_stmt>clean_order_payment cls payment cleaned_input<block_start><if_stmt><not>payment<or><not>payment.can_refund()<block_start><raise>ValidationError({"order":ValidationError("Order cannot be refunded." code=OrderErrorCode.CANNOT_REFUND.value )})<block_end>cleaned_input["payment"]=payment<block_end>@classmethod<def_stmt>clean_amount_to_refund cls order amount_to_refund charged_value cleaned_input<block_start><if_stmt>amount_to_refund<is><not><none><block_start><if_stmt>order_has_gift_card_lines(order)<block_start><raise>ValidationError({"amount_to_refund":ValidationError(("Cannot specified amount to refund when order has "<concat>"gift card lines.") code=OrderErrorCode.CANNOT_REFUND.value )})<block_end><if_stmt>amount_to_refund<g>charged_value<block_start><raise>ValidationError({"amount_to_refund":ValidationError(("The amountToRefund is greater than the maximal "<concat>"possible amount to refund.") code=OrderErrorCode.CANNOT_REFUND.value ) })<block_end><block_end>cleaned_input["amount_to_refund"]=amount_to_refund<block_end>@classmethod<def_stmt>_raise_error_for_line cls msg type line_id field_name code=<none><block_start>line_global_id=graphene.Node.to_global_id(type line_id)<if_stmt><not>code<block_start>code=OrderErrorCode.INVALID_QUANTITY.value<block_end><raise>ValidationError({field_name:ValidationError(msg code=code params={field_name:line_global_id} )})<block_end>@classmethod<def_stmt>raise_error_for_payment_error cls transactions:Optional[TransactionItem]<block_start><if_stmt>transactions<block_start>code=OrderErrorCode.MISSING_TRANSACTION_ACTION_REQUEST_WEBHOOK.value<line_sep>msg="No app or plugin is configured to handle payment action requests."<block_end><else_stmt><block_start>msg="The refund operation is not available yet."<line_sep>code=OrderErrorCode.CANNOT_REFUND.value<block_end><raise>ValidationError(msg code=code )<block_end>@classmethod<def_stmt>clean_fulfillment_lines cls fulfillment_lines_data cleaned_input whitelisted_statuses<block_start>fulfillment_lines=cls.get_nodes_or_error([line["fulfillment_line_id"]<for>line fulfillment_lines_data] field="fulfillment_lines" only_type=FulfillmentLine qs=order_models.FulfillmentLine.objects.prefetch_related("fulfillment" "order_line") )<line_sep>fulfillment_lines=list(fulfillment_lines)<line_sep>cleaned_fulfillment_lines=[]<for_stmt>line,line_data zip(fulfillment_lines fulfillment_lines_data)<block_start>quantity=line_data["quantity"]<if_stmt>line.order_line.is_gift_card<block_start>cls._raise_error_for_line("Cannot refund or return gift card line." 
"FulfillmentLine" line.pk "fulfillment_line_id" OrderErrorCode.GIFT_CARD_LINE.value )<block_end><if_stmt>line.quantity<l>quantity<block_start>cls._raise_error_for_line("Provided quantity is bigger than quantity from "<concat>"fulfillment line" "FulfillmentLine" line.pk "fulfillment_line_id" )<block_end><if_stmt>line.fulfillment.status<not><in>whitelisted_statuses<block_start>allowed_statuses_str=", ".join(whitelisted_statuses)<line_sep>cls._raise_error_for_line(f"Unable to process action for fulfillmentLine with different "<concat>f"status than {allowed_statuses_str}." "FulfillmentLine" line.pk "fulfillment_line_id" code=OrderErrorCode.INVALID.value )<block_end>replace=line_data.get("replace" <false>)<if_stmt>replace<and><not>line.order_line.variant_id<block_start>cls._raise_error_for_line("Unable to replace line as the assigned product doesn't exist." "OrderLine" line.pk "order_line_id" )<block_end>cleaned_fulfillment_lines.append(FulfillmentLineData(line=line quantity=quantity replace=replace ))<block_end>cleaned_input["fulfillment_lines"]=cleaned_fulfillment_lines<block_end>@classmethod<def_stmt>clean_lines cls lines_data cleaned_input<block_start>order_lines=cls.get_nodes_or_error([line["order_line_id"]<for>line lines_data] field="order_lines" only_type=OrderLine qs=order_models.OrderLine.objects.prefetch_related("fulfillment_lines__fulfillment" "variant" "allocations") )<line_sep>order_lines=list(order_lines)<line_sep>cleaned_order_lines=[]<for_stmt>line,line_data zip(order_lines lines_data)<block_start>quantity=line_data["quantity"]<if_stmt>line.is_gift_card<block_start>cls._raise_error_for_line("Cannot refund or return gift card line." "OrderLine" line.pk "order_line_id" OrderErrorCode.GIFT_CARD_LINE.value )<block_end><if_stmt>line.quantity<l>quantity<block_start>cls._raise_error_for_line("Provided quantity is bigger than quantity from order line." "OrderLine" line.pk "order_line_id" )<block_end>quantity_ready_to_move=line.quantity_unfulfilled<if_stmt>quantity_ready_to_move<l>quantity<block_start>cls._raise_error_for_line("Provided quantity is bigger than unfulfilled quantity." "OrderLine" line.pk "order_line_id" )<block_end>variant=line.variant<line_sep>replace=line_data.get("replace" <false>)<if_stmt>replace<and><not>line.variant_id<block_start>cls._raise_error_for_line("Unable to replace line as the assigned product doesn't exist." "OrderLine" line.pk "order_line_id" )<block_end>cleaned_order_lines.append(OrderLineInfo(line=line quantity=quantity variant=variant replace=replace))<block_end>cleaned_input["order_lines"]=cleaned_order_lines<block_end><block_end>
<import_from_stmt>tuyaha.devices.base TuyaDevice<class_stmt>TuyaSwitch(TuyaDevice)<block_start><def_stmt>turn_on self<block_start><if_stmt>self._control_device("turnOnOff" {"value":"1"})<block_start>self._update_data("state" <true>)<block_end><block_end><def_stmt>turn_off self<block_start><if_stmt>self._control_device("turnOnOff" {"value":"0"})<block_start>self._update_data("state" <false>)<block_end><block_end><def_stmt>update self use_discovery=<true><block_start><return>self._update(use_discovery=<true>)<block_end><block_end>
# Copyright (c) 2015, <NAME> # All rights reserved. <import_stmt>os<import_stmt>re<def_stmt>get osx_version<block_start>dev_dir=re.sub(r'\.' '_' osx_version)<line_sep>dev_dir='OSX_{}_DEVELOPER_DIR'.format(dev_dir)<line_sep><return>os.getenv(dev_dir)<block_end>
<import_from_stmt>libsaas http parsers<import_from_stmt>libsaas.services base<import_from_stmt>. resource flags<class_stmt>CommentsBase(resource.UserVoiceTextResource)<block_start>path='comments'<def_stmt>wrap_object self name<block_start><return>{'comment':{'text':name}}<block_end><block_end><class_stmt>Comments(CommentsBase)<block_start><def_stmt>create self obj<block_start><raise>base.MethodNotSupported()<block_end><block_end><class_stmt>ForumSuggestionComment(CommentsBase)<block_start>@base.resource(flags.SuggestionCommentFlags)<def_stmt>flags self<block_start>""" Return the resource corresponding to all the flags of this comment. """<line_sep><return>flags.SuggestionCommentFlags(self)<block_end><block_end><class_stmt>ForumSuggestionComments(CommentsBase)<block_start>@base.apimethod<def_stmt>get self page=<none> per_page=<none> filter=<none> sort=<none><block_start>""" Fetch comments on this suggestion. :var page: Where should paging start. If left as `None`, the first page is returned. :vartype page: int :var per_page: How many objects sould be returned. If left as `None`, 10 objects are returned. :vartype per_page: int :var filter: The kind of comments to return, see upstream documentation for possible values. :vartype filter: str :var sort: How should the returned collection be sorted. Refer to upstream documentation for possible values. :vartype sort: str """<line_sep>params=base.get_params(<none> locals())<line_sep>request=http.Request('GET' self.get_url() params)<line_sep><return>request parsers.parse_json<block_end><block_end><class_stmt>UserComments(CommentsBase)<block_start><def_stmt>create self obj<block_start><raise>base.MethodNotSupported()<block_end>@base.apimethod<def_stmt>get self page=<none> per_page=<none> filter=<none> sort=<none><block_start>""" Fetch comments from this user. :var page: Where should paging start. If left as `None`, the first page is returned. :vartype page: int :var per_page: How many objects sould be returned. If left as `None`, 10 objects are returned. :vartype per_page: int :var filter: The kind of comments to return, see upstream documentation for possible values. :vartype filter: str :var sort: How should the returned collection be sorted. Refer to upstream documentation for possible values. :vartype sort: str """<line_sep>params=base.get_params(<none> locals())<line_sep>request=http.Request('GET' self.get_url() params)<line_sep><return>request parsers.parse_json<block_end><block_end>
<import_from_stmt>celery.result AsyncResult<import_from_stmt>rest_framework.permissions IsAuthenticated<import_from_stmt>rest_framework.response Response<import_from_stmt>rest_framework.views APIView<class_stmt>TaskStatus(APIView)<block_start>permission_classes=(IsAuthenticated )<def_stmt>get self request *args **kwargs<block_start>task=AsyncResult(kwargs['task_id'])<line_sep>ready=task.ready()<line_sep>error=ready<and><not>task.successful()<line_sep><return>Response({'ready':ready 'result':task.result<if>ready<and><not>error<else><none> 'error':{'text':str(task.result)}<if>error<else><none> })<block_end><block_end>
<import_from_stmt>gpiozero LEDBarGraph CPUTemperature<import_from_stmt>signal pause<line_sep>cpu=CPUTemperature(min_temp=50 max_temp=90)<line_sep>leds=LEDBarGraph(2 3 4 5 6 7 8 pwm=<true>)<line_sep>leds.source=cpu<line_sep>pause()<line_sep>
""" Adapted from: https://github.com/facebookresearch/SlowFast """<import_stmt>math<import_stmt>torch<import_stmt>torch.nn<as>nn<import_stmt>torch.nn.functional<as>F<import_from_stmt>torch Tensor<import_from_stmt>.head_helper X3DHead<import_from_stmt>.resnet_helper ResStage<import_from_stmt>.stem_helper VideoModelStem<import_stmt>pytorch_lightning<as>pl<class_stmt>X3D(pl.LightningModule)<block_start>""" X3D model, adapted from https://github.com/facebookresearch/SlowFast <NAME>. "X3D: Expanding Architectures for Efficient Video Recognition." https://arxiv.org/abs/2004.04730 """<def_stmt>__init__ self dim_in:int image_size:int frames_per_clip:int num_classes:int conv1_dim:int conv5_dim:int num_groups:int width_per_group:int width_factor:float depth_factor:float bottleneck_factor:float use_channelwise_3x3x3:bool dropout_rate:float head_activation:str head_batchnorm:bool fc_std_init:float final_batchnorm_zero_init:bool loss_name="cross_entropy" <block_start>super().__init__()<line_sep>self.norm_module=torch.nn.BatchNorm3d<line_sep>self.loss_name=loss_name<line_sep>exp_stage=2.0<line_sep>self.dim_conv1=conv1_dim<line_sep>self.dim_res2=(_round_width(self.dim_conv1 exp_stage divisor=8)<if><false># hparams.X3D.SCALE_RES2 <else>self.dim_conv1)<line_sep>self.dim_res3=_round_width(self.dim_res2 exp_stage divisor=8)<line_sep>self.dim_res4=_round_width(self.dim_res3 exp_stage divisor=8)<line_sep>self.dim_res5=_round_width(self.dim_res4 exp_stage divisor=8)<line_sep>self.block_basis=[# blocks, c, stride [1 self.dim_res2 2] [2 self.dim_res3 2] [5 self.dim_res4 2] [3 self.dim_res5 2] ]<line_sep>num_groups=num_groups<line_sep>width_per_group=width_per_group<line_sep>dim_inner=num_groups<times>width_per_group<line_sep>w_mul=width_factor<line_sep>d_mul=depth_factor<line_sep>dim_res1=_round_width(self.dim_conv1 w_mul)<line_sep># Basis of temporal kernel sizes for each of the stage. temp_kernel=[[[5]] # conv1 temporal kernels. [[3]] # res2 temporal kernels. [[3]] # res3 temporal kernels. [[3]] # res4 temporal kernels. [[3]] # res5 temporal kernels. 
]<line_sep>self.s1=VideoModelStem(dim_in=[dim_in] dim_out=[dim_res1] kernel=[temp_kernel[0][0]+[3 3]] stride=[[1 2 2]] padding=[[temp_kernel[0][0][0]<floordiv>2 1 1]] norm_module=self.norm_module stem_func_name="x3d_stem" )<line_sep># blob_in = s1 dim_in=dim_res1<line_sep>dim_out=dim_in<for_stmt>stage,block enumerate(self.block_basis)<block_start>dim_out=_round_width(block[1] w_mul)<line_sep>dim_inner=int(bottleneck_factor<times>dim_out)<line_sep>n_rep=_round_repeats(block[0] d_mul)<line_sep>prefix="s{}".format(stage+2)# start w res2 to follow convention s=ResStage(dim_in=[dim_in] dim_out=[dim_out] dim_inner=[dim_inner] temp_kernel_sizes=temp_kernel[1] stride=[block[2]] num_blocks=[n_rep] num_groups=[dim_inner]<if>use_channelwise_3x3x3<else>[num_groups] num_block_temp_kernel=[n_rep] nonlocal_inds=[[]] nonlocal_group=[1] nonlocal_pool=[[1 2 2] [1 2 2]] instantiation="dot_product" trans_func_name="x3d_transform" stride_1x1=<false> norm_module=self.norm_module dilation=[1] drop_connect_rate=0.0 )<line_sep>dim_in=dim_out<line_sep>self.add_module(prefix s)<block_end>spat_sz=int(math.ceil(image_size/32.0))<line_sep>self.head=X3DHead(dim_in=dim_out dim_inner=dim_inner dim_out=conv5_dim num_classes=num_classes pool_size=(frames_per_clip spat_sz spat_sz) dropout_rate=dropout_rate act_func=head_activation bn_lin5_on=bool(head_batchnorm) )<line_sep>init_weights(self fc_std_init bool(final_batchnorm_zero_init))<block_end><def_stmt>forward self x:Tensor# The original slowfast code was set up to use multiple paths, wrap the input <block_start>x=[x]# type:ignore <for_stmt>module self.children()<block_start>x=module(x)<block_end><return>x<block_end><def_stmt>training_step self batch batch_idx<block_start>x,y=batch<line_sep>x=self.forward(x)<line_sep>loss=getattr(F self.loss_name F.cross_entropy)(x y)<line_sep>self.log('train/loss' loss)<line_sep>self.log('train/acc' _accuracy(x y))<line_sep><return>loss<block_end><def_stmt>validation_step self batch batch_idx<block_start>x,y=batch<line_sep>x=self.forward(x)<line_sep>loss=getattr(F self.loss_name F.cross_entropy)(x y)<line_sep>self.log('val/loss' loss)<line_sep>self.log('val/acc' _accuracy(x y))<line_sep><return>loss<block_end><def_stmt>test_step self batch batch_idx<block_start>x,y=batch<line_sep>x=self.forward(x)<line_sep>loss=getattr(F self.loss_name F.cross_entropy)(x y)<line_sep>self.log('test/loss' loss)<line_sep>self.log('test/acc' _accuracy(x y))<line_sep><return>loss<block_end><block_end><def_stmt>_accuracy x:Tensor y:Tensor<block_start><return>torch.sum(x.argmax(dim=1)<eq>y)/len(y)<block_end><def_stmt>_round_width width multiplier min_depth=8 divisor=8<block_start>"""Round width of filters based on width multiplier."""<if_stmt><not>multiplier<block_start><return>width<block_end>width<augmul>multiplier<line_sep>min_depth=min_depth<or>divisor<line_sep>new_filters=max(min_depth int(width+divisor/2)<floordiv>divisor<times>divisor)<if_stmt>new_filters<l>0.9<times>width<block_start>new_filters<augadd>divisor<block_end><return>int(new_filters)<block_end><def_stmt>_round_repeats repeats multiplier<block_start>"""Round number of layers based on depth multiplier."""<line_sep>multiplier=multiplier<if_stmt><not>multiplier<block_start><return>repeats<block_end><return>int(math.ceil(multiplier<times>repeats))<block_end><def_stmt>c2_msra_fill module:nn.Module<arrow><none><block_start>""" Initialize `module.weight` using the "MSRAFill" implemented in Caffe2. Also initializes `module.bias` to 0. Args: module (torch.nn.Module): module to initialize. 
"""<line_sep># pyre-ignore nn.init.kaiming_normal_(module.weight mode="fan_out" nonlinearity="relu")<if_stmt>module.bias<is><not><none># pyre-ignore <block_start>nn.init.constant_(module.bias 0)<block_end><block_end><def_stmt>init_weights model fc_init_std=0.01 zero_init_final_bn=<true><block_start>""" Performs ResNet style weight initialization. Args: fc_init_std (float): the expected standard deviation for fc layer. zero_init_final_bn (bool): if True, zero initialize the final bn for every bottleneck. """<for_stmt>m model.modules()<block_start><if_stmt>isinstance(m nn.Conv3d)<block_start>""" Follow the initialization method proposed in: {He, Kaiming, et al. "Delving deep into rectifiers: Surpassing human-level performance on imagenet classification." arXiv preprint arXiv:1502.01852 (2015)} """<line_sep>c2_msra_fill(m)<block_end><elif_stmt>isinstance(m nn.BatchNorm3d)<block_start><if_stmt>(hasattr(m "transform_final_bn")<and>m.transform_final_bn<and>zero_init_final_bn)<block_start>batchnorm_weight=0.0<block_end><else_stmt><block_start>batchnorm_weight=1.0<block_end><if_stmt>m.weight<is><not><none><block_start>m.weight.data.fill_(batchnorm_weight)<block_end><if_stmt>m.bias<is><not><none><block_start>m.bias.data.zero_()<block_end><block_end><if_stmt>isinstance(m nn.Linear)<block_start>m.weight.data.normal_(mean=0.0 std=fc_init_std)<if_stmt>m.bias<is><not><none><block_start>m.bias.data.zero_()<block_end><block_end><block_end><block_end>
<import_from_stmt>arq.connections RedisSettings<import_from_stmt>.globals REDIS_IP REDIS_PORT<line_sep>settings=RedisSettings(host=REDIS_IP port=REDIS_PORT)<line_sep>
"""Migrations for Zinnia"""<line_sep>
"""Widgets Helper Library. A library of `ipywidgets` wrappers for notebook based reports and voila dashboards. The library includes both python code and html/css/js elements that can be found in the `./widgets` folder. """<import_stmt>os<import_from_stmt>jinja2 Template<def_stmt>stylesheet <block_start>"""Load a default CSS stylesheet from file."""<with_stmt>open(os.path.join(os.path.dirname(os.path.abspath(__file__)) "widgets" "style.css"))<as>f<block_start>style=f.read()<block_end><return>style<block_end><def_stmt>price_card ticker:str price:str price_color:str="neutral_color"<arrow>str<block_start>"""Prepare a styled HTML element of a 128 by 128 price card. Parameters ---------- ticker : str Instrument ticker for the price card price : str Instrument price as a string price_color : str, optional The color of the price. Accepts "up_color", "down_color" and default "neutral_color" Returns ------- str HTML code as string """<with_stmt>open(os.path.join(os.path.dirname(os.path.abspath(__file__)) "widgets" "card.j2"))<as>f<block_start>template=Template(f.read())<block_end>card=template.render(ticker=ticker price=price price_color=price_color)<line_sep><return>card<block_end>
<import_from_stmt>django.test TestCase<import_from_stmt>corehq.apps.accounting.models SoftwarePlanEdition<import_from_stmt>corehq.apps.accounting.tests.utils DomainSubscriptionMixin<import_from_stmt>corehq.apps.accounting.utils clear_plan_version_cache<import_from_stmt>corehq.apps.domain.models Domain<import_from_stmt>corehq.messaging.smsbackends.test.models SQLTestSMSBackend<import_from_stmt>corehq.apps.sms.api incoming send_sms_to_verified_number<import_from_stmt>corehq.apps.sms.messages MSG_OPTED_IN MSG_OPTED_OUT get_message<import_from_stmt>corehq.apps.sms.models SMS PhoneBlacklist PhoneNumber SQLMobileBackendMapping SQLMobileBackend<import_from_stmt>corehq.apps.sms.tests.util delete_domain_phone_numbers setup_default_sms_test_backend <import_from_stmt>corehq.form_processor.tests.utils FormProcessorTestUtils<class_stmt>OptTestCase(DomainSubscriptionMixin TestCase)<block_start>@classmethod<def_stmt>setUpClass cls<block_start>super(OptTestCase cls).setUpClass()<line_sep>cls.domain='opt-test'<line_sep>cls.domain_obj=Domain(name=cls.domain)<line_sep>cls.domain_obj.sms_case_registration_enabled=<true><line_sep>cls.domain_obj.save()<line_sep>cls.setup_subscription(cls.domain SoftwarePlanEdition.ADVANCED)<line_sep>cls.backend,cls.backend_mapping=setup_default_sms_test_backend()<line_sep>cls.custom_backend=SQLTestSMSBackend.objects.create(name='MOBILE_BACKEND_CUSTOM_TEST' is_global=<true> hq_api_id=SQLTestSMSBackend.get_api_id() opt_in_keywords=['RESTART'] opt_out_keywords=['RESTOP'])<line_sep>cls.custom_backend_mapping=SQLMobileBackendMapping.objects.create(is_global=<true> backend_type=SQLMobileBackend.SMS prefix='1' backend=cls.custom_backend )<block_end>@classmethod<def_stmt>tearDownClass cls<block_start>cls.backend_mapping.delete()<line_sep>cls.backend.delete()<line_sep>cls.custom_backend_mapping.delete()<line_sep>cls.custom_backend.delete()<line_sep>FormProcessorTestUtils.delete_all_cases(cls.domain)<line_sep>cls.teardown_subscriptions()<line_sep>cls.domain_obj.delete()<line_sep>clear_plan_version_cache()<line_sep>super(OptTestCase cls).tearDownClass()<block_end><def_stmt>tearDown self<block_start>PhoneBlacklist.objects.all().delete()<line_sep>SMS.objects.filter(domain=self.domain).delete()<line_sep>delete_domain_phone_numbers(self.domain)<block_end><def_stmt>get_last_sms self phone_number<block_start><return>SMS.objects.filter(domain=self.domain phone_number=phone_number).order_by('-date')[0]<block_end><def_stmt>test_opt_out_and_opt_in self<block_start>self.assertEqual(PhoneBlacklist.objects.count() 0)<line_sep>incoming('99912345678' 'join opt-test' 'GVI')<line_sep>v=PhoneNumber.get_two_way_number('99912345678')<line_sep>self.assertIsNotNone(v)<line_sep>incoming('99912345678' 'stop' 'GVI')<line_sep>self.assertEqual(PhoneBlacklist.objects.count() 1)<line_sep>phone_number=PhoneBlacklist.objects.get(phone_number='99912345678')<line_sep>self.assertFalse(phone_number.send_sms)<line_sep>self.assertEqual(phone_number.domain self.domain)<line_sep>self.assertIsNotNone(phone_number.last_sms_opt_out_timestamp)<line_sep>self.assertIsNone(phone_number.last_sms_opt_in_timestamp)<line_sep>sms=self.get_last_sms('+99912345678')<line_sep>self.assertEqual(sms.direction 'O')<line_sep>self.assertEqual(sms.text get_message(MSG_OPTED_OUT context=('START' )))<line_sep>incoming('99912345678' 'start' 'GVI')<line_sep>self.assertEqual(PhoneBlacklist.objects.count() 
1)<line_sep>phone_number=PhoneBlacklist.objects.get(phone_number='99912345678')<line_sep>self.assertTrue(phone_number.send_sms)<line_sep>self.assertEqual(phone_number.domain self.domain)<line_sep>self.assertIsNotNone(phone_number.last_sms_opt_out_timestamp)<line_sep>self.assertIsNotNone(phone_number.last_sms_opt_in_timestamp)<line_sep>sms=self.get_last_sms('+99912345678')<line_sep>self.assertEqual(sms.direction 'O')<line_sep>self.assertEqual(sms.text get_message(MSG_OPTED_IN context=('STOP' )))<block_end><def_stmt>test_sending_to_opted_out_number self<block_start>self.assertEqual(PhoneBlacklist.objects.count() 0)<line_sep>incoming('99912345678' 'join opt-test' 'GVI')<line_sep>v=PhoneNumber.get_two_way_number('99912345678')<line_sep>self.assertIsNotNone(v)<line_sep>send_sms_to_verified_number(v 'hello')<line_sep>sms=self.get_last_sms('+99912345678')<line_sep>self.assertEqual(sms.direction 'O')<line_sep>self.assertEqual(sms.text 'hello')<line_sep>incoming('99912345678' 'stop' 'GVI')<line_sep>self.assertEqual(PhoneBlacklist.objects.count() 1)<line_sep>phone_number=PhoneBlacklist.objects.get(phone_number='99912345678')<line_sep>self.assertFalse(phone_number.send_sms)<line_sep>send_sms_to_verified_number(v 'hello')<line_sep>sms=self.get_last_sms('+99912345678')<line_sep>self.assertEqual(sms.direction 'O')<line_sep>self.assertEqual(sms.text 'hello')<line_sep>self.assertTrue(sms.error)<line_sep>self.assertEqual(sms.system_error_message SMS.ERROR_PHONE_NUMBER_OPTED_OUT)<line_sep>incoming('99912345678' 'start' 'GVI')<line_sep>self.assertEqual(PhoneBlacklist.objects.count() 1)<line_sep>phone_number=PhoneBlacklist.objects.get(phone_number='99912345678')<line_sep>self.assertTrue(phone_number.send_sms)<line_sep>send_sms_to_verified_number(v 'hello')<line_sep>sms=self.get_last_sms('+99912345678')<line_sep>self.assertEqual(sms.direction 'O')<line_sep>self.assertEqual(sms.text 'hello')<line_sep>self.assertFalse(sms.error)<line_sep>self.assertIsNone(sms.system_error_message)<block_end><def_stmt>test_custom_opt_keywords self<block_start>self.assertEqual(PhoneBlacklist.objects.count() 0)<line_sep>incoming('19912345678' 'join opt-test' 'TEST')<line_sep>v=PhoneNumber.get_two_way_number('19912345678')<line_sep>self.assertIsNotNone(v)<line_sep>send_sms_to_verified_number(v 'hello')<line_sep>sms=self.get_last_sms('+19912345678')<line_sep>self.assertEqual(sms.direction 'O')<line_sep>self.assertEqual(sms.text 'hello')<line_sep>incoming('19912345678' 'restop' 'TEST')<line_sep>self.assertEqual(PhoneBlacklist.objects.count() 1)<line_sep>phone_number=PhoneBlacklist.objects.get(phone_number='19912345678')<line_sep>self.assertFalse(phone_number.send_sms)<line_sep>send_sms_to_verified_number(v 'hello')<line_sep>sms=self.get_last_sms('+19912345678')<line_sep>self.assertEqual(sms.direction 'O')<line_sep>self.assertEqual(sms.text 'hello')<line_sep>self.assertTrue(sms.error)<line_sep>self.assertEqual(sms.system_error_message SMS.ERROR_PHONE_NUMBER_OPTED_OUT)<line_sep>incoming('19912345678' 'restart' 'TEST')<line_sep>self.assertEqual(PhoneBlacklist.objects.count() 1)<line_sep>phone_number=PhoneBlacklist.objects.get(phone_number='19912345678')<line_sep>self.assertTrue(phone_number.send_sms)<line_sep>send_sms_to_verified_number(v 'hello')<line_sep>sms=self.get_last_sms('+19912345678')<line_sep>self.assertEqual(sms.direction 'O')<line_sep>self.assertEqual(sms.text 'hello')<line_sep>self.assertFalse(sms.error)<line_sep>self.assertIsNone(sms.system_error_message)<block_end><block_end>
# MIT License # # Copyright (c) 2021 <NAME> and <NAME> and <NAME> # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in all # copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. <import_stmt>torch.nn<as>nn<class_stmt>AddNorm(nn.Module)<block_start>""" Add & Normalization layer proposed in "Attention Is All You Need". Transformer employ a residual connection around each of the two sub-layers, (Multi-Head Attention & Feed-Forward) followed by layer normalization. """<def_stmt>__init__ self sublayer:nn.Module d_model:int=512<arrow><none><block_start>super(AddNorm self).__init__()<line_sep>self.sublayer=sublayer<line_sep>self.layer_norm=nn.LayerNorm(d_model)<block_end><def_stmt>forward self *args<block_start>residual=args[0]<line_sep>outputs=self.sublayer(*args)<if_stmt>isinstance(outputs tuple)<block_start><return>self.layer_norm(outputs[0]+residual) outputs[1]<block_end><return>self.layer_norm(outputs+residual)<block_end><block_end>
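# --- Editor's sketch wrapping a feed-forward sublayer with the AddNorm module above ---
# The dimensions and the Sequential block are illustrative assumptions, not part of the file.
import torch
import torch.nn as nn

d_model = 512
feed_forward = nn.Sequential(nn.Linear(d_model, 2048), nn.ReLU(), nn.Linear(2048, d_model))
add_norm = AddNorm(feed_forward, d_model=d_model)

x = torch.randn(8, 10, d_model)  # (batch, seq_len, d_model)
out = add_norm(x)                # sublayer output + residual, then layer norm
print(out.shape)                 # torch.Size([8, 10, 512])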
# ------------------------------------------------------------------------ # Copyright 2015 Intel Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ------------------------------------------------------------------------ <import_from_stmt>configuration Configuration<line_sep># Default (very simple) compiler configuration <class_stmt>DefaultConfiguration(Configuration)<block_start><def_stmt>__init__ self context<block_start>Configuration.__init__(self context)<block_end># ------------------------------------------------------------ # Return test program to be used when checking for basic C++11 # support. # ------------------------------------------------------------ <def_stmt>_c99_test_program self<block_start><return>""" // Some headers found in C99. #include <stdbool.h> #include <stdint.h> int main() { struct foo { bool b; // C99 type int i; uint64_t q; // C99 type }; // Designated initializer. struct foo bar = { .b = false, .q = UINT64_MAX }; // Implicitly initialized field. return bar.i != 0; } """<block_end># -------------------------------------------------------------- # Get list of flags that could potentially enable C99 support. # # The default configuration assumes that no flag is needed to # enable C99 support. # -------------------------------------------------------------- <def_stmt>_c99_flags self<block_start><return>[]<block_end># ------------------------------------------------------------ # Return test program to be used when checking for basic C++11 # support. # ------------------------------------------------------------ <def_stmt>_cxx11_test_program self<block_start><return>""" int main() { int x = 3210; auto f = [x](){ return x; }; return f() != x; } """<block_end># -------------------------------------------------------------- # Get list of flags that could potentially enable C++11 support. # # The default configuration assumes that no flag is needed to # enable C++11 support. # -------------------------------------------------------------- <def_stmt>_cxx11_flags self<block_start><return>[]<block_end><block_end>
"""Test various api functions."""<import_stmt>unittest<import_from_stmt>unittest mock<import_stmt>time<import_from_stmt>blinkpy.helpers.util json_load Throttle time_to_seconds gen_uid<class_stmt>TestUtil(unittest.TestCase)<block_start>"""Test the helpers/util module."""<def_stmt>setUp self<block_start>"""Initialize the blink module."""<block_end><def_stmt>tearDown self<block_start>"""Tear down blink module."""<block_end><def_stmt>test_throttle self<block_start>"""Test the throttle decorator."""<line_sep>calls=[]<line_sep>@Throttle(seconds=5)<def_stmt>test_throttle <block_start>calls.append(1)<block_end>now=int(time.time())<line_sep>now_plus_four=now+4<line_sep>now_plus_six=now+6<line_sep>test_throttle()<line_sep>self.assertEqual(1 len(calls))<line_sep># Call again, still shouldn't fire test_throttle()<line_sep>self.assertEqual(1 len(calls))<line_sep># Call with force test_throttle(force=<true>)<line_sep>self.assertEqual(2 len(calls))<line_sep># Call without throttle, shouldn't fire test_throttle()<line_sep>self.assertEqual(2 len(calls))<line_sep># Fake time as 4 seconds from now <with_stmt>mock.patch("time.time" return_value=now_plus_four)<block_start>test_throttle()<block_end>self.assertEqual(2 len(calls))<line_sep># Fake time as 6 seconds from now <with_stmt>mock.patch("time.time" return_value=now_plus_six)<block_start>test_throttle()<block_end>self.assertEqual(3 len(calls))<block_end><def_stmt>test_throttle_per_instance self<block_start>"""Test that throttle is done once per instance of class."""<class_stmt>Tester<block_start>"""A tester class for throttling."""<def_stmt>test self<block_start>"""Test the throttle."""<line_sep><return><true><block_end><block_end>tester=Tester()<line_sep>throttled=Throttle(seconds=1)(tester.test)<line_sep>self.assertEqual(throttled() <true>)<line_sep>self.assertEqual(throttled() <none>)<block_end><def_stmt>test_throttle_multiple_objects self<block_start>"""Test that function is throttled even if called by multiple objects."""<line_sep>@Throttle(seconds=5)<def_stmt>test_throttle_method <block_start><return><true><block_end><class_stmt>Tester<block_start>"""A tester class for throttling."""<def_stmt>test self<block_start>"""Test function for throttle."""<line_sep><return>test_throttle_method()<block_end><block_end>tester1=Tester()<line_sep>tester2=Tester()<line_sep>self.assertEqual(tester1.test() <true>)<line_sep>self.assertEqual(tester2.test() <none>)<block_end><def_stmt>test_throttle_on_two_methods self<block_start>"""Test that throttle works for multiple methods."""<class_stmt>Tester<block_start>"""A tester class for throttling."""<line_sep>@Throttle(seconds=3)<def_stmt>test1 self<block_start>"""Test function for throttle."""<line_sep><return><true><block_end>@Throttle(seconds=5)<def_stmt>test2 self<block_start>"""Test function for throttle."""<line_sep><return><true><block_end><block_end>tester=Tester()<line_sep>now=time.time()<line_sep>now_plus_4=now+4<line_sep>now_plus_6=now+6<line_sep>self.assertEqual(tester.test1() <true>)<line_sep>self.assertEqual(tester.test2() <true>)<line_sep>self.assertEqual(tester.test1() <none>)<line_sep>self.assertEqual(tester.test2() <none>)<with_stmt>mock.patch("time.time" return_value=now_plus_4)<block_start>self.assertEqual(tester.test1() <true>)<line_sep>self.assertEqual(tester.test2() <none>)<block_end><with_stmt>mock.patch("time.time" return_value=now_plus_6)<block_start>self.assertEqual(tester.test1() <none>)<line_sep>self.assertEqual(tester.test2() <true>)<block_end><block_end><def_stmt>test_time_to_seconds 
self<block_start>"""Test time to seconds conversion."""<line_sep>correct_time="1970-01-01T00:00:05+00:00"<line_sep>wrong_time="1/1/1970 00:00:03"<line_sep>self.assertEqual(time_to_seconds(correct_time) 5)<line_sep>self.assertFalse(time_to_seconds(wrong_time))<block_end><def_stmt>test_json_load_bad_data self<block_start>"""Check that bad file is handled."""<line_sep>self.assertEqual(json_load("fake.file") <none>)<with_stmt>mock.patch("builtins.open" mock.mock_open(read_data=""))<block_start>self.assertEqual(json_load("fake.file") <none>)<block_end><block_end><def_stmt>test_gen_uid self<block_start>"""Test gen_uid formatting."""<line_sep>val1=gen_uid(8)<line_sep>val2=gen_uid(8 uid_format=<true>)<line_sep>self.assertEqual(len(val1) 16)<line_sep>self.assertTrue(val2.startswith("BlinkCamera_"))<line_sep>val2_cut=val2.split("_")<line_sep>val2_split=val2_cut[1].split("-")<line_sep>self.assertEqual(len(val2_split[0]) 8)<line_sep>self.assertEqual(len(val2_split[1]) 4)<line_sep>self.assertEqual(len(val2_split[2]) 4)<line_sep>self.assertEqual(len(val2_split[3]) 4)<line_sep>self.assertEqual(len(val2_split[4]) 12)<block_end><block_end>
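# Hedged usage sketch, not part of the test module above; the function name and the 60-second window are illustrative assumptions. It restates what the tests assert: Throttle wraps a callable so repeat calls inside the window return None, and force=True bypasses the window.
<import_from_stmt>blinkpy.helpers.util Throttle<line_sep>@Throttle(seconds=60)<def_stmt>refresh_status <block_start><return>"refreshed"<block_end># First call returns "refreshed"; repeat calls inside the window return None; force=True bypasses the throttle. first=refresh_status()<line_sep>throttled=refresh_status()<line_sep>forced=refresh_status(force=<true>)<line_sep>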
<import_stmt>demistomock<as>demisto<import_from_stmt>CommonServerPython *<import_from_stmt>CommonServerUserPython *<line_sep>''' IMPORTS '''<import_stmt>json<import_stmt>requests<import_stmt>traceback<line_sep># Disable insecure warnings requests.packages.urllib3.disable_warnings()<line_sep>''' CONSTANTS '''<line_sep>INTEGRATION_CONTEXT_NAME='ImpervaWAF'<class_stmt>Client(BaseClient)<block_start>""" Client will implement the service API, and should not contain any Demisto logic. Should only do requests and return data. """<line_sep>session_id=''<def_stmt>do_request self method url_suffix json_data=<none><block_start><if_stmt><not>self.session_id<block_start>self.login()<block_end>res=self._http_request(method f'SecureSphere/api/v1/{url_suffix}' json_data=json_data headers={'Cookie':self.session_id} ok_codes=(200 401 406) resp_type='response')<if_stmt>res.status_code<eq>401<block_start>self.login()<line_sep>res=self._http_request(method f'SecureSphere/api/v1/{url_suffix}' json_data=json_data headers={'Cookie':self.session_id} ok_codes=(200 401 406) resp_type='response')<block_end><if_stmt>res.text<block_start>res=res.json()<block_end><else_stmt><block_start>res={}<block_end>extract_errors(res)<line_sep><return>res<block_end><def_stmt>login self<block_start>res=self._http_request('POST' 'SecureSphere/api/v1/auth/session' auth=self._auth)<line_sep>extract_errors(res)<line_sep>self.session_id=res.get('session-id')<block_end><def_stmt>get_ip_group_entities self group_name table_name<block_start>raw_res=self.do_request('GET' f'conf/ipGroups/{group_name}')<line_sep>entries=[]<for_stmt>entry raw_res.get('entries')<block_start>entries.append({'Type':entry.get('type') 'IpAddressFrom':entry.get('ipAddressFrom') 'IpAddressTo':entry.get('ipAddressTo') 'NetworkAddress':entry.get('networkAddress') 'CidrMask':entry.get('cidrMask')})<block_end>human_readable=tableToMarkdown(table_name entries removeNull=<true> headers=['Type' 'IpAddressFrom' 'IpAddressTo' 'NetworkAddress' 'CidrMask'])<line_sep>entry_context={f'{INTEGRATION_CONTEXT_NAME}.IpGroup(val.Name===obj.Name)':{'Name':group_name 'Entries':entries}}<line_sep><return>human_readable entry_context raw_res<block_end><def_stmt>get_custom_policy_outputs self policy_name table_name<block_start>raw_res=self.do_request('GET' f'conf/policies/security/webServiceCustomPolicies/{policy_name}')<line_sep>policy={'Name':policy_name 'Enabled':raw_res.get('enabled') 'OneAlertPerSession':raw_res.get('oneAlertPerSession') 'DisplayResponsePage':raw_res.get('displayResponsePage') 'Severity':raw_res.get('severity') 'Action':raw_res.get('action') 'FollowedAction':raw_res.get('followedAction') 'ApplyTo':raw_res.get('applyTo') 'MatchCriteria':raw_res.get('matchCriteria')}<line_sep>hr_policy=policy.copy()<del_stmt>hr_policy['MatchCriteria']<del_stmt>hr_policy['ApplyTo']<line_sep>human_readable=tableToMarkdown(table_name hr_policy removeNull=<true>)<if_stmt>raw_res.get('applyTo')<block_start>human_readable<augadd>'\n\n'+tableToMarkdown('Services to apply the policy to' raw_res.get('applyTo') removeNull=<true>)<block_end><for_stmt>match raw_res.get('matchCriteria' [])<block_start>tmp_match=match.copy()<line_sep>operation=match['operation']<line_sep>match_type=match['type']<line_sep># generate human readable for sourceIpAddresses type <if_stmt>match_type<eq>'sourceIpAddresses'<block_start><if_stmt>tmp_match.get('userDefined')<block_start><for_stmt>i,element enumerate(tmp_match['userDefined'])<block_start>tmp_match['userDefined'][i]={'IP 
Address':tmp_match['userDefined'][i]}<block_end>human_readable<augadd>'\n\n'+tableToMarkdown(f'Match operation: {operation}\n Source IP addresses:' tmp_match['userDefined'] removeNull=<true>)<block_end><if_stmt>tmp_match.get('ipGroups')<block_start><for_stmt>i,element enumerate(tmp_match['ipGroups'])<block_start>tmp_match['ipGroups'][i]={'Group name':tmp_match['ipGroups'][i]}<block_end>human_readable<augadd>'\n\n'+tableToMarkdown(f'Match operation: {operation}\n IP Groups:' tmp_match['ipGroups'] removeNull=<true>)<block_end><block_end># generate human readable for sourceGeolocation type <elif_stmt>match_type<eq>'sourceGeolocation'<block_start><if_stmt>tmp_match.get('values')<block_start><for_stmt>i,element enumerate(tmp_match['values'])<block_start>tmp_match['values'][i]={'Country name':tmp_match['values'][i]}<block_end>human_readable<augadd>'\n\n'+tableToMarkdown(f'Match operation: {operation}\n Countries to match:' tmp_match['values'] removeNull=<true>)<block_end><block_end><block_end>entry_context={f'{INTEGRATION_CONTEXT_NAME}.CustomWebPolicy(val.Name===obj.Name)':policy}<line_sep><return>human_readable entry_context raw_res<block_end><block_end><def_stmt>extract_errors res<block_start><if_stmt><not>isinstance(res list)<and>res.get('errors')<block_start>error_message=''<for_stmt>err res['errors']<block_start>error_message<augadd>f'error-code: {err.get("error-code")}, description: {err.get("description")}'<block_end><raise>Exception(error_message)<block_end><block_end><def_stmt>generate_policy_data_body args<block_start>severity=args.get('severity')<line_sep>action=args.get('action')<line_sep>followed_action=args.get('followed-action')<line_sep>body={}<if_stmt>args.get('enabled')<block_start>body['enabled']=args['enabled']<eq>'True'<block_end><if_stmt>args.get('one-alert-per-session')<block_start>body['oneAlertPerSession']=args['one-alert-per-session']<eq>'True'<block_end><if_stmt>args.get('display-response-page')<block_start>body['displayResponsePage']=args['display-response-page']<eq>'True'<block_end><if_stmt>severity<block_start>body['severity']=severity<block_end><if_stmt>action<block_start>body['action']=action<block_end><if_stmt>followed_action<block_start>body['followedAction']=followed_action<block_end><return>body<block_end><def_stmt>generate_match_criteria body args<block_start>geo_location_criteria_operation=args.get('geo-location-criteria-operation')<line_sep>ip_addresses_criteria_operation=args.get('ip-addresses-criteria-operation')<line_sep>ip_groups=args.get('ip-groups' '')<line_sep>ip_addreses=args.get('ip-addresses' '')<line_sep>country_names=args.get('country-names' '')<line_sep>match_criteria=[]<if_stmt>geo_location_criteria_operation<block_start><if_stmt><not>country_names<block_start><raise>Exception('country-names argument is empty')<block_end>geo_location_match_item={'type':'sourceGeolocation' 'operation':geo_location_criteria_operation 'values':country_names.split(',')}<line_sep>match_criteria.append(geo_location_match_item)<block_end><if_stmt>ip_addresses_criteria_operation<block_start><if_stmt><not>ip_groups<and><not>ip_addreses<block_start><raise>Exception('ip-groups and ip-addresses arguments are empty, please fill at least one of them')<block_end>ip_addresses_match_item={'type':'sourceIpAddresses' 
'operation':ip_addresses_criteria_operation}<if_stmt>ip_groups<block_start>ip_addresses_match_item['ipGroups']=ip_groups.split(',')<block_end><if_stmt>ip_addreses<block_start>ip_addresses_match_item['userDefined']=ip_addreses.split(',')<block_end>match_criteria.append(ip_addresses_match_item)<block_end>body['matchCriteria']=match_criteria<line_sep><return>body<block_end><def_stmt>generate_ip_groups_entries args<block_start>entry_type=args.get('entry-type')<line_sep>ip_from=args.get('ip-address-from')<line_sep>ip_to=args.get('ip-address-to')<line_sep>network_address=args.get('network-address')<line_sep>cidr_mask=args.get('cidr-mask')<line_sep>operation=args.get('operation')<line_sep>json_entries=args.get('json-entries')<if_stmt><not>json_entries<block_start>entry={}<if_stmt>entry_type<eq>'single'<block_start>entry['ipAddressFrom']=ip_from<block_end><elif_stmt>entry_type<eq>'range'<block_start>entry['ipAddressFrom']=ip_from<line_sep>entry['ipAddressTo']=ip_to<block_end><elif_stmt>entry_type<eq>'network'<block_start>entry['networkAddress']=network_address<line_sep>entry['cidrMask']=cidr_mask<block_end><else_stmt><block_start><raise>Exception('entry-type argument is invalid')<block_end>entry['type']=entry_type<line_sep>entry['operation']=operation<line_sep>body={'entries':[entry]}<block_end><else_stmt><block_start><try_stmt><block_start>json_entries=json.loads(json_entries)<block_end><except_stmt>Exception<block_start><raise>Exception(f'Failed to parse json-entries as JSON data, '<concat>f' received object:\n{json_entries}')<block_end>body={'entries':json_entries}<block_end><return>body<block_end>@logger<def_stmt>test_module client args<block_start>raw_res=client.do_request('GET' 'conf/sites')<if_stmt>raw_res.get('sites')<block_start>demisto.results('ok')<block_end><block_end>@logger<def_stmt>ip_group_list_command client args<block_start>raw_res=client.do_request('GET' 'conf/ipGroups')<line_sep>groups=[]<if_stmt>raw_res.get('names')<block_start>groups=raw_res['names']<for_stmt>i,element enumerate(groups)<block_start>groups[i]={'Name':groups[i]}<block_end><block_end>human_readable=tableToMarkdown('IP groups' groups removeNull=<true>)<line_sep>entry_context={f'{INTEGRATION_CONTEXT_NAME}.IpGroup(val.Name===obj.Name)':groups}<line_sep>return_outputs(human_readable entry_context raw_res)<block_end>@logger<def_stmt>ip_group_list_entries_command client args<block_start>group_name=args.get('ip-group-name')<line_sep>human_readable,entry_context,raw_res=client.get_ip_group_entities(group_name f'IP group entries for {group_name}')<line_sep>return_outputs(human_readable entry_context raw_res)<block_end>@logger<def_stmt>ip_group_remove_entries_command client args<block_start>group_name=args.get('ip-group-name')<line_sep>raw_res=client.do_request('DELETE' f'conf/ipGroups/{group_name}/clear')<line_sep>return_outputs(f'The IP group {group_name} is now empty' {} raw_res)<block_end>@logger<def_stmt>sites_list_command client args<block_start>raw_res=client.do_request('GET' 'conf/sites')<line_sep>sites=[{'Name':site}<for>site raw_res.get('sites' [])]<line_sep>human_readable=tableToMarkdown('All sites in the system' sites removeNull=<true>)<line_sep>entry_context={f'{INTEGRATION_CONTEXT_NAME}.Site(val.Name===obj.Name)':sites}<line_sep>return_outputs(human_readable entry_context raw_res)<block_end>@logger<def_stmt>server_groups_list_command client args<block_start>site=args.get('site-name')<line_sep>raw_res=client.do_request('GET' 
f'conf/serverGroups/{site}')<line_sep>server_groups=[]<if_stmt>raw_res.get('server-groups')<block_start>server_groups=raw_res['server-groups']<for_stmt>i,element enumerate(server_groups)<block_start>server_groups[i]={'Name':server_groups[i] 'SiteName':site}<block_end><block_end>human_readable=tableToMarkdown(f'Server groups in {site}' server_groups removeNull=<true>)<line_sep>entry_context={f'{INTEGRATION_CONTEXT_NAME}.ServerGroup(val.Name===obj.Name)':server_groups}<line_sep>return_outputs(human_readable entry_context raw_res)<block_end>@logger<def_stmt>server_group_policies_list_command client args<block_start>site=args.get('site-name')<line_sep>server_group=args.get('server-group-name')<line_sep>raw_res=client.do_request('GET' f'conf/serverGroups/{site}/{server_group}/securityPolicies')<line_sep>policies=[]<for_stmt>policy raw_res<block_start>policies.append({'System':policy.get('system') 'PolicyName':policy.get('policy-name') 'PolicyType':policy.get('policy-type') 'ServerGroup':server_group 'SiteName':site})<block_end>human_readable=tableToMarkdown(f'Policies for {server_group}' policies removeNull=<true>)<line_sep>entry_context={f'{INTEGRATION_CONTEXT_NAME}.SecurityPolicy(val.PolicyName===obj.PolicyName)':policies}<line_sep>return_outputs(human_readable entry_context raw_res)<block_end>@logger<def_stmt>custom_policy_list_command client args<block_start>raw_res=client.do_request('GET' 'conf/policies/security/webServiceCustomPolicies')<line_sep>policies=[]<if_stmt>raw_res.get('customWebPolicies')<block_start>policies=raw_res['customWebPolicies']<for_stmt>i,element enumerate(policies)<block_start>policies[i]={'Name':policies[i]}<block_end><block_end>human_readable=tableToMarkdown('Custom web policies' policies removeNull=<true>)<line_sep>entry_context={f'{INTEGRATION_CONTEXT_NAME}.CustomWebPolicy(val.Name===obj.Name)':policies}<line_sep>return_outputs(human_readable entry_context raw_res)<block_end>@logger<def_stmt>get_custom_policy_command client args<block_start>policy_name=args.get('policy-name')<line_sep>human_readable,entry_context,raw_res=client.get_custom_policy_outputs(policy_name f'Policy data for {policy_name}')<line_sep>return_outputs(human_readable entry_context raw_res)<block_end>@logger<def_stmt>create_ip_group_command client args<block_start>group_name=args.get('group-name')<line_sep>body=generate_ip_groups_entries(args)<line_sep>client.do_request('POST' f'conf/ipGroups/{group_name}' json_data=body)<line_sep>human_readable,entry_context,raw_res=client.get_ip_group_entities(group_name f'Group {group_name} created successfully')<line_sep>return_outputs(human_readable entry_context raw_res)<block_end>@logger<def_stmt>update_ip_group_command client args<block_start>group_name=args.get('group-name')<line_sep>body=generate_ip_groups_entries(args)<line_sep>client.do_request('PUT' f'conf/ipGroups/{group_name}/data' json_data=body)<line_sep>human_readable,entry_context,raw_res=client.get_ip_group_entities(group_name f'Group {group_name} updated successfully')<line_sep>return_outputs(human_readable entry_context raw_res)<block_end>@logger<def_stmt>delete_ip_group_command client args<block_start>group_name=args.get('group-name')<line_sep>raw_res=client.do_request('DELETE' f'conf/ipGroups/{group_name}')<line_sep>return_outputs(f'Group {group_name} deleted successfully' {} raw_res)<block_end>@logger<def_stmt>create_custom_policy_command client 
args<block_start>policy_name=args.get('policy-name')<line_sep>site=args.get('site-name-to-apply')<line_sep>server_group=args.get('server-group-name-to-apply')<line_sep>web_service=args.get('web-service-name-to-apply')<line_sep>match_criteria_json=args.get('match-criteria-json')<line_sep>body=generate_policy_data_body(args)<if_stmt>match_criteria_json<and><not>isinstance(match_criteria_json dict)<block_start><try_stmt><block_start>match_criteria_json=json.loads(match_criteria_json)<block_end><except_stmt>Exception<block_start><raise>Exception(f'Failed to parse match-criteria-json as JSON data,'<concat>f' received object:\n{match_criteria_json}')<block_end>body['matchCriteria']=match_criteria_json<block_end><else_stmt><block_start>body=generate_match_criteria(body args)<block_end>body['applyTo']=[{'siteName':site 'serverGroupName':server_group 'webServiceName':web_service}]<line_sep>client.do_request('POST' f'conf/policies/security/webServiceCustomPolicies/{policy_name}' json_data=body)<line_sep>human_readable,entry_context,raw_res=client.get_custom_policy_outputs(policy_name f'Policy {policy_name} created successfully')<line_sep>return_outputs(human_readable entry_context raw_res)<block_end>@logger<def_stmt>update_custom_policy_command client args<block_start>policy_name=args.get('policy-name')<line_sep>site=args.get('site-name-to-apply')<line_sep>server_group=args.get('server-group-name-to-apply' '')<line_sep>web_service=args.get('web-service-name-to-apply' '')<line_sep>apply_operation=args.get('apply-operation' '')<line_sep>match_criteria_json=args.get('match-criteria-json')<line_sep>body=generate_policy_data_body(args)<if_stmt>match_criteria_json<and><not>isinstance(match_criteria_json dict)<block_start><try_stmt><block_start>match_criteria_json=json.loads(match_criteria_json)<block_end><except_stmt>Exception<block_start><raise>DemistoException(f'Failed to parse match-criteria-json as JSON data,'<concat>f' received object:\n{match_criteria_json}')<block_end>body['matchCriteria']=match_criteria_json<block_end><else_stmt><block_start>body=generate_match_criteria(body args)<block_end><if_stmt>apply_operation<block_start>body['applyTo']=[{'operation':apply_operation 'siteName':site 'serverGroupName':server_group 'webServiceName':web_service}]<block_end>client.do_request('PUT' f'conf/policies/security/webServiceCustomPolicies/{policy_name}' json_data=body)<line_sep>human_readable,entry_context,raw_res=client.get_custom_policy_outputs(policy_name f'Policy {policy_name} updated successfully')<line_sep>return_outputs(human_readable entry_context raw_res)<block_end>@logger<def_stmt>delete_custom_policy_command client args<block_start>policy_name=args.get('policy-name')<line_sep>raw_res=client.do_request('DELETE' f'conf/policies/security/webServiceCustomPolicies/{policy_name}')<line_sep>return_outputs(f'Policy {policy_name} deleted successfully' {} raw_res)<block_end><def_stmt>main <block_start>params=demisto.params()<line_sep># get the service API url base_url=params.get('url')<line_sep>verify_certificate=<not>params.get('insecure' <false>)<line_sep>proxy=params.get('proxy' <false>)<line_sep>credentials=params.get('credentials')<line_sep>username=credentials['identifier']<if>credentials<else>''<line_sep>password=credentials['password']<if>credentials<else>''<line_sep>LOG(f'Command being called is {demisto.command()}')<try_stmt><block_start>client=Client(base_url=base_url verify=verify_certificate auth=(username password) 
proxy=proxy)<line_sep>command=demisto.command()<line_sep>args=demisto.args()<line_sep>commands={'test-module':test_module 'imperva-waf-ip-group-list':ip_group_list_command 'imperva-waf-ip-group-list-entries':ip_group_list_entries_command 'imperva-waf-ip-group-remove-entries':ip_group_remove_entries_command 'imperva-waf-sites-list':sites_list_command 'imperva-waf-server-group-list':server_groups_list_command 'imperva-waf-server-group-list-policies':server_group_policies_list_command 'imperva-waf-web-service-custom-policy-list':custom_policy_list_command 'imperva-waf-web-service-custom-policy-get':get_custom_policy_command 'imperva-waf-ip-group-create':create_ip_group_command 'imperva-waf-ip-group-update-entries':update_ip_group_command 'imperva-waf-ip-group-delete':delete_ip_group_command 'imperva-waf-web-service-custom-policy-create':create_custom_policy_command 'imperva-waf-web-service-custom-policy-update':update_custom_policy_command 'imperva-waf-web-service-custom-policy-delete':delete_custom_policy_command }<if_stmt>command<in>commands<block_start>commands[command](client args)<block_end><else_stmt><block_start><raise>NotImplementedError(f'Command "{command}" is not implemented.')<block_end><block_end># Log exceptions <except_stmt>Exception<as>e<block_start>return_error(f'Unexpected error: {str(e)}' error=traceback.format_exc())<block_end><block_end><if_stmt>__name__<in>('__main__' '__builtin__' 'builtins')<block_start>main()<block_end>
<import_stmt>time<import_from_stmt>huobi.client.trade TradeClient<import_from_stmt>huobi.constant *<import_from_stmt>huobi.utils *<line_sep>trade_client=TradeClient(api_key=g_api_key secret_key=g_secret_key)<line_sep>client_order_id_header=str(int(time.time()))<line_sep>symbol_eosusdt="eosusdt"<line_sep>client_order_id_eos_01=client_order_id_header+symbol_eosusdt+"01"<line_sep>client_order_id_eos_02=client_order_id_header+symbol_eosusdt+"02"<line_sep>client_order_id_eos_03=client_order_id_header+symbol_eosusdt+"03"<line_sep>buy_limit_eos_01={"account_id":g_account_id "symbol":symbol_eosusdt "order_type":OrderType.BUY_LIMIT "source":OrderSource.API "amount":50 "price":0.12 "client_order_id":client_order_id_eos_01}<line_sep>buy_limit_eos_02={"account_id":g_account_id "symbol":symbol_eosusdt "order_type":OrderType.BUY_LIMIT "source":OrderSource.API "amount":7 "price":0.80 "client_order_id":client_order_id_eos_02}<line_sep>buy_limit_eos_03={"account_id":g_account_id "symbol":symbol_eosusdt "order_type":OrderType.BUY_LIMIT "source":OrderSource.API "amount":20 "price":0.252 "client_order_id":client_order_id_eos_03}<line_sep>order_config_list=[buy_limit_eos_01 buy_limit_eos_02 buy_limit_eos_03]<line_sep>create_result=trade_client.batch_create_order(order_config_list=order_config_list)<line_sep>LogInfo.output_list(create_result)<line_sep>order_id_list=[]<if_stmt>create_result<and>len(create_result)<block_start><for_stmt>item create_result<block_start>order_id_list.append(item.order_id)<block_end>result=trade_client.cancel_orders(symbol_eosusdt order_id_list)<line_sep>result.print_object()<block_end>
<import_stmt>jax<import_stmt>chex<import_from_stmt>typing Union Optional<import_from_stmt>.decoder Decoder<import_from_stmt>...utils ParameterReshaper<class_stmt>RandomDecoder(Decoder)<block_start><def_stmt>__init__ self num_encoding_dims:int placeholder_params:Union[chex.ArrayTree chex.Array] rng:chex.PRNGKey=jax.random.PRNGKey(0) rademacher:bool=<false> identity:bool=<false> n_devices:Optional[int]=<none> <block_start>super().__init__(num_encoding_dims placeholder_params identity n_devices)<line_sep>self.rademacher=rademacher<line_sep># Instantiate base reshaper class self.base_reshaper=ParameterReshaper(placeholder_params identity n_devices)<line_sep># Sample a random matrix - Gaussian or Rademacher (+1/-1) <if_stmt><not>self.rademacher<block_start>self.project_matrix=jax.random.normal(rng (self.num_encoding_dims self.base_reshaper.total_params))<block_end><else_stmt><block_start>self.project_matrix=jax.random.rademacher(rng (self.num_encoding_dims self.base_reshaper.total_params))<block_end><block_end><def_stmt>reshape self x:chex.Array<arrow>chex.ArrayTree<block_start>"""Perform reshaping for random projection case."""<line_sep># 1. Project parameters to raw dimensionality using pre-sampled matrix project_x=([email protected]_matrix)<line_sep># (popsize, num_enc_dim) x (num_enc_dim, num_dims) # 2. Reshape using base reshaper class x_reshaped=self.base_reshaper.reshape(project_x)<line_sep><return>x_reshaped<block_end><def_stmt>reshape_single self x:chex.Array<arrow>chex.ArrayTree<block_start>"""Reshape a single flat vector using random projection matrix."""<line_sep>x_re=x.reshape(1 self.num_encoding_dims)<line_sep># 1. Project parameters to raw dimensionality using pre-sampled matrix project_x=([email protected]_matrix).squeeze()<line_sep># 2. Reshape using base reshaper class x_reshaped=self.base_reshaper.reshape_single(project_x)<line_sep><return>x_reshaped<block_end><block_end>
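# Hedged usage sketch, not part of the original module above; the placeholder pytree, the 8 encoding dimensions, and the population size of 16 are illustrative assumptions. It shows the intended flow: low-dimensional candidates are multiplied by the pre-sampled projection matrix and reshaped back into full parameter pytrees.
<import_stmt>jax<import_stmt>jax.numpy<as>jnp<line_sep>placeholder={"w":jnp.zeros((4 3)) "b":jnp.zeros(3)}<line_sep>decoder=RandomDecoder(num_encoding_dims=8 placeholder_params=placeholder)<line_sep># A population of 16 candidates, each encoded with num_encoding_dims values. population=jax.random.normal(jax.random.PRNGKey(1) (16 8))<line_sep>reshaped_params=decoder.reshape(population)<line_sep>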
<import_from_stmt>splashgen Component<class_stmt>CTAButton(Component)<block_start><def_stmt>__init__ self link:str text:str<arrow><none><block_start>self.link=link<line_sep>self.text=text<block_end><def_stmt>render self<arrow>str<block_start><return>f'<a href="{self.link}" class="btn btn-primary btn-lg px-4">{self.text}</a>'<block_end><block_end>
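# Hedged usage sketch, not part of the original component above; the URL and label are illustrative assumptions. render() interpolates both values into the Bootstrap-styled anchor tag and returns it as a string.
button=CTAButton(link="https://example.com/signup" text="Sign up")<line_sep>html=button.render()<line_sep>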
"""Implementation of the 'embed' operation."""<import_from_stmt>..lib Constant SymbolicKeyInstance macro sensitivity_transform<line_sep>@macro<async_keyword><def_stmt>embed info x<block_start>"""Return a constant that embeds the identity of the input node."""<line_sep>typ=sensitivity_transform(<await>x.get())<line_sep>key=SymbolicKeyInstance(x.node typ)<line_sep><return>Constant(key)<block_end>__operation_defaults__={"name":"embed" "registered_name":"embed" "mapping":embed "python_implementation":<none> }<line_sep>
""" Some analysis of informational content of TLDR-Auth and TLDR-PR """<import_stmt>os<import_stmt>csv<import_from_stmt>collections Counter defaultdict<line_sep>INFILE='tldr_analyze_nuggets/tldr_auth_pr_gold_nuggets_2020-03-31.csv'<line_sep># Q1: How many nuggets do TLDRs contain? # A: Interesting, both author and PR have nearly identical distributions: # From most to least common: 3 nuggets -> 2 nuggets -> 4 nuggets -> 1 nugget -> ... # Auth proportions: (34%) (26%) (18%) (11%) # PR proportions: (32%) (30%) (26%) ( 9%) author_num_nuggets_to_count={i:0<for>i range(0 7)}<line_sep>pr_num_nuggets_to_count={i:0<for>i range(0 7)}<with_stmt>open(INFILE)<as>f_in<block_start>reader=csv.DictReader(f_in)<for_stmt>row reader<block_start>num_nuggets=sum(map(int [row['area_field_topic'] row['problem_motivation'] row['mode_of_contrib'] row['details_descrip'] row['results_findings'] row['value_signif']]))<if_stmt>row['auth_pr']<eq>'auth_gold'<block_start>author_num_nuggets_to_count[num_nuggets]<augadd>1<block_end><if_stmt>row['auth_pr']<eq>'pr_gold'<block_start>pr_num_nuggets_to_count[num_nuggets]<augadd>1<block_end><block_end><block_end>print({k:f'{100<times>v/76:.2f}'<for>k,v author_num_nuggets_to_count.items()})<line_sep>print({k:f'{100<times>v/76:.2f}'<for>k,v pr_num_nuggets_to_count.items()})<line_sep># Q2: What are the most common TLDR templates? # A: Interesting, the top 2 templates (total 42 occurrences) are same between Authors and PRs. # a) (area_field_topic, mode_of_contrib, details_descrip) # b) (area_field_topic, mode_of_contrib) # After that, next 3 starts deviating a bit, but still with the same base: # authors = (area_field_topic, mode_of_contrib, results_findings) # (area_field_topic, problem_motivation, mode_of_contrib) # (area_field_topic, mode_of_contrib, details_descrip, value_signif) # pr = (area_field_topic, problem_motivation, mode_of_contrib, details_descrip) # = (area_field_topic, details_descrip) # = (area_field_topic, mode_of_contrib, results_findings) # same as top 3rd in Auth author_template_to_count=Counter()<line_sep>pr_template_to_count=Counter()<with_stmt>open(INFILE)<as>f_in<block_start>reader=csv.DictReader(f_in)<for_stmt>row reader<block_start>template=(row['area_field_topic'] row['problem_motivation'] row['mode_of_contrib'] row['details_descrip'] row['results_findings'] row['value_signif'])<if_stmt>row['auth_pr']<eq>'auth_gold'<block_start>author_template_to_count[template]<augadd>1<block_end><if_stmt>row['auth_pr']<eq>'pr_gold'<block_start>pr_template_to_count[template]<augadd>1<block_end><block_end><block_end>print(author_template_to_count.most_common())<line_sep>print(pr_template_to_count.most_common())<line_sep># Q3: How often does 'area_field_topic' and 'mode_of_contrib' co-occur? 
# n_auth = 48/76 = 63% # n_pr = 54/76 = 71% n_auth=0<line_sep>n_pr=0<with_stmt>open(INFILE)<as>f_in<block_start>reader=csv.DictReader(f_in)<for_stmt>row reader<block_start><if_stmt>row['area_field_topic']<eq>'1'<and>row['mode_of_contrib']<eq>'1'<block_start><if_stmt>row['auth_pr']<eq>'auth_gold'<block_start>n_auth<augadd>1<block_end><if_stmt>row['auth_pr']<eq>'pr_gold'<block_start>n_pr<augadd>1<block_end><block_end><block_end><block_end># Q4: Find examples with exactly the same nuggets but different styles # # H1-IBSgMz # B16yEqkCZ # SySpa-Z0Z # rJegl2C9K7 # HJWpQCa7z # rkgpCoRctm # rkxkHnA5tX # B1e9csRcFm # r1kj4ACp- # Hk91SGWR- # r1GaAjRcF7 # SkGMOi05FQ # pid_to_templates=defaultdict(set)<with_stmt>open(INFILE)<as>f_in<block_start>reader=csv.DictReader(f_in)<for_stmt>row reader<block_start>template=(row['area_field_topic'] row['problem_motivation'] row['mode_of_contrib'] row['details_descrip'] row['results_findings'] row['value_signif'])<line_sep>pid_to_templates[row['paper_id']].add(template)<block_end><block_end><for_stmt>pid,templates pid_to_templates.items()<block_start><if_stmt>len(templates)<eq>1<block_start>print(pid)<block_end><block_end>
<import_stmt>argparse<import_stmt>bot3<import_stmt>datetime<import_stmt>praw3<as>praw<import_stmt>random<import_stmt>sqlite3<import_stmt>string<import_stmt>subprocess<import_stmt>sys<import_stmt>time<import_stmt>tkinter<import_stmt>traceback<import_stmt>types<import_from_stmt>voussoirkit betterhelp<import_from_stmt>voussoirkit mutables<import_from_stmt>voussoirkit operatornotify<import_from_stmt>voussoirkit pipeable<import_from_stmt>voussoirkit sqlhelpers<import_from_stmt>voussoirkit vlogging<line_sep>log=vlogging.getLogger(__name__ 'sb')<line_sep>USERAGENT=''' /u/GoldenSights SubredditBirthdays data collection: Gathering the creation dates of subreddits for visualization. More at https://github.com/voussoir/reddit/tree/master/SubredditBirthdays '''.replace('\n' ' ').strip()<line_sep>LOWERBOUND_STR='2qh0j'<line_sep>LOWERBOUND_INT=4594339<line_sep>FORMAT_MEMBER='{idstr:>5s}, {human}, {nsfw}, {name:<25s} {subscribers:>10,}'<line_sep>FORMAT_MESSAGE_NEW='New: {idstr:>5s} : {human} : {nsfw} : {name} : {subscribers}'<line_sep>FORMAT_MESSAGE_UPDATE='Upd: {idstr:>5s} : {human} : {nsfw} : {name} : {subscribers} ({subscriber_diff})'<line_sep>RANKS_UP_TO=20000<line_sep># For the files sorted by subscriber count, display ranks up to this many. GOODCHARS=string.ascii_letters+string.digits+'_'<line_sep>DB_INIT=''' BEGIN; -------------------------------------------------------------------------------- CREATE TABLE IF NOT EXISTS subreddits( idint INT, idstr TEXT, created INT, human TEXT, name TEXT, nsfw INT, subscribers INT, jumble INT, subreddit_type INT, submission_type INT, last_scanned INT ); CREATE INDEX IF NOT EXISTS index_subreddits_idstr ON subreddits(idstr); CREATE INDEX IF NOT EXISTS index_subreddits_name ON subreddits(name); CREATE INDEX IF NOT EXISTS index_subreddits_created ON subreddits(created); CREATE INDEX IF NOT EXISTS index_subreddits_subscribers ON subreddits(subscribers); --CREATE INDEX IF NOT EXISTS index_subreddits_idint ON subreddits(idint); --CREATE INDEX IF NOT EXISTS index_subreddits_last_scanned ON subreddits(last_scanned); -------------------------------------------------------------------------------- CREATE TABLE IF NOT EXISTS suspicious( idint INT, idstr TEXT, name TEXT, subscribers INT, noticed INT ); -------------------------------------------------------------------------------- CREATE TABLE IF NOT EXISTS popular( idstr TEXT, last_seen INT ); CREATE INDEX IF NOT EXISTS index_popular_idstr on popular(idstr); CREATE INDEX IF NOT EXISTS index_popular_last_seen on popular(last_seen); -------------------------------------------------------------------------------- CREATE TABLE IF NOT EXISTS jumble( idstr TEXT, last_seen INT ); CREATE INDEX IF NOT EXISTS index_jumble_idstr on jumble(idstr); CREATE INDEX IF NOT EXISTS index_jumble_last_seen on jumble(last_seen); -------------------------------------------------------------------------------- COMMIT; '''<line_sep>sql=sqlite3.connect('D:\\git\\reddit\\subredditbirthdays\\sb.db')<line_sep>sqlhelpers.executescript(conn=sql script=DB_INIT)<line_sep>cur=sql.cursor()<line_sep># These numbers are used for interpreting the tuples that come from SELECT SQL_SUBREDDIT_COLUMNS=['idint' 'idstr' 'created' 'human' 'name' 'nsfw' 'subscribers' 'subreddit_type' 'submission_type' 'last_scanned' ]<line_sep>SQL_SUSPICIOUS_COLUMNS=['idint' 'idstr' 'name' 'subscribers' 'noticed' ]<line_sep>SQL_SUBREDDIT={key:index<for>(index key) enumerate(SQL_SUBREDDIT_COLUMNS)}<line_sep>noinfolist=[]<line_sep>monthnumbers={'Jan':'01' 'Feb':'02' 'Mar':'03' 'Apr':'04' 
'May':'05' 'Jun':'06' 'Jul':'07' 'Aug':'08' 'Sep':'09' 'Oct':'10' 'Nov':'11' 'Dec':'12' }<line_sep>SUBREDDIT_TYPE={'public':0 'restricted':1 'private':2 'archived':3 <none>:4 'employees_only':5 'gold_restricted':6 'gold_only':7 'user':8 }<line_sep>SUBMISSION_TYPE={'any':0 'link':1 'self':2 <none>:3 }<line_sep>SUBREDDIT_TYPE_REVERSE={v:k<for>(k v) SUBREDDIT_TYPE.items()}<line_sep>SUBMISSION_TYPE_REVERSE={v:k<for>(k v) SUBMISSION_TYPE.items()}<line_sep>SUBMISSION_OBJ=praw.objects.Submission<line_sep>SUBREDDIT_OBJ=praw.objects.Subreddit<line_sep>COMMENT_OBJ=praw.objects.Comment<line_sep>r=<none><def_stmt>login <block_start><global>r<line_sep>print('Logging in.')<line_sep>r=praw.Reddit(USERAGENT)<line_sep>bot3.login(r)<block_end><def_stmt>base36encode number alphabet='0123456789abcdefghijklmnopqrstuvwxyz'<block_start>'''Converts an integer to a base36 string.'''<if_stmt><not>isinstance(number (int))<block_start><raise>TypeError('number must be an integer')<block_end>base36=''<line_sep>sign=''<if_stmt>number<l>0<block_start>sign='-'<line_sep>number=-number<block_end><if_stmt>0<le>number<l>len(alphabet)<block_start><return>sign+alphabet[number]<block_end><while_stmt>number<ne>0<block_start>number,i=divmod(number len(alphabet))<line_sep>base36=alphabet[i]+base36<block_end><return>sign+base36<block_end><def_stmt>base36decode number<block_start><return>int(number 36)<block_end><def_stmt>b36 i<block_start><if_stmt>type(i)<eq>int<block_start><return>base36encode(i)<block_end><if_stmt>type(i)<eq>str<block_start><return>base36decode(i)<block_end><block_end><def_stmt>chunklist inputlist chunksize<block_start><if_stmt>len(inputlist)<l>chunksize<block_start><return>[inputlist]<block_end><else_stmt><block_start>outputlist=[]<while_stmt>len(inputlist)<g>0<block_start>outputlist.append(inputlist[:chunksize])<line_sep>inputlist=inputlist[chunksize:]<block_end><return>outputlist<block_end><block_end><def_stmt>completesweep sleepy=0 orderby='subscribers desc' query=<none><block_start>cur=sql.cursor()<if_stmt>query<is><none><block_start><if_stmt>orderby<is><none><block_start>cur.execute('SELECT idstr FROM subreddits WHERE created > 0')<block_end><else_stmt><block_start>cur.execute('SELECT idstr FROM subreddits WHERE created > 0 ORDER BY %s'%orderby)<block_end><block_end><elif_stmt>query<eq>'restricted'<block_start>cur.execute('SELECT idstr FROM subreddits WHERE created > 0 AND subreddit_type != 0 ORDER BY subscribers DESC')<block_end><else_stmt><block_start>cur.execute(query)<block_end><try_stmt><block_start><while_stmt><true><block_start>hundred=(cur.fetchone()<for>x range(100))<line_sep>hundred=(row<for>row hundred<if>row<is><not><none>)<line_sep>hundred=[idstr<for>(idstr ) hundred]<if_stmt>len(hundred)<eq>0<block_start><break><block_end><for_stmt>retry range(20)<block_start><try_stmt><block_start>processmega(hundred commit=<false>)<line_sep><break><block_end><except_stmt>Exception<block_start>traceback.print_exc()<block_end><block_end>time.sleep(sleepy)<block_end><block_end><except_stmt>KeyboardInterrupt<block_start><pass><block_end><except_stmt>Exception<block_start>traceback.print_exc()<block_end>sql.commit()<block_end><def_stmt>fetchgenerator cur<block_start><while_stmt><true><block_start>fetch=cur.fetchone()<if_stmt>fetch<is><none><block_start><break><block_end><yield>fetch<block_end><block_end><def_stmt>get_jumble_subreddits <block_start>cur.execute('SELECT idstr FROM jumble')<line_sep>fetch=[x[0]<for>x cur.fetchall()]<line_sep>fetch=['\'%s\''%x<for>x 
fetch]<line_sep>fetch='('+','.join(fetch)+')'<line_sep>query='SELECT * FROM subreddits WHERE idstr IN %s'%fetch<line_sep>cur.execute(query)<line_sep>subreddits=cur.fetchall()<line_sep>#subreddits = [] #for subreddit in fetch: # cur.execute('SELECT * FROM subreddits WHERE idstr == ?', [subreddit]) # subreddits.append(cur.fetchone()) <return>subreddits<block_end><def_stmt>get_newest_sub <block_start>brandnewest=list(r.get_new_subreddits(limit=1))[0]<line_sep><return>brandnewest.id<block_end><def_stmt>get_now <block_start><return>datetime.datetime.now(datetime.timezone.utc).timestamp()<block_end><def_stmt>humanize timestamp<block_start>day=datetime.datetime.utcfromtimestamp(timestamp)<line_sep>human=datetime.datetime.strftime(day "%b %d %Y %H:%M:%S UTC")<line_sep><return>human<block_end><def_stmt>modernize limit=<none><block_start>cur.execute('SELECT * FROM subreddits ORDER BY created DESC LIMIT 1')<line_sep>finalitem=cur.fetchone()<line_sep>print('Current final item:')<line_sep>print(finalitem[SQL_SUBREDDIT['idstr']] finalitem[SQL_SUBREDDIT['human']] finalitem[SQL_SUBREDDIT['name']])<line_sep>finalid=finalitem[SQL_SUBREDDIT['idint']]<line_sep>print('Newest item:')<line_sep>newestid=get_newest_sub()<line_sep>print(newestid)<line_sep>newestid=b36(newestid)<if_stmt>limit<is><not><none><block_start>newestid=min(newestid finalid+limit-1)<block_end>modernlist=[b36(x)<for>x range(finalid newestid+1)]<if_stmt>len(modernlist)<g>0<block_start>processmega(modernlist commit=<false>)<line_sep>sql.commit()<block_end><block_end><def_stmt>modernize_forever limit=10000<block_start><while_stmt><true><block_start><try_stmt><block_start>modernize(limit=limit)<block_end><except_stmt>Exception<block_start>log.warning(traceback.format_exc())<block_end>time.sleep(300)<block_end><block_end><def_stmt>modsfromid subid<block_start><if_stmt>'t5_'<not><in>subid<block_start>subid='t5_'+subid<block_end>subreddit=r.get_info(thing_id=subid)<line_sep>mods=list(subreddit.get_moderators())<for_stmt>m mods<block_start>print(m)<block_end><return>mods<block_end><def_stmt>normalize_subreddit_object thing<block_start>''' Given a string, Subreddit, Submission, or Comment object, return a Subreddit object. '''<if_stmt>isinstance(thing SUBREDDIT_OBJ)<block_start><return>thing<block_end><if_stmt>isinstance(thing str)<block_start><return>r.get_subreddit(thing)<block_end><if_stmt>isinstance(thing (SUBMISSION_OBJ COMMENT_OBJ))<block_start><return>thing.subreddit<block_end><raise>ValueError('Dont know how to normalize' type(thing))<block_end><def_stmt>process subreddit commit=<true> <block_start>''' Retrieve the API info for the subreddit and save it to the database subreddit: The subreddit(s) to process. Can be an individual or list of: strings or Subreddit, Submission, or Comment objects. 
'''<line_sep>subreddits=[]<line_sep>processed_subreddits=[]<if_stmt>isinstance(subreddit (tuple list set types.GeneratorType))<block_start>subreddits=iter(subreddit)<block_end><else_stmt><block_start>subreddits=[subreddit]<block_end><for_stmt>subreddit subreddits<block_start>subreddit=normalize_subreddit_object(subreddit)<line_sep>created=subreddit.created_utc<line_sep>created_human=humanize(subreddit.created_utc)<line_sep>idstr=subreddit.id<line_sep>is_nsfw=int(subreddit.over18<or>0)<line_sep>name=subreddit.display_name<line_sep>subscribers=subreddit.subscribers<or>0<line_sep>subreddit_type=SUBREDDIT_TYPE[subreddit.subreddit_type]<line_sep>submission_type=SUBMISSION_TYPE[subreddit.submission_type]<line_sep>now=int(get_now())<line_sep>cur.execute('SELECT * FROM subreddits WHERE idstr == ?' [idstr])<line_sep>f=cur.fetchone()<if_stmt>f<is><none><block_start>message=FORMAT_MESSAGE_NEW.format(idstr=idstr human=created_human nsfw=is_nsfw name=name subscribers=subscribers )<line_sep>print(message)<line_sep>data={'idint':b36(idstr) 'idstr':idstr 'created':created 'human':created_human 'nsfw':is_nsfw 'name':name 'subscribers':subscribers 'subreddit_type':subreddit_type 'submission_type':submission_type 'last_scanned':now }<line_sep>(qmarks bindings)=sqlhelpers.insert_filler(SQL_SUBREDDIT_COLUMNS data)<line_sep>query='INSERT INTO subreddits VALUES(%s)'%qmarks<line_sep>cur.execute(query bindings)<block_end><else_stmt><block_start>old_subscribers=f[SQL_SUBREDDIT['subscribers']]<line_sep>subscriber_diff=subscribers-old_subscribers<if_stmt>subscribers<eq>0<and>old_subscribers<g>2<and>subreddit_type<ne>SUBREDDIT_TYPE['private']<block_start>print('SUSPICIOUS %s'%name)<line_sep>data={'idint':b36(idstr) 'idstr':idstr 'name':name 'subscribers':old_subscribers 'noticed':int(get_now()) }<line_sep>(qmarks bindings)=sqlhelpers.insert_filler(SQL_SUSPICIOUS_COLUMNS data)<line_sep>query='INSERT INTO suspicious VALUES(%s)'%qmarks<line_sep>cur.execute(query bindings)<block_end>message=FORMAT_MESSAGE_UPDATE.format(idstr=idstr human=created_human nsfw=is_nsfw name=name subscribers=subscribers subscriber_diff=subscriber_diff)<line_sep>print(message)<line_sep>data={'idstr':idstr 'subscribers':subscribers 'subreddit_type':subreddit_type 'submission_type':submission_type 'last_scanned':now }<line_sep>(query bindings)=sqlhelpers.update_filler(data where_key='idstr')<line_sep>query='UPDATE subreddits %s'%query<line_sep>cur.execute(query bindings)<line_sep>#cur.execute(''' # UPDATE subreddits SET # subscribers = @subscribers, # subreddit_type = @subreddit_type, # submission_type = @submission_type, # last_scanned = @last_scanned # WHERE idstr == @idstr # ''', data) <block_end>processed_subreddits.append(subreddit)<block_end><if_stmt>commit<block_start>sql.commit()<block_end><return>processed_subreddits<block_end><def_stmt>process_input <block_start><while_stmt><true><block_start>x=input('p> ')<try_stmt><block_start>process(x)<block_end><except_stmt>KeyboardInterrupt<block_start><break><block_end><except_stmt>Exception<block_start>traceback.print_exc()<block_end><block_end><block_end><def_stmt>processmega srinput isrealname=<false> chunksize=100 docrash=<false> commit=<true><block_start>''' `srinput` can be a list of subreddit IDs or fullnames, or display names if `isrealname` is also True. isrealname: Interpret `srinput` as a list of actual subreddit names, not IDs. chunksize: The number of fullnames to get from api/info at once. 
docrash: If False, ignore HTTPExceptions and keep moving forward. '''<line_sep><global>noinfolist<if_stmt>type(srinput)<eq>str<block_start>srinput=srinput.replace(' ' '')<line_sep>srinput=srinput.split(',')<block_end><if_stmt>isrealname<block_start><for_stmt>subname srinput<block_start>process(subname)<block_end><return><block_end>processed_subreddits=[]<line_sep>remaining=len(srinput)<for_stmt>x range(len(srinput))<block_start><if_stmt>'t5_'<not><in>srinput[x]<block_start>srinput[x]='t5_'+srinput[x]<block_end><block_end>srinput=chunklist(srinput chunksize)<for_stmt>subset srinput<block_start><try_stmt><block_start>print(subset[0]+' - '+subset[-1] remaining)<line_sep>subreddits=r.get_info(thing_id=subset)<try_stmt><block_start><for_stmt>sub subreddits<block_start>processed_subreddits.extend(process(sub commit=commit))<block_end><block_end><except_stmt>TypeError<block_start>traceback.print_exc()<line_sep>noinfolist=subset[:]<if_stmt>len(noinfolist)<eq>1<block_start>print('Received no info. See variable `noinfolist`')<block_end><else_stmt>#for item in noinfolist: # processmega([item]) <block_start><pass><block_end><block_end>remaining<augsub>len(subset)<block_end><except_stmt>praw.errors.HTTPException<as>e<block_start>traceback.print_exc()<line_sep>print(vars(e))<if_stmt>docrash<block_start><raise><block_end><block_end><block_end><return>processed_subreddits<block_end><def_stmt>processrand count doublecheck=<false> sleepy=0<block_start>''' Gets random IDs between a known lower bound and the newest collection, and pass them into processmega(). count: How many you want doublecheck: Should it reroll duplicates before running sleepy: Used to sleep longer than the required 2 seconds '''<line_sep>lower=LOWERBOUND_INT<line_sep>cur.execute('SELECT * FROM subreddits ORDER BY idstr DESC LIMIT 1')<line_sep>upper=cur.fetchone()[SQL_SUBREDDIT['idstr']]<line_sep>print('<'+b36(lower)+',' upper+'>' end=', ')<line_sep>upper=b36(upper)<line_sep>totalpossible=upper-lower<line_sep>print(totalpossible 'possible')<line_sep>rands=set()<for_stmt>x range(count)<block_start>rand=random.randint(lower upper)<line_sep>rand=b36(rand)<if_stmt>doublecheck<block_start><while_stmt>rand<in>rands<block_start>rand=random.randint(lower upper)<line_sep>rand=b36(rand)<block_end><block_end>rands.add(rand)<block_end>processmega(rands)<block_end><def_stmt>show <block_start>file_all_time=open('show\\all-time.txt' 'w')<line_sep>file_all_name=open('show\\all-name.txt' 'w')<line_sep>file_all_subscribers=open('show\\all-subscribers.txt' 'w')<line_sep>file_dirty_time=open('show\\dirty-time.txt' 'w')<line_sep>file_dirty_name=open('show\\dirty-name.txt' 'w')<line_sep>file_dirty_subscribers=open('show\\dirty-subscribers.txt' 'w')<line_sep>file_jumble_sfw=open('show\\jumble.txt' 'w')<line_sep>file_jumble_nsfw=open('show\\jumble-nsfw.txt' 'w')<line_sep>file_duplicates=open('show\\duplicates.txt' 'w')<line_sep>file_missing=open('show\\missing.txt' 'w')<line_sep>file_stats=open('show\\statistics.txt' 'w')<line_sep>file_readme=open('README.md' 'r')<line_sep>cur.execute('SELECT COUNT(idstr) FROM subreddits WHERE created != 0')<line_sep>itemcount_valid=cur.fetchone()[0]<line_sep>itemcount_nsfw=0<line_sep>name_lengths={}<line_sep>print(itemcount_valid 'subreddits')<line_sep>print('Writing time files.')<line_sep>cur.execute('SELECT * FROM subreddits WHERE created !=0 ORDER BY created ASC')<for_stmt>item fetchgenerator(cur)<block_start>itemf=memberformat(item)<line_sep>print(itemf 
file=file_all_time)<if_stmt>int(item[SQL_SUBREDDIT['nsfw']])<eq>1<block_start>print(itemf file=file_dirty_time)<line_sep>itemcount_nsfw<augadd>1<block_end><block_end>file_all_time.close()<line_sep>file_dirty_time.close()<line_sep>print('Writing name files and duplicates.')<line_sep>previousitem=<none><line_sep>inprogress=<false><line_sep>cur.execute('SELECT * FROM subreddits WHERE created != 0 ORDER BY LOWER(name) ASC')<for_stmt>item fetchgenerator(cur)<block_start><if_stmt>previousitem<is><not><none><and>item[SQL_SUBREDDIT['name']]<eq>previousitem[SQL_SUBREDDIT['name']]<block_start>print(memberformat(previousitem) file=file_duplicates)<line_sep>inprogress=<true><block_end><elif_stmt>inprogress<block_start>print(memberformat(previousitem) file=file_duplicates)<line_sep>inprogress=<false><block_end>previousitem=item<line_sep>name_length=len(item[SQL_SUBREDDIT['name']])<line_sep>name_lengths[name_length]=name_lengths.get(name_length 0)+1<line_sep>itemf=memberformat(item)<line_sep>print(itemf file=file_all_name)<if_stmt>int(item[SQL_SUBREDDIT['nsfw']])<eq>1<block_start>print(itemf file=file_dirty_name)<block_end><block_end>file_duplicates.close()<line_sep>file_all_name.close()<line_sep>file_dirty_name.close()<line_sep>name_lengths={'%02d'%k:v<for>(k v) name_lengths.items()}<line_sep>print('Writing subscriber files.')<line_sep>ranks={'all':1 'nsfw':1}<def_stmt>write_with_rank itemf ranktype filehandle<block_start>index=ranks[ranktype]<if_stmt>index<le>RANKS_UP_TO<block_start>itemf<augadd>'{:>9,}'.format(index)<block_end>print(itemf file=filehandle)<line_sep>ranks[ranktype]<augadd>1<block_end>cur.execute('SELECT * FROM subreddits WHERE created != 0 ORDER BY subscribers DESC')<for_stmt>item fetchgenerator(cur)<block_start>itemf=memberformat(item)<line_sep>write_with_rank(itemf 'all' file_all_subscribers)<if_stmt>int(item[SQL_SUBREDDIT['nsfw']])<eq>1<block_start>write_with_rank(itemf 'nsfw' file_dirty_subscribers)<block_end><block_end>file_all_subscribers.close()<line_sep>file_dirty_subscribers.close()<line_sep>print('Writing jumble.')<for_stmt>item get_jumble_subreddits()<block_start>itemf=memberformat(item)<if_stmt>int(item[SQL_SUBREDDIT['nsfw']])<eq>0<block_start>print(itemf file=file_jumble_sfw)<block_end><else_stmt><block_start>print(itemf file=file_jumble_nsfw)<block_end><block_end>file_jumble_sfw.close()<line_sep>file_jumble_nsfw.close()<line_sep>print('Writing missing.')<line_sep>cur.execute('SELECT * FROM subreddits WHERE created == 0 ORDER BY idstr ASC')<for_stmt>item fetchgenerator(cur)<block_start>print(item[SQL_SUBREDDIT['idstr']] file=file_missing)<block_end>file_missing.close()<line_sep>print('Writing statistics.')<line_sep>headline='Collected {0:,} subreddits\n'.format(itemcount_valid)<line_sep>statisticoutput=headline+'\n\n'<line_sep>statisticoutput<augadd>' SFW: {0:,}\n'.format(itemcount_valid-itemcount_nsfw)<line_sep>statisticoutput<augadd>'NSFW: {0:,}\n\n\n'.format(itemcount_nsfw)<line_sep>statisticoutput<augadd>'Subreddit type:\n'<line_sep>subreddit_types=list(SUBREDDIT_TYPE_REVERSE.keys())<line_sep>subreddit_types.sort()<line_sep>subreddit_types=[SUBREDDIT_TYPE_REVERSE[k]<for>k subreddit_types]<for_stmt>subreddit_type subreddit_types<block_start>index=SUBREDDIT_TYPE[subreddit_type]<line_sep>cur.execute('SELECT COUNT(*) FROM subreddits WHERE created != 0 AND subreddit_type == ?' 
[index])<line_sep>count=cur.fetchone()[0]<line_sep>statisticoutput<augadd>'{:>16s}: {:,}\n'.format(str(subreddit_type) count)<block_end>statisticoutput<augadd>'\n'<line_sep>statisticoutput<augadd>'Submission type (None means approved submitters only or inaccessible):\n'<line_sep>submission_types=list(SUBMISSION_TYPE_REVERSE.keys())<line_sep>submission_types.sort()<line_sep>submission_types=[SUBMISSION_TYPE_REVERSE[k]<for>k submission_types]<for_stmt>submission_type submission_types<block_start>index=SUBMISSION_TYPE[submission_type]<line_sep>cur.execute('SELECT COUNT(*) FROM subreddits WHERE created != 0 AND submission_type == ?' [index])<line_sep>count=cur.fetchone()[0]<line_sep>statisticoutput<augadd>'{:>16s}: {:,}\n'.format(str(submission_type) count)<block_end>statisticoutput<augadd>'\n\n'<line_sep>cur.execute('SELECT * FROM subreddits WHERE created != 0 ORDER BY created DESC limit 20000')<line_sep>last20k=cur.fetchall()<line_sep>timediff=last20k[0][SQL_SUBREDDIT['created']]-last20k[-1][SQL_SUBREDDIT['created']]<line_sep>statisticoutput<augadd>'Over the last 20,000 subreddits:\n'<line_sep>statisticoutput<augadd>'%.2f subs are created each hour\n'%(20000/(timediff/3600))<line_sep>statisticoutput<augadd>'%.2f subs are created each day\n\n\n'%(20000/(timediff/86400))<line_sep>################################ # Breakdown by time period # hour of day, day of week, day of month, month of year, month-year, year <def_stmt>datetimedict statsdict strf<block_start>statsdict[strf]=statsdict.get(strf 0)+1<block_end>hoddict={}<line_sep>dowdict={}<line_sep>domdict={}<line_sep>moydict={}<line_sep>myrdict={}<line_sep>yerdict={}<line_sep>print(' performing time breakdown')<line_sep>cur.execute('SELECT * FROM subreddits WHERE created != 0')<for_stmt>item fetchgenerator(cur)<block_start>dt=datetime.datetime.utcfromtimestamp(item[SQL_SUBREDDIT['created']])<line_sep>datetimedict(hoddict dt.strftime('%H'))# 01 datetimedict(dowdict dt.strftime('%A'))# Monday datetimedict(domdict dt.strftime('%d'))# 01 datetimedict(moydict dt.strftime('%B'))# January datetimedict(myrdict dt.strftime('%b%Y'))# Jan2015 datetimedict(yerdict dt.strftime('%Y'))<block_end># 2015 print(' forming columns')<line_sep>plotnum=0<line_sep>mapping=[{'label':'hour of day' 'specialsort':<none> 'dict':hoddict} {'label':'day of week' 'specialsort':'day' 'dict':dowdict} {'label':'day of month' 'specialsort':<none> 'dict':domdict} {'label':'month of year' 'specialsort':'month' 'dict':moydict} {'label':'year' 'specialsort':<none> 'dict':yerdict} {'label':'month-year' 'specialsort':'monthyear' 'dict':myrdict} {'label':'name length' 'specialsort':<none> 'dict':name_lengths} ]<for_stmt>(index collection) enumerate(mapping)<block_start>d=collection['dict']<line_sep>dkeys_primary=list(d.keys())<line_sep>dkeys_primary.sort(key=d.get)<line_sep>dkeys_secondary=specialsort(dkeys_primary collection['specialsort'])<line_sep>dvals=[d[x]<for>x dkeys_secondary]<line_sep>statisticoutput<augadd>collection['label']+'\n'<for_stmt>(keyindex key) enumerate(dkeys_primary)<block_start>val=d[key]<line_sep>val='{0:,}'.format(val)<line_sep>spacer=34-(len(key)+len(val))<line_sep>spacer='.'<times>spacer<line_sep>statisticoutput<augadd>key+spacer+val<line_sep>statisticoutput<augadd>' 
'<times>8<line_sep>key=dkeys_secondary[keyindex]<line_sep>val=d[key]<line_sep>val='{0:,}'.format(val)<line_sep>spacer=34-(len(key)+len(val))<line_sep>spacer='.'<times>spacer<line_sep>statisticoutput<augadd>key+spacer+val<line_sep>statisticoutput<augadd>'\n'<block_end>statisticoutput<augadd>'\n'<if_stmt>d<is>name_lengths<block_start>upperlabel='Name Lengths'<block_end><else_stmt><block_start>upperlabel='Subreddits created - %s'%collection['label']<block_end>plotbars(filename=upperlabel upperlabel=upperlabel inputdata=[dkeys_secondary dvals] colormid='#43443a' forcezero=<true> )<line_sep>plotnum<augadd>1<if_stmt>d<is>myrdict# In addition to the total month graph, plot the last 15 months <block_start>plotbars(filename=upperlabel+' short' upperlabel=upperlabel+' short' inputdata=[dkeys_secondary[-15:] dvals[-15:]] colorbg='#272822' colorfg='#000' colormid='#43443a' forcezero=<true> )<line_sep>plotnum<augadd>1<block_end><block_end># # Breakdown by time period ################################ print(statisticoutput file=file_stats)<line_sep>file_stats.close()<line_sep>print('Updating Readme')<line_sep>readmelines=file_readme.readlines()<line_sep>file_readme.close()<line_sep>readmelines[3]='#####'+headline<line_sep>readmelines[5]='#####[Today\'s jumble](http://reddit.com/r/%s)\n'%jumble(nsfw=<false>)<line_sep>file_readme=open('README.md' 'w')<line_sep>file_readme.write(''.join(readmelines))<line_sep>file_readme.close()<line_sep>time.sleep(2)<line_sep>subprocess.call('PNGCREATOR.bat' shell=<true> cwd='spooky')<line_sep>print()<block_end><def_stmt>memberformat member<block_start>member=FORMAT_MEMBER.format(idstr=member[SQL_SUBREDDIT['idstr']] human=member[SQL_SUBREDDIT['human']] nsfw=member[SQL_SUBREDDIT['nsfw']] name=member[SQL_SUBREDDIT['name']] subscribers=member[SQL_SUBREDDIT['subscribers']] )<line_sep><return>member<block_end><def_stmt>dictadding targetdict item<block_start><if_stmt>item<not><in>targetdict<block_start>targetdict[item]=1<block_end><else_stmt><block_start>targetdict[item]=targetdict[item]+1<block_end><return>targetdict<block_end><def_stmt>specialsort inlist mode=<none><block_start><if_stmt>mode<eq>'month'<block_start><return>['January' 'February' 'March' 'April' 'May' 'June' 'July' 'August' 'September' 'October' 'November' 'December']<block_end><if_stmt>mode<eq>'day'<block_start><return>['Sunday' 'Monday' 'Tuesday' 'Wednesday' 'Thursday' 'Friday' 'Saturday']<block_end><if_stmt>mode<eq>'monthyear'<block_start>td={}<for_stmt>item inlist<block_start>nitem=item<line_sep>nitem=item.replace(item[:3] monthnumbers[item[:3]])<line_sep>nitem=nitem[3:]+nitem[:3]<line_sep>td[item]=nitem<block_end>tdkeys=list(td.keys())<line_sep>#print(td) tdkeys.sort(key=td.get)<line_sep>#print(tdkeys) <return>tdkeys<block_end><if_stmt>mode<is><none><block_start><return>sorted(inlist)<block_end><block_end><def_stmt>search query='' casesense=<false> filterout=[] subscribers=0 nsfwmode=2 doreturn=<false> sort=<none> <block_start>''' Search for a subreddit by name *str query = The search query "query" = results where "query" is in the name "*query" = results where "query" is at the end of the name "query*" = results where "query" is at the beginning of the name "*query*" = results where "query" is in the middle of the name bool casesense = is the search case sensitive list filterout = [list, of, words] to omit from search. Follows casesense int subscribers = minimum number of subscribers int nsfwmode = 0 - Clean only 1 - Dirty only 2 - All int sort = The integer representing the sql column to sort by. 
Defaults to no sort. '''<line_sep>querys=''.join([c<for>c query<if>c<in>GOODCHARS])<line_sep>queryx='%%{term}%%'.format(term=querys)<if_stmt>'!'<in>query<block_start>cur.execute('SELECT * FROM subreddits WHERE name LIKE ?' [querys])<line_sep><return>cur.fetchone()<block_end><if_stmt>nsfwmode<in>[0 1]<block_start>cur.execute('SELECT * FROM subreddits WHERE name LIKE ? AND subscribers > ? AND nsfw=?' [queryx subscribers nsfwmode])<block_end><else_stmt><block_start>cur.execute('SELECT * FROM subreddits WHERE name LIKE ? AND subscribers > ?' [queryx subscribers])<block_end>results=[]<if_stmt>casesense<is><false><block_start>querys=querys.lower()<line_sep>filterout=[x.lower()<for>x filterout]<block_end><if_stmt>'*'<in>query<block_start>positional=<true><line_sep>front=query[-1]<eq>'*'<line_sep>back=query[0]<eq>'*'<if_stmt>front<and>back<block_start>mid=<true><line_sep>front=<false><line_sep>back=<false><block_end><else_stmt><block_start>mid=<false><block_end><block_end><else_stmt><block_start>positional=<false><block_end>lenq=len(querys)<for_stmt>item fetchgenerator(cur)<block_start>name=item[SQL_SUBREDDIT['name']]<if_stmt>casesense<is><false><block_start>name=name.lower()<block_end><if_stmt>querys<not><in>name#print('%s not in %s' % (querys, name)) <block_start><continue><block_end><if_stmt>(positional<and>front)<and>(name[:lenq]<ne>querys)#print('%s not front %s (%s)' % (querys, name, name[:lenq])) <block_start><continue><block_end><if_stmt>(positional<and>back)<and>(name[-lenq:]<ne>querys)#print('%s not back %s (%s)' % (querys, name, name[-lenq:])) <block_start><continue><block_end><if_stmt>(positional<and>mid)<and>(querys<not><in>name[1:-1])#print('%s not mid %s (%s)' % (querys, name, name[1:-1])) <block_start><continue><block_end><if_stmt>any(filters<in>name<for>filters filterout)#print('%s not filter %s' % (querys, name)) <block_start><continue><block_end>results.append(item)<block_end><if_stmt>len(results)<eq>0<block_start><if_stmt>doreturn<block_start><return>[]<block_end><else_stmt><block_start><return><block_end><block_end><if_stmt>sort<is><not><none><block_start>is_numeric=isinstance(results[0][sort] int)<if_stmt>is_numeric<block_start>results.sort(key=<lambda>x:x[sort] reverse=<true>)<block_end><else_stmt><block_start>results.sort(key=<lambda>x:x[sort].lower())<block_end><block_end><if_stmt>doreturn<is><true><block_start><return>results<block_end><else_stmt><block_start><for_stmt>item results<block_start>print(item)<block_end><block_end><block_end><def_stmt>findwrong <block_start>cur.execute('SELECT * FROM subreddits WHERE name != ?' ['?'])<line_sep>fetch=cur.fetchall()<line_sep>fetch.sort(key=<lambda>x:x[SQL_SUBREDDIT['idstr']])<line_sep>#sorted by ID fetch=fetch[25:]<line_sep>pos=0<line_sep>wrongs=[]<while_stmt>pos<l>len(fetch)-5<block_start><if_stmt>fetch[pos][1]<g>fetch[pos+1][1]<block_start>wrongs.append(str(fetch[pos-1]))<line_sep>wrongs.append(str(fetch[pos]))<line_sep>wrongs.append(str(fetch[pos+1])+"\n")<block_end>pos<augadd>1<block_end><for_stmt>wrong wrongs<block_start>print(wrong)<block_end><block_end><def_stmt>processjumble count nsfw=<false><block_start><for_stmt>x range(count)<block_start>sub=r.get_random_subreddit(nsfw=nsfw)<line_sep>process(sub commit=<false>)<line_sep>last_seen=int(get_now())<line_sep>cur.execute('SELECT * FROM jumble WHERE idstr == ?' [sub.id])<if_stmt>cur.fetchone()<is><none><block_start>cur.execute('INSERT INTO jumble VALUES(?, ?)' [sub.id last_seen])<block_end><else_stmt><block_start>cur.execute('UPDATE jumble SET last_seen = ? 
WHERE idstr == ?' [sub.id last_seen])<block_end><block_end>sql.commit()<block_end><def_stmt>processpopular count sort='hot'<block_start>subreddit=r.get_subreddit('popular')<if_stmt>sort<eq>'hot'<block_start>submissions=subreddit.get_hot(limit=count)<block_end><elif_stmt>sort<eq>'new'<block_start>submissions=subreddit.get_new(limit=count)<block_end><else_stmt><block_start><raise>ValueError(sort)<block_end>submissions=list(submissions)<line_sep>subreddit_ids=list({submission.subreddit_id<for>submission submissions})<line_sep>subreddits=processmega(subreddit_ids commit=<false>)<line_sep>last_seen=int(get_now())<for_stmt>subreddit subreddits<block_start>cur.execute('SELECT * FROM popular WHERE idstr == ?' [subreddit.id])<if_stmt>cur.fetchone()<is><none><block_start>cur.execute('INSERT INTO popular VALUES(?, ?)' [subreddit.id last_seen])<block_end><else_stmt><block_start>cur.execute('UPDATE popular SET last_seen = ? WHERE idstr == ?' [last_seen subreddit.id])<block_end><block_end>sql.commit()<block_end><def_stmt>jumble count=20 nsfw=<false><block_start>subreddits=get_jumble_subreddits()<if_stmt>nsfw<is><not><none><block_start>subreddits=[x<for>x subreddits<if>x[SQL_SUBREDDIT['nsfw']]<eq>int(bool(nsfw))]<block_end>random.shuffle(subreddits)<line_sep>subreddits=subreddits[:count]<line_sep>subreddits=[f[:-1]<for>f subreddits]<line_sep>jumble_string=[x[SQL_SUBREDDIT['name']]<for>x subreddits]<line_sep>jumble_string='+'.join(jumble_string)<line_sep>output=[jumble_string subreddits]<line_sep><return>jumble_string<block_end><def_stmt>rounded x rounding=100<block_start><return>int(round(x/rounding))<times>rounding<block_end><def_stmt>plotbars filename inputdata upperlabel='Subreddits created' colorbg="#fff" colorfg="#000" colormid="#888" forcezero=<false> <block_start>''' Create postscript vectors of data filename = Name of the file without extension inputdata = A list of two lists. First list has the x axis labels, second list has the y axis data. x label 14 coresponds to y datum 14, etc. 
'''<line_sep>print(' Printing' filename)<line_sep>t=tkinter.Tk()<line_sep>canvas=tkinter.Canvas(t width=3840 height=2160 bg=colorbg)<line_sep>canvas.pack()<line_sep>#Y axis canvas.create_line(430 250 430 1755 width=10 fill=colorfg)<line_sep>#X axis canvas.create_line(430 1750 3590 1750 width=10 fill=colorfg)<line_sep>dkeys=inputdata[0]<line_sep>dvals=inputdata[1]<line_sep>entrycount=len(dkeys)<line_sep>availablespace=3140<line_sep>availableheight=1490<line_sep>entrywidth=availablespace/entrycount<line_sep>#print(dkeys, dvals, "Width:", entrywidth) smallest=min(dvals)<line_sep>bottom=int(smallest<times>0.75)-5<line_sep>bottom=0<if>bottom<l>8<else>rounded(bottom 10)<if_stmt>forcezero<block_start>bottom=0<block_end>largest=max(dvals)<line_sep>top=int(largest+(largest/5))<line_sep>top=rounded(top 10)<line_sep>print(bottom top)<line_sep>span=top-bottom<line_sep>perpixel=span/availableheight<line_sep>curx=445<line_sep>cury=1735<line_sep>labelx=420<line_sep>labely=255<line_sep>#canvas.create_text(labelx, labely, text=str(top), font=("Consolas", 72), anchor="e") labelspan=130<line_sep>canvas.create_text(175 100 text=upperlabel font=("Consolas" 72) anchor="w" fill=colorfg)<for_stmt>x range(12)<block_start>value=int(top-((labely-245)<times>perpixel))<line_sep>value=rounded(value 10)<line_sep>value='{0:,}'.format(value)<line_sep>canvas.create_text(labelx labely text=value font=("Consolas" 72) anchor="e" fill=colorfg)<line_sep>canvas.create_line(430 labely 3590 labely width=2 fill=colormid)<line_sep>labely<augadd>labelspan<block_end><for_stmt>entrypos range(entrycount)<block_start>entry=dkeys[entrypos]<line_sep>entryvalue=dvals[entrypos]<line_sep>entryx0=curx+10<line_sep>entryx1=entryx0+(entrywidth-10)<line_sep>curx<augadd>entrywidth<line_sep>entryy0=cury<line_sep>entryy1=entryvalue-bottom<line_sep>entryy1=entryy1/perpixel<line_sep>#entryy1 -= bottom #entryy1 /= perpixel entryy1=entryy0-entryy1<line_sep>#print(perpixel, entryy1) #print(entry, entryx0,entryy0, entryx1, entryy1) canvas.create_rectangle(entryx0 entryy0 entryx1 entryy1 fill=colorfg outline=colorfg)<line_sep>font0x=entryx0+(entrywidth/2)<line_sep>font0y=entryy1-5<line_sep>font1y=1760<line_sep>entryvalue=round(entryvalue)<line_sep>fontsize0=len(str(entryvalue))<line_sep>fontsize0=round(entrywidth/fontsize0)+3<line_sep>fontsize0=100<if>fontsize0<g>100<else>fontsize0<line_sep>fontsize1=len(str(entry))<line_sep>fontsize1=round(1.5<times>entrywidth/fontsize1)+5<line_sep>fontsize1=60<if>fontsize1<g>60<else>fontsize1<line_sep>canvas.create_text(font0x font0y text=entryvalue font=("Consolas" fontsize0) anchor="s" fill=colorfg)<line_sep>canvas.create_text(font0x font1y text=entry font=("Consolas" fontsize1) anchor="n" fill=colorfg)<line_sep>canvas.update()<block_end>print(' Done')<line_sep>canvas.postscript(file=f'spooky\\{filename}.ps' width=3840 height=2160)<line_sep>t.geometry("1x1+1+1")<line_sep>t.update()<line_sep>t.destroy()<block_end><def_stmt>_idle <block_start><while_stmt><true><block_start><try_stmt><block_start>modernize()<line_sep>processpopular(100 'new')<line_sep>processjumble(30 nsfw=<false>)<line_sep>processjumble(30 nsfw=<true>)<line_sep>print('Great job!')<block_end><except_stmt>Exception<block_start>traceback.print_exc()<block_end>time.sleep(180)<block_end><block_end># Command line ##################################################################################### DOCSTRING=''' Subreddit Birthdays =================== {modernize_forever} {modernize_once} '''<line_sep>SUB_DOCSTRINGS=dict(modernize_forever=''' modernize_forever: 
Gather new subreddits forever. ''' modernize_once=''' modernize_once: Gather new subreddits once. ''' )<line_sep>DOCSTRING=betterhelp.add_previews(DOCSTRING SUB_DOCSTRINGS)<line_sep>NOTIFY_EVERY_LINE=mutables.Boolean(<false>)<line_sep>@pipeable.ctrlc_return1<def_stmt>modernize_once_argparse args<block_start>login()<line_sep>modernize(limit=args.limit)<line_sep><return>0<block_end>@pipeable.ctrlc_return1<def_stmt>modernize_forever_argparse args<block_start>login()<line_sep>NOTIFY_EVERY_LINE.set(<true>)<line_sep>modernize_forever()<line_sep><return>0<block_end>@operatornotify.main_decorator(subject='sb' notify_every_line=NOTIFY_EVERY_LINE)@vlogging.main_decorator<def_stmt>main argv<block_start>parser=argparse.ArgumentParser(description=DOCSTRING)<line_sep>subparsers=parser.add_subparsers()<line_sep>p_modernize_once=subparsers.add_parser('modernize_once' aliases=['modernize-once'])<line_sep>p_modernize_once.add_argument('--limit' default=<none>)<line_sep>p_modernize_once.set_defaults(func=modernize_once_argparse)<line_sep>p_modernize_forever=subparsers.add_parser('modernize_forever' aliases=['modernize-forever'])<line_sep>p_modernize_forever.set_defaults(func=modernize_forever_argparse)<line_sep><return>betterhelp.subparser_main(argv parser DOCSTRING SUB_DOCSTRINGS)<block_end><if_stmt>__name__<eq>'__main__'<block_start><raise>SystemExit(main(sys.argv[1:]))<block_end>
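The search() helper in the row above encodes its wildcard rules in the docstring; a minimal sketch of how those conventions play out, assuming the subreddits database, cursor and the GOODCHARS / SQL_SUBREDDIT constants are already set up as in the rest of the script (the query terms are only examples):

# plain term: any name containing "ask"
search('ask')
# trailing *: names that start with "ask"
search('ask*')
# leading *: names that end with "ask"
search('*ask')
# both: "ask" somewhere strictly inside the name
search('*ask*')
# SFW only, at least 10,000 subscribers, returned instead of printed,
# sorted descending by the numeric subscribers column
hits = search('ask*', subscribers=10000, nsfwmode=0, doreturn=True,
              sort=SQL_SUBREDDIT['subscribers'])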
<import_from_stmt>pathlib Path<import_from_stmt>.common PathIsh Visit Source last Loc Results DbVisit Context Res<line_sep># add deprecation warning so eventually this may be converted to a namespace package? <import_stmt>warnings<line_sep>warnings.warn("DEPRECATED! Please import directly from 'promnesia.common', e.g. 'from promnesia.common import Visit, Source, Results'" DeprecationWarning)<line_sep>
<import_stmt>cv2<import_stmt>matplotlib.pyplot<as>plt<import_from_stmt>matplotlib.offsetbox OffsetImage AnnotationBbox<import_stmt>numpy<as>np<def_stmt>plot_img img_name location index zoom=0.1<block_start>plt.scatter(index accs[epochs.index(index)]+1 color='#0D7377' linewidths=0.5 marker='v')<line_sep>plt.plot((index location[0]) (accs[epochs.index(index)] location[1]) '--' color='#0D7377' alpha=1)<line_sep>img=plt.imread(f'./gan_images/{img_name}.png')<line_sep>img=cv2.resize(img (350 350))<line_sep># img = img[50:-50, 50:-50, :] ax=plt.gca()<line_sep>im=OffsetImage(img zoom=zoom)<line_sep>ab=AnnotationBbox(im location xycoords='data' frameon=<true> pad=0.2)<line_sep>ax.add_artist(ab)<line_sep>ax.update_datalim(np.column_stack(list(location)))<line_sep>ax.autoscale()<line_sep><return>ab<block_end><def_stmt>smooth_data accs weight<block_start>last=accs[0]<for_stmt>i range(1 len(accs))<block_start>accs[i]=last<times>weight+(1-weight)<times>accs[i]<line_sep>last=accs[i]<block_end><return>accs<block_end>epochs=[0 10 20 30 40 50 100 150 200 300 400 500]<line_sep>accs=[51.95 67.50 71.26 77.34 77.67 77.35 78.14 79.99 78.21 77.94 80.51 76.49]<line_sep>accs=smooth_data(accs 0.7)<line_sep>accs_ci=[0.66 0.71 0.68 0.62 0.63 0.64 0.63 0.60 0.63 0.64 0.60 0.67]<line_sep>training_from_scratch=[51.64]<times>len(accs)<line_sep>bottom=[acc-ci<for>acc,ci zip(accs accs_ci)]<line_sep>top=[acc+ci<for>acc,ci zip(accs accs_ci)]<line_sep>plt.plot(epochs accs color='b' label='LASIUM-N')<line_sep>plt.plot(epochs bottom '--' color='#32E0C4' alpha=0.2)<line_sep>plt.plot(epochs top '--' color='#32E0C4' alpha=0.2)<line_sep>plt.plot(epochs training_from_scratch '--' color='r' alpha=0.5 label='baseline')<line_sep>plt.fill_between(epochs bottom top color='#32E0C4' alpha=.1)<line_sep>plt.xticks([10 30 50 100 200 300 400 500])<line_sep>plt.xlabel('# GAN training epochs' fontsize=14)<line_sep>plt.yticks([40 50 60 70 80 100])<line_sep>plt.ylabel('Accuracy (%)' fontsize=14)<line_sep># plt images plot_img('00_4' location=(10 85) index=0)<line_sep>plot_img('10_4' location=(40 90) index=10)<line_sep>plot_img('30_4' location=(70 85) index=30)<line_sep>plot_img('50_4' location=(100 90) index=50)<line_sep>plot_img('100_4' location=(130 85) index=100)<line_sep>plot_img('200_4' location=(190 90) index=200)<line_sep>plot_img('300_4' location=(300 85) index=300)<line_sep>plot_img('400_4' location=(400 90) index=400)<line_sep>plot_img('500_4' location=(500 85) index=500)<line_sep>plt.scatter(0 accs[epochs.index(0)]+1 color='#0D7377' linewidths=0.5 marker='v' label='Generated image at epoch')<line_sep>plt.subplots_adjust(bottom=0.1 top=0.9 right=0.98 left=0.1)<line_sep>plt.legend(loc='best')<line_sep># plt.show() plt.savefig('./outputs/accuracy_based_on_gan.pdf' dpi=300)<line_sep>
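smooth_data in the plotting script above is a plain exponential moving average over the accuracy list; a small standalone check (input numbers invented for illustration) of what weight=0.7 does:

def smooth_data(accs, weight):
    # same logic as in the script: each point is pulled toward the previous smoothed value
    last = accs[0]
    for i in range(1, len(accs)):
        accs[i] = last * weight + (1 - weight) * accs[i]
        last = accs[i]
    return accs

print([round(v, 2) for v in smooth_data([50.0, 70.0, 60.0, 80.0], 0.7)])
# [50.0, 56.0, 57.2, 64.04]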
<def_stmt>extractKitchennovelCom item<block_start>''' Parser for 'kitchennovel.com' '''<line_sep>vol,chp,frag,postfix=extractVolChapterFragmentPostfix(item['title'])<if_stmt><not>(chp<or>vol)<or>"preview"<in>item['title'].lower()<block_start><return><none><block_end>tagmap=[('Strange World Alchemist Chef' 'Strange World Alchemist Chef' 'translated') ('Imperial Chef Rookie' 'Imperial Chef Rookie' 'translated') ('Daddy Fantasy World' 'Daddy Fantasy World' 'translated') ('Here Comes the Lady Chef' 'Here Comes the Lady Chef' 'translated') ('Different World Okonomiyaki Chain Store' 'Different World Okonomiyaki Chain Store' 'translated') ('Strange World Little Cooking Saint' 'Strange World Little Cooking Saint' 'translated') ('Fine Food Broadcastor' 'Fine Food Broadcaster' 'translated') ('Kitchen Xiuzhen' 'Kitchen Xiuzhen' 'translated') ('Reborn - Super Chef' 'Reborn - Super Chef' 'translated') ('The Taming of the Black Bellied Scholar' 'The Taming of the Black Bellied Scholar' 'translated') ('The Feast' 'The Feast' 'translated') ('PRC' 'PRC' 'translated') ('Loiterous' 'Loiterous' 'oel') ]<for_stmt>tagname,name,tl_type tagmap<block_start><if_stmt>tagname<in>item['tags']<block_start><return>buildReleaseMessageWithType(item name vol chp frag=frag postfix=postfix tl_type=tl_type)<block_end><block_end><return><false><block_end>
# -*- coding: utf-8 -*- """ Created on Sun Jun 12 07:17:17 2016 @author: ericgrimson """<line_sep>#import numpy as np <import_stmt>pylab<as>plt<line_sep>mySamples=[]<line_sep>myLinear=[]<line_sep>myQuadratic=[]<line_sep>myCubic=[]<line_sep>myExponential=[]<for_stmt>i range(0 30)<block_start>mySamples.append(i)<line_sep>myLinear.append(i)<line_sep>myQuadratic.append(i<power>2)<line_sep>myCubic.append(i<power>3)<line_sep>myExponential.append(1.5<power>i)<block_end># first trial plt.plot(mySamples myLinear)<line_sep>plt.plot(mySamples myQuadratic)<line_sep>plt.plot(mySamples myCubic)<line_sep>plt.plot(mySamples myExponential)<line_sep># second trial #plt.figure('lin') #plt.plot(mySamples, myLinear) #plt.figure('quad') #plt.plot(mySamples, myQuadratic) #plt.figure('cube') #plt.plot(mySamples, myCubic) #plt.figure('expo') #plt.plot(mySamples, myExponential) # third trial #plt.figure('lin') #plt.xlabel('sample points') #plt.ylabel('linear function') #plt.plot(mySamples, myLinear) #plt.figure('quad') #plt.plot(mySamples, myQuadratic) #plt.figure('cube') #plt.plot(mySamples, myCubic) #plt.figure('expo') #plt.plot(mySamples, myExponential) #plt.figure('quad') #plt.ylabel('quadratic function') # fourth trial #plt.figure('lin') #plt.plot(mySamples, myLinear) #plt.figure('quad') #plt.plot(mySamples, myQuadratic) #plt.figure('cube') #plt.plot(mySamples, myCubic) #plt.figure('expo') #plt.plot(mySamples, myExponential) #plt.figure('lin') #plt.title('Linear') #plt.figure('quad') #plt.title('Quadratic') #plt.figure('cube') #plt.title('Cubic') #plt.figure('expo') #plt.title('Exponential') # fifth trial #plt.figure('lin') #plt.clf() #plt.plot(mySamples, myLinear) #plt.figure('quad') #plt.clf() #plt.plot(mySamples, myQuadratic) #plt.figure('cube') #plt.clf() #plt.plot(mySamples, myCubic) #plt.figure('expo') #plt.clf() #plt.plot(mySamples, myExponential) #plt.figure('lin') #plt.title('Linear') #plt.figure('quad') #plt.title('Quadratic') #plt.figure('cube') #plt.title('Cubic') #plt.figure('expo') #plt.title('Exponential') # sixth trial #plt.figure('lin') #plt.clf() #plt.ylim(0,1000) #plt.plot(mySamples, myLinear) #plt.figure('quad') #plt.clf() #plt.ylim(0,1000) #plt.plot(mySamples, myQuadratic) #plt.figure('lin') #plt.title('Linear') #plt.figure('quad') #plt.title('Quadratic') # seventh trial #plt.figure('lin quad') #plt.clf() #plt.plot(mySamples, myLinear) #plt.plot(mySamples, myQuadratic) #plt.figure('cube exp') #plt.clf() #plt.plot(mySamples, myCubic) #plt.plot(mySamples, myExponential) #plt.figure('lin quad') #plt.title('Linear vs. Quadratic') #plt.figure('cube exp') #plt.title('Cubic vs. Exponential') # eighth trial #plt.figure('lin quad') #plt.clf() #plt.plot(mySamples, myLinear, label = 'linear') #plt.plot(mySamples, myQuadratic, label = 'quadratic') #plt.legend(loc = 'upper left') #plt.title('Linear vs. Quadratic') #plt.figure('cube exp') #plt.clf() #plt.plot(mySamples, myCubic, label = 'cubic') #plt.plot(mySamples, myExponential, label = 'exponential') #plt.legend() #plt.title('Cubic vs. Exponential') # ninth trial #plt.figure('lin quad') #plt.clf() #plt.plot(mySamples, myLinear, 'b-', label = 'linear') #plt.plot(mySamples, myQuadratic,'ro', label = 'quadratic') #plt.legend(loc = 'upper left') #plt.title('Linear vs. Quadratic') #plt.figure('cube exp') #plt.clf() #plt.plot(mySamples, myCubic, 'g^', label = 'cubic') #plt.plot(mySamples, myExponential, 'r--',label = 'exponential') #plt.legend() #plt.title('Cubic vs. 
Exponential') # tenth trial #plt.figure('lin quad') #plt.clf() #plt.plot(mySamples, myLinear, 'b-', label = 'linear', linewidth = 2.0) #plt.plot(mySamples, myQuadratic,'r', label = 'quadratic', linewidth = 3.0) #plt.legend(loc = 'upper left') #plt.title('Linear vs. Quadratic') #plt.figure('cube exp') #plt.clf() #plt.plot(mySamples, myCubic, 'g--', label = 'cubic', linewidth = 4.0) #plt.plot(mySamples, myExponential, 'r',label = 'exponential', linewidth = 5.0) #plt.legend() #plt.title('Cubic vs. Exponential') # eleventh trial #plt.figure('lin quad') #plt.clf() #plt.subplot(211) #plt.ylim(0, 900) #plt.plot(mySamples, myLinear, 'b-', label = 'linear', linewidth = 2.0) #plt.subplot(212) #plt.ylim(0, 900) #plt.plot(mySamples, myQuadratic,'r', label = 'quadratic', linewidth = 3.0) #plt.legend(loc = 'upper left') #plt.title('Linear vs. Quadratic') #plt.figure('cube exp') #plt.clf() #plt.subplot(121) #plt.ylim(0, 140000) #plt.plot(mySamples, myCubic, 'g--', label = 'cubic', linewidth = 4.0) #plt.subplot(122) #plt.ylim(0, 140000) #plt.plot(mySamples, myExponential, 'r',label = 'exponential', linewidth = 5.0) #plt.legend() #plt.title('Cubic vs. Exponential') # twelfth trial #plt.figure('cube exp log') #plt.clf() #plt.plot(mySamples, myCubic, 'g--', label = 'cubic', linewidth = 2.0) #plt.plot(mySamples, myExponential, 'r',label = 'exponential', linewidth = 4.0) #plt.yscale('log') #plt.legend() #plt.title('Cubic vs. Exponential') #plt.figure('cube exp linear') #plt.clf() #plt.plot(mySamples, myCubic, 'g--', label = 'cubic', linewidth = 2.0) #plt.plot(mySamples, myExponential, 'r',label = 'exponential', linewidth = 4.0) #plt.legend() #plt.title('Cubic vs. Exponential')
<import_stmt>re<import_from_stmt>nltk.tree ParentedTree<import_from_stmt>Giveme5W1H.extractor.candidate Candidate<import_from_stmt>Giveme5W1H.extractor.extractors.abs_extractor AbsExtractor<class_stmt>ActionExtractor(AbsExtractor)<block_start>""" The ActionExtractor tries to extract the main actor and his action. """<def_stmt>__init__ self weights:(float float float)=(0.9 0.095 0.005) minimal_length_of_tokens:int=3<block_start>self._minimal_length_of_tokens=minimal_length_of_tokens<line_sep># weights used in the candidate evaluation: # (position, frequency, named entity) self.weights=weights<block_end><def_stmt>_extract_candidates self document<block_start>""" Extracts possible agents/actions pairs from a given document. Candidates are chosen if they belong to an coref-chain and is part of a NP-VP-NP pattern :param document: The Document to be analyzed. :type document: Document :return: A List of Tuples containing all agents, actions and their position in the document. """<line_sep># retrieve results from preprocessing corefs=document.get_corefs()<line_sep>trees=document.get_trees()<line_sep>candidates=[]<for_stmt>cluster corefs<block_start><for_stmt>mention corefs[cluster]# Check if mention is the subject of the sentence by matching the NP-VP-NP pattern. # # "One common way of defining the subject of a sentence S in English is as the noun phrase that is the # child of S and the sibling of VP" (http://www.nltk.org/book/ch08.html) <block_start><for_stmt>pattern self._evaluate_tree(trees[mention['sentNum']-1])<block_start>np_string=''.join([p[0]['nlpToken']['originalText']<for>p pattern[0]])<if_stmt>re.sub(r'\s+' '' mention['text'])<in>np_string<block_start>candidate_object=Candidate()<line_sep>candidate_object.set_sentence_index(pattern[2])<line_sep>candidate_object.set_raw([pattern[0] pattern[1] cluster mention['id']])<line_sep>candidates.append(candidate_object)<block_end><block_end><block_end><block_end>document.set_candidates(self.get_id() candidates)<block_end><def_stmt>_evaluate_tree self sentence_root<block_start>""" Examines the passed syntactic tree to determine if it matches a NP-VP-NP pattern This is executed per sentence :param sentence_root: A tree to be analyzed :type sentence_root: ParentedTree :return: A list of Tuples containing the agent and the action described in the sentence. """<line_sep>candidates=[]<for_stmt>subtree sentence_root.subtrees()<block_start><if_stmt>subtree.label()<eq>'NP'<and>subtree.parent().label()<eq>'S'# Skip NPs containing a VP <block_start><if_stmt>any(list(subtree.subtrees(filter=<lambda>t:t.label()<eq>'VP')))<block_start><continue><block_end># check siblings for VP sibling=subtree.right_sibling()<while_stmt>sibling<is><not><none><block_start><if_stmt>sibling.label()<eq>'VP'# this gives a tuple to find the way from sentence to leaf # tree_position = subtree.leaf_treeposition(0) <block_start>entry=[subtree.pos() self.cut_what(sibling self._minimal_length_of_tokens).pos() sentence_root.stanfordCoreNLPResult['index']]<line_sep>candidates.append(entry)<line_sep><break><block_end>sibling=sibling.right_sibling()<block_end><block_end><block_end><return>candidates<block_end><def_stmt>_evaluate_candidates self document<block_start>""" Calculate a confidence score based on number of mentions, position in text and entailment of named entities for extracted candidates. :param document: The parsed document :type document: Document :param candidates: Extracted candidates to evaluate. 
:type candidates:[([(String,String)], ([(String,String)])] :return: A list of evaluated and ranked candidates """<line_sep>ranked_candidates=[]<line_sep>doc_len=document.get_len()<line_sep>doc_ner=document.get_ner()<line_sep>doc_coref=document.get_corefs()<if_stmt>any(doc_coref.values())# get length of longest coref chain for normalization <block_start>max_len=len(max(doc_coref.values() key=len))<block_end><else_stmt><block_start>max_len=1<block_end><for_stmt>candidate document.get_candidates(self.get_id())<block_start>candidateParts=candidate.get_raw()<line_sep>verb=candidateParts[1][0][0]['nlpToken']['originalText'].lower()<line_sep># VP beginning with say/said often contain no relevant action and are therefor skipped. <if_stmt>verb.startswith('say')<or>verb.startswith('said')<block_start><continue><block_end>coref_chain=doc_coref[candidateParts[2]]<line_sep># first parameter used for ranking is the number of mentions, we use the length of the coref chain score=(len(coref_chain)/max_len)<times>self.weights[1]<line_sep>representative=<none><line_sep>contains_ne=<false><line_sep>mention_type=''<for_stmt>mention coref_chain<block_start><if_stmt>mention['id']<eq>candidateParts[3]<block_start>mention_type=mention['type']<if_stmt>mention['sentNum']<l>doc_len# The position (sentence number) is another important parameter for scoring. # This is inspired by the inverted pyramid. <block_start>score<augadd>((doc_len-mention['sentNum']+1)/doc_len)<times>self.weights[0]<block_end><block_end><if_stmt>mention['isRepresentativeMention']# The representative name for this chain has been found. <block_start>tmp=document._sentences[mention['sentNum']-1]['tokens'][mention['headIndex']-1]<line_sep>representative=((tmp['originalText'] tmp) tmp['pos'])<try_stmt># these dose`t work, if some special characters are present <block_start><if_stmt>representative[-1][1]<eq>'POS'<block_start>representative=representative[:-1]<block_end><block_end><except_stmt>IndexError<block_start><pass><block_end><block_end><if_stmt><not>contains_ne# If the current mention doesn't contain a named entity, check the other members of the chain <block_start><for_stmt>token doc_ner[mention['sentNum']-1][mention['headIndex']-1:mention['endIndex']-1]<block_start><if_stmt>token[1]<in>['PERSON' 'ORGANIZATION' 'LOCATION']<block_start>contains_ne=<true><line_sep><break><block_end><block_end><block_end><block_end><if_stmt>contains_ne# the last important parameter is the entailment of a named entity <block_start>score<augadd>self.weights[2]<block_end><if_stmt>score<g>0# normalize the scoring <block_start>score<augdiv>sum(self.weights)<block_end><if_stmt>mention_type<eq>'PRONOMINAL'# use representing mention if the agent is only a pronoun <block_start>rp_format_fix=[(({'nlpToken':representative[0][1]} representative[0][1]['pos']))]<line_sep>ranked_candidates.append((rp_format_fix candidateParts[1] score candidate.get_sentence_index()))<block_end><else_stmt><block_start>ranked_candidates.append((candidateParts[0] candidateParts[1] score candidate.get_sentence_index()))<block_end><block_end># split results who=[(c[0] c[2] c[3])<for>c ranked_candidates]<line_sep>what=[(c[1] c[2] c[3])<for>c ranked_candidates]<line_sep># Transform who to object oriented list o_who=self._filterAndConvertToObjectOrientedList(who)<line_sep># Filter by text o_who_clean=self._filter_candidate_dublicates(o_who)<line_sep>document.set_answer('who' o_who_clean)<line_sep># Transform who to object oriented list o_what=self._filterAndConvertToObjectOrientedList(what)<line_sep># 
Filter by text o_what_clean=self._filter_candidate_dublicates(o_what)<line_sep>document.set_answer('what' o_what_clean)<block_end><def_stmt>_filterAndConvertToObjectOrientedList self list<block_start>max=0<line_sep>candidates=self._filter_duplicates(list)<for_stmt>candidate candidates<block_start><if_stmt>candidate.get_score()<g>max<block_start>max=candidate.get_score()<block_end><block_end># normalize <for_stmt>candidate candidates<block_start>score=candidate.get_score()<line_sep>candidate.set_score(score/max)<block_end># sort candidates.sort(key=<lambda>x:x.get_score() reverse=<true>)<line_sep><return>candidates<block_end><def_stmt>cut_what self tree min_length=0 length=0<block_start>""" This function is used to shorten verbphrases, it recursively traverses the parse tree depth first. :param tree: Tree to cut :type tree: ParentedTree :param min_length: Desired minimal length of tokens :type min_length: Integer :param length: Number of tokens already included by the upper level function :type length: Integer :return: A subtree """<if_stmt>type(tree[0])<is><not>ParentedTree# we found a leaf <block_start><return>ParentedTree(tree.label() [tree[0]])<block_end><else_stmt><block_start>children=[]<for_stmt>sub tree<block_start>child=self.cut_what(sub min_length length)<line_sep>length<augadd>len(child.leaves())<line_sep>children.append(child)<if_stmt>sub.label()<eq>'NP'<block_start>sibling=sub.right_sibling()<if_stmt>length<l>min_length<and>sibling<is><not><none><and>sibling.label()<eq>'PP'<block_start>children.append(sibling.copy(deep=<true>))<block_end><break><block_end><block_end><return>ParentedTree(tree.label() children)<block_end><block_end><block_end>
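The agent/action extraction above hinges on finding an NP that is a child of S with a VP as its right sibling; a hedged toy illustration using nltk only (the sentence and its bracketing are made up here, not produced by the extractor's CoreNLP pipeline):

from nltk.tree import ParentedTree

tree = ParentedTree.fromstring(
    "(S (NP (DT The) (NN court)) (VP (VBD rejected) (NP (DT the) (NN appeal))))")

for subtree in tree.subtrees():
    # same test as _evaluate_tree: an NP directly under S ...
    if subtree.label() == 'NP' and subtree.parent().label() == 'S':
        sibling = subtree.right_sibling()
        # ... with a VP to its right yields an (agent, action) candidate
        if sibling is not None and sibling.label() == 'VP':
            print(subtree.pos(), '->', sibling.pos())
# [('The', 'DT'), ('court', 'NN')] -> [('rejected', 'VBD'), ('the', 'DT'), ('appeal', 'NN')]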
""" settings """<import_from_future_stmt> absolute_import division print_function unicode_literals<import_stmt>json<import_stmt>os<import_from_stmt>PySide QtCore<import_stmt>logging<import_from_stmt>mcedit2.util directories<line_sep>log=logging.getLogger(__name__)<line_sep>_settings=<none><def_stmt>Settings <block_start><global>_settings<if_stmt>_settings<is><none><block_start>_settings=MCESettings()<block_end><return>_settings<block_end><class_stmt>MCESettingsOption(QtCore.QObject)<block_start><def_stmt>__init__ self settings key valueType=<none> default=<none> *args **kwargs<block_start>super(MCESettingsOption self).__init__(*args **kwargs)<line_sep>self.settings=settings<line_sep>self.key=key<line_sep>self.valueType=valueType<line_sep>self.default=default<block_end><def_stmt>value self default=<none><block_start><if_stmt>default<is><none><block_start>default=self.default<block_end><if_stmt>self.valueType<eq>"json"<block_start>value=self.settings.jsonValue(self.key default)<block_end><else_stmt><block_start>value=self.settings.value(self.key default)<if_stmt>self.valueType<is>bool<block_start><if_stmt>isinstance(value basestring)<block_start>value=value.lower()<eq>"true"<block_end><block_end><elif_stmt>self.valueType<block_start>value=self.valueType(value)<block_end><block_end><return>value<block_end><def_stmt>setValue self value<block_start><if_stmt>self.valueType<eq>"json"<block_start><return>self.settings.setJsonValue(self.key value)<block_end><else_stmt><block_start><return>self.settings.setValue(self.key value)<block_end><block_end>valueChanged=QtCore.Signal(object)<def_stmt>jsonValue self default=<none><block_start><return>self.settings.jsonValue(self.key default)<block_end><def_stmt>setJsonValue self value<block_start><return>self.settings.setJsonValue(self.key value)<block_end><def_stmt>connectAndCall self callback<block_start>""" Connect `callback` to this option's `valueChanged` signal, then call it with the value of this option. :param callback: :type callback: :return: :rtype: """<line_sep>self.valueChanged.connect(callback)<line_sep>callback(self.value())<block_end><block_end><class_stmt>MCESettingsNamespace(object)<block_start><def_stmt>__init__ self rootSettings prefix<block_start>self.rootSettings=rootSettings<if_stmt><not>prefix.endswith("/")<block_start>prefix=prefix+"/"<block_end>self.prefix=prefix<block_end><def_stmt>getOption self key type=<none> default=<none><block_start>""" Parameters ---------- key: str type: bool | int | float | str default: Any Returns ------- option: MCESettingsOption """<line_sep><return>self.rootSettings.getOption(self.prefix+key type default)<block_end><block_end><class_stmt>MCESettings(QtCore.QSettings)<block_start><def_stmt>__init__ self *args **kwargs<block_start>""" Subclass of QSettings. Adds a `getOption` method which returns an individual option as its own object. Adds one signal for each setting, emitted when its value is changed. Also provides json encoded methods to work around a bug in PySide. QSettings, under PySide, does not reliably infer that a settings value should be read as a QStringList. 
jsonValue and setJsonValue methods are provided that will automatically encode/decode the given value to or from json :rtype: MCESettings """<line_sep>dataDir=directories.getUserFilesDirectory()<line_sep>iniPath=os.path.join(dataDir "mcedit2.ini")<line_sep>log.info("Loading app settings from %s" iniPath)<line_sep>super(MCESettings self).__init__(iniPath QtCore.QSettings.IniFormat *args **kwargs)<line_sep>self.options={}<line_sep>#= defaultdict(lambda: QtCore.Signal(object)) <block_end><def_stmt>getNamespace self prefix<block_start>""" Return an MCESettingsNamespace object which can be used to access settings whose keys are all prefixed by the given prefix :param prefix: :type prefix: :return: :rtype: """<line_sep><return>MCESettingsNamespace(self prefix)<block_end><def_stmt>getSignal self key<block_start>""" Returns a signal to be triggered when the setting `key` is changed. The signal handler receives one argument: the setting's new value. :param key: Settings key :type key: str :rtype: None """<line_sep><return>self.getOption(key).valueChanged<block_end><def_stmt>emitSignal self key val<block_start>option=self.options.get(key)<if_stmt>option<block_start>option.valueChanged.emit(val)<block_end><block_end><def_stmt>setValue self key val<block_start>old=self.value(key)<if_stmt>old<ne>val<block_start>log.info("Setting %r changed to (%.40r)(...) (was (%.40r)(...))" key val old)<line_sep>super(MCESettings self).setValue(key val)<line_sep>self.emitSignal(key val)<block_end><block_end><def_stmt>jsonValue self key default=<none><block_start>value=self.value(key <none>)<if_stmt>value<is><not><none><block_start><try_stmt><block_start><return>json.loads(value)<block_end><except_stmt>ValueError<as>e# No JSON object could be decoded <block_start>log.error("Failed to decode setting %s: %s" key e)<line_sep><return>default<block_end><block_end><else_stmt><block_start><return>default<block_end><block_end><def_stmt>setJsonValue self key value<block_start>self.setValue(key json.dumps(value))<block_end><def_stmt>getOption self key type=<none> default=<none><block_start>""" Return an object that represents the setting at 'key'. The object may be used to get and set the value and get the value's valueChanged signal. Among other uses, the object's setValue attribute may be connected to the valueChanged signal of an input field. :param key: :type key: :return: :rtype: """<line_sep>option=self.options.get(key)<if_stmt>option<block_start><return>option<block_end>option=MCESettingsOption(self key type default)<line_sep>self.options[key]=option<line_sep><return>option<block_end><block_end>
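How the option objects above are typically consumed — a sketch only; the key names ('editor/autosave', 'mapview', 'drawGrid') are invented for illustration, and it assumes the MCEdit2 runtime (PySide and the user files directory) is importable:

settings = Settings()

# typed option with a default; setValue only emits valueChanged when the value actually changes
autosave = settings.getOption('editor/autosave', bool, True)
print(autosave.value())
autosave.setValue(False)

# a namespace just prefixes keys for you
view = settings.getNamespace('mapview')
draw_grid = view.getOption('drawGrid', bool, default=True)

# connectAndCall wires the handler and immediately calls it with the current value
draw_grid.connectAndCall(lambda enabled: print('grid enabled:', enabled))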
<import_stmt>subprocess<import_stmt>requests<import_stmt>tempfile<import_stmt>os<import_stmt>logging<import_from_stmt>asv.plugins.conda _find_conda Conda<import_from_stmt>asv.console log<import_from_stmt>asv util<line_sep>logging.getLogger("requests").setLevel(logging.WARNING)<line_sep>OGGM_CONDA_ENV_URL=("https://raw.githubusercontent.com/OGGM/"<concat>"OGGM-dependency-list/master/Linux-64/{0}")<line_sep>OGGM_CONDA_ENVS={"36":"oggmdev-1.2.0.202002022248_20200202_py36.yml" "37":"oggmdev-1.2.0.202002022248_20200202_py37.yml" }<class_stmt>OggmVirtualenv(Conda)<block_start>tool_name="oggm_conda"<def_stmt>_setup self<block_start>log.info("Creating oggm conda environment for {0}".format(self.name))<line_sep>env_file=tempfile.NamedTemporaryFile(mode="w" delete=<false> suffix=".yml")<try_stmt><block_start>pyver=str(self._python).replace("." "")[:2]<line_sep>oggm_env=OGGM_CONDA_ENVS[pyver]<line_sep>req=requests.get(OGGM_CONDA_ENV_URL.format(oggm_env))<line_sep>req.raise_for_status()<for_stmt>line req.text.splitlines()<block_start><if_stmt>line.startswith("prefix:")<block_start><continue><block_end><elif_stmt>line.startswith("name:")<block_start>env_file.write("name: {0}\n".format(self.name))<block_end><else_stmt><block_start>env_file.write(line+"\n")<block_end><block_end>env_file.close()<line_sep>self._conda_channels=["conda-forge" "defaults"]<line_sep>self._conda_environment_file=env_file.name<line_sep><return>super()._setup()<block_end><except_stmt>Exception<as>exc<block_start><if_stmt>os.path.isfile(env_file.name)<block_start><with_stmt>open(env_file.name "r")<as>f<block_start>text=f.read()<block_end>log.info("oggm conda env create failed: in {} with:\n{}".format(self._path text))<block_end><raise><block_end><finally_stmt><block_start>os.unlink(env_file.name)<block_end><block_end><block_end>
# -*- coding: utf-8 -*- # Copyright (c) 2018 Tigera, Inc. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. <import_stmt>datetime<line_sep>ZERO=datetime.timedelta(0)<class_stmt>UTC(datetime.tzinfo)<block_start><def_stmt>utcoffset self dt<block_start><return>ZERO<block_end><def_stmt>tzname self dt<block_start><return>"UTC"<block_end><def_stmt>dst self dt<block_start><return>ZERO<block_end><block_end>utc=UTC()<def_stmt>timestamp_now <block_start>utc_now=datetime.datetime.now(utc)<line_sep><return>utc_now.strftime('%Y-%m-%dT%H:%M:%SZ')<line_sep># e.g. 2015-05-19T20:32:12Z <block_end>
# -*- coding: utf-8 -*- """Preview Code for 'Inf-Net: Automatic COVID-19 Lung Infection Segmentation from CT Scans' submit to Transactions on Medical Imaging, 2020. First Version: Created on 2020-05-13 (@author: <NAME>) """<line_sep># ---- base lib ----- <import_stmt>os<import_stmt>argparse<import_from_stmt>datetime datetime<import_stmt>cv2<import_stmt>numpy<as>np<import_stmt>random<import_stmt>shutil<import_from_stmt>scipy misc<line_sep># ---- torch lib ---- <import_stmt>torch<import_from_stmt>torch.autograd Variable<import_stmt>torch.nn.functional<as>F<line_sep># ---- custom lib ---- # NOTES: Here we nly provide Res2Net, you can also replace it with other backbones <import_from_stmt>Code.model_lung_infection.InfNet_Res2Net Inf_Net<as>Network<import_from_stmt>Code.utils.dataloader_LungInf get_loader test_dataset<import_from_stmt>Code.utils.utils clip_gradient adjust_lr AvgMeter<import_from_stmt>Code.utils.format_conversion binary2edge<def_stmt>joint_loss pred mask<block_start>weit=1+5<times>torch.abs(F.avg_pool2d(mask kernel_size=31 stride=1 padding=15)-mask)<line_sep>wbce=F.binary_cross_entropy_with_logits(pred mask reduce='none')<line_sep>wbce=(weit<times>wbce).sum(dim=(2 3))/weit.sum(dim=(2 3))<line_sep>pred=torch.sigmoid(pred)<line_sep>inter=((pred<times>mask)<times>weit).sum(dim=(2 3))<line_sep>union=((pred+mask)<times>weit).sum(dim=(2 3))<line_sep>wiou=1-(inter+1)/(union-inter+1)<line_sep><return>(wbce+wiou).mean()<block_end><def_stmt>trainer train_loader model optimizer epoch opt total_step<block_start>model.train()<line_sep># ---- multi-scale training ---- size_rates=[0.75 1 1.25]# replace your desired scale loss_record1,loss_record2,loss_record3,loss_record4,loss_record5=AvgMeter() AvgMeter() AvgMeter() AvgMeter() AvgMeter()<for_stmt>i,pack enumerate(train_loader start=1)<block_start><for_stmt>rate size_rates<block_start>optimizer.zero_grad()<line_sep># ---- data prepare ---- images,gts,edges=pack<line_sep>images=Variable(images).cuda()<line_sep>gts=Variable(gts).cuda()<line_sep>edges=Variable(edges).cuda()<line_sep># ---- rescale ---- trainsize=int(round(opt.trainsize<times>rate/32)<times>32)<if_stmt>rate<ne>1<block_start>images=F.upsample(images size=(trainsize trainsize) mode='bilinear' align_corners=<true>)<line_sep>gts=F.upsample(gts size=(trainsize trainsize) mode='bilinear' align_corners=<true>)<line_sep>edges=F.upsample(edges size=(trainsize trainsize) mode='bilinear' align_corners=<true>)<block_end># ---- forward ---- lateral_map_5,lateral_map_4,lateral_map_3,lateral_map_2,lateral_edge=model(images)<line_sep># ---- loss function ---- loss5=joint_loss(lateral_map_5 gts)<line_sep>loss4=joint_loss(lateral_map_4 gts)<line_sep>loss3=joint_loss(lateral_map_3 gts)<line_sep>loss2=joint_loss(lateral_map_2 gts)<line_sep>loss1=torch.nn.BCEWithLogitsLoss()(lateral_edge edges)<line_sep>loss=loss1+loss2+loss3+loss4+loss5<line_sep># ---- backward ---- loss.backward()<line_sep>clip_gradient(optimizer opt.clip)<line_sep>optimizer.step()<line_sep># ---- recording loss ---- <if_stmt>rate<eq>1<block_start>loss_record1.update(loss1.data opt.batchsize)<line_sep>loss_record2.update(loss2.data opt.batchsize)<line_sep>loss_record3.update(loss3.data opt.batchsize)<line_sep>loss_record4.update(loss4.data opt.batchsize)<line_sep>loss_record5.update(loss5.data opt.batchsize)<block_end><block_end># ---- train visualization ---- <if_stmt>i%5<eq>0<or>i<eq>total_step<block_start>print('{} Epoch [{:03d}/{:03d}], Step [{:04d}/{:04d}], [lateral-edge: {:.4f}, '<concat>'lateral-2: {:.4f}, lateral-3: {:0.4f}, 
lateral-4: {:0.4f}, lateral-5: {:0.4f}]'.format(datetime.now() epoch opt.epoch i total_step loss_record1.show() loss_record2.show() loss_record3.show() loss_record4.show() loss_record5.show()))<block_end><block_end># ---- save model_lung_infection ---- save_path='Snapshots/{}/'.format(opt.train_save)<line_sep>os.makedirs(save_path exist_ok=<true>)<if_stmt>(epoch+1)%10<eq>0<block_start>torch.save(model.state_dict() save_path+'Semi-Inf-Net-%d.pth'%(epoch+1))<line_sep>print('[Saving Snapshot:]' save_path+'Semi-Inf-Net-%d.pth'%(epoch+1))<block_end><block_end><def_stmt>train_module _train_path _train_save _resume_snapshot<block_start>parser=argparse.ArgumentParser()<line_sep>parser.add_argument('--epoch' type=int default=10 help='epoch number')<line_sep>parser.add_argument('--lr' type=float default=3e-4 help='learning rate')<line_sep>parser.add_argument('--batchsize' type=int default=16 help='training batch size')<line_sep>parser.add_argument('--trainsize' type=int default=352 help='training dataset size')<line_sep>parser.add_argument('--clip' type=float default=0.5 help='gradient clipping margin')<line_sep>parser.add_argument('--decay_rate' type=float default=0.1 help='decay rate of learning rate')<line_sep>parser.add_argument('--decay_epoch' type=int default=50 help='every n epochs decay learning rate')<line_sep>parser.add_argument('--train_path' type=str default=_train_path)<line_sep>parser.add_argument('--train_save' type=str default=_train_save)<line_sep>parser.add_argument('--resume_snapshot' type=str default=_resume_snapshot)<line_sep>opt=parser.parse_args()<line_sep># ---- build models ---- torch.cuda.set_device(0)<line_sep>model=Network(channel=32 n_class=1).cuda()<line_sep>model.load_state_dict(torch.load(opt.resume_snapshot))<line_sep>params=model.parameters()<line_sep>optimizer=torch.optim.Adam(params opt.lr)<line_sep>image_root='{}/Imgs/'.format(opt.train_path)<line_sep>gt_root='{}/GT/'.format(opt.train_path)<line_sep>edge_root='{}/Edge/'.format(opt.train_path)<line_sep>train_loader=get_loader(image_root gt_root edge_root batchsize=opt.batchsize trainsize=opt.trainsize)<line_sep>total_step=len(train_loader)<line_sep>print("#"<times>20 "Start Training" "#"<times>20)<for_stmt>epoch range(1 opt.epoch)<block_start>adjust_lr(optimizer opt.lr epoch opt.decay_rate opt.decay_epoch)<line_sep>trainer(train_loader=train_loader model=model optimizer=optimizer epoch=epoch opt=opt total_step=total_step)<block_end><block_end><def_stmt>inference_module _data_path _save_path _pth_path<block_start>model=Network(channel=32 n_class=1)<line_sep>model.load_state_dict(torch.load(_pth_path))<line_sep>model.cuda()<line_sep>model.eval()<line_sep>os.makedirs(_save_path exist_ok=<true>)<line_sep># FIXME image_root='{}/'.format(_data_path)<line_sep># gt_root = '{}/mask/'.format(data_path) test_loader=test_dataset(image_root image_root 352)<for_stmt>i range(test_loader.size)<block_start>image,name=test_loader.load_data()<line_sep>#gt = np.asarray(gt, np.float32) #gt /= (gt.max() + 1e-8) image=image.cuda()<line_sep>lateral_map_5,lateral_map_4,lateral_map_3,lateral_map_2,lateral_edge=model(image)<line_sep>res=lateral_map_2# final segmentation #res = F.upsample(res, size=gt.shape, mode='bilinear', align_corners=False) res=res.sigmoid().data.cpu().numpy().squeeze()<line_sep>res=(res-res.min())/(res.max()-res.min()+1e-8)<line_sep>misc.imsave(_save_path+'/'+name res)<block_end><block_end><def_stmt>movefiles _src_dir _dst_dir<block_start>os.makedirs(_dst_dir exist_ok=<true>)<for_stmt>file_name 
os.listdir(_src_dir)<block_start>shutil.copyfile(os.path.join(_src_dir file_name) os.path.join(_dst_dir file_name))<block_end><block_end><if_stmt>__name__<eq>'__main__'<block_start>slices='./Dataset/TrainingSet/LungInfection-Train/Pseudo-label/DataPrepare'<line_sep>slices_dir=slices+'/Imgs_split'<line_sep>slices_pred_seg_dir=slices+'/pred_seg_split'<line_sep>slices_pred_edge_dir=slices+'/pred_edge_split'<line_sep># NOTES: Hybrid-label = Doctor-label + Pseudo-label semi='./Dataset/TrainingSet/LungInfection-Train/Pseudo-label/DataPrepare/Hybrid-label'<line_sep>semi_img=semi+'/Imgs'<line_sep>semi_mask=semi+'/GT'<line_sep>semi_edge=semi+'/Edge'<if_stmt>(<not>os.path.exists(semi_img))<or>(len(os.listdir(semi_img))<ne>50)<block_start>shutil.copytree('Dataset/TrainingSet/LungInfection-Train/Doctor-label/Imgs' semi_img)<line_sep>shutil.copytree('Dataset/TrainingSet/LungInfection-Train/Doctor-label/GT' semi_mask)<line_sep>shutil.copytree('Dataset/TrainingSet/LungInfection-Train/Doctor-label/Edge' semi_edge)<line_sep>print('Copy done')<block_end><else_stmt><block_start>print('Check done')<block_end>slices_lst=os.listdir(slices_dir)<line_sep>random.shuffle(slices_lst)<line_sep>print("#"<times>20 "\nStart Training (Inf-Net)\nThis code is written for 'Inf-Net: Automatic COVID-19 Lung "<concat>"Infection Segmentation from CT Scans', 2020, arXiv.\n"<concat>"----\nPlease cite the paper if you use this code and dataset. "<concat>"And any questions feel free to contact me "<concat>"via E-mail (<EMAIL>)\n----\n" "#"<times>20)<for_stmt>i,split_name enumerate(slices_lst)<block_start>print('\n[INFO] {} ({}/320)'.format(split_name i))<line_sep># ---- inference ---- test_aux_dir=os.path.join(slices_dir split_name)<line_sep>test_aux_save_dir=os.path.join(slices_pred_seg_dir split_name)<if_stmt>i<eq>0<block_start>snapshot_dir='./Snapshots/save_weights/Inf-Net/Inf-Net-100.pth'<block_end><else_stmt><block_start>snapshot_dir='./Snapshots/semi_training/Semi-Inf-Net_{}/Semi-Inf-Net-10.pth'.format(i-1)<block_end>inference_module(_data_path=test_aux_dir _save_path=test_aux_save_dir _pth_path=snapshot_dir)<line_sep>os.makedirs(os.path.join(slices_pred_edge_dir split_name) exist_ok=<true>)<for_stmt>pred_name os.listdir(test_aux_save_dir)<block_start>edge_tmp=binary2edge(os.path.join(test_aux_save_dir pred_name))<line_sep>cv2.imwrite(os.path.join(slices_pred_edge_dir split_name pred_name) edge_tmp)<block_end># ---- move generation ---- movefiles(test_aux_dir semi_img)<line_sep>movefiles(test_aux_save_dir semi_mask)<line_sep>movefiles(os.path.join(slices_pred_edge_dir split_name) semi_edge)<line_sep># ---- training ---- train_module(_train_path=semi _train_save='semi_training/Semi-Inf-Net_{}'.format(i) _resume_snapshot=snapshot_dir)<block_end># move img/pseudo-label into `./Dataset/TrainingSet/LungInfection-Train/Pseudo-label` shutil.copytree(semi_img './Dataset/TrainingSet/LungInfection-Train/Pseudo-label/Imgs')<line_sep>shutil.copytree(semi_mask './Dataset/TrainingSet/LungInfection-Train/Pseudo-label/GT')<line_sep>shutil.copytree(semi_edge 'Dataset/TrainingSet/LungInfection-Train/Pseudo-label/Edge')<line_sep>print('Pseudo Label Generated!')<block_end>
"""Test environment wrapper."""<import_stmt>gym<class_stmt>AutoStopEnv(gym.Wrapper)<block_start>"""Environment wrapper that stops episode at step max_episode_length."""<def_stmt>__init__ self env=<none> env_name='' max_episode_length=100<block_start>"""Create an AutoStepEnv. Args: env (gym.Env): Environment to be wrapped. env_name (str): Name of the environment. max_episode_length (int): Maximum length of the episode. """<if_stmt>env_name<block_start>super().__init__(gym.make(env_name))<block_end><else_stmt><block_start>super().__init__(env)<block_end>self._episode_step=0<line_sep>self._max_episode_length=max_episode_length<block_end><def_stmt>step self action<block_start>"""Step the wrapped environment. Args: action (np.ndarray): the action. Returns: np.ndarray: Next observation float: Reward bool: Termination signal dict: Environment information """<line_sep>self._episode_step<augadd>1<line_sep>next_obs,reward,done,info=self.env.step(action)<if_stmt>self._episode_step<eq>self._max_episode_length<block_start>done=<true><line_sep>self._episode_step=0<block_end><return>next_obs reward done info<block_end><def_stmt>reset self **kwargs<block_start>"""Reset the wrapped environment. Args: **kwargs: Keyword arguments. Returns: np.ndarray: Initial observation. """<line_sep><return>self.env.reset(**kwargs)<block_end><block_end>
# Copyright (c) 2015 The Johns Hopkins University/Applied Physics Laboratory # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. <import_stmt>pytest<import_from_stmt>kmip.services kmip_client<import_from_stmt>kmip.pie client<as>pclient<def_stmt>pytest_addoption parser<block_start>parser.addoption("--config" action="store" default="client" help="Config file section name for client configuration settings")<block_end>@pytest.fixture(scope="class")<def_stmt>client request<block_start>config=request.config.getoption("--config")<line_sep>client=kmip_client.KMIPProxy(config=config)<line_sep>client.open()<def_stmt>finalize <block_start>client.close()<block_end>request.addfinalizer(finalize)<line_sep>request.cls.client=client<block_end>@pytest.fixture(scope="class")<def_stmt>simple request<block_start>config=request.config.getoption("--config")<line_sep>client=pclient.ProxyKmipClient(config=config)<line_sep>client.open()<def_stmt>finalize <block_start>client.close()<block_end>request.addfinalizer(finalize)<line_sep>request.cls.client=client<block_end>
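These fixtures attach an opened client to the test class, so downstream tests consume them roughly as in the hedged sketch below (the test name and key parameters are hypothetical; it needs a reachable KMIP server and is run with e.g. pytest --config=client):

import pytest
from kmip.core import enums

@pytest.mark.usefixtures('simple')
class TestSymmetricKeyLifecycle:
    def test_create_and_destroy(self):
        # self.client is the ProxyKmipClient that the 'simple' fixture stored on request.cls
        uid = self.client.create(enums.CryptographicAlgorithm.AES, 256)
        self.client.destroy(uid)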
""" module level docstring is not included """<line_sep># this line is not code # `tty` was chosen for stability over python versions (so we don't get diffrent results # on different computers, that has different versions of Python). # # According to https://github.com/python/cpython/tree/master/Lib (at 2021-04-23) `tty` # was last changed in 2001, so chances of this being changed in the future are slim. <import_stmt>tty<line_sep>s=""" all these lines are code """<line_sep>print(s)<def_stmt>func <block_start>""" this string is a doc-string. Although the module-level docstring is not considered code, this one apparently is ยฏ\_(ใƒ„)_/ยฏ """<line_sep><pass><block_end>
<import_stmt>torch<import_from_stmt>torch nn Tensor<class_stmt>ConvModule(nn.Sequential)<block_start><def_stmt>__init__ self c1 c2 k s=1 p=0 d=1 g=1<block_start>super().__init__(nn.Conv2d(c1 c2 k s p d g bias=<false>) nn.BatchNorm2d(c2) nn.ReLU(<true>))<block_end><block_end>
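ConvModule above is just Conv2d + BatchNorm2d + ReLU with bias disabled; a quick shape check with arbitrarily chosen sizes:

import torch

m = ConvModule(3, 16, k=3, s=2, p=1)
x = torch.randn(1, 3, 64, 64)
print(m(x).shape)  # torch.Size([1, 16, 32, 32]) -- stride 2 halves H and W when k=3, p=1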
######################################### # AndroidSpeechRecognition.py # more info @: http://myrobotlab.org/service/AndroidSpeechRecognition ######################################### # start the service androidspeechrecognition=Runtime.start("androidspeechrecognition" "AndroidSpeechRecognition")<line_sep># start mouth marySpeech=Runtime.start("marySpeech" "MarySpeech")<line_sep># shutdown microphone if robot speaking androidspeechrecognition.attach(marySpeech)<line_sep># auto rearm microphone androidspeechrecognition.setAutoListen(<true>)<line_sep>androidspeechrecognition.addCommand("turn on the light" "python" "lightOn")<line_sep>androidspeechrecognition.addCommand("turn off the light" "python" "lightOff")<def_stmt>lightOn <block_start>marySpeech.speakBlocking("light is on")<block_end><def_stmt>lightOff <block_start>marySpeech.speakBlocking("light is off")<block_end>
"""CNN Module."""<import_stmt>warnings<import_stmt>akro<import_stmt>numpy<as>np<import_stmt>torch<import_from_stmt>torch nn<import_from_stmt>garage InOutSpec<import_from_stmt>garage.torch expand_var NonLinearity output_height_2d output_width_2d <line_sep># pytorch v1.6 issue, see https://github.com/pytorch/pytorch/issues/42305 # pylint: disable=abstract-method <class_stmt>CNNModule(nn.Module)<block_start>"""Convolutional neural network (CNN) model in pytorch. Args: spec (garage.InOutSpec): Specification of inputs and outputs. The input should be in 'NCHW' format: [batch_size, channel, height, width]. Will print a warning if the channel size is not 1 or 3. If output_space is specified, then a final linear layer will be inserted to map to that dimensionality. If output_space is None, it will be filled in with the computed output space. image_format (str): Either 'NCHW' or 'NHWC'. Should match the input specification. Gym uses NHWC by default, but PyTorch uses NCHW by default. hidden_channels (tuple[int]): Number of output channels for CNN. For example, (3, 32) means there are two convolutional layers. The filter for the first conv layer outputs 3 channels and the second one outputs 32 channels. kernel_sizes (tuple[int]): Dimension of the conv filters. For example, (3, 5) means there are two convolutional layers. The filter for first layer is of dimension (3 x 3) and the second one is of dimension (5 x 5). strides (tuple[int]): The stride of the sliding window. For example, (1, 2) means there are two convolutional layers. The stride of the filter for first layer is 1 and that of the second layer is 2. paddings (tuple[int]): Amount of zero-padding added to both sides of the input of a conv layer. padding_mode (str): The type of padding algorithm to use, i.e. 'constant', 'reflect', 'replicate' or 'circular' and by default is 'zeros'. hidden_nonlinearity (callable or torch.nn.Module): Activation function for intermediate dense layer(s). It should return a torch.Tensor. Set it to None to maintain a linear activation. hidden_b_init (callable): Initializer function for the bias of intermediate dense layer(s). The function should return a torch.Tensor. max_pool (bool): Bool for using max-pooling or not. pool_shape (tuple[int]): Dimension of the pooling layer(s). For example, (2, 2) means that all pooling layers are of the same shape (2, 2). pool_stride (tuple[int]): The strides of the pooling layer(s). For example, (2, 2) means that all the pooling layers have strides (2, 2). layer_normalization (bool): Bool for using layer normalization or not. hidden_w_init (callable): Initializer function for the weight of intermediate dense layer(s). The function should return a torch.Tensor. enable_cudnn_benchmarks (bool): Whether to enable cudnn benchmarks in `torch`. If enabled, the backend selects the CNN benchamark algorithm with the best performance. """<def_stmt>__init__ self spec image_format hidden_channels * # Many things after this are ints or tuples of ints. kernel_sizes strides paddings=0 padding_mode='zeros' hidden_nonlinearity=nn.ReLU hidden_w_init=nn.init.xavier_uniform_ hidden_b_init=nn.init.zeros_ max_pool=<false> pool_shape=<none> pool_stride=1 layer_normalization=<false> enable_cudnn_benchmarks=<true><block_start>super().__init__()<assert_stmt>len(hidden_channels)<g>0<line_sep># PyTorch forces us to use NCHW internally. 
in_channels,height,width=_check_spec(spec image_format)<line_sep>self._format=image_format<line_sep>kernel_sizes=expand_var('kernel_sizes' kernel_sizes len(hidden_channels) 'hidden_channels')<line_sep>strides=expand_var('strides' strides len(hidden_channels) 'hidden_channels')<line_sep>paddings=expand_var('paddings' paddings len(hidden_channels) 'hidden_channels')<line_sep>pool_shape=expand_var('pool_shape' pool_shape len(hidden_channels) 'hidden_channels')<line_sep>pool_stride=expand_var('pool_stride' pool_stride len(hidden_channels) 'hidden_channels')<line_sep>self._cnn_layers=nn.Sequential()<line_sep>torch.backends.cudnn.benchmark=enable_cudnn_benchmarks<line_sep># In case there are no hidden channels, handle output case. out_channels=in_channels<for_stmt>i,out_channels enumerate(hidden_channels)<block_start>conv_layer=nn.Conv2d(in_channels=in_channels out_channels=out_channels kernel_size=kernel_sizes[i] stride=strides[i] padding=paddings[i] padding_mode=padding_mode)<line_sep>height=output_height_2d(conv_layer height)<line_sep>width=output_width_2d(conv_layer width)<line_sep>hidden_w_init(conv_layer.weight)<line_sep>hidden_b_init(conv_layer.bias)<line_sep>self._cnn_layers.add_module(f'conv_{i}' conv_layer)<if_stmt>layer_normalization<block_start>self._cnn_layers.add_module(f'layer_norm_{i}' nn.LayerNorm((out_channels height width)))<block_end><if_stmt>hidden_nonlinearity<block_start>self._cnn_layers.add_module(f'non_linearity_{i}' NonLinearity(hidden_nonlinearity))<block_end><if_stmt>max_pool<block_start>pool=nn.MaxPool2d(kernel_size=pool_shape[i] stride=pool_stride[i])<line_sep>height=output_height_2d(pool height)<line_sep>width=output_width_2d(pool width)<line_sep>self._cnn_layers.add_module(f'max_pooling_{i}' pool)<block_end>in_channels=out_channels<block_end>output_dims=out_channels<times>height<times>width<if_stmt>spec.output_space<is><none><block_start>final_spec=InOutSpec(spec.input_space akro.Box(low=-np.inf high=np.inf shape=(output_dims )))<line_sep>self._final_layer=<none><block_end><else_stmt><block_start>final_spec=spec<line_sep># Checked at start of __init__ self._final_layer=nn.Linear(output_dims spec.output_space.shape[0])<block_end>self.spec=final_spec<block_end># pylint: disable=arguments-differ <def_stmt>forward self x<block_start>"""Forward method. Args: x (torch.Tensor): Input values. Should match image_format specified at construction (either NCHW or NCWH). Returns: List[torch.Tensor]: Output values """<line_sep># Transform single values into batch, if necessary. <if_stmt>len(x.shape)<eq>3<block_start>x=x.unsqueeze(0)<block_end># This should be the single place in torch that image normalization # happens <if_stmt>isinstance(self.spec.input_space akro.Image)<block_start>x=torch.div(x 255.0)<block_end><assert_stmt>len(x.shape)<eq>4<if_stmt>self._format<eq>'NHWC'# Convert to internal NCHW format <block_start>x=x.permute((0 3 1 2))<block_end><for_stmt>layer self._cnn_layers<block_start>x=layer(x)<block_end><if_stmt>self._format<eq>'NHWC'# Convert back to NHWC (just in case) <block_start>x=x.permute((0 2 3 1))<block_end># Remove non-batch dimensions x=x.reshape(x.shape[0] -1)<line_sep># Apply final linearity, if it was requested. <if_stmt>self._final_layer<is><not><none><block_start>x=self._final_layer(x)<block_end><return>x<block_end><block_end><def_stmt>_check_spec spec image_format<block_start>"""Check that an InOutSpec is suitable for a CNNModule. Args: spec (garage.InOutSpec): Specification of inputs and outputs. 
The input should be in 'NCHW' format: [batch_size, channel, height, width]. Will print a warning if the channel size is not 1 or 3. If output_space is specified, then a final linear layer will be inserted to map to that dimensionality. If output_space is None, it will be filled in with the computed output space. image_format (str): Either 'NCHW' or 'NHWC'. Should match the input specification. Gym uses NHWC by default, but PyTorch uses NCHW by default. Returns: tuple[int, int, int]: The input channels, height, and width. Raises: ValueError: If spec isn't suitable for a CNNModule. """<line_sep># pylint: disable=no-else-raise input_space=spec.input_space<line_sep>output_space=spec.output_space<line_sep># Don't use isinstance, since akro.Space is guaranteed to inherit from # gym.Space <if_stmt>getattr(input_space 'shape' <none>)<is><none><block_start><raise>ValueError(f'input_space to CNNModule is {input_space}, but should be an '<concat>f'akro.Box or akro.Image')<block_end><elif_stmt>len(input_space.shape)<ne>3<block_start><raise>ValueError(f'Input to CNNModule is {input_space}, but should have three '<concat>f'dimensions.')<block_end><if_stmt>(output_space<is><not><none><and><not>(hasattr(output_space 'shape')<and>len(output_space.shape)<eq>1))<block_start><raise>ValueError(f'output_space to CNNModule is {output_space}, but should be '<concat>f'an akro.Box with a single dimension or None')<block_end><if_stmt>image_format<eq>'NCHW'<block_start>in_channels=spec.input_space.shape[0]<line_sep>height=spec.input_space.shape[1]<line_sep>width=spec.input_space.shape[2]<block_end><elif_stmt>image_format<eq>'NHWC'<block_start>height=spec.input_space.shape[0]<line_sep>width=spec.input_space.shape[1]<line_sep>in_channels=spec.input_space.shape[2]<block_end><else_stmt><block_start><raise>ValueError(f'image_format has value {image_format!r}, but must be either '<concat>f"'NCHW' or 'NHWC'")<block_end><if_stmt>in_channels<not><in>(1 3)<block_start>warnings.warn(f'CNNModule input has {in_channels} channels, but '<concat>f'1 or 3 channels are typical. Consider changing the CNN '<concat>f'image_format.')<block_end><return>in_channels height width<block_end>
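A hedged usage sketch of CNNModule; the spaces and layer sizes below are made up, chosen only to match the docstring's conventions (NCHW input, per-layer tuples, and a 1-D output space so the final linear layer is inserted):

import akro
import numpy as np
import torch
from garage import InOutSpec

spec = InOutSpec(
    akro.Box(low=0.0, high=1.0, shape=(3, 32, 32)),   # input: 3-channel 32x32 images
    akro.Box(low=-np.inf, high=np.inf, shape=(64,)),  # output: 64-D feature vector
)
cnn = CNNModule(spec,
                image_format='NCHW',
                hidden_channels=(16, 32),
                kernel_sizes=(5, 3),
                strides=(2, 2))

obs = torch.rand(8, 3, 32, 32)   # a batch in NCHW layout
print(cnn(obs).shape)            # torch.Size([8, 64]) -- flattened conv features mapped by the final linear layer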
"""Tests for graphmode_tensornetwork."""<import_stmt>numpy<as>np<import_stmt>tensorflow<as>tf<import_from_stmt>tensornetwork contract connect flatten_edges_between contract_between Node <import_stmt>pytest<class_stmt>GraphmodeTensorNetworkTest(tf.test.TestCase)<block_start><def_stmt>test_basic_graphmode self# pylint: disable=not-context-manager <block_start><with_stmt>tf.compat.v1.Graph().as_default()<block_start>a=Node(tf.ones(10) backend="tensorflow")<line_sep>b=Node(tf.ones(10) backend="tensorflow")<line_sep>e=connect(a[0] b[0])<line_sep>final_tensor=contract(e).get_tensor()<line_sep>sess=tf.compat.v1.Session()<line_sep>final_val=sess.run(final_tensor)<line_sep>self.assertAllClose(final_val 10.0)<block_end><block_end><def_stmt>test_gradient_decent self# pylint: disable=not-context-manager <block_start><with_stmt>tf.compat.v1.Graph().as_default()<block_start>a=Node(tf.Variable(tf.ones(10)) backend="tensorflow")<line_sep>b=Node(tf.ones(10) backend="tensorflow")<line_sep>e=connect(a[0] b[0])<line_sep>final_tensor=contract(e).get_tensor()<line_sep>opt=tf.compat.v1.train.GradientDescentOptimizer(0.001)<line_sep>train_op=opt.minimize(final_tensor)<line_sep>sess=tf.compat.v1.Session()<line_sep>sess.run(tf.compat.v1.global_variables_initializer())<line_sep>self.assertAllClose(sess.run(final_tensor) 10.0)<line_sep>sess.run(train_op)<line_sep>self.assertLess(sess.run(final_tensor) 10.0)<block_end><block_end><def_stmt>test_dynamic_network_sizes self<block_start>@tf.function<def_stmt>f x n<block_start>x_slice=x[:n]<line_sep>n1=Node(x_slice backend="tensorflow")<line_sep>n2=Node(x_slice backend="tensorflow")<line_sep>e=connect(n1[0] n2[0])<line_sep><return>contract(e).get_tensor()<block_end>x=np.ones(10)<line_sep>self.assertAllClose(f(x tf.convert_to_tensor(2)) 2.0)<line_sep>self.assertAllClose(f(x tf.convert_to_tensor(3)) 3.0)<block_end>@pytest.mark.skip(reason="Test fails due to probable bug in tensorflow 2.0.0")<def_stmt>test_dynamic_network_sizes_contract_between self<block_start>@tf.function<def_stmt>f x n<block_start>x_slice=x[<ellipsis> :n]<line_sep>n1=Node(x_slice backend="tensorflow")<line_sep>n2=Node(x_slice backend="tensorflow")<line_sep>connect(n1[0] n2[0])<line_sep>connect(n1[1] n2[1])<line_sep>connect(n1[2] n2[2])<line_sep><return>contract_between(n1 n2).get_tensor()<block_end>x=tf.ones((3 4 5))<line_sep>self.assertAllClose(f(x tf.convert_to_tensor(2)) 24.0)<line_sep>self.assertAllClose(f(x tf.convert_to_tensor(3)) 36.0)<block_end><def_stmt>test_dynamic_network_sizes_flatten_standard self<block_start>@tf.function<def_stmt>f x n<block_start>x_slice=x[<ellipsis> :n]<line_sep>n1=Node(x_slice backend="tensorflow")<line_sep>n2=Node(x_slice backend="tensorflow")<line_sep>connect(n1[0] n2[0])<line_sep>connect(n1[1] n2[1])<line_sep>connect(n1[2] n2[2])<line_sep><return>contract(flatten_edges_between(n1 n2)).get_tensor()<block_end>x=np.ones((3 4 5))<line_sep>self.assertAllClose(f(x tf.convert_to_tensor(2)) 24.0)<line_sep>self.assertAllClose(f(x tf.convert_to_tensor(3)) 36.0)<block_end><def_stmt>test_dynamic_network_sizes_flatten_trace self<block_start>@tf.function<def_stmt>f x n<block_start>x_slice=x[<ellipsis> :n]<line_sep>n1=Node(x_slice backend="tensorflow")<line_sep>connect(n1[0] n1[2])<line_sep>connect(n1[1] n1[3])<line_sep><return>contract(flatten_edges_between(n1 n1)).get_tensor()<block_end>x=np.ones((3 4 3 4 5))<line_sep>self.assertAllClose(f(x tf.convert_to_tensor(2)) np.ones((2 ))<times>12)<line_sep>self.assertAllClose(f(x tf.convert_to_tensor(3)) np.ones((3 
))<times>12)<block_end><def_stmt>test_batch_usage self <block_start><def_stmt>build_tensornetwork tensors<block_start>a=Node(tensors[0] backend="tensorflow")<line_sep>b=Node(tensors[1] backend="tensorflow")<line_sep>e=connect(a[0] b[0])<line_sep><return>contract(e).get_tensor()<block_end>tensors=[np.ones((5 10)) np.ones((5 10))]<line_sep>result=tf.map_fn(build_tensornetwork tensors dtype=tf.float64)<line_sep>np.testing.assert_allclose(result np.ones(5)<times>10)<block_end><block_end><if_stmt>__name__<eq>'__main__'<block_start>tf.test.main()<block_end>
<import_stmt>tapiriik.database<line_sep>tapiriik.database.db=tapiriik.database._connection["tapiriik_test"]<line_sep>tapiriik.database.cachedb=tapiriik.database._connection["tapiriik_cache_test"]<import_from_stmt>tapiriik.testing *<import_stmt>unittest<line_sep>unittest.main(exit=<false>)# exit=False so the test databases below are actually dropped after the run<line_sep>tapiriik.database._connection.drop_database("tapiriik_test")<line_sep>tapiriik.database._connection.drop_database("tapiriik_cache_test")<line_sep>
# # Copyright 2021 Red Hat Inc. # SPDX-License-Identifier: Apache-2.0 # """Azure Client Configuration."""<import_from_stmt>azure.common.credentials ServicePrincipalCredentials<import_from_stmt>azure.mgmt.costmanagement CostManagementClient<import_from_stmt>azure.mgmt.resource ResourceManagementClient<import_from_stmt>azure.mgmt.storage StorageManagementClient<import_from_stmt>azure.storage.blob BlobServiceClient<import_from_stmt>msrestazure.azure_cloud AZURE_CHINA_CLOUD<import_from_stmt>msrestazure.azure_cloud AZURE_GERMAN_CLOUD<import_from_stmt>msrestazure.azure_cloud AZURE_PUBLIC_CLOUD<import_from_stmt>msrestazure.azure_cloud AZURE_US_GOV_CLOUD<class_stmt>AzureClientFactory<block_start>"""Azure client factory. This class holds the Azure credentials and can create Service Clients for querying the Azure Service APIs. Args: subscription_id (str): Subscription ID tenant_id (str): Tenant ID for your Azure Subscription client_id (str): Service Principal Application ID client_secret (str): Service Principal Password cloud (str): Cloud selector, must be one of ['china', 'germany', 'public', 'usgov'] """<def_stmt>__init__ self subscription_id tenant_id client_id client_secret cloud="public"<block_start>"""Constructor."""<line_sep>self._subscription_id=subscription_id<line_sep>clouds={"china":AZURE_CHINA_CLOUD "germany":AZURE_GERMAN_CLOUD "public":AZURE_PUBLIC_CLOUD "usgov":AZURE_US_GOV_CLOUD }<line_sep>self._credentials=ServicePrincipalCredentials(client_id=client_id secret=client_secret tenant=tenant_id cloud_environment=clouds.get(cloud "public"))<block_end>@property<def_stmt>credentials self<block_start>"""Service Principal Credentials property."""<line_sep><return>self._credentials<block_end>@property<def_stmt>cost_management_client self<block_start>"""Get cost management client with subscription and credentials."""<line_sep><return>CostManagementClient(self.credentials self.subscription_id)<block_end>@property<def_stmt>resource_client self<block_start>"""Return a resource client."""<line_sep><return>ResourceManagementClient(self.credentials self.subscription_id)<block_end>@property<def_stmt>storage_client self<block_start>"""Get storage client with subscription and credentials."""<line_sep><return>StorageManagementClient(self.credentials self.subscription_id)<block_end>@property<def_stmt>subscription_id self<block_start>"""Subscription ID property."""<line_sep><return>self._subscription_id<block_end><def_stmt>cloud_storage_account self resource_group_name storage_account_name<block_start>"""Get a BlobServiceClient."""<line_sep>storage_account_keys=self.storage_client.storage_accounts.list_keys(resource_group_name storage_account_name)<line_sep># Add check for keys and a get value key=storage_account_keys.keys[0]<line_sep>connect_str=(f"DefaultEndpointsProtocol=https;"<concat>f"AccountName={storage_account_name};"<concat>f"AccountKey={key.value};"<concat>f"EndpointSuffix=core.windows.net")<line_sep><return>BlobServiceClient.from_connection_string(connect_str)<block_end><block_end>
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # SPDX-License-Identifier: Apache-2.0 <import_stmt>dns.resolver<class_stmt>RegionLookupError(Exception)<block_start>"""Raised when there was a problem looking up the active region"""<line_sep><pass><block_end><def_stmt>active_region <block_start>qname='global.health.amazonaws.com'<try_stmt><block_start>answers=dns.resolver.resolve(qname 'CNAME')<block_end><except_stmt>Exception<as>e<block_start><raise>RegionLookupError('Failed to resolve {}'.format(qname) e)<block_end><if_stmt>len(answers)<ne>1<block_start><raise>RegionLookupError('Failed to get a single answer when resolving {}'.format(qname))<block_end>name=str(answers[0].target)# e.g. health.us-east-1.amazonaws.com. region_name=name.split('.')[1]# Region name is at index 1 of split('.') -> ['health', 'us-east-1', 'amazonaws', 'com', ''] <return>region_name<block_end>
# Copyright 2021, Google LLC. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. <import_from_stmt>absl.testing absltest<import_stmt>tensorflow<as>tf<import_from_stmt>dual_encoder keras_layers<line_sep>l2_normalize_fn=<lambda>x:tf.keras.backend.l2_normalize(x axis=-1)<class_stmt>KerasLayersTest(absltest.TestCase)<block_start><def_stmt>test_masked_average_3d self<block_start>masked_average_layer=keras_layers.MaskedAverage(1)<line_sep>inputs=tf.constant([[[0.5 0.3] [0.4 0.1] [0.4 0.1]] [[0.6 0.8] [0.5 0.4] [0.4 0.1]] [[0.9 0.4] [0.4 0.1] [0.4 0.1]] [[0.9 0.4] [0.4 0.1] [0.4 0.1]] ])<line_sep>mask=tf.constant([[<true> <true> <true>] [<false> <false> <true>] [<true> <false> <false>] [<false> <false> <false>]])<line_sep>output_average=masked_average_layer.call(inputs mask=mask)<line_sep>output_mask=masked_average_layer.compute_mask(inputs mask=mask)<line_sep>expected_average=tf.constant([[1.3/3 0.5/3] [0.4 0.1] [0.9 0.4] [0.0 0.0]])<line_sep>expected_mask=<none><line_sep>tf.debugging.assert_near(expected_average output_average)<line_sep>self.assertEqual(expected_mask output_mask)<block_end><def_stmt>test_masked_average_4d self<block_start>masked_average_layer=keras_layers.MaskedAverage(2)<line_sep>inputs=tf.constant([[[[0.5 0.3] [0.4 0.1] [0.4 0.1]] [[0.6 0.8] [0.5 0.4] [0.4 0.1]]] [[[0.6 0.8] [0.5 0.4] [0.4 0.1]] [[0.6 0.8] [0.5 0.4] [0.4 0.1]]] [[[0.9 0.4] [0.4 0.1] [0.4 0.1]] [[0.6 0.8] [0.5 0.4] [0.4 0.1]]] [[[0.9 0.4] [0.4 0.1] [0.4 0.1]] [[0.6 0.8] [0.5 0.4] [0.4 0.1]]] ])<line_sep>mask=tf.constant([[[<true> <true> <true>] [<true> <false> <true>]] [[<false> <false> <true>] [<false> <false> <false>]] [[<true> <false> <false>] [<true> <true> <true>]] [[<false> <false> <false>] [<true> <false> <false>]]])<line_sep>output_average=masked_average_layer.call(inputs mask=mask)<line_sep>output_mask=masked_average_layer.compute_mask(inputs mask=mask)<line_sep>expected_average=tf.constant([[[1.3/3 0.5/3] [0.5 0.45]] [[0.4 0.1] [0.0 0.0]] [[0.9 0.4] [0.5 1.3/3]] [[0.0 0.0] [0.6 0.8]] ])<line_sep>expected_mask=tf.constant([[<true> <true>] [<true> <false>] [<true> <true>] [<false> <true>]])<line_sep>tf.debugging.assert_near(expected_average output_average)<line_sep>tf.debugging.assert_equal(expected_mask output_mask)<block_end><def_stmt>test_masked_average_raises_error self<block_start>masked_average_layer=keras_layers.MaskedAverage(1)<line_sep>inputs=tf.constant([[[0.5 0.3] [0.4 0.1] [0.4 0.1]] [[0.6 0.8] [0.5 0.4] [0.4 0.1]] [[0.9 0.4] [0.4 0.1] [0.4 0.1]] ])<line_sep>mask=<none><with_stmt>self.assertRaises(ValueError)<block_start>masked_average_layer.call(inputs mask=mask)<block_end><with_stmt>self.assertRaises(ValueError)<block_start>masked_average_layer.compute_mask(inputs mask=mask)<block_end><block_end><def_stmt>test_masked_reshape self<block_start>masked_reshape_layer=keras_layers.MaskedReshape((4 4 2 1) (4 4 2))<line_sep>inputs=tf.constant([[[1.0] [2.0] [0.5] [0.4] [0.4] [0.1] [0.0] [0.0]] [[0.4] [0.1] [0.0] [0.0] [0.0] [0.0] [0.6] [0.8]] [[0.9] [0.4] [0.5] [3.0] [0.9] [0.4] [0.5] [3.0]] [[0.0] [0.0] [0.6] [0.8] 
[0.4] [0.1] [0.0] [0.0]] ])<line_sep>mask=tf.constant([[<true> <false> <true> <true> <true> <false> <false> <false>] [<true> <false> <true> <true> <true> <true> <false> <true>] [<false> <true> <true> <false> <true> <true> <true> <true>] [<false> <true> <true> <true> <true> <false> <false> <true>]])<line_sep>output=masked_reshape_layer.call(inputs mask=mask)<line_sep>output_mask=masked_reshape_layer.compute_mask(inputs mask=mask)<line_sep>expected_output=tf.constant([[[[1.0] [2.0]] [[0.5] [0.4]] [[0.4] [0.1]] [[0.0] [0.0]]] [[[0.4] [0.1]] [[0.0] [0.0]] [[0.0] [0.0]] [[0.6] [0.8]]] [[[0.9] [0.4]] [[0.5] [3.0]] [[0.9] [0.4]] [[0.5] [3.0]]] [[[0.0] [0.0]] [[0.6] [0.8]] [[0.4] [0.1]] [[0.0] [0.0]]] ])<line_sep>expected_mask=tf.constant([[[<true> <false>] [<true> <true>] [<true> <false>] [<false> <false>]] [[<true> <false>] [<true> <true>] [<true> <true>] [<false> <true>]] [[<false> <true>] [<true> <false>] [<true> <true>] [<true> <true>]] [[<false> <true>] [<true> <true>] [<true> <false>] [<false> <true>]]])<line_sep>tf.debugging.assert_near(expected_output output)<line_sep>tf.debugging.assert_equal(expected_mask output_mask)<block_end><def_stmt>test_masked_reshape_unknown_batch_size self<block_start>masked_reshape_layer=keras_layers.MaskedReshape((-1 4 2 1) (-1 4 2))<line_sep>inputs=tf.constant([[[1.0] [2.0] [0.5] [0.4] [0.4] [0.1] [0.0] [0.0]] [[0.4] [0.1] [0.0] [0.0] [0.0] [0.0] [0.6] [0.8]] [[0.9] [0.4] [0.5] [3.0] [0.9] [0.4] [0.5] [3.0]] [[0.0] [0.0] [0.6] [0.8] [0.4] [0.1] [0.0] [0.0]] ])<line_sep>mask=tf.constant([[<true> <false> <true> <true> <true> <false> <false> <false>] [<true> <false> <true> <true> <true> <true> <false> <true>] [<false> <true> <true> <false> <true> <true> <true> <true>] [<false> <true> <true> <true> <true> <false> <false> <true>]])<line_sep>output=masked_reshape_layer.call(inputs mask=mask)<line_sep>output_mask=masked_reshape_layer.compute_mask(inputs mask=mask)<line_sep>expected_output=tf.constant([[[[1.0] [2.0]] [[0.5] [0.4]] [[0.4] [0.1]] [[0.0] [0.0]]] [[[0.4] [0.1]] [[0.0] [0.0]] [[0.0] [0.0]] [[0.6] [0.8]]] [[[0.9] [0.4]] [[0.5] [3.0]] [[0.9] [0.4]] [[0.5] [3.0]]] [[[0.0] [0.0]] [[0.6] [0.8]] [[0.4] [0.1]] [[0.0] [0.0]]] ])<line_sep>expected_mask=tf.constant([[[<true> <false>] [<true> <true>] [<true> <false>] [<false> <false>]] [[<true> <false>] [<true> <true>] [<true> <true>] [<false> <true>]] [[<false> <true>] [<true> <false>] [<true> <true>] [<true> <true>]] [[<false> <true>] [<true> <true>] [<true> <false>] [<false> <true>]]])<line_sep>tf.debugging.assert_near(expected_output output)<line_sep>tf.debugging.assert_equal(expected_mask output_mask)<block_end><def_stmt>test_masked_reshape_raises_error self<block_start>masked_reshape_layer=keras_layers.MaskedReshape((-1 4 2 1) (-1 4 2))<line_sep>inputs=tf.constant([[[1.0] [2.0] [0.5] [0.4] [0.4] [0.1] [0.0] [0.0]] [[0.4] [0.1] [0.0] [0.0] [0.0] [0.0] [0.6] [0.8]] [[0.9] [0.4] [0.5] [3.0] [0.9] [0.4] [0.5] [3.0]] [[0.0] [0.0] [0.6] [0.8] [0.4] [0.1] [0.0] [0.0]] ])<line_sep>mask=<none><with_stmt>self.assertRaises(ValueError)<block_start>masked_reshape_layer.call(inputs mask=mask)<block_end><with_stmt>self.assertRaises(ValueError)<block_start>masked_reshape_layer.compute_mask(inputs mask=mask)<block_end><block_end><def_stmt>test_embedding_spreadout_regularizer_dot_product self<block_start>weights=tf.constant([[1.0 0.0 0.0] [2.0 2.0 2.0] [0.1 0.2 0.3] [0.3 0.2 0.1] [0.0 1.0 0.0]])<line_sep>regularizer=keras_layers.EmbeddingSpreadoutRegularizer(spreadout_lambda=0.1 normalization_fn=<none> 
l2_regularization=0.0)<line_sep># Similarities without diagonal looks like: # 0.0 2.0 0.1 0.3 0.0 # 2.0 0.0 1.2 1.2 2.0 # 0.1 1.2 0.0 0.1 0.2 # 0.3 1.2 0.1 0.0 0.2 # 0.0 2.0 0.2 0.2 0.0 loss=regularizer(weights)<line_sep># L2 norm of above similarities. expected_loss=0.47053161424<line_sep>tf.debugging.assert_near(expected_loss loss)<line_sep>regularizer=keras_layers.EmbeddingSpreadoutRegularizer(spreadout_lambda=0.1 normalization_fn=<none> l2_regularization=1.0)<line_sep>l2_regularizer=tf.keras.regularizers.l2(1.0)<line_sep>loss=regularizer(weights)<line_sep>expected_loss=0.47053161424+l2_regularizer(weights)<line_sep>tf.debugging.assert_near(expected_loss loss)<block_end><def_stmt>test_embedding_spreadout_regularizer_cosine_similarity self<block_start>weights=tf.constant([[1.0 0.0 0.0] [2.0 2.0 2.0] [0.1 0.2 0.3] [0.3 0.2 0.1] [0.0 1.0 0.0]])<line_sep>regularizer=keras_layers.EmbeddingSpreadoutRegularizer(spreadout_lambda=0.1 normalization_fn=l2_normalize_fn l2_regularization=0.0)<line_sep>loss=regularizer(weights)<line_sep># L2 norm of above similarities. expected_loss=0.2890284<line_sep>tf.debugging.assert_near(expected_loss loss)<line_sep>regularizer=keras_layers.EmbeddingSpreadoutRegularizer(spreadout_lambda=0.1 normalization_fn=l2_normalize_fn l2_regularization=1.0)<line_sep>l2_regularizer=tf.keras.regularizers.l2(1.0)<line_sep>loss=regularizer(weights)<line_sep>expected_loss=0.2890284+l2_regularizer(weights)<line_sep>tf.debugging.assert_near(expected_loss loss)<block_end><def_stmt>test_embedding_spreadout_regularizer_no_spreadout self<block_start>weights=tf.constant([[1.0 0.0 0.0] [2.0 2.0 2.0] [0.1 0.2 0.3] [0.3 0.2 0.1] [0.0 1.0 0.0]])<line_sep>regularizer=keras_layers.EmbeddingSpreadoutRegularizer(spreadout_lambda=0.0 normalization_fn=<none> l2_regularization=0.0)<line_sep>loss=regularizer(weights)<line_sep>expected_loss=0.0<line_sep>tf.debugging.assert_near(expected_loss loss)<line_sep># Test that L2 normalization behaves normally. regularizer=keras_layers.EmbeddingSpreadoutRegularizer(spreadout_lambda=0.0 normalization_fn=<none> l2_regularization=0.1)<line_sep>l2_regularizer=tf.keras.regularizers.l2(0.1)<line_sep>loss=regularizer(weights)<line_sep>l2_loss=l2_regularizer(weights)<line_sep>tf.debugging.assert_near(l2_loss loss)<line_sep># Test that normalization_fn has no effect. regularizer=keras_layers.EmbeddingSpreadoutRegularizer(spreadout_lambda=0.0 normalization_fn=l2_normalize_fn l2_regularization=0.1)<line_sep>l2_regularizer=tf.keras.regularizers.l2(0.1)<line_sep>loss=regularizer(weights)<line_sep>l2_loss=l2_regularizer(weights)<line_sep>tf.debugging.assert_near(l2_loss loss)<block_end><def_stmt>test_embedding_spreadout_regularizer_get_config self<block_start>weights=tf.constant([[1.0 0.0 0.0] [2.0 2.0 2.0] [0.1 0.2 0.3] [0.3 0.2 0.1] [0.0 1.0 0.0]])<line_sep>regularizer=keras_layers.EmbeddingSpreadoutRegularizer(spreadout_lambda=0.0 normalization_fn=l2_normalize_fn l2_regularization=0.1)<line_sep>config=regularizer.get_config()<line_sep>expected_config={'spreadout_lambda':0.0 'normalization_fn':l2_normalize_fn 'l2_regularization':0.1}<line_sep>new_regularizer=(keras_layers.EmbeddingSpreadoutRegularizer.from_config(config))<line_sep>l2_regularizer=tf.keras.regularizers.l2(0.1)<line_sep>loss=new_regularizer(weights)<line_sep>l2_loss=l2_regularizer(weights)<line_sep>self.assertEqual(config expected_config)<line_sep>tf.debugging.assert_near(l2_loss loss)<block_end><block_end><if_stmt>__name__<eq>'__main__'<block_start>absltest.main()<block_end>
<import_from_stmt>localtileserver examples<def_stmt>test_get_blue_marble <block_start>client=examples.get_blue_marble()<assert_stmt>client.metadata()<block_end><def_stmt>test_get_virtual_earth <block_start>client=examples.get_virtual_earth()<assert_stmt>client.metadata()<block_end><def_stmt>test_get_arcgis <block_start>client=examples.get_arcgis()<assert_stmt>client.metadata()<block_end><def_stmt>test_get_elevation <block_start>client=examples.get_elevation()<assert_stmt>client.metadata()<block_end><def_stmt>test_get_bahamas <block_start>client=examples.get_bahamas()<assert_stmt>client.metadata()<block_end><def_stmt>test_get_pine_gulch <block_start>client=examples.get_pine_gulch()<assert_stmt>client.metadata()<block_end><def_stmt>test_get_landsat <block_start>client=examples.get_landsat()<assert_stmt>client.metadata()<block_end><def_stmt>test_get_san_francisco <block_start>client=examples.get_san_francisco()<assert_stmt>client.metadata()<block_end><def_stmt>test_get_oam2 <block_start>client=examples.get_oam2()<assert_stmt>client.metadata()<block_end><def_stmt>test_get_elevation_us <block_start>client=examples.get_elevation_us()<assert_stmt>client.metadata()<block_end>
<import_stmt>boto3<import_from_stmt>botocore.config Config<import_stmt>argparse<import_stmt>json<import_stmt>base64<import_stmt>sys<line_sep>parser=argparse.ArgumentParser(description='Uses a specified CMK to encrypt QnABot Lambdas and Parameter Store settings')<line_sep>parser.add_argument("region" help="AWS Region")<line_sep>parser.add_argument("stack_arn" help="the arn of the QnABot CloudFormation Stack")<line_sep>parser.add_argument("cmk_arn" help="the ARN of the Customer Master Key to use for encryption")<line_sep>parser.add_argument("target_s3_bucket" help="the Name of the S3 bucket to use for server access logs")<line_sep>args=type('' () {})()<line_sep>args=parser.parse_args()<line_sep>client_config=Config(region_name=args.region)<line_sep>lambda_client=boto3.client('lambda' config=client_config)<line_sep>iam_client=boto3.client('iam' config=client_config)<line_sep>role_paginator=iam_client.get_paginator('list_role_policies')<line_sep>kms_client=boto3.client("kms" config=client_config)<line_sep>cloudformation_client=boto3.client('cloudformation' config=client_config)<line_sep>ssm_client=boto3.client('ssm' config=client_config)<line_sep>s3_client=boto3.client('s3' config=client_config)<line_sep>ddb_client=boto3.client('dynamodb' config=client_config)<line_sep>sts_client=boto3.client('sts' config=client_config)<line_sep>kinesis_client=boto3.client('firehose' config=client_config)<line_sep>policy_name="CMKPolicy4"<line_sep>policy_document={"Version":"2012-10-17" "Statement":[{"Effect":"Allow" "Action":["kms:Decrypt" "kms:Encrypt" "kms:GenerateDataKey"] "Resource":args.cmk_arn}]}<line_sep>cmk_roles_logical_ids=['S3AccessRole' 'FirehoseESS3Role' 'AdminRole' 'ExportRole' 'ImportRole' 'ApiGatewayRole' 'ESCognitoRole' 'KibanaRole' ]<line_sep>cmk_roles_physical_ids=[]<def_stmt>assign_role role_name<block_start>role_iterator=role_paginator.paginate(RoleName=role_name PaginationConfig={'MaxItems':1000 'PageSize':1000})<line_sep>print(f"Updating role {role_name}...")<line_sep>cmk_policy_exists=<false><for_stmt>role role_iterator<block_start><if_stmt>policy_name<in>role["PolicyNames"]<block_start>cmk_policy_exists=<true><line_sep><break><block_end><block_end><if_stmt><not>cmk_policy_exists<block_start>iam_client.put_role_policy(RoleName=role_name PolicyName=policy_name PolicyDocument=json.dumps(policy_document))<block_end><block_end><def_stmt>put_key_policy stackname roles<block_start>response=kms_client.get_key_policy(KeyId=args.cmk_arn PolicyName='default')<line_sep>policy=response['Policy'].replace("\n" "")<line_sep>policy=json.loads(policy)<line_sep>caller_identity=sts_client.get_caller_identity()<line_sep>new_statement=[]<for_stmt>statement policy["Statement"]<block_start><if_stmt>(statement["Sid"]<ne>stackname)<block_start>new_statement.append(statement)<block_end><block_end>policy["Statement"]=new_statement<line_sep>formatted_roles=[]<for_stmt>role roles<block_start>formatted_roles.append(f"arn:aws:iam::{caller_identity['Account']}:role/{role}")<block_end>policy["Statement"].append({"Sid":stackname "Effect":"Allow" "Principal":{"AWS":formatted_roles} "Action":["kms:Encrypt" "kms:Decrypt" "kms:GenerateDataKey"] "Resource":"*"})<line_sep>print(f"Updating policy for key {args.cmk_arn}")<line_sep>kms_client.put_key_policy(KeyId=args.cmk_arn PolicyName="default" Policy=json.dumps(policy))<line_sep>print(f"Policy for key {args.cmk_arn} updated.")<block_end><def_stmt>process_stacks 
stackname<block_start>paginator=cloudformation_client.get_paginator('list_stack_resources')<line_sep>response_iterator=paginator.paginate(StackName=stackname PaginationConfig={'MaxItems':10000#, })<for_stmt>response response_iterator<block_start>lambda_resources=filter(<lambda>x:x["ResourceType"]<eq>"AWS::Lambda::Function" response["StackResourceSummaries"])<for_stmt>lambda_func lambda_resources<block_start>lambda_client.update_function_configuration(FunctionName=lambda_func["PhysicalResourceId"] KMSKeyArn=args.cmk_arn)<line_sep>print(f"Updated function {lambda_func['PhysicalResourceId']} in stack {stackname}")<line_sep>lambda_configuration=lambda_client.get_function_configuration(FunctionName=lambda_func["PhysicalResourceId"])<line_sep>role_name=lambda_configuration["Role"].split("/")[-1]<line_sep>assign_role(role_name)<block_end>ssm_parameters=filter(<lambda>x:x["ResourceType"]<eq>"AWS::SSM::Parameter" response["StackResourceSummaries"])<for_stmt>parameter ssm_parameters<block_start>parameter_name=parameter["PhysicalResourceId"]<line_sep>parameter_response=ssm_client.get_parameter(Name=parameter_name WithDecryption=<true>)<line_sep>parameter_value=parameter_response['Parameter']['Value']<line_sep>description=parameter_response['Parameter']["Description"]<if>"Decription"<in>parameter_response['Parameter']<else>""<line_sep>ssm_client.put_parameter(Name=parameter_name Description=description Value=parameter_value Type='SecureString' KeyId=args.cmk_arn Overwrite=<true> )<block_end>s3_buckets=filter(<lambda>x:x["ResourceType"]<eq>"AWS::S3::Bucket" response["StackResourceSummaries"])<for_stmt>bucket s3_buckets<block_start>s3_client.put_bucket_encryption(Bucket=bucket["PhysicalResourceId"] ServerSideEncryptionConfiguration={'Rules':[{'ApplyServerSideEncryptionByDefault':{'SSEAlgorithm':'aws:kms' 'KMSMasterKeyID':args.cmk_arn}} ]})<line_sep>print(f"Encryption set for {bucket['PhysicalResourceId']}")<line_sep>s3_client.put_bucket_logging(Bucket=bucket["PhysicalResourceId"] BucketLoggingStatus={'LoggingEnabled':{'TargetBucket':args.target_s3_bucket 'TargetPrefix':bucket["PhysicalResourceId"]+'/'}})<line_sep>print(f"Access Logs set for {bucket['PhysicalResourceId']}")<block_end>ddb_tables=filter(<lambda>x:x["ResourceType"]<eq>"AWS::DynamoDB::Table" response["StackResourceSummaries"])<for_stmt>table ddb_tables<block_start>table_description=ddb_client.describe_table(TableName=table["PhysicalResourceId"])<if_stmt>('SSEDescription'<not><in>table_description["Table"]<or>'KMSMasterKeyArn'<not><in>table_description["Table"]['SSEDescription']<or>table_description["Table"]['SSEDescription']['KMSMasterKeyArn']<ne>args.cmk_arn)<block_start>ddb_client.update_table(TableName=table["PhysicalResourceId"] SSESpecification={'Enabled':<true> 'SSEType':'KMS' 'KMSMasterKeyId':args.cmk_arn})<block_end><block_end>kinesis_streams=filter(<lambda>x:x["ResourceType"]<eq>"AWS::KinesisFirehose::DeliveryStream" response["StackResourceSummaries"])<for_stmt>stream 
kinesis_streams<block_start>stream_response=kinesis_client.describe_delivery_stream(DeliveryStreamName=stream["PhysicalResourceId"])<if_stmt>('KeyType'<not><in>stream_response['DeliveryStreamDescription']['DeliveryStreamEncryptionConfiguration']<or>(stream_response['DeliveryStreamDescription']['DeliveryStreamEncryptionConfiguration']['KeyType']<ne>"CUSTOMER_MANAGED_CMK"<and>stream_response['DeliveryStreamDescription']['DeliveryStreamEncryptionConfiguration']['KeyARN']<ne>args.cmk_arn))<block_start>kinesis_client.start_delivery_stream_encryption(DeliveryStreamName=stream["PhysicalResourceId"] DeliveryStreamEncryptionConfigurationInput={'KeyARN':args.cmk_arn 'KeyType':'CUSTOMER_MANAGED_CMK'})<block_end><block_end>role_resources=filter(<lambda>x:'LambdaRole'<in>x["LogicalResourceId"]<or>x["LogicalResourceId"]<in>cmk_roles_logical_ids response["StackResourceSummaries"])<for_stmt>role_resource role_resources<block_start>print(f"role_resource: {role_resource['PhysicalResourceId']}")<line_sep>cmk_roles_physical_ids.append(role_resource["PhysicalResourceId"])<line_sep>assign_role(role_resource["PhysicalResourceId"])<block_end><block_end><block_end>process_stacks(args.stack_arn)<line_sep>paginator=cloudformation_client.get_paginator('list_stack_resources')<line_sep>response_iterator=paginator.paginate(StackName=args.stack_arn PaginationConfig={'MaxItems':10000 })<for_stmt>response response_iterator<block_start>stacks=filter(<lambda>x:x["ResourceType"]<eq>"AWS::CloudFormation::Stack" response["StackResourceSummaries"])<for_stmt>stack stacks<block_start>print(f"Processing stack {stack['PhysicalResourceId']}")<line_sep>process_stacks(stack["PhysicalResourceId"])<block_end><block_end>put_key_policy(args.stack_arn cmk_roles_physical_ids)<line_sep>
# coding=utf-8 <import_from_stmt>aip AipOcr<import_stmt>re<line_sep>opt_aux_word=['ใ€Š' 'ใ€‹']<def_stmt>get_file_content file<block_start><with_stmt>open(file 'rb')<as>fp<block_start><return>fp.read()<block_end><block_end><def_stmt>image_to_str name client<block_start>image=get_file_content(name)<line_sep>text_result=client.basicGeneral(image)<line_sep>print(text_result)<line_sep>result=get_question_and_options(text_result)<line_sep><return>result<block_end><def_stmt>init_baidu_ocr baidu_ocr_config<block_start>app_id,api_key,secret_key=baidu_ocr_config<line_sep>client=AipOcr(app_id api_key secret_key)<line_sep><return>client<block_end># {'words_result': [{'words': '11.ไปฃ่กจไฝœไน‹ไธ€ๆ˜ฏใ€Š่’™ๅจœไธฝ่ŽŽ็š„็œผ'}, # {'words': 'ๆณชใ€‹็š„ๆญŒๆ‰‹ๆ˜ฏ?'}, {'words': 'ๆž—ๅฟ—้ข–'}, # {'words': 'ๆž—ๅฟ—็‚ซ'}, {'words': 'ๆž—ๅฟ—็Žฒ'}], # 'log_id': 916087026228727188, 'words_result_num': 5} <def_stmt>get_question_and_options text<block_start><if_stmt>'error_code'<in>text<block_start>print('่ฏท็กฎไฟ็™พๅบฆOCR้…็ฝฎๆญฃ็กฎ')<line_sep>exit(-1)<block_end><if_stmt>text['words_result_num']<eq>0<block_start><return>'' []<block_end>result_arr=text['words_result']<line_sep>option_arr=[]<line_sep>question_str=''<line_sep>question_obj,options_obj=get_question(result_arr)<for_stmt>question question_obj<block_start>word=question['words']<line_sep>word=re.sub('^\d+\.*' '' word)<line_sep>question_str<augadd>word<block_end><for_stmt>option options_obj<block_start>word=option['words']<if_stmt>word.startswith('ใ€Š')<block_start>word=word[1:]<block_end><if_stmt>word.endswith('ใ€‹')<block_start>word=word[:-1]<block_end>print(word)<line_sep>option_arr.append(word)<block_end>print(question_str)<line_sep>print(option_arr)<line_sep><return>question_str option_arr<block_end># ๅ…ˆๆŒ‰'๏ผŸ'ๅˆ†ๅ‰ฒ้—ฎ้ข˜ๅ’Œ็ญ”ๆกˆ๏ผŒ่‹ฅๆ— ้—ฎๅท๏ผŒ็”จ็ดขๅผ•ๅˆ†ๅ‰ฒ <def_stmt>get_question result_arr<block_start>result_num=len(result_arr)<line_sep>index=-1<line_sep>question_obj,options_obj=[] []<for_stmt>i,result enumerate(result_arr)<block_start><if_stmt>'?'<in>result['words']<block_start>index=i<line_sep><break><block_end><block_end><if_stmt>index<g>-1<block_start>question_obj=result_arr[:index+1]<line_sep>options_obj=result_arr[index+1:]<line_sep><return>question_obj options_obj<block_end><else_stmt># ๆŒ‰็…ง็ป้ชŒ๏ผŒ4ไธช็ป“ๆžœไธบ1่กŒ้—ฎ้ข˜๏ผŒ5ใ€6ไธชไธบ2่กŒ้—ฎ้ข˜๏ผŒ8ไธชไปฅไธŠไธบๅ…ฌๅธƒ็ญ”ๆกˆ <block_start><if_stmt>result_num<le>4<block_start>question_obj=result_arr[:1]<line_sep>options_obj=result_arr[1:]<block_end><elif_stmt>result_num<eq>5<block_start>question_obj=result_arr[:2]<line_sep>options_obj=result_arr[2:]<block_end><elif_stmt>result_num<eq>6# ๆš‚ๆ—ถ <block_start>question_obj=result_arr[:2]<line_sep>options_obj=result_arr[2:]<block_end><elif_stmt>result_num<eq>7<or>result_num<eq>8<block_start>question_obj=result_arr[:3]<line_sep>options_obj=result_arr[3:]<block_end><return>question_obj options_obj<block_end><block_end>
<import_stmt>cPickle<import_stmt>numpy<as>np<import_stmt>tensorflow<as>tf<line_sep>PATH='./cifar-10-batches-py'<line_sep>TARGETPATH='/home/comp/csshshi/tensorflow/cifar-10-batches-py'<line_sep>TEST_FILES=['test_batch']<line_sep>FILES=['data_batch_1' 'data_batch_2' 'data_batch_3' 'data_batch_4' 'data_batch_5']<line_sep>TRAIN_COUNT=50000<line_sep>EVAL_COUNT=10000<line_sep>IMAGE_SIZE=32<line_sep>NUM_CLASSES=10<line_sep>unpickled={}<def_stmt>unpickle file<block_start>dict=unpickled.get(file)<if_stmt>dict<block_start><return>dict<block_end>fo=open(file 'rb')<line_sep>dict=cPickle.load(fo)<line_sep>fo.close()<line_sep>unpickled[file]=dict<line_sep><return>dict<block_end><def_stmt>get_next_batch batch_size step is_test=<false><block_start>files=FILES<if_stmt>is_test<block_start>files=TEST_FILES<block_end>file_index=step%len(FILES)<line_sep>filename=files[file_index]<line_sep>filename='%s/%s'%(PATH filename)<line_sep>dict=unpickle(filename)<line_sep>data_index=step/len(files)<times>batch_size<line_sep>images=dict['data'][data_index:data_index+batch_size]<line_sep>labels=dict['labels'][data_index:data_index+batch_size]<line_sep>reshaped_images=[np.reshape(image (IMAGE_SIZE IMAGE_SIZE 3))<for>image images]<line_sep><return>reshaped_images labels<block_end>
<import_stmt>model<import_stmt>tensorflow<as>tf<import_stmt>utils<def_stmt>train target num_param_servers is_chief lstm_size=64 input_filenames=<none> sentence_length=128 vocab_size=2<power>15 learning_rate=0.01 output_dir=<none> batch_size=1024 embedding_size=128 num_epochs=2<block_start>graph=tf.Graph()<with_stmt>graph.as_default()<block_start>sentences,scores=model.get_inputs(input_filenames batch_size num_epochs sentence_length)<with_stmt>tf.device(tf.train.replica_device_setter())<block_start>lstm=model.BasicRegressionLSTM(sentences scores num_param_servers vocab_size learning_rate embedding_size lstm_size)<block_end><block_end>tf.contrib.learn.train(graph output_dir lstm.train_op lstm.loss global_step_tensor=lstm.global_step supervisor_is_chief=is_chief supervisor_master=target)<block_end><if_stmt>__name__<eq>"__main__"<block_start>parser=utils.base_parser()<line_sep>parser.add_argument('--learning-rate' type=float default=0.01)<line_sep>utils.dispatch(train **parser.parse_args().__dict__)<block_end>
# pylint: disable=W0123 <import_stmt>re<import_stmt>requests<def_stmt>get url:str<arrow>dict<block_start>""" Return the best-quality video URLs found on a 163 open-course page. """<line_sep>data={}<line_sep>data["videos"]=[]<line_sep>headers={"User-Agent":"Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36"}<line_sep>re_url=r'mid:(.*?),.*?mp4SdUrlOrign:(.*?),.*?mp4HdUrlOrign:(.*?),.*?mp4ShdUrlOrign:(.*?),'<line_sep>rep=requests.get(url headers=headers timeout=10)<line_sep>items=re.findall(re_url rep.text)<for_stmt>item items# iterate the URL fields in reverse so the highest quality is picked first <block_start><for_stmt>video_url item[::-1]# type: str # print(url) <block_start><if_stmt>"http"<in>video_url<block_start>video_url=eval(video_url).replace("\\u002F" "/")<line_sep>data["videos"].append(video_url)<line_sep><break><block_end><block_end><block_end><return>data<block_end><if_stmt>__name__<eq>"__main__"<block_start>url="http://open.163.com/newview/movie/free?pid=M8LI1JCE6&mid=M8LI3BQ60"<line_sep>print(get(url))<block_end>
#Kinesis Aggregation/Deaggregation Libraries for Python # #Copyright 2014, Amazon.com, Inc. or its affiliates. All Rights Reserved. # #Licensed under the Amazon Software License (the "License"). #You may not use this file except in compliance with the License. #A copy of the License is located at # # http://aws.amazon.com/asl/ # #or in the "license" file accompanying this file. This file is distributed #on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either #express or implied. See the License for the specific language governing #permissions and limitations under the License. <import_stmt>md5<line_sep>#Message aggregation protocol-specific constants #(https://github.com/awslabs/amazon-kinesis-producer/blob/master/aggregation-format.md) MAGIC='\xf3\x89\x9a\xc2'<line_sep>DIGEST_SIZE=md5.digest_size<line_sep>#Kinesis Limits #(https://docs.aws.amazon.com/kinesis/latest/APIReference/API_PutRecord.html) MAX_BYTES_PER_RECORD=1024<times>1024# 1 MB
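# A minimal sketch (not part of the library above) of how the two protocol constants are meant to be
# used, assuming the aggregated-record layout described at the aggregation-format link above:
# a 4-byte magic prefix, a protobuf-encoded body, then an MD5 digest of that body. The record bytes
# passed in are hypothetical.
import hashlib

KPL_MAGIC = b'\xf3\x89\x9a\xc2'              # same value as MAGIC above, as bytes
KPL_DIGEST_SIZE = hashlib.md5().digest_size  # 16, same as DIGEST_SIZE above


def split_aggregated_record(record_bytes):
    """Return the protobuf body of an aggregated record after verifying prefix and checksum."""
    if not record_bytes.startswith(KPL_MAGIC):
        raise ValueError("not a KPL-aggregated record: magic prefix missing")
    body = record_bytes[len(KPL_MAGIC):-KPL_DIGEST_SIZE]
    if hashlib.md5(body).digest() != record_bytes[-KPL_DIGEST_SIZE:]:
        raise ValueError("aggregated record failed its MD5 integrity check")
    return body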
# Copyright 2017 Workiva # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # http://www.apache.org/licenses/LICENSE-2.0 # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. <import_from_stmt>base64 b64encode<import_from_stmt>struct pack_into<import_stmt>unittest<import_from_stmt>mock Mock<import_from_stmt>mock patch<import_from_stmt>thrift.transport.TTransport TTransportException<import_from_stmt>frugal.exceptions TTransportExceptionType<import_from_stmt>frugal.transport.http_transport THttpTransport<line_sep>@patch('frugal.transport.http_transport.requests')<class_stmt>TestTHttpTransport(unittest.TestCase)<block_start><def_stmt>test_request self mock_requests<block_start>url='http://localhost:8080/frugal'<line_sep>headers={'foo':'bar'}<line_sep>resp=Mock(status_code=200)<line_sep>response=b'response'<line_sep>buff=bytearray(4)<line_sep>pack_into('!I' buff 0 len(response))<line_sep>resp.content=b64encode(buff+response)<line_sep>mock_requests.post.return_value=resp<def_stmt>get_headers <block_start><return>{'baz':'qux'}<block_end>tr=THttpTransport(url headers=headers get_headers=get_headers response_capacity=500)<line_sep>tr.open()<line_sep>self.assertTrue(tr.isOpen())<line_sep>data=b'helloworld'<line_sep>buff=bytearray(4)<line_sep>pack_into('!I' buff 0 len(data))<line_sep>encoded_frame=b64encode(buff+data)<line_sep>tr.write(data)<line_sep>tr.flush()<line_sep>mock_requests.post.assert_called_once_with(url data=encoded_frame timeout=<none> headers={'foo':'bar' 'baz':'qux' 'Content-Length':'20' 'Content-Type':'application/x-frugal' 'Content-Transfer-Encoding':'base64' 'User-Agent':'Python/TBaseHttpTransport' 'x-frugal-payload-limit':'500'})<line_sep>resp=tr.read(len(response))<line_sep>self.assertEqual(response resp)<line_sep>tr.close()<line_sep>self.assertTrue(tr.isOpen())<block_end># open/close are no-ops <def_stmt>test_request_timeout self mock_requests<block_start>url='http://localhost:8080/frugal'<line_sep>headers={'foo':'bar'}<line_sep>resp=Mock(status_code=200)<line_sep>response=b'response'<line_sep>buff=bytearray(4)<line_sep>pack_into('!I' buff 0 len(response))<line_sep>resp.content=b64encode(buff+response)<line_sep>mock_requests.post.return_value=resp<def_stmt>get_headers <block_start><return>{'baz':'qux'}<block_end>tr=THttpTransport(url headers=headers get_headers=get_headers response_capacity=500)<line_sep>tr.open()<line_sep>self.assertTrue(tr.isOpen())<line_sep>data=b'helloworld'<line_sep>buff=bytearray(4)<line_sep>pack_into('!I' buff 0 len(data))<line_sep>encoded_frame=b64encode(buff+data)<line_sep>tr.set_timeout(5000)<line_sep>tr.write(data)<line_sep>tr.flush()<line_sep>mock_requests.post.assert_called_once_with(url data=encoded_frame timeout=5 headers={'foo':'bar' 'baz':'qux' 'Content-Length':'20' 'Content-Type':'application/x-frugal' 'Content-Transfer-Encoding':'base64' 'User-Agent':'Python/TBaseHttpTransport' 'x-frugal-payload-limit':'500'})<line_sep>resp=tr.read(len(response))<line_sep>self.assertEqual(response resp)<line_sep>tr.close()<line_sep>self.assertTrue(tr.isOpen())<block_end># open/close are no-ops <def_stmt>test_flush_no_body self 
mock_requests<block_start>url='http://localhost:8080/frugal'<line_sep>tr=THttpTransport(url)<line_sep>tr.flush()<line_sep>self.assertFalse(mock_requests.post.called)<block_end><def_stmt>test_flush_bad_response self mock_requests<block_start>url='http://localhost:8080/frugal'<line_sep>resp=Mock(status_code=500)<line_sep>mock_requests.post.return_value=resp<line_sep>tr=THttpTransport(url)<line_sep>data=b'helloworld'<line_sep>buff=bytearray(4)<line_sep>pack_into('!I' buff 0 len(data))<line_sep>encoded_frame=b64encode(buff+data)<line_sep>tr.write(data)<with_stmt>self.assertRaises(TTransportException)<block_start>tr.flush()<block_end>mock_requests.post.assert_called_once_with(url data=encoded_frame timeout=<none> headers={'Content-Length':'20' 'Content-Type':'application/x-frugal' 'Content-Transfer-Encoding':'base64' 'User-Agent':'Python/TBaseHttpTransport'})<block_end><def_stmt>test_flush_bad_oneway_response self mock_requests<block_start>url='http://localhost:8080/frugal'<line_sep>resp=Mock(status_code=200)<line_sep>buff=bytearray(4)<line_sep>pack_into('!I' buff 0 10)<line_sep>resp.content=b64encode(buff)<line_sep>mock_requests.post.return_value=resp<line_sep>tr=THttpTransport(url)<line_sep>data=b'helloworld'<line_sep>buff=bytearray(4)<line_sep>pack_into('!I' buff 0 len(data))<line_sep>encoded_frame=b64encode(buff+data)<line_sep>tr.write(data)<with_stmt>self.assertRaises(TTransportException)<block_start>tr.flush()<block_end>mock_requests.post.assert_called_once_with(url data=encoded_frame timeout=<none> headers={'Content-Length':'20' 'Content-Type':'application/x-frugal' 'Content-Transfer-Encoding':'base64' 'User-Agent':'Python/TBaseHttpTransport'})<block_end><def_stmt>test_flush_oneway self mock_requests<block_start>url='http://localhost:8080/frugal'<line_sep>resp=Mock(status_code=200)<line_sep>buff=bytearray(4)<line_sep>pack_into('!I' buff 0 0)<line_sep>resp.content=b64encode(buff)<line_sep>mock_requests.post.return_value=resp<line_sep>tr=THttpTransport(url)<line_sep>data=b'helloworld'<line_sep>buff=bytearray(4)<line_sep>pack_into('!I' buff 0 len(data))<line_sep>encoded_frame=b64encode(buff+data)<line_sep>tr.write(data)<line_sep>tr.flush()<line_sep>mock_requests.post.assert_called_once_with(url data=encoded_frame timeout=<none> headers={'Content-Length':'20' 'Content-Type':'application/x-frugal' 'Content-Transfer-Encoding':'base64' 'User-Agent':'Python/TBaseHttpTransport'})<line_sep>resp=tr.read(10)<line_sep>self.assertEqual(b'' resp)<block_end><def_stmt>test_write_limit_exceeded self mock_requests<block_start>url='http://localhost:8080/frugal'<line_sep>resp=Mock(status_code=200)<line_sep>buff=bytearray(4)<line_sep>pack_into('!I' buff 0 0)<line_sep>resp.content=b64encode(buff)<line_sep>mock_requests.post.return_value=resp<line_sep>tr=THttpTransport(url request_capacity=5)<line_sep>data=b'helloworld'<with_stmt>self.assertRaises(TTransportException)<as>cm<block_start>tr.write(data)<block_end>self.assertEqual(TTransportExceptionType.REQUEST_TOO_LARGE cm.exception.type)<line_sep>self.assertFalse(mock_requests.post.called)<block_end><block_end>
#Interpolation search is an improved version of binary search.
#On uniformly distributed sorted data its average time complexity is O(log(log n)), compared to O(log n) for binary search (the worst case degrades to O(n)).
#Following is the code of interpolation search:
# Python program to implement interpolation search
#Variable naming:
""" 1) lys - our input array
2) val - the element we are searching for
3) index - the probable index of the search element. This is computed to be a higher value when val is closer in value to the element at the end of the array (lys[high]), and lower when val is closer in value to the element at the start of the array (lys[low])
4) low - the starting index of the array
5) high - the last index of the array"""<def_stmt>InterpolationSearch lys val<block_start>low=0<line_sep>high=(len(lys)-1)<while_stmt>low<le>high<and>val<ge>lys[low]<and>val<le>lys[high]<block_start>index=low+int(((float(high-low)/(lys[high]-lys[low]))<times>(val-lys[low])))<if_stmt>lys[index]<eq>val<block_start><return>index<block_end><if_stmt>lys[index]<l>val<block_start>low=index+1<block_end><else_stmt><block_start>high=index-1<block_end><block_end><return>-1<block_end>print(InterpolationSearch([1 2 3 4 5 6 7 8] 6))<line_sep>
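# A worked instance of the probe formula above (same list as the call above, so it assumes
# InterpolationSearch is in scope): for lys = [1, 2, 3, 4, 5, 6, 7, 8] and val = 6 we get low = 0,
# high = 7, so index = 0 + int((7 / (8 - 1)) * (6 - 1)) = 5 and lys[5] == 6 -- the element is found
# in a single probe, where plain binary search needs two comparisons for the same query.
assert InterpolationSearch([1, 2, 3, 4, 5, 6, 7, 8], 6) == 5
assert InterpolationSearch([1, 2, 3, 4, 5, 6, 7, 8], 9) == -1  # values outside [lys[low], lys[high]] return -1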
# -*- coding: utf-8 -*- """ .. invisible: _ _ _____ _ _____ _____ | | | | ___| | | ___/ ___| | | | | |__ | | | |__ \ `--. | | | | __|| | | __| `--. \ \ \_/ / |___| |___| |___/\__/ / \___/\____/\_____|____/\____/ Created on Apr 13, 2015 BLAS class to use with ocl backend. โ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆ Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE file distributed with this work for additional information regarding copyright ownership. The ASF licenses this file to you under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. โ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆ """<import_from_stmt>cuda4py.blas CUBLAS_OP_N CUBLAS_OP_T<import_stmt>numpy<import_stmt>opencl4py.blas<as>clblas<import_stmt>os<import_stmt>threading<import_stmt>weakref<import_from_stmt>zope.interface implementer<import_from_stmt>veles.accelerated_units AcceleratedUnit IOpenCLUnit<import_from_stmt>veles.config root<import_from_stmt>veles.dummy DummyWorkflow<import_from_stmt>veles.logger Logger<import_from_stmt>veles.numpy_ext roundup<line_sep>@implementer(IOpenCLUnit)<class_stmt>Builder(AcceleratedUnit)<block_start>"""Dummy unit for building OpenCL kernels. """<def_stmt>__init__ self workflow **kwargs<block_start>super(Builder self).__init__(workflow **kwargs)<line_sep>self.source=kwargs["source"]<line_sep>self.defines=kwargs["defines"]<line_sep>self.kernel_name=kwargs["kernel_name"]<line_sep>self.cache_file_name=kwargs["cache_file_name"]<line_sep>self.dtype=kwargs["dtype"]<block_end>@property<def_stmt>kernel self<block_start><return>self._kernel_<block_end><def_stmt>ocl_init self<block_start>self.sources_[self.source]={}<line_sep>self.build_program(self.defines self.cache_file_name self.dtype)<line_sep>self.assign_kernel(self.kernel_name)<block_end><def_stmt>ocl_run self<block_start><pass><block_end><block_end><class_stmt>OCLBLAS(Logger)<block_start>"""Class with BLAS functionality similar to CUBLAS. It uses CLBLAS when available or custom kernels otherwise. 
"""<line_sep>@staticmethod<def_stmt>attach_to_device device<block_start><if_stmt>device.blas<is><none><block_start>device.blas=OCLBLAS(device)<block_end><block_end><def_stmt>__init__ self device<block_start>super(OCLBLAS self).__init__()<line_sep>self._lock_=threading.Lock()<line_sep>self._device=weakref.ref(device)<line_sep>self.kernels={}<line_sep>self._const_i=numpy.zeros(3 dtype=numpy.uint64)<try_stmt><block_start><if_stmt>(root.common.engine.ocl.clBLAS<is><not><true><or>root.common.engine.precision_level<g>0)<block_start><raise>ValueError()<block_end><if_stmt>"CLBLAS_STORAGE_PATH"<not><in>os.environ<block_start>found=<false><for_stmt>dirnme root.common.engine.device_dirs<block_start><for_stmt>path,_,files os.walk(dirnme)<block_start><for_stmt>f files<block_start><if_stmt>f.endswith(".kdb")<block_start>found=<true><line_sep>os.environ["CLBLAS_STORAGE_PATH"]=path<line_sep><break><block_end><block_end><if_stmt>found<block_start><break><block_end><block_end><if_stmt>found<block_start><break><block_end><block_end><block_end>self.blas=clblas.CLBLAS()<line_sep>self._sgemm=self.clblas_sgemm<line_sep>self._dgemm=self.clblas_dgemm<line_sep>self.debug("Using clBLAS for matrix multiplication")<block_end><except_stmt>(OSError RuntimeError ValueError)<block_start>self._sgemm=self.veles_gemm<line_sep>self._dgemm=self.veles_gemm<line_sep>self.debug("Using Veles OpenCL kernels for matrix multiplication")<block_end><block_end>@property<def_stmt>device self<block_start><return>self._device()<block_end>@staticmethod<def_stmt>gemm dtype<block_start><if_stmt>dtype<eq>numpy.float32<block_start><return>OCLBLAS.sgemm<block_end><if_stmt>dtype<eq>numpy.float64<block_start><return>OCLBLAS.dgemm<block_end><raise>ValueError("Invalid dtype %s"%dtype)<block_end><def_stmt>sgemm self transA transB rowsCountA columnCountB commonSideLength alpha A B beta C offsetA=0 offsetB=0 offsetC=0<block_start><return>self._sgemm(transA transB rowsCountA columnCountB commonSideLength alpha A B beta C offsetA=offsetA offsetB=offsetB offsetC=offsetC)<block_end><def_stmt>dgemm self transA transB rowsCountA columnCountB commonSideLength alpha A B beta C offsetA=0 offsetB=0 offsetC=0<block_start><return>self._dgemm(transA transB rowsCountA columnCountB commonSideLength alpha A B beta C offsetA=offsetA offsetB=offsetB offsetC=offsetC)<block_end><def_stmt>clblas_sgemm self transA transB rowsCountA columnCountB commonSideLength alpha A B beta C offsetA=0 offsetB=0 offsetC=0<block_start>"""Does a matrix multiplication like in CUBLAS using clBLAS. Matricies are assumed to be tightly packed and stored like in CUBLAS. Single precision (float) version. """<line_sep>self.blas.sgemm((self.device.queue_ ) clblas.clblasColumnMajor transA transB rowsCountA columnCountB commonSideLength alpha A B beta C offsetA=offsetA offsetB=offsetB offsetC=offsetC)<block_end><def_stmt>clblas_dgemm self transA transB rowsCountA columnCountB commonSideLength alpha A B beta C offsetA=0 offsetB=0 offsetC=0<block_start>"""Does a matrix multiplication like in CUBLAS using clBLAS. Matricies are assumed to be tightly packed and stored like in CUBLAS. Double precision (double) version. 
"""<line_sep>self.blas.dgemm((self.device.queue_ ) clblas.clblasColumnMajor transA transB rowsCountA columnCountB commonSideLength alpha A B beta C offsetA=offsetA offsetB=offsetB offsetC=offsetC)<block_end><def_stmt>veles_gemm self transA transB rowsCountA columnCountB commonSideLength alpha A B beta C offsetA=0 offsetB=0 offsetC=0<block_start>"""Does a matrix multiplication like in CUBLAS using custom kernel. Matricies are assumed to be tightly packed and stored like in CUBLAS. """<with_stmt>self._lock_<block_start>self._veles_gemm(transA transB rowsCountA columnCountB commonSideLength alpha A B beta C offsetA offsetB offsetC)<block_end><block_end><def_stmt>_veles_gemm self transA transB rowsCountA columnCountB commonSideLength alpha A B beta C offsetA offsetB offsetC<block_start>dtype=alpha.dtype<line_sep>key=(transA transB rowsCountA columnCountB commonSideLength dtype)<line_sep>krn_info=self.kernels.get(key)<if_stmt>krn_info<is><none><block_start>block_size,vector_opt=self.device.device_info.get_kernel_bs_vo(kernel="matrix_multiplication" dtype=dtype)<line_sep>defines={"BLOCK_SIZE":block_size "VECTOR_OPT":int(bool(vector_opt)) "B_WIDTH":rowsCountA "A_WIDTH":columnCountB "AB_COMMON":commonSideLength}<if_stmt>transB<eq>CUBLAS_OP_T<block_start>defines["A_COL"]=1<block_end><else_stmt><block_start><assert_stmt>transB<eq>CUBLAS_OP_N<block_end><if_stmt>transA<eq>CUBLAS_OP_N<block_start>defines["B_COL"]=1<block_end><else_stmt><block_start><assert_stmt>transA<eq>CUBLAS_OP_T<block_end>global_size=(roundup(rowsCountA block_size) roundup(columnCountB block_size))<line_sep>local_size=(block_size block_size)<line_sep>w=DummyWorkflow()<line_sep>builder=Builder(w source="gemm" defines=defines kernel_name="gemm" cache_file_name=("veles_gemm_%s"%"_".join(str(x)<for>x key)) dtype=dtype)<line_sep>builder.initialize(self.device)<line_sep>krn_info=(builder.kernel global_size local_size)<line_sep>self.kernels[key]=krn_info<del_stmt>builder<del_stmt>w<block_end># Set the constants and execute the kernel krn=krn_info[0]<line_sep>self._const_i[0:3]=offsetA offsetB offsetC<line_sep># Our kernel stores output in row-major order, so swap A and B krn.set_args(B A C alpha beta self._const_i[1:2] self._const_i[0:1] self._const_i[2:3])<line_sep>global_size=krn_info[1]<line_sep>local_size=krn_info[2]<line_sep>self.device.queue_.execute_kernel(krn global_size local_size need_event=<false>)<block_end><block_end>
<import_from_stmt>selene.support.shared browser<import_from_stmt>selene.support.conditions be<import_from_stmt>selene.support.conditions have<line_sep>browser.open('http://google.com')<line_sep>browser.element('input[name="q"]').should(be.blank).type('Envydust').press_enter()<line_sep>
"""Tests for general GPU support"""<import_stmt>unittest<import_from_stmt>common gpu_test<class_stmt>TestPycuda(unittest.TestCase)<block_start>@gpu_test<def_stmt>test_pycuda self<block_start><import_stmt>pycuda.driver<line_sep>pycuda.driver.init()<line_sep>gpu_name=pycuda.driver.Device(0).name()<line_sep>self.assertNotEqual(0 len(gpu_name))<block_end><block_end>
""" Module for reading Gaussian cube files, which have become one of the standard file formats for volumetric data in quantum chemistry and solid state physics software packages (VASP being an exception). Some basic info about cube files (abridged info from http://paulbourke.net/dataformats/cube/ by <NAME>) The file consists of a header which includes the atom information and the size as well as orientation of the volumetric data. The first two lines of the header are comments. The third line has the number of atoms included in the file followed by the position of the origin of the volumetric data. The next three lines give the number of voxels along each axis (x, y, z) followed by the axis vector. The last section in the header is one line for each atom consisting of 5 numbers, the first is the atom number, the second is the charge, and the last three are the x,y,z coordinates of the atom center. The volumetric data is straightforward, one floating point number for each volumetric element. Example In the following example the volumetric data is a 40 by 40 by 40 grid, each voxel is 0.283459 units wide and the volume is aligned with the coordinate axis. There are three atoms. CPMD CUBE FILE. OUTER LOOP: X, MIDDLE LOOP: Y, INNER LOOP: Z 3 0.000000 0.000000 0.000000 40 0.283459 0.000000 0.000000 40 0.000000 0.283459 0.000000 40 0.000000 0.000000 0.283459 8 0.000000 5.570575 5.669178 5.593517 1 0.000000 5.562867 5.669178 7.428055 1 0.000000 7.340606 5.669178 5.111259 -0.25568E-04 0.59213E-05 0.81068E-05 0.10868E-04 0.11313E-04 0.35999E-05 : : : : : : : : : : : : : : : : : : In this case there will be 40 x 40 x 40 floating point values : : : : : : : : : : : : : : : : : : """<import_stmt>numpy<as>np<import_from_stmt>monty.io zopen<import_from_stmt>pymatgen.core.sites Site<import_from_stmt>pymatgen.core.structure Structure<import_from_stmt>pymatgen.core.units bohr_to_angstrom<line_sep># TODO: can multiprocessing be incorporated without causing issues during drone assimilation? <class_stmt>Cube<block_start>""" Class to read Gaussian cube file formats for volumetric data. Cube files are, by default, written in atomic units, and this class assumes that convention. """<def_stmt>__init__ self fname<block_start>""" Initialize the cube object and store the data as self.data Args: fname (str): filename of the cube to read """<line_sep>f=zopen(fname "rt")<line_sep># skip header lines <for_stmt>i range(2)<block_start>f.readline()<block_end># number of atoms followed by the position of the origin of the volumetric data line=f.readline().split()<line_sep>self.natoms=int(line[0])<line_sep>self.origin=np.array(list(map(float line[1:])))<line_sep># The number of voxels along each axis (x, y, z) followed by the axis vector. 
line=f.readline().split()<line_sep>self.NX=int(line[0])<line_sep>self.X=np.array([bohr_to_angstrom<times>float(l)<for>l line[1:]])<line_sep>self.dX=np.linalg.norm(self.X)<line_sep>line=f.readline().split()<line_sep>self.NY=int(line[0])<line_sep>self.Y=np.array([bohr_to_angstrom<times>float(l)<for>l line[1:]])<line_sep>self.dY=np.linalg.norm(self.Y)<line_sep>line=f.readline().split()<line_sep>self.NZ=int(line[0])<line_sep>self.Z=np.array([bohr_to_angstrom<times>float(l)<for>l line[1:]])<line_sep>self.dZ=np.linalg.norm(self.Z)<line_sep>self.voxel_volume=abs(np.dot(np.cross(self.X self.Y) self.Z))<line_sep>self.volume=abs(np.dot(np.cross(self.X.dot(self.NZ) self.Y.dot(self.NY)) self.Z.dot(self.NZ)))<line_sep># The last section in the header is one line for each atom consisting of 5 numbers, # the first is the atom number, second is charge, # the last three are the x,y,z coordinates of the atom center. self.sites=[]<for_stmt>i range(self.natoms)<block_start>line=f.readline().split()<line_sep>self.sites.append(Site(line[0] np.multiply(bohr_to_angstrom list(map(float line[2:])))))<block_end>self.structure=Structure(lattice=[self.X<times>self.NX self.Y<times>self.NY self.Z<times>self.NZ] species=[s.specie<for>s self.sites] coords=[s.coords<for>s self.sites] coords_are_cartesian=<true> )<line_sep># Volumetric data self.data=np.reshape(np.array(f.read().split()).astype(float) (self.NX self.NY self.NZ))<block_end><def_stmt>mask_sphere self radius cx cy cz<block_start>""" Create a mask for a sphere with radius=radius, centered at cx, cy, cz. Args: radius: (flaot) of the mask (in Angstroms) cx, cy, cz: (float) the fractional coordinates of the center of the sphere """<line_sep>dx,dy,dz=(np.floor(radius/np.linalg.norm(self.X)).astype(int) np.floor(radius/np.linalg.norm(self.Y)).astype(int) np.floor(radius/np.linalg.norm(self.Z)).astype(int) )<line_sep>gcd=max(np.gcd(dx dy) np.gcd(dy dz) np.gcd(dx dz))<line_sep>sx,sy,sz=dx<floordiv>gcd dy<floordiv>gcd dz<floordiv>gcd<line_sep>r=min(dx dy dz)<line_sep>x0,y0,z0=int(np.round(self.NX<times>cx)) int(np.round(self.NY<times>cy)) int(np.round(self.NZ<times>cz))<line_sep>centerx,centery,centerz=self.NX<floordiv>2 self.NY<floordiv>2 self.NZ<floordiv>2<line_sep>a=np.roll(self.data (centerx-x0 centery-y0 centerz-z0))<line_sep>i,j,k=np.indices(a.shape sparse=<true>)<line_sep>a=np.sqrt((sx<times>i-sx<times>centerx)<power>2+(sy<times>j-sy<times>centery)<power>2+(sz<times>k-sz<times>centerz)<power>2)<line_sep>indices=a<g>r<line_sep>a[indices]=0<line_sep><return>a<block_end><def_stmt>get_atomic_site_averages self atomic_site_radii<block_start>""" Get the average value around each atomic site. Args: atomic_site_radii (dict): dictionary determining the cutoff radius (in Angstroms) for averaging around atomic sites (e.g. {'Li': 0.97, 'B': 0.77, ...}. If not provided, then the returns: Array of site averages, [Average around site 1, Average around site 2, ...] """<line_sep><return>[self._get_atomic_site_average(s atomic_site_radii[s.species_string])<for>s self.structure.sites]<block_end><def_stmt>_get_atomic_site_average self site radius<block_start>""" Helper function for get_atomic_site_averages. 
Args: site: Site in the structure around which to get the average radius: (float) the atomic_site_radius (in Angstroms) for given atomic species returns: Average of the data within the sphere around the atomic site """<line_sep>mask=self.mask_sphere(radius *site.frac_coords)<line_sep><return>np.sum(self.data<times>mask)/np.count_nonzero(mask)<block_end><def_stmt>get_atomic_site_totals self atomic_site_radii<block_start>""" Get the integrated total in a sphere around each atomic site. Args: atomic_site_radii (dict): dictionary determining the cutoff radius (in Angstroms) for integrating around atomic sites (e.g. {'Li': 0.97, 'B': 0.77, ...}). returns: Array of site totals, [Total around site 1, Total around site 2, ...] """<line_sep><return>[self._get_atomic_site_total(s atomic_site_radii[s.species_string])<for>s self.structure.sites]<block_end><def_stmt>_get_atomic_site_total self site radius<block_start>""" Helper function for get_atomic_site_totals. Args: site: Site in the structure around which to get the total radius: (float) the atomic_site_radius (in Angstroms) for given atomic species returns: Total of the data within the sphere around the atomic site """<line_sep>mask=self.mask_sphere(radius *site.frac_coords)<line_sep><return>np.sum(self.data<times>mask)<block_end><def_stmt>get_axis_grid self ind<block_start>""" Modified from pymatgen.io.vasp.outputs Returns the grid for a particular axis. Args: ind (int): Axis index. """<line_sep>ng=self.data.shape<line_sep>num_pts=ng[ind]<line_sep>lengths=self.structure.lattice.abc<line_sep><return>[i/num_pts<times>lengths[ind]<for>i range(num_pts)]<block_end><def_stmt>get_average_along_axis self ind<block_start>""" Modified from pymatgen.io.vasp.outputs Get the average of the volumetric data along a certain axis direction. For example, useful for visualizing Hartree Potentials. Args: ind (int): Index of axis. Returns: Average along the axis """<line_sep>ng=self.data.shape<line_sep>m=self.data<if_stmt>ind<eq>0<block_start>total=np.sum(np.sum(m axis=1) 1)<block_end><elif_stmt>ind<eq>1<block_start>total=np.sum(np.sum(m axis=0) 1)<block_end><else_stmt><block_start>total=np.sum(np.sum(m axis=0) 0)<block_end><return>total/ng[(ind+1)%3]/ng[(ind+2)%3]<block_end><block_end>
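# --- Usage sketch (added for illustration; not part of the original module) ---
# A minimal example of how the Cube class above might be driven. The file name
# "water.cube" and the 1.0 Angstrom per-species radii are assumptions of this
# sketch; any valid Gaussian cube file and sensible radii would do. The
# function is only defined, not called.
def demo_cube(path="water.cube"):
    cube = Cube(path)
    print(cube.structure)                        # Structure rebuilt from the cube header
    print("voxel volume (A^3):", cube.voxel_volume)
    # planar average of the data along the z axis, e.g. for plotting potentials
    z_grid = cube.get_axis_grid(2)
    z_profile = cube.get_average_along_axis(2)
    # average and integrated value of the data in a sphere around each site
    radii = {site.species_string: 1.0 for site in cube.structure}
    print(cube.get_atomic_site_averages(radii))
    print(cube.get_atomic_site_totals(radii))
    return z_grid, z_profile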
# Node holds a single element of the list <class_stmt>Node<block_start><def_stmt>__init__ self data<block_start>self.data=data<line_sep>self.next=<none><block_end><block_end># SinglyLinkedList keeps a head pointer and a current-position pointer (pos); # insert() places the new node after pos and delete() removes the node pos points to <class_stmt>SinglyLinkedList<block_start><def_stmt>__init__ self<block_start>self.head=<none><line_sep>self.pos=<none><block_end><def_stmt>insert self data<block_start>newNode=Node(data)<if_stmt>self.head<eq><none><block_start>self.head=newNode<line_sep>self.pos=newNode<block_end><elif_stmt>self.pos<eq><none><block_start># pos has run off the end of the list; refuse instead of crashing print("pos is out of list - reset before inserting")<block_end><else_stmt><block_start>tmp=self.pos.next<line_sep>self.pos.next=newNode<line_sep>self.pos=newNode<if_stmt>tmp<ne><none><block_start>newNode.next=tmp<block_end><block_end><block_end><def_stmt>delete self<block_start><if_stmt>self.pos<eq><none><block_start># nothing to delete: the list is empty or pos is out of the list print("Nothing to delete")<block_end><elif_stmt>self.pos<eq>self.head<block_start>self.pos=self.pos.next<del_stmt>self.head<line_sep>self.head=self.pos<block_end><else_stmt><block_start>tmp=self.head<while_stmt>tmp.next<ne>self.pos<block_start>tmp=tmp.next<block_end>tmp.next=self.pos.next<del_stmt>self.pos<line_sep>self.pos=tmp<block_end><block_end><def_stmt>reset self<block_start>self.pos=self.head<block_end><def_stmt>advance self<block_start><if_stmt>self.pos<ne><none><block_start>self.pos=self.pos.next<block_end><block_end><def_stmt>out_of_list self<block_start><if_stmt>self.pos<eq><none><block_start><return><true><block_end><else_stmt><block_start><return><false><block_end><block_end><def_stmt>pos_position self<block_start><if_stmt><not>(self.out_of_list())<block_start><return>self.pos.data<block_end><else_stmt><block_start><return>"pos is out of list"<block_end><block_end><def_stmt>print_list self<block_start><if_stmt>self.head<eq><none><block_start>print("List is empty")<block_end><else_stmt><block_start>tmp=self.head<while_stmt>tmp<ne><none><block_start>print(tmp.data)<line_sep>tmp=tmp.next<block_end><block_end><block_end><block_end>run=<true><line_sep>sll=SinglyLinkedList()<while_stmt>run<block_start>print("\ni [insert] insert element")<line_sep>print("d [delete] delete element")<line_sep>print("o [out] out_of_list ?")<line_sep>print("p [pos] current position of pos")<line_sep>print("r [reset] pos-pointer")<line_sep>print("a [advance] pos-pointer")<line_sep>print("pr [print] print list")<line_sep>print("q [quit] program")<line_sep>choice=input()<if_stmt>choice<eq>"i"<block_start>num=input("Enter Data for insertion: ")<line_sep>sll.insert(num)<block_end><elif_stmt>choice<eq>"d"<block_start>sll.delete()<block_end><elif_stmt>choice<eq>"o"<block_start>print(sll.out_of_list())<block_end><elif_stmt>choice<eq>"r"<block_start>sll.reset()<block_end><elif_stmt>choice<eq>"a"<block_start>sll.advance()<block_end><elif_stmt>choice<eq>"p"<block_start>print(sll.pos_position())<block_end><elif_stmt>choice<eq>"q"<block_start>run=<false><block_end><elif_stmt>choice<eq>"pr"<block_start>sll.print_list()<block_end><else_stmt><block_start>print("Invalid Input")<block_end><block_end>""" Sample I/O: i [insert] insert element d [delete] delete element o [out] out_of_list ? p [pos] current position of pos r [reset] pos-pointer a [advance] pos-pointer pr [print] print list q [quit] program i (Userinput) Enter Data for insertion: 10 i [insert] insert element d [delete] delete element o [out] out_of_list ? p [pos] current position of pos r [reset] pos-pointer a [advance] pos-pointer pr [print] print list q [quit] program i (Userinput) Enter Data for insertion: 20 i [insert] insert element d [delete] delete element o [out] out_of_list ?
p [pos] current position of pos r [reset] pos-pointer a [advance] pos-pointer pr [print] print list q [quit] program pr (Userinput) 10 20 i [insert] insert element d [delete] delete element o [out] out_of_list ? p [pos] current position of pos r [reset] pos-pointer a [advance] pos-pointer pr [print] print list q [quit] program p (Userinput) 20 i [insert] insert element d [delete] delete element o [out] out_of_list ? p [pos] current position of pos r [reset] pos-pointer a [advance] pos-pointer pr [print] print list q [quit] program a (Userinput) i [insert] insert element d [delete] delete element o [out] out_of_list ? p [pos] current position of pos r [reset] pos-pointer a [advance] pos-pointer pr [print] print list q [quit] program o (Userinput) True i [insert] insert element d [delete] delete element o [out] out_of_list ? p [pos] current position of pos r [reset] pos-pointer a [advance] pos-pointer pr [print] print list q [quit] program r (Userinput) i [insert] insert element d [delete] delete element o [out] out_of_list ? p [pos] current position of pos r [reset] pos-pointer a [advance] pos-pointer pr [print] print list q [quit] program p (Userinput) 10 i [insert] insert element d [delete] delete element o [out] out_of_list ? p [pos] current position of pos r [reset] pos-pointer a [advance] pos-pointer pr [print] print list q [quit] program d (Userinput) i [insert] insert element d [delete] delete element o [out] out_of_list ? p [pos] current position of pos r [reset] pos-pointer a [advance] pos-pointer pr [print] print list q [quit] program pr (Userinput) 20 i [insert] insert element d [delete] delete element o [out] out_of_list ? p [pos] current position of pos r [reset] pos-pointer a [advance] pos-pointer pr [print] print list q [quit] program p (Userinput) 20 i [insert] insert element d [delete] delete element o [out] out_of_list ? p [pos] current position of pos r [reset] pos-pointer a [advance] pos-pointer pr [print] print list q [quit] program q (Userinput) Time Complexity: Insert: O(1) Delete: O(N) PrintList: O(N) Everything Else: O(1) """<line_sep>
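# --- Usage sketch (added for illustration; not part of the original program) ---
# The menu loop above drives SinglyLinkedList interactively and blocks on
# input(), so this demo is only defined, not called; invoke
# demo_singly_linked_list() manually to see the same operations performed
# programmatically. Expected output is noted in the comments.
def demo_singly_linked_list():
    lst = SinglyLinkedList()
    for value in (10, 20, 30):
        lst.insert(value)       # each insert places the new node after pos
    lst.print_list()            # 10 20 30
    lst.reset()                 # pos -> head (10)
    lst.advance()               # pos -> 20
    lst.delete()                # unlinks 20; pos moves back to 10
    lst.print_list()            # 10 30
    print(lst.pos_position())   # 10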
""" First fit is the simplest of all the storage allocation strategies. Here the list of storages is searched and as soon as a free storage block of size >= N is found , the pointer of that block is sent to the calling program after retaining the residue space.Thus, for example, for a block of size 5k , 2k memory will be sent to the caller . The below program is a simulation of the first fit strategy using array data structure. """<line_sep># Block class is used as the fixed memory blocks for allocation <class_stmt>Block<block_start><def_stmt>__init__ self<block_start>self.size=0<line_sep>self.ID=0<line_sep>self.fragment=0<block_end><block_end># process class is used for allocating memory for the requesting processes <class_stmt>process<block_start><def_stmt>__init__ self<block_start>self.Num=0<line_sep>self.size=0<line_sep>self.block=<none><block_end><block_end># initialiseBlocks function initializes all the blocks with sizes and id <def_stmt>initialiseBlocks arr sizes n<block_start><for_stmt>i range(n)<block_start>arr[i].size=sizes[i]<line_sep>arr[i].fragment=sizes[i]<line_sep>arr[i].ID=i+1<block_end><block_end># printResult function prints the result of the memory allocation strategy <def_stmt>printResult arr2 numOfProcess<block_start>print("Process No Process Size Block ID Block Size Block Fragment")<for_stmt>i range(numOfProcess)<block_start>print(str(arr2[i].Num)+" "+str(arr2[i].size)+" "+str(arr2[i].block.ID)+" "+str(arr2[i].block.size)+" "+str(arr2[i].block.fragment))<block_end><block_end># firstfit function allocates memory to processes using firstfit allocation algorithm <def_stmt>firstfit arr sizes n arr2 numOfProcess<block_start>initialiseBlocks(arr sizes n)<for_stmt>i range(numOfProcess)<block_start><for_stmt>j range(n)<block_start><if_stmt>arr2[i].size<le>arr[j].fragment<block_start>arr[j].fragment<augsub>arr2[i].size<line_sep>arr2[i].block=Block()<line_sep>arr2[i].block.size=arr[j].size<line_sep>arr2[i].block.ID=arr[j].ID<line_sep>arr2[i].block.fragment=arr[j].fragment<line_sep><break><block_end><block_end><block_end>print("First Fit Allocation")<line_sep>printResult(arr2 numOfProcess)<block_end># Driver code <if_stmt>__name__<eq>"__main__"<block_start>sizes=[60 20 12 35 64 42 31 35 40 50]<line_sep>arr=[]<for_stmt>i range(10)<block_start>arr.append(Block())<block_end>initialiseBlocks(arr sizes 10)<line_sep>numOfProcess=int(input("Enter the number of process for memory to be allocated : "))<line_sep>print("Enter the sizes required by the processes in the order of requirement")<line_sep>psize=list(map(int input().split(" ")))<line_sep>arr2=[]<for_stmt>i range(numOfProcess)<block_start>arr2.append(process())<line_sep>arr2[i].size=psize[i]<line_sep>arr2[i].Num=i+1<block_end>firstfit(arr sizes 10 arr2 numOfProcess)<block_end>""" Sample I/O: Enter the number of process for memory to be allocated : 5 Enter the sizes required by the processes in the order of requirement 15 12 13 20 11 First Fit Allocation Process No Process Size Block ID Block Size Block Fragment 1 15 1 60 45 2 12 1 60 33 3 13 1 60 20 4 20 1 60 0 5 11 2 20 9 Time complexity : O(n) space complexity : O(n) """<line_sep>
""" Copyright (C) 2021 <NAME> This file is part of QuantLib, a free-software/open-source library for financial quantitative analysts and developers - http://quantlib.org/ QuantLib is free software: you can redistribute it and/or modify it under the terms of the QuantLib license. You should have received a copy of the license along with this program; if not, please email <<EMAIL>>. The license is also available online at <http://quantlib.org/license.shtml>. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the license for more details. """<import_stmt>unittest<import_stmt>QuantLib<as>ql<class_stmt>CurrencyTest(unittest.TestCase)<block_start><def_stmt>test_default_currency_constructor self<block_start>"""Testing default currency constructor"""<line_sep>fail_msg="Failed to create default currency."<line_sep>default_ccy=ql.Currency()<line_sep>self.assertTrue(default_ccy.empty() fail_msg)<block_end><def_stmt>test_eur_constructor self<block_start>"""Testing EUR constructor"""<line_sep>fail_msg="Failed to create EUR currency."<line_sep>eur=ql.EURCurrency()<line_sep>self.assertFalse(eur.empty() fail_msg)<block_end><def_stmt>test_bespoke_currency_constructor self<block_start>"""Testing bespoke currency constructor"""<line_sep>fail_msg="Failed to create bespoke currency."<line_sep>custom_ccy=ql.Currency("CCY" "CCY" 100 "#" "" 100 ql.Rounding() "")<line_sep>self.assertFalse(custom_ccy.empty() fail_msg)<block_end><block_end><if_stmt>__name__<eq>'__main__'<block_start>print('testing QuantLib '+ql.__version__)<line_sep>suite=unittest.TestSuite()<line_sep>suite.addTest(unittest.makeSuite(CurrencyTest 'test'))<line_sep>unittest.TextTestRunner(verbosity=2).run(suite)<block_end>
''' Translate expressions to SMT import format. '''<import_from_stmt>Z3 Z3<class_stmt>UnsatisfiableException(Exception)<block_start><pass><block_end># NOTE(JY): Think about if the solver needs to know about everything for # negative constraints. I don't think so because enough things should be # concrete that this doesn't matter. <def_stmt>solve constraints defaults desiredVars# NOTE(JY): This is just a sketch of what should go on... # Implement defaults by adding values to the model and #for v in jeeveslib.env.envVars: # jeeveslib.solver.push() # solver.assertConstraint(v = z3.BoolVal(True)) # if (solver.check() == solver.Unsat): # jeeveslib.solver.pop() # Now get the variables back from the solver by evaluating all # variables in question... # Now return the new environment... #return NotImplemented <block_start>solver=Z3()<line_sep>result={}<for_stmt>constraint constraints<block_start><if_stmt>constraint.type<ne>bool<block_start><raise>ValueError("constraints must be bools")<block_end>solver.boolExprAssert(constraint)<block_end><if_stmt><not>solver.check()<block_start><raise>UnsatisfiableException("Constraints not satisfiable")<block_end><for_stmt>default defaults<block_start>solver.push()<if_stmt>default.type<ne>bool<block_start><raise>ValueError("defaults must be bools")<block_end>solver.boolExprAssert(default)<if_stmt><not>solver.isSatisfiable()<block_start>solver.pop()<block_end><block_end><assert_stmt>solver.check()<line_sep>result={}<for_stmt>var desiredVars<block_start>result[var]=solver.evaluate(var)<assert_stmt>(result[var]<is><true>)<or>(result[var]<is><false>)<block_end><return>result<block_end>
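# --- Illustration (added; not part of the original module) ---
# The push/assert/check/pop pattern that solve() applies to its defaults can
# be written directly against the standard z3 Python bindings (assuming the
# z3-solver package is installed). This bypasses the project's Z3 wrapper and
# the expression objects with a .type attribute that solve() expects; it only
# illustrates the greedy treatment of soft defaults.
def greedy_defaults_demo():
    import z3
    x, y = z3.Bools("x y")
    solver = z3.Solver()
    solver.add(z3.Not(z3.And(x, y)))      # hard constraint: not both at once
    if solver.check() != z3.sat:
        raise UnsatisfiableException("Constraints not satisfiable")
    for default in (x, y):                # soft defaults, tried in order
        solver.push()
        solver.add(default)
        if solver.check() != z3.sat:
            solver.pop()                  # drop a default that conflicts
    assert solver.check() == z3.sat
    model = solver.model()
    # x is kept (True); y conflicts with the hard constraint and is dropped
    return {str(v): z3.is_true(model.eval(v, model_completion=True)) for v in (x, y)}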