import re

from django.contrib.auth import get_user_model
from django.contrib.auth.models import Group
from rest_framework import status
from rest_framework.views import APIView
from rest_framework.generics import ListAPIView, RetrieveAPIView
from rest_framework.response import Response
from rest_framework.permissions import IsAuthenticated

from .services import get_group_or_404
from .REST_permissions import (
    group_member,
    IsGroupOwner,
    IsGroupAdmin,
    IsGroupAdminOrMemberReadOnly,
    IsGroupOwnerOrPublicReadOnly,
)
from .REST_serializers import (
    PublicGroupSerializer,
    GroupMetaUpdateSerializer,
    PrivateUserSerializer,
    PrivateGroupSerializer,
)
from .patterns import group_name_pattern
from .models import GroupMeta

User = get_user_model()
group_name_regex = re.compile('^' + group_name_pattern + '$')


class AccountView(RetrieveAPIView):
    """
    List authenticated user details.
    """
    permission_classes = [IsAuthenticated]
    serializer_class = PrivateUserSerializer

    def get_object(self):
        return self.request.user


class AccountGroupsView(APIView):
    """
    get:
    List authenticated user groups.

    post:
    Create a new group context.
    """
    permission_classes = [IsAuthenticated]

    def get(self, request):
        serializer = PrivateGroupSerializer(request.user.groups.all(), many=True)
        return Response(serializer.data)

    def post(self, request, **kwargs):
        group_name = request.data.get('name', None)

        # Ensure a group name was specified
        if not group_name:
            return Response({'errors': ['No Group Name Specified']}, status=status.HTTP_400_BAD_REQUEST)
        # Verify the group does not already exist
        elif Group.objects.filter(name=group_name).exists():
            return Response({'errors': ['Group Already Exists']}, status=status.HTTP_400_BAD_REQUEST)
        # Verify a user with the same name does not already exist
        elif User.objects.filter(username=group_name).exists():
            return Response({'errors': ['Group Already Exists']}, status=status.HTTP_400_BAD_REQUEST)
        # Verify the group name is allowed in URL routing
        elif not group_name_regex.match(group_name):
            return Response({'errors': ['Invalid Group Name']}, status=status.HTTP_400_BAD_REQUEST)
        # Create the group and its group meta
        else:
            group_object = Group.objects.create(name=group_name)
            group_object.save()
            group_meta = GroupMeta.objects.create(group=group_object, owner=request.user)
            group_meta.save()
            request.user.groups.add(group_object)

        return Response(status=status.HTTP_201_CREATED)


class GroupsView(ListAPIView):
    """
    List details on all groups.
    """
    permission_classes = [IsAuthenticated]
    serializer_class = PublicGroupSerializer
    queryset = Group.objects.all()


class GroupDetailsView(APIView):
    """
    get:
    Retrieve details on a specific group.

    patch:
    Update group.

    delete:
    Delete specified group.
    """
    permission_classes = [IsGroupOwnerOrPublicReadOnly]

    def get(self, request, group_name):
        group_object = get_group_or_404(group_name)
        if group_member(request):
            serializer = PrivateGroupSerializer(group_object)
        else:
            serializer = PublicGroupSerializer(group_object)
        return Response(serializer.data)

    def patch(self, request, group_name):
        group_object = get_group_or_404(group_name)
        serializer = GroupMetaUpdateSerializer(group_object.groupmeta, data=request.data, partial=True)
        if serializer.is_valid():
            serializer.save()
        return Response(serializer.data)

    def delete(self, request, group_name):
        group_object = get_group_or_404(group_name)
        if group_name != request.user.username:
            group_object.delete()
        return Response(status=status.HTTP_204_NO_CONTENT)


class GroupMembersView(APIView):
    """
    get:
    List group members.

    patch:
    Add new specified group members.

    delete:
    Remove specified members from group.
    """
    permission_classes = [IsGroupOwner]

    def get(self, request, group_name):
        group_object = get_group_or_404(group_name)
        return Response(group_object.user_set.all().values_list('username', flat=True))

    def patch(self, request, group_name):
        group_object = get_group_or_404(group_name)
        group_owner_name = group_object.groupmeta.owner.username

        # Gather the submitted member name list
        try:
            new_member_names = request.data.getlist('member')
        except AttributeError:
            new_member_names = request.data.get('member', list())
        if not isinstance(new_member_names, list):
            new_member_names = [new_member_names]

        # Ensure the group owner doesn't get inadvertently processed
        if group_owner_name in new_member_names:
            new_member_names = list(set(new_member_names))
            new_member_names.remove(group_owner_name)

        # Gather user objects for processing
        new_members = User.objects.filter(username__in=new_member_names)

        # Add members
        for member in new_members:
            member.groups.add(group_object)

        return Response(group_object.user_set.all().values_list('username', flat=True))

    def delete(self, request, group_name):
        group_object = get_group_or_404(group_name)
        group_owner_name = group_object.groupmeta.owner.username

        # Gather user objects for processing
        member_name_list = request.query_params.getlist('member')
        removed_members = User.objects.filter(username__in=member_name_list)

        # Remove from group (never the owner)
        for member in removed_members.exclude(username=group_owner_name):
            group_object.groupmeta.admins.remove(member)
            member.groups.remove(group_object)

        return Response(group_object.user_set.all().values_list('username', flat=True))


class GroupAdminsView(APIView):
    """
    get:
    List group admins.

    patch:
    Add new specified group admins.

    delete:
    Remove specified admins from group.
    """
    permission_classes = [IsGroupOwner]

    def get(self, request, group_name):
        group_object = get_group_or_404(group_name)
        return Response(group_object.groupmeta.admins.all().values_list('username', flat=True))

    def patch(self, request, group_name):
        group_object = get_group_or_404(group_name)
        group_owner_name = group_object.groupmeta.owner.username

        # Gather the submitted admin name list
        try:
            new_admin_names = request.data.getlist('admin')
        except AttributeError:
            new_admin_names = request.data.get('admin', list())
        if not isinstance(new_admin_names, list):
            new_admin_names = [new_admin_names]

        # Ensure the group owner doesn't get inadvertently processed
        if group_owner_name in new_admin_names:
            new_admin_names = list(set(new_admin_names))
            new_admin_names.remove(group_owner_name)

        # Gather user objects for processing
        new_admin_users = User.objects.filter(username__in=new_admin_names)

        # Add admins (and make sure they are group members)
        for admin in new_admin_users:
            admin.groups.add(group_object)
            group_object.groupmeta.admins.add(admin)

        return Response(group_object.groupmeta.admins.all().values_list('username', flat=True))

    def delete(self, request, group_name):
        group_object = get_group_or_404(group_name)
        admin_name_list = request.query_params.getlist('admin')

        # Gather user objects for processing
        removed_admins = User.objects.filter(username__in=admin_name_list)

        # Remove admin rights (membership is untouched)
        for admin in removed_admins:
            group_object.groupmeta.admins.remove(admin)

        return Response(group_object.groupmeta.admins.all().values_list('username', flat=True))


class GroupSourcesView(APIView):
    """
    get:
    List group sources.

    patch:
    Add new specified group sources.

    delete:
    Remove specified sources from group.
    """
    permission_classes = [IsGroupAdminOrMemberReadOnly]

    def get(self, request, group_name):
        group_object = get_group_or_404(group_name)
        group_metadata = group_object.groupmeta
        return Response(group_metadata.source_options)

    def patch(self, request, group_name):
        group_object = get_group_or_404(group_name)
        group_metadata = group_object.groupmeta

        # Gather the submitted source list
        try:
            new_sources = request.data.getlist('source')
        except AttributeError:
            new_sources = request.data.get('source', list())
        if not isinstance(new_sources, list):
            new_sources = [new_sources]

        for source in new_sources:
            if source not in group_metadata.source_options:
                group_metadata.source_options.append(source)
        group_metadata.save()

        return Response(group_metadata.source_options)

    def delete(self, request, group_name):
        group_object = get_group_or_404(group_name)
        group_metadata = group_object.groupmeta
        source_list = request.query_params.getlist('source')

        for source in source_list:
            try:
                group_metadata.source_options.remove(source)
            except ValueError:
                pass
        group_metadata.save()

        return Response(group_metadata.source_options)


class GroupCategoriesView(APIView):
    """
    get:
    List group categories.

    patch:
    Add new specified group categories.

    delete:
    Remove specified categories from group.
    """
    permission_classes = [IsGroupAdminOrMemberReadOnly]

    def get(self, request, group_name):
        group_object = get_group_or_404(group_name)
        group_metadata = group_object.groupmeta
        return Response(group_metadata.category_options)

    def patch(self, request, group_name):
        group_object = get_group_or_404(group_name)
        group_metadata = group_object.groupmeta

        # Gather the submitted category list
        try:
            new_categories = request.data.getlist('category')
        except AttributeError:
            new_categories = request.data.get('category', list())
        if not isinstance(new_categories, list):
            new_categories = [new_categories]

        for category in new_categories:
            if category not in group_metadata.category_options:
                group_metadata.category_options.append(category)
        group_metadata.save()

        return Response(group_metadata.category_options)

    def delete(self, request, group_name):
        group_object = get_group_or_404(group_name)
        group_metadata = group_object.groupmeta
        category_list = request.query_params.getlist('category')

        for category in category_list:
            try:
                group_metadata.category_options.remove(category)
            except ValueError:
                pass
        group_metadata.save()

        return Response(group_metadata.category_options)
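# Because the views above are plain DRF APIViews, they can be exercised directly with
# APIRequestFactory, without wiring up URLs. The sketch below is illustrative and not part
# of the original project: the import path `myapp.views` is an assumption, the chosen group
# name must also satisfy the project's `group_name_pattern`, and pytest-django is assumed
# for database access.
import pytest
from django.contrib.auth import get_user_model
from rest_framework.test import APIRequestFactory, force_authenticate

from myapp.views import AccountGroupsView  # assumed module path for the views above


@pytest.mark.django_db
def test_create_group_adds_creator_as_member():
    user = get_user_model().objects.create_user(username="alice", password="x")
    factory = APIRequestFactory()

    # POST a new group name to the account-groups endpoint; the URL path is arbitrary
    # here since the view is called directly.
    request = factory.post("/account/groups/", {"name": "researchteam"})
    force_authenticate(request, user=user)
    response = AccountGroupsView.as_view()(request)

    assert response.status_code == 201
    # The view adds the creator to the newly created group.
    assert user.groups.filter(name="researchteam").exists()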
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
# Copyright (c) 2020 TsinghuaAI Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Many thanks for following projects.
# https://github.com/TsinghuaAI/CPM-Generate
# https://github.com/jm12138/CPM-Generate-Paddle

import sys
import argparse

import numpy as np
import paddle
from paddlenlp.transformers import GPTModel, GPTForGreedyGeneration
from paddlenlp.transformers import GPTChineseTokenizer, GPTTokenizer
from paddlenlp.utils.log import logger

MODEL_CLASSES = {
    "gpt-cn": (GPTForGreedyGeneration, GPTChineseTokenizer),
    "gpt": (GPTForGreedyGeneration, GPTTokenizer),
}


class Demo:
    def __init__(self,
                 model_type="gpt-cn",
                 model_name_or_path="gpt-cpm-large-cn",
                 max_predict_len=32):
        model_class, tokenizer_class = MODEL_CLASSES[model_type]
        self.tokenizer = tokenizer_class.from_pretrained(model_name_or_path)
        logger.info('Loading the model parameters, please wait...')
        self.model = model_class.from_pretrained(
            model_name_or_path,
            max_predict_len=max_predict_len,
            eol_token_id=self.tokenizer.eol_token_id)
        self.model.eval()
        logger.info('Model loaded.')

    # prediction function
    def predict(self, text):
        ids = self.tokenizer(text)["input_ids"]
        input_ids = paddle.to_tensor(np.array(ids).reshape(1, -1).astype('int64'))
        out = self.model(input_ids)
        out = [int(x) for x in out.numpy().reshape([-1])]
        logger.info(self.tokenizer.convert_ids_to_string(out))

    # One-shot example
    def ask_question_cn(self, question):
        self.predict("问题:中国的首都是哪里?答案:北京。\n问题:%s 答案:" % question)

    def ask_question_en(self, question):
        self.predict(
            "Question: Where is the capital of China? Answer: Beijing. \n Question:%s Answer:"
            % question)

    # dictation poetry
    def dictation_poetry_cn(self, front):
        self.predict('''默写古诗: 大漠孤烟直,长河落日圆。\n%s''' % front)


if __name__ == "__main__":
    if len(sys.argv) > 1 and sys.argv[1] == "gpt-cn":
        demo = Demo("gpt-cn", "gpt-cpm-large-cn")
        demo.ask_question_cn("苹果的CEO是谁?")
        demo.dictation_poetry_cn("举杯邀明月,")
    else:
        demo = Demo("gpt", "gpt2-medium-en")
        demo.ask_question_en("Who is the CEO of Apple?")
# -*- coding: utf-8 -*-
"""
"""
from flask import (Blueprint, request, render_template, flash, g, session,
                   redirect, url_for, abort)

from flask_reddit import db
from flask_reddit.users.models import User
from flask_reddit.frontends.views import get_subreddits
from flask_reddit.users.decorators import requires_login

mod = Blueprint('users', __name__, url_prefix='/users')


@mod.before_request
def before_request():
    g.user = None
    if 'user_id' in session:
        g.user = User.query.get(session['user_id'])


@mod.route('/<username>/')
def home_page(username=None):
    if not username:
        abort(404)
    user = User.query.filter_by(username=username).first()
    if not user:
        abort(404)
    return render_template('users/profile.html', user=g.user,
                           current_user=user, subreddits=get_subreddits())
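# The blueprint above only becomes reachable once it is registered on a Flask app. A minimal
# wiring sketch follows; it is illustrative only, and the module path used in the import is an
# assumption about where this file lives inside the flask_reddit package, not how flask_reddit
# actually constructs its application.
from flask import Flask


def create_app():
    app = Flask(__name__)
    # ... load config, initialise `db`, register other blueprints ...
    from flask_reddit.users.views import mod as users_module  # assumed path of this file
    app.register_blueprint(users_module)  # serves /users/<username>/
    return app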
from typing import Any, Callable, Dict, List, Optional, Tuple

import flwr as fl
import tensorflow as tf


def main() -> None:
    # Load and compile model for
    # 1. server-side parameter initialization
    # 2. server-side parameter evaluation
    model = tf.keras.applications.EfficientNetB0(
        input_shape=(32, 32, 3), weights=None, classes=10
    )
    model.compile("adam", "sparse_categorical_crossentropy", metrics=["accuracy"])

    # Create strategy
    strategy = fl.server.strategy.FedAvg(
        fraction_fit=0.3,
        fraction_eval=0.2,
        min_fit_clients=3,
        min_eval_clients=2,
        min_available_clients=10,
        eval_fn=get_eval_fn(model),
        on_fit_config_fn=fit_config,
        on_evaluate_config_fn=evaluate_config,
        initial_parameters=fl.common.weights_to_parameters(model.get_weights()),
    )

    # Start Flower server for four rounds of federated learning
    fl.server.start_server("[::]:8080", config={"num_rounds": 4}, strategy=strategy)


def get_eval_fn(model):
    """Return an evaluation function for server-side evaluation."""

    # Load data and model here to avoid the overhead of doing it in `evaluate` itself
    (x_train, y_train), _ = tf.keras.datasets.cifar10.load_data()

    # Use the last 5k training examples as a validation set
    x_val, y_val = x_train[45000:50000], y_train[45000:50000]

    # The `evaluate` function will be called after every round
    def evaluate(
        weights: fl.common.Weights,
    ) -> Optional[Tuple[float, Dict[str, fl.common.Scalar]]]:
        model.set_weights(weights)  # Update model with the latest parameters
        loss, accuracy = model.evaluate(x_val, y_val)
        return loss, {"accuracy": accuracy}

    return evaluate


def fit_config(rnd: int):
    """Return training configuration dict for each round.

    Keep batch size fixed at 32, perform two rounds of training with one
    local epoch, increase to two local epochs afterwards.
    """
    config = {
        "batch_size": 32,
        "local_epochs": 1 if rnd < 2 else 2,
    }
    return config


def evaluate_config(rnd: int):
    """Return evaluation configuration dict for each round.

    Perform five local evaluation steps on each client (i.e., use five
    batches) during rounds one to three, then increase to ten local
    evaluation steps.
    """
    val_steps = 5 if rnd < 4 else 10
    return {"val_steps": val_steps}


if __name__ == "__main__":
    main()
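# For completeness, a client counterpart is sketched below. It is not part of the original
# file: it targets the same older flwr API as the server script above (fl.client.NumPyClient
# with start_numpy_client) and assumes every client trains an identical EfficientNetB0 on its
# local CIFAR-10 data so that parameter shapes match. The config keys it reads
# ("batch_size", "local_epochs", "val_steps") are the ones produced by fit_config and
# evaluate_config above.
import flwr as fl
import tensorflow as tf


def run_client() -> None:
    # Same architecture as the server-side model so weights are interchangeable.
    model = tf.keras.applications.EfficientNetB0(
        input_shape=(32, 32, 3), weights=None, classes=10
    )
    model.compile("adam", "sparse_categorical_crossentropy", metrics=["accuracy"])

    # In a real deployment each client would load only its own partition;
    # the full dataset is used here for brevity.
    (x_train, y_train), (x_test, y_test) = tf.keras.datasets.cifar10.load_data()

    class CifarClient(fl.client.NumPyClient):
        def get_parameters(self):
            return model.get_weights()

        def fit(self, parameters, config):
            model.set_weights(parameters)
            model.fit(
                x_train,
                y_train,
                batch_size=config["batch_size"],
                epochs=config["local_epochs"],
                verbose=0,
            )
            return model.get_weights(), len(x_train), {}

        def evaluate(self, parameters, config):
            model.set_weights(parameters)
            loss, accuracy = model.evaluate(
                x_test, y_test, steps=config["val_steps"], verbose=0
            )
            return loss, len(x_test), {"accuracy": accuracy}

    # Connect to the server started by main() above.
    fl.client.start_numpy_client("[::]:8080", client=CifarClient())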
""" https://gist.github.com/stephane/08b649ea818bd9dce2ff33903ba94aba Maps a request to a tenant using the first part of the hostname. For example: foo.example.com:8000 -> foo bar.baz.example.com -> bar This is a simple example; you should probably verify tenant names are valid against a whitelist before returning them, since the returned tenant name will be issued in a `SET search_path TO` SQL query. Take care to create the corresponding schema first, with ``psql``: db=# CREATE SCHEMA foo; You can set the tenant in command line with: TENANT_NAME=foo ./manage.my migrate With PostgreSQL, it's possible to have complex setups where some tables are public so you can set the schema to: SET search_path TO foo,public; To have an access to public and foo tables at the same time. https://www.postgresql.org/docs/current/static/ddl-schemas.html """<import_stmt>re<import_from_stmt>db_multitenant mapper<line_sep>HOST_REGEX=re.compile(r'(\w+)[\.|$]')<class_stmt>TenantMapper(mapper.TenantMapper)<block_start><def_stmt>get_tenant_name self request<block_start>"""Takes the first part of the hostname as the tenant"""<line_sep>hostname=request.get_host()<line_sep>match=HOST_REGEX.search(hostname)<line_sep>tenant_name=match.groups()[0].lower()<if>match<else><none><line_sep># Compare against a whitelist or fallback to 'public'? <if_stmt><not>tenant_name<block_start><raise>ValueError('Unable to find the tenant name from `%s`.'%hostname)<block_end><return>tenant_name<block_end><def_stmt>get_db_name self request tenant_name# Still use the DB name of settings <block_start><return><none><block_end><def_stmt>get_cache_prefix self request tenant_name db_name<block_start>"""The arguments db_name and tenant_name are provided by the methods of this TenantMapper"""<line_sep><return>'tenant-%s'%tenant_name<block_end><block_end>
<import_stmt>unittest<import_stmt>pytest<import_from_stmt>mock Mock<import_from_stmt>app ServiceContainer<import_from_stmt>app.exceptions ServiceNotFoundException ContainerAlreadyBootedException<class_stmt>PluginsTest(unittest.TestCase)<block_start>@staticmethod<def_stmt>test_register_singleton <block_start>service_container=ServiceContainer()<line_sep>service=Mock()<line_sep>service_container.register_singleton('mock_service' service)<line_sep>service_container.boot()<assert_stmt>service_container.has('mock_service')<is><true><assert_stmt>service_container.get('mock_service')<is>service<block_end>@staticmethod<def_stmt>test_register_decorator <block_start>service_container=ServiceContainer()<line_sep>@service_container.register('test_service')<class_stmt>TestService(object)<block_start><pass><block_end>service_container.boot()<assert_stmt>service_container.has('test_service')<is><true><assert_stmt>isinstance(service_container.get('test_service') TestService)<is><true><block_end>@staticmethod<def_stmt>test_get_service_unknown <block_start>service_container=ServiceContainer()<line_sep>service_container.boot()<with_stmt>pytest.raises(ServiceNotFoundException)<block_start>service_container.get('test_service')<block_end><block_end>@staticmethod<def_stmt>test_register_decorator_args <block_start>service_container=ServiceContainer()<line_sep>another_service=Mock()<line_sep>service_container.register_singleton('another_service' another_service)<line_sep>param_service=Mock()<line_sep>service_container.register_singleton('param_service' param_service)<line_sep>service_container.set_parameter('test_param' 'hello')<line_sep>service_container.set_parameter('service_param' 'param_service')<line_sep>@service_container.register('test_service' ['@another_service' '%test_param%' '%service_param%' 'static'])<class_stmt>TestService(object)<block_start><def_stmt>__init__ self ts_another_service ts_test_param ts_param_service ts_static_val<block_start>self.another_service=ts_another_service<line_sep>self.test_param=ts_test_param<line_sep>self.param_service=ts_param_service<line_sep>self.static_val=ts_static_val<block_end><block_end>service_container.boot()<line_sep>test_service=service_container.get('test_service')<assert_stmt>service_container.has('test_service')<is><true><assert_stmt>isinstance(test_service TestService)<is><true><assert_stmt>test_service.another_service<is>another_service<assert_stmt>test_service.test_param<is>'hello'<assert_stmt>test_service.param_service<is>param_service<assert_stmt>test_service.static_val<is>'static'<block_end>@staticmethod<def_stmt>test_register_decorator_kwargs <block_start>service_container=ServiceContainer()<line_sep>another_service=Mock()<line_sep>service_container.register_singleton('another_service' another_service)<line_sep>param_service=Mock()<line_sep>service_container.register_singleton('param_service' param_service)<line_sep>service_container.set_parameter('test_param' 'hello')<line_sep>service_container.set_parameter('service_param' 'param_service')<line_sep>@service_container.register('test_service' keywordsargs={'ts_another_service':'@another_service' 'ts_test_param':'%test_param%' 'ts_param_service':'%service_param%' 'ts_static_val':'static'})<class_stmt>TestService(object)<block_start><def_stmt>__init__ self ts_another_service=<none> ts_test_param=<none> ts_param_service=<none> 
ts_static_val=<none><block_start>self.another_service=ts_another_service<line_sep>self.test_param=ts_test_param<line_sep>self.param_service=ts_param_service<line_sep>self.static_val=ts_static_val<block_end><block_end>service_container.boot()<line_sep>test_service=service_container.get('test_service')<assert_stmt>service_container.has('test_service')<is><true><assert_stmt>isinstance(test_service TestService)<is><true><assert_stmt>test_service.another_service<is>another_service<assert_stmt>test_service.test_param<is>'hello'<assert_stmt>test_service.param_service<is>param_service<assert_stmt>test_service.static_val<is>'static'<block_end>@staticmethod<def_stmt>test_register_tags <block_start>service_container=ServiceContainer()<line_sep>another_service=Mock()<line_sep>service_container.register_singleton('another_service' another_service tags=['tag_one' 'tag_two' 'tag_three'])<line_sep>@service_container.register('test_service' tags=['tag_one' 'tag_two'])# pylint: disable=unused-variable <class_stmt>TestService(object)<block_start><def_stmt>__init__ self ts_another_service=<none> ts_test_param=<none> ts_param_service=<none> ts_static_val=<none><block_start>self.another_service=ts_another_service<line_sep>self.test_param=ts_test_param<line_sep>self.param_service=ts_param_service<line_sep>self.static_val=ts_static_val<block_end><block_end>service_container.boot()<line_sep>tag_one_services=service_container.get_by_tag('tag_one')<line_sep>tag_two_services=service_container.get_by_tag('tag_two')<line_sep>tag_three_services=service_container.get_by_tag('tag_three')<line_sep>tag_four_services=service_container.get_by_tag('tag_four')<assert_stmt>len(tag_one_services)<is>2<assert_stmt>len(tag_two_services)<is>2<assert_stmt>len(tag_three_services)<is>1<assert_stmt>len(tag_four_services)<is>0<block_end>@staticmethod<def_stmt>test_compiler_pass <block_start>service_container=ServiceContainer()<line_sep>@service_container.register_compiler_pass()# pylint: disable=unused-variable <def_stmt>compiler_pass sc<block_start>sc.set_parameter('compiler_set' 'test')<block_end>service_container.boot()<assert_stmt>service_container.get_parameter('compiler_set')<is>'test'<block_end>@staticmethod<def_stmt>test_compiler_pass_already_booted <block_start>service_container=ServiceContainer()<line_sep>service_container.boot()<with_stmt>pytest.raises(ContainerAlreadyBootedException)<block_start>@service_container.register_compiler_pass()# pylint: disable=unused-variable <def_stmt>compiler_pass sc<block_start>sc.set_parameter('compiler_set' 'test')<block_end><block_end><block_end>@staticmethod<def_stmt>test_boot_already_booted <block_start>service_container=ServiceContainer()<line_sep>service_container.boot()<with_stmt>pytest.raises(ContainerAlreadyBootedException)<block_start>service_container.boot()<block_end><block_end><block_end>
# # Copyright (c) 2021 Airbyte, Inc., all rights reserved. # <import_stmt>json<import_stmt>os<import_stmt>pathlib<import_stmt>re<import_stmt>shutil<import_stmt>tempfile<import_from_stmt>distutils.dir_util copy_tree<import_from_stmt>typing Any Dict<import_stmt>pytest<import_from_stmt>integration_tests.dbt_integration_test DbtIntegrationTest<import_from_stmt>normalization.destination_type DestinationType<import_from_stmt>normalization.transform_catalog.catalog_processor CatalogProcessor<line_sep>temporary_folders=set()<line_sep>dbt_test_utils=DbtIntegrationTest()<line_sep>@pytest.fixture(scope="module" autouse=<true>)<def_stmt>before_all_tests request<block_start>destinations_to_test=dbt_test_utils.get_test_targets()<if_stmt>DestinationType.POSTGRES.value<not><in>destinations_to_test<block_start>destinations_to_test.append(DestinationType.POSTGRES.value)<block_end>dbt_test_utils.set_target_schema("test_ephemeral")<line_sep>dbt_test_utils.change_current_test_dir(request)<line_sep>dbt_test_utils.setup_db(destinations_to_test)<line_sep>os.environ["PATH"]=os.path.abspath("../.venv/bin/")+":"+os.environ["PATH"]<line_sep><yield><line_sep>dbt_test_utils.tear_down_db()<for_stmt>folder temporary_folders<block_start>print(f"Deleting temporary test folder {folder}")<line_sep>shutil.rmtree(folder ignore_errors=<true>)<block_end><block_end>@pytest.fixture<def_stmt>setup_test_path request<block_start>dbt_test_utils.change_current_test_dir(request)<line_sep>print(f"Running from: {pathlib.Path().absolute()}")<line_sep>print(f"Current PATH is: {os.environ['PATH']}")<line_sep><yield><line_sep>os.chdir(request.config.invocation_dir)<block_end>@pytest.mark.parametrize("column_count" [1000])@pytest.mark.parametrize("destination_type" list(DestinationType))<def_stmt>test_destination_supported_limits destination_type:DestinationType column_count:int<block_start><if_stmt>destination_type.value<not><in>dbt_test_utils.get_test_targets()<or>destination_type.value<eq>DestinationType.MYSQL.value# In MySQL, the max number of columns is limited by row size (8KB), # not by absolute column count. It is way fewer than 1000. 
<block_start>pytest.skip(f"Destinations {destination_type} is not in NORMALIZATION_TEST_TARGET env variable (MYSQL is also skipped)")<block_end><if_stmt>destination_type.value<eq>DestinationType.ORACLE.value<block_start>column_count=998<block_end>run_test(destination_type column_count)<block_end>@pytest.mark.parametrize("integration_type, column_count, expected_exception_message" [("Postgres" 1665 "target lists can have at most 1664 entries") ("BigQuery" 2500 "The view is too large.") ("Snowflake" 2000 "Operation failed because soft limit on objects of type 'Column' per table was exceeded.") ("Redshift" 1665 "target lists can have at most 1664 entries") ("MySQL" 250 "Row size too large") ("Oracle" 1001 "ORA-01792: maximum number of columns in a table or view is 1000") ("MSSQL" 1025 "exceeds the maximum of 1024 columns.") ] )<def_stmt>test_destination_failure_over_limits integration_type:str column_count:int expected_exception_message:str setup_test_path<block_start>destination_type=DestinationType.from_string(integration_type)<if_stmt>destination_type.value<not><in>dbt_test_utils.get_test_targets()<block_start>pytest.skip(f"Destinations {destination_type} is not in NORMALIZATION_TEST_TARGET env variable")<block_end>run_test(destination_type column_count expected_exception_message)<block_end><def_stmt>test_empty_streams setup_test_path<block_start>run_test(DestinationType.POSTGRES 0)<block_end><def_stmt>test_stream_with_1_airbyte_column setup_test_path<block_start>run_test(DestinationType.POSTGRES 1)<block_end><def_stmt>run_test destination_type:DestinationType column_count:int expected_exception_message:str=""<block_start><if_stmt>destination_type.value<eq>DestinationType.ORACLE.value# Oracle does not allow changing to random schema <block_start>dbt_test_utils.set_target_schema("test_normalization")<block_end><else_stmt><block_start>dbt_test_utils.set_target_schema("test_ephemeral")<block_end>print("Testing ephemeral")<line_sep>integration_type=destination_type.value<line_sep># Create the test folder with dbt project and appropriate destination settings to run integration tests from test_root_dir=setup_test_dir(integration_type)<line_sep>destination_config=dbt_test_utils.generate_profile_yaml_file(destination_type test_root_dir)<line_sep># generate a catalog and associated dbt models files generate_dbt_models(destination_type test_root_dir column_count)<line_sep># Use destination connector to create empty _airbyte_raw_* tables to use as input for the test <assert_stmt>setup_input_raw_data(integration_type test_root_dir destination_config)<if_stmt>expected_exception_message<block_start><with_stmt>pytest.raises(AssertionError)<block_start>dbt_test_utils.dbt_run(destination_type test_root_dir)<block_end><assert_stmt>search_logs_for_pattern(test_root_dir+"/dbt_output.log" expected_exception_message)<block_end><else_stmt><block_start>dbt_test_utils.dbt_run(destination_type test_root_dir)<block_end><block_end><def_stmt>search_logs_for_pattern log_file:str pattern:str<block_start><with_stmt>open(log_file "r")<as>file<block_start><for_stmt>line file<block_start><if_stmt>re.search(pattern line)<block_start><return><true><block_end><block_end><block_end><return><false><block_end><def_stmt>setup_test_dir integration_type:str<arrow>str<block_start>""" We prepare a clean folder to run the tests from. """<line_sep>test_root_dir=f"{pathlib.Path().joinpath('..' 
'build' 'normalization_test_output' integration_type.lower()).resolve()}"<line_sep>os.makedirs(test_root_dir exist_ok=<true>)<line_sep>test_root_dir=tempfile.mkdtemp(dir=test_root_dir)<line_sep>temporary_folders.add(test_root_dir)<line_sep>shutil.rmtree(test_root_dir ignore_errors=<true>)<line_sep>print(f"Setting up test folder {test_root_dir}")<line_sep>copy_tree("../dbt-project-template" test_root_dir)<if_stmt>integration_type<eq>DestinationType.MSSQL.value<block_start>copy_tree("../dbt-project-template-mysql" test_root_dir)<block_end><elif_stmt>integration_type<eq>DestinationType.MYSQL.value<block_start>copy_tree("../dbt-project-template-mysql" test_root_dir)<block_end><elif_stmt>integration_type<eq>DestinationType.ORACLE.value<block_start>copy_tree("../dbt-project-template-oracle" test_root_dir)<block_end><return>test_root_dir<block_end><def_stmt>setup_input_raw_data integration_type:str test_root_dir:str destination_config:Dict[str Any]<arrow>bool<block_start>""" This should populate the associated "raw" tables from which normalization is reading from when running dbt CLI. """<line_sep>config_file=os.path.join(test_root_dir "destination_config.json")<with_stmt>open(config_file "w")<as>f<block_start>f.write(json.dumps(destination_config))<block_end>commands=["docker" "run" "--rm" "--init" "-v" f"{test_root_dir}:/data" "--network" "host" "-i" f"airbyte/destination-{integration_type.lower()}:dev" "write" "--config" "/data/destination_config.json" "--catalog" "/data/catalog.json" ]<line_sep># Force a reset in destination raw tables <return>dbt_test_utils.run_destination_process("" test_root_dir commands)<block_end><def_stmt>generate_dbt_models destination_type:DestinationType test_root_dir:str column_count:int<block_start>""" This is the normalization step generating dbt models files from the destination_catalog.json taken as input. """<line_sep>output_directory=os.path.join(test_root_dir "models" "generated")<line_sep>shutil.rmtree(output_directory ignore_errors=<true>)<line_sep>catalog_processor=CatalogProcessor(output_directory destination_type)<line_sep>catalog_config={"streams":[{"stream":{"name":dbt_test_utils.generate_random_string(f"stream_with_{column_count}_columns") "json_schema":{"type":["null" "object"] "properties":{} } "supported_sync_modes":["incremental"] "source_defined_cursor":<true> "default_cursor_field":[] } "sync_mode":"incremental" "cursor_field":[] "destination_sync_mode":"overwrite" }]}<if_stmt>column_count<eq>1<block_start>catalog_config["streams"][0]["stream"]["json_schema"]["properties"]["_airbyte_id"]={"type":"integer"}<block_end><else_stmt><block_start><for_stmt>column [dbt_test_utils.random_string(5)<for>_ range(column_count)]<block_start>catalog_config["streams"][0]["stream"]["json_schema"]["properties"][column]={"type":"string"}<block_end><block_end>catalog=os.path.join(test_root_dir "catalog.json")<with_stmt>open(catalog "w")<as>fh<block_start>fh.write(json.dumps(catalog_config))<block_end>catalog_processor.process(catalog "_airbyte_data" dbt_test_utils.target_schema)<block_end>
<import_stmt>os<import_stmt>shutil<import_stmt>time<import_stmt>re<import_stmt>pytest<import_from_stmt>helpers.cluster ClickHouseCluster<import_from_stmt>helpers.test_tools assert_eq_with_retry assert_logs_contain<import_from_stmt>helpers.network PartitionManager<line_sep>test_recover_staled_replica_run=1<line_sep>cluster=ClickHouseCluster(__file__)<line_sep>main_node=cluster.add_instance("main_node" main_configs=["configs/config.xml"] user_configs=["configs/settings.xml"] with_zookeeper=<true> stay_alive=<true> macros={"shard":1 "replica":1} )<line_sep>dummy_node=cluster.add_instance("dummy_node" main_configs=["configs/config.xml"] user_configs=["configs/settings.xml"] with_zookeeper=<true> stay_alive=<true> macros={"shard":1 "replica":2} )<line_sep>competing_node=cluster.add_instance("competing_node" main_configs=["configs/config.xml"] user_configs=["configs/settings.xml"] with_zookeeper=<true> macros={"shard":1 "replica":3} )<line_sep>snapshotting_node=cluster.add_instance("snapshotting_node" main_configs=["configs/config.xml"] user_configs=["configs/settings.xml"] with_zookeeper=<true> macros={"shard":2 "replica":1} )<line_sep>snapshot_recovering_node=cluster.add_instance("snapshot_recovering_node" main_configs=["configs/config.xml"] user_configs=["configs/settings.xml"] with_zookeeper=<true> )<line_sep>all_nodes=[main_node dummy_node competing_node snapshotting_node snapshot_recovering_node ]<line_sep>uuid_regex=re.compile("[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}")<def_stmt>assert_create_query nodes table_name expected<block_start>replace_uuid=<lambda>x:re.sub(uuid_regex "uuid" x)<line_sep>query="show create table {}".format(table_name)<for_stmt>node nodes<block_start>assert_eq_with_retry(node query expected get_result=replace_uuid)<block_end><block_end>@pytest.fixture(scope="module")<def_stmt>started_cluster <block_start><try_stmt><block_start>cluster.start()<line_sep><yield>cluster<block_end><finally_stmt><block_start>cluster.shutdown()<block_end><block_end><def_stmt>test_create_replicated_table started_cluster<block_start>main_node.query("CREATE DATABASE testdb ENGINE = Replicated('/clickhouse/databases/test1', 'shard1', 'replica' || '1');")<line_sep>dummy_node.query("CREATE DATABASE testdb ENGINE = Replicated('/clickhouse/databases/test1', 'shard1', 'replica2');")<assert_stmt>("Explicit zookeeper_path and replica_name are specified"<in>main_node.query_and_get_error("CREATE TABLE testdb.replicated_table (d Date, k UInt64, i32 Int32) "<concat>"ENGINE=ReplicatedMergeTree('/test/tmp', 'r') ORDER BY k PARTITION BY toYYYYMM(d);"))<assert_stmt>("Explicit zookeeper_path and replica_name are specified"<in>main_node.query_and_get_error("CREATE TABLE testdb.replicated_table (d Date, k UInt64, i32 Int32) "<concat>"ENGINE=ReplicatedMergeTree('/test/tmp', 'r', d, k, 8192);"))<assert_stmt>"Old syntax is not allowed"<in>main_node.query_and_get_error("CREATE TABLE testdb.replicated_table (d Date, k UInt64, i32 Int32) "<concat>"ENGINE=ReplicatedMergeTree('/test/tmp/{shard}', '{replica}', d, k, 8192);")<line_sep>main_node.query("CREATE TABLE testdb.replicated_table (d Date, k UInt64, i32 Int32) ENGINE=ReplicatedMergeTree ORDER BY k PARTITION BY toYYYYMM(d);")<line_sep>expected=("CREATE TABLE testdb.replicated_table\\n(\\n `d` Date,\\n `k` UInt64,\\n `i32` Int32\\n)\\n"<concat>"ENGINE = ReplicatedMergeTree(\\'/clickhouse/tables/{uuid}/{shard}\\', \\'{replica}\\')\\n"<concat>"PARTITION BY toYYYYMM(d)\\nORDER BY k\\nSETTINGS index_granularity = 
8192")<line_sep>assert_create_query([main_node dummy_node] "testdb.replicated_table" expected)<line_sep># assert without replacing uuid <assert_stmt>main_node.query("show create testdb.replicated_table")<eq>dummy_node.query("show create testdb.replicated_table")<line_sep>main_node.query("DROP DATABASE testdb SYNC")<line_sep>dummy_node.query("DROP DATABASE testdb SYNC")<block_end>@pytest.mark.parametrize("engine" ["MergeTree" "ReplicatedMergeTree"])<def_stmt>test_simple_alter_table started_cluster engine<block_start>main_node.query("CREATE DATABASE testdb ENGINE = Replicated('/clickhouse/databases/test1', 'shard1', 'replica1');")<line_sep>dummy_node.query("CREATE DATABASE testdb ENGINE = Replicated('/clickhouse/databases/test1', 'shard1', 'replica2');")<line_sep># test_simple_alter_table name="testdb.alter_test_{}".format(engine)<line_sep>main_node.query("CREATE TABLE {} "<concat>"(CounterID UInt32, StartDate Date, UserID UInt32, VisitID UInt32, NestedColumn Nested(A UInt8, S String), ToDrop UInt32) "<concat>"ENGINE = {} PARTITION BY StartDate ORDER BY (CounterID, StartDate, intHash32(UserID), VisitID);".format(name engine))<line_sep>main_node.query("ALTER TABLE {} ADD COLUMN Added0 UInt32;".format(name))<line_sep>main_node.query("ALTER TABLE {} ADD COLUMN Added2 UInt32;".format(name))<line_sep>main_node.query("ALTER TABLE {} ADD COLUMN Added1 UInt32 AFTER Added0;".format(name))<line_sep>main_node.query("ALTER TABLE {} ADD COLUMN AddedNested1 Nested(A UInt32, B UInt64) AFTER Added2;".format(name))<line_sep>main_node.query("ALTER TABLE {} ADD COLUMN AddedNested1.C Array(String) AFTER AddedNested1.B;".format(name))<line_sep>main_node.query("ALTER TABLE {} ADD COLUMN AddedNested2 Nested(A UInt32, B UInt64) AFTER AddedNested1;".format(name))<line_sep>full_engine=(engine<if><not>"Replicated"<in>engine<else>engine+"(\\'/clickhouse/tables/{uuid}/{shard}\\', \\'{replica}\\')")<line_sep>expected=("CREATE TABLE {}\\n(\\n `CounterID` UInt32,\\n `StartDate` Date,\\n `UserID` UInt32,\\n"<concat>" `VisitID` UInt32,\\n `NestedColumn.A` Array(UInt8),\\n `NestedColumn.S` Array(String),\\n"<concat>" `ToDrop` UInt32,\\n `Added0` UInt32,\\n `Added1` UInt32,\\n `Added2` UInt32,\\n"<concat>" `AddedNested1.A` Array(UInt32),\\n `AddedNested1.B` Array(UInt64),\\n `AddedNested1.C` Array(String),\\n"<concat>" `AddedNested2.A` Array(UInt32),\\n `AddedNested2.B` Array(UInt64)\\n)\\n"<concat>"ENGINE = {}\\nPARTITION BY StartDate\\nORDER BY (CounterID, StartDate, intHash32(UserID), VisitID)\\n"<concat>"SETTINGS index_granularity = 8192".format(name full_engine))<line_sep>assert_create_query([main_node dummy_node] name expected)<line_sep># test_create_replica_after_delay competing_node.query("CREATE DATABASE IF NOT EXISTS testdb ENGINE = Replicated('/clickhouse/databases/test1', 'shard1', 'replica3');")<line_sep>name="testdb.alter_test_{}".format(engine)<line_sep>main_node.query("ALTER TABLE {} ADD COLUMN Added3 UInt32;".format(name))<line_sep>main_node.query("ALTER TABLE {} DROP COLUMN AddedNested1;".format(name))<line_sep>main_node.query("ALTER TABLE {} RENAME COLUMN Added1 TO AddedNested1;".format(name))<line_sep>full_engine=(engine<if><not>"Replicated"<in>engine<else>engine+"(\\'/clickhouse/tables/{uuid}/{shard}\\', \\'{replica}\\')")<line_sep>expected=("CREATE TABLE {}\\n(\\n `CounterID` UInt32,\\n `StartDate` Date,\\n `UserID` UInt32,\\n"<concat>" `VisitID` UInt32,\\n `NestedColumn.A` Array(UInt8),\\n `NestedColumn.S` Array(String),\\n"<concat>" `ToDrop` UInt32,\\n `Added0` UInt32,\\n `AddedNested1` UInt32,\\n 
`Added2` UInt32,\\n"<concat>" `AddedNested2.A` Array(UInt32),\\n `AddedNested2.B` Array(UInt64),\\n `Added3` UInt32\\n)\\n"<concat>"ENGINE = {}\\nPARTITION BY StartDate\\nORDER BY (CounterID, StartDate, intHash32(UserID), VisitID)\\n"<concat>"SETTINGS index_granularity = 8192".format(name full_engine))<line_sep>assert_create_query([main_node dummy_node competing_node] name expected)<line_sep>main_node.query("DROP DATABASE testdb SYNC")<line_sep>dummy_node.query("DROP DATABASE testdb SYNC")<line_sep>competing_node.query("DROP DATABASE testdb SYNC")<block_end><def_stmt>get_table_uuid database name<block_start><return>main_node.query(f"SELECT uuid FROM system.tables WHERE database = '{database}' and name = '{name}'").strip()<block_end>@pytest.fixture(scope="module" name="attachable_part")<def_stmt>fixture_attachable_part started_cluster<block_start>main_node.query(f"CREATE DATABASE testdb_attach_atomic ENGINE = Atomic")<line_sep>main_node.query(f"CREATE TABLE testdb_attach_atomic.test (CounterID UInt32) ENGINE = MergeTree ORDER BY (CounterID)")<line_sep>main_node.query(f"INSERT INTO testdb_attach_atomic.test VALUES (123)")<line_sep>main_node.query(f"ALTER TABLE testdb_attach_atomic.test FREEZE WITH NAME 'test_attach'")<line_sep>table_uuid=get_table_uuid("testdb_attach_atomic" "test")<line_sep><return>os.path.join(main_node.path f"database/shadow/test_attach/store/{table_uuid[:3]}/{table_uuid}/all_1_1_0" )<block_end>@pytest.mark.parametrize("engine" ["MergeTree" "ReplicatedMergeTree"])<def_stmt>test_alter_attach started_cluster attachable_part engine<block_start>main_node.query("CREATE DATABASE testdb ENGINE = Replicated('/clickhouse/databases/test1', 'shard1', 'replica1');")<line_sep>dummy_node.query("CREATE DATABASE testdb ENGINE = Replicated('/clickhouse/databases/test1', 'shard1', 'replica2');")<line_sep>name="alter_attach_test_{}".format(engine)<line_sep>main_node.query(f"CREATE TABLE testdb.{name} (CounterID UInt32) ENGINE = {engine} ORDER BY (CounterID)")<line_sep>table_uuid=get_table_uuid("testdb" name)<line_sep># Provide and attach a part to the main node shutil.copytree(attachable_part os.path.join(main_node.path f"database/store/{table_uuid[:3]}/{table_uuid}/detached/all_1_1_0" ) )<line_sep>main_node.query(f"ALTER TABLE testdb.{name} ATTACH PART 'all_1_1_0'")<line_sep># On the main node, data is attached <assert_stmt>main_node.query(f"SELECT CounterID FROM testdb.{name}")<eq>"123\n"<line_sep># On the other node, data is replicated only if using a Replicated table engine <if_stmt>engine<eq>"ReplicatedMergeTree"<block_start><assert_stmt>dummy_node.query(f"SELECT CounterID FROM testdb.{name}")<eq>"123\n"<block_end><else_stmt><block_start><assert_stmt>dummy_node.query(f"SELECT CounterID FROM testdb.{name}")<eq>""<block_end>main_node.query("DROP DATABASE testdb SYNC")<line_sep>dummy_node.query("DROP DATABASE testdb SYNC")<block_end>@pytest.mark.parametrize("engine" ["MergeTree" "ReplicatedMergeTree"])<def_stmt>test_alter_drop_part started_cluster engine<block_start>main_node.query("CREATE DATABASE testdb ENGINE = Replicated('/clickhouse/databases/test1', 'shard1', 'replica1');")<line_sep>dummy_node.query("CREATE DATABASE testdb ENGINE = Replicated('/clickhouse/databases/test1', 'shard1', 'replica2');")<line_sep>table=f"alter_drop_{engine}"<line_sep>part_name="all_0_0_0"<if>engine<eq>"ReplicatedMergeTree"<else>"all_1_1_0"<line_sep>main_node.query(f"CREATE TABLE testdb.{table} (CounterID UInt32) ENGINE = {engine} ORDER BY (CounterID)")<line_sep>main_node.query(f"INSERT INTO testdb.{table} 
VALUES (123)")<if_stmt>engine<eq>"MergeTree"<block_start>dummy_node.query(f"INSERT INTO testdb.{table} VALUES (456)")<block_end>main_node.query(f"ALTER TABLE testdb.{table} DROP PART '{part_name}'")<assert_stmt>main_node.query(f"SELECT CounterID FROM testdb.{table}")<eq>""<if_stmt>engine<eq>"ReplicatedMergeTree"# The DROP operation is still replicated at the table engine level <block_start><assert_stmt>dummy_node.query(f"SELECT CounterID FROM testdb.{table}")<eq>""<block_end><else_stmt><block_start><assert_stmt>dummy_node.query(f"SELECT CounterID FROM testdb.{table}")<eq>"456\n"<block_end>main_node.query("DROP DATABASE testdb SYNC")<line_sep>dummy_node.query("DROP DATABASE testdb SYNC")<block_end>@pytest.mark.parametrize("engine" ["MergeTree" "ReplicatedMergeTree"])<def_stmt>test_alter_detach_part started_cluster engine<block_start>main_node.query("CREATE DATABASE testdb ENGINE = Replicated('/clickhouse/databases/test1', 'shard1', 'replica1');")<line_sep>dummy_node.query("CREATE DATABASE testdb ENGINE = Replicated('/clickhouse/databases/test1', 'shard1', 'replica2');")<line_sep>table=f"alter_detach_{engine}"<line_sep>part_name="all_0_0_0"<if>engine<eq>"ReplicatedMergeTree"<else>"all_1_1_0"<line_sep>main_node.query(f"CREATE TABLE testdb.{table} (CounterID UInt32) ENGINE = {engine} ORDER BY (CounterID)")<line_sep>main_node.query(f"INSERT INTO testdb.{table} VALUES (123)")<if_stmt>engine<eq>"MergeTree"<block_start>dummy_node.query(f"INSERT INTO testdb.{table} VALUES (456)")<block_end>main_node.query(f"ALTER TABLE testdb.{table} DETACH PART '{part_name}'")<line_sep>detached_parts_query=f"SELECT name FROM system.detached_parts WHERE database='testdb' AND table='{table}'"<assert_stmt>main_node.query(detached_parts_query)<eq>f"{part_name}\n"<if_stmt>engine<eq>"ReplicatedMergeTree"# The detach operation is still replicated at the table engine level <block_start><assert_stmt>dummy_node.query(detached_parts_query)<eq>f"{part_name}\n"<block_end><else_stmt><block_start><assert_stmt>dummy_node.query(detached_parts_query)<eq>""<block_end>main_node.query("DROP DATABASE testdb SYNC")<line_sep>dummy_node.query("DROP DATABASE testdb SYNC")<block_end>@pytest.mark.parametrize("engine" ["MergeTree" "ReplicatedMergeTree"])<def_stmt>test_alter_drop_detached_part started_cluster engine<block_start>main_node.query("CREATE DATABASE testdb ENGINE = Replicated('/clickhouse/databases/test1', 'shard1', 'replica1');")<line_sep>dummy_node.query("CREATE DATABASE testdb ENGINE = Replicated('/clickhouse/databases/test1', 'shard1', 'replica2');")<line_sep>table=f"alter_drop_detached_{engine}"<line_sep>part_name="all_0_0_0"<if>engine<eq>"ReplicatedMergeTree"<else>"all_1_1_0"<line_sep>main_node.query(f"CREATE TABLE testdb.{table} (CounterID UInt32) ENGINE = {engine} ORDER BY (CounterID)")<line_sep>main_node.query(f"INSERT INTO testdb.{table} VALUES (123)")<line_sep>main_node.query(f"ALTER TABLE testdb.{table} DETACH PART '{part_name}'")<if_stmt>engine<eq>"MergeTree"<block_start>dummy_node.query(f"INSERT INTO testdb.{table} VALUES (456)")<line_sep>dummy_node.query(f"ALTER TABLE testdb.{table} DETACH PART '{part_name}'")<block_end>main_node.query(f"ALTER TABLE testdb.{table} DROP DETACHED PART '{part_name}'")<line_sep>detached_parts_query=f"SELECT name FROM system.detached_parts WHERE database='testdb' AND table='{table}'"<assert_stmt>main_node.query(detached_parts_query)<eq>""<assert_stmt>dummy_node.query(detached_parts_query)<eq>f"{part_name}\n"<line_sep>main_node.query("DROP DATABASE testdb 
SYNC")<line_sep>dummy_node.query("DROP DATABASE testdb SYNC")<block_end><def_stmt>test_alter_fetch started_cluster<block_start>main_node.query("CREATE DATABASE testdb ENGINE = Replicated('/clickhouse/databases/test1', 'shard1', 'replica1');")<line_sep>dummy_node.query("CREATE DATABASE testdb ENGINE = Replicated('/clickhouse/databases/test1', 'shard1', 'replica2');")<line_sep>main_node.query("CREATE TABLE testdb.fetch_source (CounterID UInt32) ENGINE = ReplicatedMergeTree ORDER BY (CounterID)")<line_sep>main_node.query("CREATE TABLE testdb.fetch_target (CounterID UInt32) ENGINE = ReplicatedMergeTree ORDER BY (CounterID)")<line_sep>main_node.query("INSERT INTO testdb.fetch_source VALUES (123)")<line_sep>table_uuid=get_table_uuid("testdb" "fetch_source")<line_sep>main_node.query(f"ALTER TABLE testdb.fetch_target FETCH PART 'all_0_0_0' FROM '/clickhouse/tables/{table_uuid}/{{shard}}' ")<line_sep>detached_parts_query="SELECT name FROM system.detached_parts WHERE database='testdb' AND table='fetch_target'"<assert_stmt>main_node.query(detached_parts_query)<eq>"all_0_0_0\n"<assert_stmt>dummy_node.query(detached_parts_query)<eq>""<line_sep>main_node.query("DROP DATABASE testdb SYNC")<line_sep>dummy_node.query("DROP DATABASE testdb SYNC")<block_end><def_stmt>test_alters_from_different_replicas started_cluster<block_start>main_node.query("CREATE DATABASE testdb ENGINE = Replicated('/clickhouse/databases/test1', 'shard1', 'replica1');")<line_sep>dummy_node.query("CREATE DATABASE testdb ENGINE = Replicated('/clickhouse/databases/test1', 'shard1', 'replica2');")<line_sep># test_alters_from_different_replicas competing_node.query("CREATE DATABASE testdb ENGINE = Replicated('/clickhouse/databases/test1', 'shard1', 'replica3');")<line_sep>main_node.query("CREATE TABLE testdb.concurrent_test "<concat>"(CounterID UInt32, StartDate Date, UserID UInt32, VisitID UInt32, NestedColumn Nested(A UInt8, S String), ToDrop UInt32) "<concat>"ENGINE = MergeTree(StartDate, intHash32(UserID), (CounterID, StartDate, intHash32(UserID), VisitID), 8192);")<line_sep>main_node.query("CREATE TABLE testdb.dist AS testdb.concurrent_test ENGINE = Distributed(testdb, testdb, concurrent_test, CounterID)")<line_sep>dummy_node.stop_clickhouse(kill=<true>)<line_sep>settings={"distributed_ddl_task_timeout":5}<assert_stmt>("There are 1 unfinished hosts (0 of them are currently active)"<in>competing_node.query_and_get_error("ALTER TABLE testdb.concurrent_test ADD COLUMN Added0 UInt32;" settings=settings ))<line_sep>settings={"distributed_ddl_task_timeout":5 "distributed_ddl_output_mode":"null_status_on_timeout" }<assert_stmt>"shard1|replica2\t\\N\t\\N"<in>main_node.query("ALTER TABLE testdb.concurrent_test ADD COLUMN Added2 UInt32;" settings=settings )<line_sep>settings={"distributed_ddl_task_timeout":5 "distributed_ddl_output_mode":"never_throw" }<assert_stmt>"shard1|replica2\t\\N\t\\N"<in>competing_node.query("ALTER TABLE testdb.concurrent_test ADD COLUMN Added1 UInt32 AFTER Added0;" settings=settings )<line_sep>dummy_node.start_clickhouse()<line_sep>main_node.query("ALTER TABLE testdb.concurrent_test ADD COLUMN AddedNested1 Nested(A UInt32, B UInt64) AFTER Added2;")<line_sep>competing_node.query("ALTER TABLE testdb.concurrent_test ADD COLUMN AddedNested1.C Array(String) AFTER AddedNested1.B;")<line_sep>main_node.query("ALTER TABLE testdb.concurrent_test ADD COLUMN AddedNested2 Nested(A UInt32, B UInt64) AFTER AddedNested1;")<line_sep>expected=("CREATE TABLE testdb.concurrent_test\\n(\\n `CounterID` UInt32,\\n `StartDate` Date,\\n 
`UserID` UInt32,\\n"<concat>" `VisitID` UInt32,\\n `NestedColumn.A` Array(UInt8),\\n `NestedColumn.S` Array(String),\\n `ToDrop` UInt32,\\n"<concat>" `Added0` UInt32,\\n `Added1` UInt32,\\n `Added2` UInt32,\\n `AddedNested1.A` Array(UInt32),\\n"<concat>" `AddedNested1.B` Array(UInt64),\\n `AddedNested1.C` Array(String),\\n `AddedNested2.A` Array(UInt32),\\n"<concat>" `AddedNested2.B` Array(UInt64)\\n)\\n"<concat>"ENGINE = MergeTree(StartDate, intHash32(UserID), (CounterID, StartDate, intHash32(UserID), VisitID), 8192)")<line_sep>assert_create_query([main_node competing_node] "testdb.concurrent_test" expected)<line_sep># test_create_replica_after_delay main_node.query("DROP TABLE testdb.concurrent_test SYNC")<line_sep>main_node.query("CREATE TABLE testdb.concurrent_test "<concat>"(CounterID UInt32, StartDate Date, UserID UInt32, VisitID UInt32, NestedColumn Nested(A UInt8, S String), ToDrop UInt32) "<concat>"ENGINE = ReplicatedMergeTree ORDER BY CounterID;")<line_sep>expected=("CREATE TABLE testdb.concurrent_test\\n(\\n `CounterID` UInt32,\\n `StartDate` Date,\\n `UserID` UInt32,\\n"<concat>" `VisitID` UInt32,\\n `NestedColumn.A` Array(UInt8),\\n `NestedColumn.S` Array(String),\\n `ToDrop` UInt32\\n)\\n"<concat>"ENGINE = ReplicatedMergeTree(\\'/clickhouse/tables/{uuid}/{shard}\\', \\'{replica}\\')\\nORDER BY CounterID\\nSETTINGS index_granularity = 8192")<line_sep>assert_create_query([main_node competing_node] "testdb.concurrent_test" expected)<line_sep>main_node.query("INSERT INTO testdb.dist (CounterID, StartDate, UserID) SELECT number, addDays(toDate('2020-02-02'), number), intHash32(number) FROM numbers(10)")<line_sep># test_replica_restart main_node.restart_clickhouse()<line_sep>expected=("CREATE TABLE testdb.concurrent_test\\n(\\n `CounterID` UInt32,\\n `StartDate` Date,\\n `UserID` UInt32,\\n"<concat>" `VisitID` UInt32,\\n `NestedColumn.A` Array(UInt8),\\n `NestedColumn.S` Array(String),\\n `ToDrop` UInt32\\n)\\n"<concat>"ENGINE = ReplicatedMergeTree(\\'/clickhouse/tables/{uuid}/{shard}\\', \\'{replica}\\')\\nORDER BY CounterID\\nSETTINGS index_granularity = 8192")<line_sep># test_snapshot_and_snapshot_recover snapshotting_node.query("CREATE DATABASE testdb ENGINE = Replicated('/clickhouse/databases/test1', 'shard2', 'replica1');")<line_sep>snapshot_recovering_node.query("CREATE DATABASE testdb ENGINE = Replicated('/clickhouse/databases/test1', 'shard2', 'replica2');")<line_sep>assert_create_query(all_nodes "testdb.concurrent_test" expected)<line_sep>main_node.query("SYSTEM FLUSH DISTRIBUTED testdb.dist")<line_sep>main_node.query("ALTER TABLE testdb.concurrent_test UPDATE StartDate = addYears(StartDate, 1) WHERE 1")<line_sep>res=main_node.query("ALTER TABLE testdb.concurrent_test DELETE WHERE UserID % 2")<assert_stmt>("shard1|replica1"<in>res<and>"shard1|replica2"<in>res<and>"shard1|replica3"<in>res)<assert_stmt>"shard2|replica1"<in>res<and>"shard2|replica2"<in>res<line_sep>expected=("1\t1\tmain_node\n"<concat>"1\t2\tdummy_node\n"<concat>"1\t3\tcompeting_node\n"<concat>"2\t1\tsnapshotting_node\n"<concat>"2\t2\tsnapshot_recovering_node\n")<assert_stmt>(main_node.query("SELECT shard_num, replica_num, host_name FROM system.clusters WHERE cluster='testdb'")<eq>expected)<line_sep># test_drop_and_create_replica main_node.query("DROP DATABASE testdb SYNC")<line_sep>main_node.query("CREATE DATABASE testdb ENGINE = Replicated('/clickhouse/databases/test1', 'shard1', 'replica1');")<line_sep>expected=("CREATE TABLE testdb.concurrent_test\\n(\\n `CounterID` UInt32,\\n `StartDate` Date,\\n 
`UserID` UInt32,\\n"<concat>" `VisitID` UInt32,\\n `NestedColumn.A` Array(UInt8),\\n `NestedColumn.S` Array(String),\\n `ToDrop` UInt32\\n)\\n"<concat>"ENGINE = ReplicatedMergeTree(\\'/clickhouse/tables/{uuid}/{shard}\\', \\'{replica}\\')\\nORDER BY CounterID\\nSETTINGS index_granularity = 8192")<line_sep>assert_create_query([main_node competing_node] "testdb.concurrent_test" expected)<line_sep>assert_create_query(all_nodes "testdb.concurrent_test" expected)<for_stmt>node all_nodes<block_start>node.query("SYSTEM SYNC REPLICA testdb.concurrent_test")<block_end>expected=("0\t2021-02-02\t4249604106\n"<concat>"1\t2021-02-03\t1343103100\n"<concat>"4\t2021-02-06\t3902320246\n"<concat>"7\t2021-02-09\t3844986530\n"<concat>"9\t2021-02-11\t1241149650\n")<line_sep>assert_eq_with_retry(dummy_node "SELECT CounterID, StartDate, UserID FROM testdb.dist ORDER BY CounterID" expected )<line_sep>main_node.query("DROP DATABASE testdb SYNC")<line_sep>dummy_node.query("DROP DATABASE testdb SYNC")<line_sep>competing_node.query("DROP DATABASE testdb SYNC")<line_sep>snapshotting_node.query("DROP DATABASE testdb SYNC")<line_sep>snapshot_recovering_node.query("DROP DATABASE testdb SYNC")<block_end><def_stmt>test_recover_staled_replica started_cluster<block_start>main_node.query("CREATE DATABASE recover ENGINE = Replicated('/clickhouse/databases/recover', 'shard1', 'replica1');")<line_sep>started_cluster.get_kazoo_client("zoo1").set("/clickhouse/databases/recover/logs_to_keep" b"10")<line_sep>dummy_node.query("CREATE DATABASE recover ENGINE = Replicated('/clickhouse/databases/recover', 'shard1', 'replica2');")<line_sep>settings={"distributed_ddl_task_timeout":0}<line_sep>main_node.query("CREATE TABLE recover.t1 (n int) ENGINE=Memory" settings=settings)<line_sep>dummy_node.query("CREATE TABLE recover.t2 (s String) ENGINE=Memory" settings=settings)<line_sep>main_node.query("CREATE TABLE recover.mt1 (n int) ENGINE=MergeTree order by n" settings=settings )<line_sep>dummy_node.query("CREATE TABLE recover.mt2 (n int) ENGINE=MergeTree order by n" settings=settings )<line_sep>main_node.query("CREATE TABLE recover.rmt1 (n int) ENGINE=ReplicatedMergeTree order by n" settings=settings )<line_sep>dummy_node.query("CREATE TABLE recover.rmt2 (n int) ENGINE=ReplicatedMergeTree order by n" settings=settings )<line_sep>main_node.query("CREATE TABLE recover.rmt3 (n int) ENGINE=ReplicatedMergeTree order by n" settings=settings )<line_sep>dummy_node.query("CREATE TABLE recover.rmt5 (n int) ENGINE=ReplicatedMergeTree order by n" settings=settings )<line_sep>main_node.query("CREATE MATERIALIZED VIEW recover.mv1 (n int) ENGINE=ReplicatedMergeTree order by n AS SELECT n FROM recover.rmt1" settings=settings )<line_sep>dummy_node.query("CREATE MATERIALIZED VIEW recover.mv2 (n int) ENGINE=ReplicatedMergeTree order by n AS SELECT n FROM recover.rmt2" settings=settings )<line_sep>main_node.query("CREATE DICTIONARY recover.d1 (n int DEFAULT 0, m int DEFAULT 1) PRIMARY KEY n "<concat>"SOURCE(CLICKHOUSE(HOST 'localhost' PORT 9000 USER 'default' TABLE 'rmt1' PASSWORD '' DB 'recover')) "<concat>"LIFETIME(MIN 1 MAX 10) LAYOUT(FLAT())")<line_sep>dummy_node.query("CREATE DICTIONARY recover.d2 (n int DEFAULT 0, m int DEFAULT 1) PRIMARY KEY n "<concat>"SOURCE(CLICKHOUSE(HOST 'localhost' PORT 9000 USER 'default' TABLE 'rmt2' PASSWORD '' DB 'recover')) "<concat>"LIFETIME(MIN 1 MAX 10) LAYOUT(FLAT())")<for_stmt>table ["t1" "t2" "mt1" "mt2" "rmt1" "rmt2" "rmt3" "rmt5"]<block_start>main_node.query("INSERT INTO recover.{} VALUES 
(42)".format(table))<block_end><for_stmt>table ["t1" "t2" "mt1" "mt2"]<block_start>dummy_node.query("INSERT INTO recover.{} VALUES (42)".format(table))<block_end><for_stmt>table ["rmt1" "rmt2" "rmt3" "rmt5"]<block_start>main_node.query("SYSTEM SYNC REPLICA recover.{}".format(table))<block_end><with_stmt>PartitionManager()<as>pm<block_start>pm.drop_instance_zk_connections(dummy_node)<line_sep>dummy_node.query_and_get_error("RENAME TABLE recover.t1 TO recover.m1")<line_sep>main_node.query_with_retry("RENAME TABLE recover.t1 TO recover.m1" settings=settings)<line_sep>main_node.query_with_retry("ALTER TABLE recover.mt1 ADD COLUMN m int" settings=settings)<line_sep>main_node.query_with_retry("ALTER TABLE recover.rmt1 ADD COLUMN m int" settings=settings)<line_sep>main_node.query_with_retry("RENAME TABLE recover.rmt3 TO recover.rmt4" settings=settings)<line_sep>main_node.query_with_retry("DROP TABLE recover.rmt5" settings=settings)<line_sep>main_node.query_with_retry("DROP DICTIONARY recover.d2" settings=settings)<line_sep>main_node.query_with_retry("CREATE DICTIONARY recover.d2 (n int DEFAULT 0, m int DEFAULT 1) PRIMARY KEY n "<concat>"SOURCE(CLICKHOUSE(HOST 'localhost' PORT 9000 USER 'default' TABLE 'rmt1' PASSWORD '' DB 'recover')) "<concat>"LIFETIME(MIN 1 MAX 10) LAYOUT(FLAT());" settings=settings )<line_sep>inner_table=(".inner_id."+dummy_node.query_with_retry("SELECT uuid FROM system.tables WHERE database='recover' AND name='mv1'").strip())<line_sep>main_node.query_with_retry("ALTER TABLE recover.`{}` MODIFY COLUMN n int DEFAULT 42".format(inner_table) settings=settings )<line_sep>main_node.query_with_retry("ALTER TABLE recover.mv1 MODIFY QUERY SELECT m FROM recover.rmt1".format(inner_table) settings=settings )<line_sep>main_node.query_with_retry("RENAME TABLE recover.mv2 TO recover.mv3".format(inner_table) settings=settings )<line_sep>main_node.query_with_retry("CREATE TABLE recover.tmp AS recover.m1" settings=settings)<line_sep>main_node.query_with_retry("DROP TABLE recover.tmp" settings=settings)<line_sep>main_node.query_with_retry("CREATE TABLE recover.tmp AS recover.m1" settings=settings)<line_sep>main_node.query_with_retry("DROP TABLE recover.tmp" settings=settings)<line_sep>main_node.query_with_retry("CREATE TABLE recover.tmp AS recover.m1" settings=settings)<block_end><assert_stmt>(main_node.query("SELECT name FROM system.tables WHERE database='recover' AND name NOT LIKE '.inner_id.%' ORDER BY name")<eq>"d1\nd2\nm1\nmt1\nmt2\nmv1\nmv3\nrmt1\nrmt2\nrmt4\nt2\ntmp\n")<line_sep>query=("SELECT name, uuid, create_table_query FROM system.tables WHERE database='recover' AND name NOT LIKE '.inner_id.%' "<concat>"ORDER BY name SETTINGS show_table_uuid_in_table_create_query_if_not_nil=1")<line_sep>expected=main_node.query(query)<line_sep>assert_eq_with_retry(dummy_node query expected)<assert_stmt>(main_node.query("SELECT count() FROM system.tables WHERE database='recover' AND name LIKE '.inner_id.%'")<eq>"2\n")<assert_stmt>(dummy_node.query("SELECT count() FROM system.tables WHERE database='recover' AND name LIKE '.inner_id.%'")<eq>"2\n")<for_stmt>table ["m1" "t2" "mt1" "mt2" "rmt1" "rmt2" "rmt4" "d1" "d2" "mv1" "mv3" ]<block_start><assert_stmt>main_node.query("SELECT (*,).1 FROM recover.{}".format(table))<eq>"42\n"<block_end><for_stmt>table ["t2" "rmt1" "rmt2" "rmt4" "d1" "d2" "mt2" "mv1" "mv3"]<block_start><assert_stmt>dummy_node.query("SELECT (*,).1 FROM recover.{}".format(table))<eq>"42\n"<block_end><for_stmt>table ["m1" "mt1"]<block_start><assert_stmt>dummy_node.query("SELECT count() FROM 
recover.{}".format(table))<eq>"0\n"<block_end><global>test_recover_staled_replica_run<assert_stmt>(dummy_node.query("SELECT count() FROM system.tables WHERE database='recover_broken_tables'")<eq>f"{test_recover_staled_replica_run}\n")<assert_stmt>(dummy_node.query("SELECT count() FROM system.tables WHERE database='recover_broken_replicated_tables'")<eq>f"{test_recover_staled_replica_run}\n")<line_sep>test_recover_staled_replica_run<augadd>1<line_sep>table=dummy_node.query("SHOW TABLES FROM recover_broken_tables LIKE 'mt1_29_%' LIMIT 1").strip()<assert_stmt>(dummy_node.query("SELECT (*,).1 FROM recover_broken_tables.{}".format(table))<eq>"42\n")<line_sep>table=dummy_node.query("SHOW TABLES FROM recover_broken_replicated_tables LIKE 'rmt5_29_%' LIMIT 1").strip()<assert_stmt>(dummy_node.query("SELECT (*,).1 FROM recover_broken_replicated_tables.{}".format(table))<eq>"42\n")<line_sep>expected="Cleaned 6 outdated objects: dropped 1 dictionaries and 3 tables, moved 2 tables"<line_sep>assert_logs_contain(dummy_node expected)<line_sep>dummy_node.query("DROP TABLE recover.tmp")<line_sep>assert_eq_with_retry(main_node "SELECT count() FROM system.tables WHERE database='recover' AND name='tmp'" "0\n" )<line_sep>main_node.query("DROP DATABASE recover SYNC")<line_sep>dummy_node.query("DROP DATABASE recover SYNC")<block_end><def_stmt>test_startup_without_zk started_cluster<block_start><with_stmt>PartitionManager()<as>pm<block_start>pm.drop_instance_zk_connections(main_node)<line_sep>err=main_node.query_and_get_error("CREATE DATABASE startup ENGINE = Replicated('/clickhouse/databases/startup', 'shard1', 'replica1');")<assert_stmt>"ZooKeeper"<in>err<block_end>main_node.query("CREATE DATABASE startup ENGINE = Replicated('/clickhouse/databases/startup', 'shard1', 'replica1');")<line_sep># main_node.query("CREATE TABLE startup.rmt (n int) ENGINE=ReplicatedMergeTree order by n") main_node.query("CREATE TABLE startup.rmt (n int) ENGINE=MergeTree order by n")<line_sep>main_node.query("INSERT INTO startup.rmt VALUES (42)")<with_stmt>PartitionManager()<as>pm<block_start>pm.drop_instance_zk_connections(main_node)<line_sep>main_node.restart_clickhouse(stop_start_wait_sec=30)<assert_stmt>main_node.query("SELECT (*,).1 FROM startup.rmt")<eq>"42\n"<block_end><for_stmt>_ range(10)<block_start><try_stmt><block_start>main_node.query("CREATE TABLE startup.m (n int) ENGINE=Memory")<line_sep><break><block_end><except_stmt><block_start>time.sleep(1)<block_end><block_end>main_node.query("EXCHANGE TABLES startup.rmt AND startup.m")<assert_stmt>main_node.query("SELECT (*,).1 FROM startup.m")<eq>"42\n"<line_sep>main_node.query("DROP DATABASE startup SYNC")<block_end><def_stmt>test_server_uuid started_cluster<block_start>uuid1=main_node.query("select serverUUID()")<line_sep>uuid2=dummy_node.query("select serverUUID()")<assert_stmt>uuid1<ne>uuid2<line_sep>main_node.restart_clickhouse()<line_sep>uuid1_after_restart=main_node.query("select serverUUID()")<assert_stmt>uuid1<eq>uuid1_after_restart<block_end><def_stmt>test_sync_replica started_cluster<block_start>main_node.query("CREATE DATABASE test_sync_database ENGINE = Replicated('/clickhouse/databases/test1', 'shard1', 'replica1');")<line_sep>dummy_node.query("CREATE DATABASE test_sync_database ENGINE = Replicated('/clickhouse/databases/test1', 'shard1', 'replica2');")<line_sep>number_of_tables=1000<line_sep>settings={"distributed_ddl_task_timeout":0}<with_stmt>PartitionManager()<as>pm<block_start>pm.drop_instance_zk_connections(dummy_node)<for_stmt>i 
range(number_of_tables)<block_start>main_node.query("CREATE TABLE test_sync_database.table_{} (n int) ENGINE=MergeTree order by n".format(i) settings=settings )<block_end><block_end># wait for host to reconnect dummy_node.query_with_retry("SELECT * FROM system.zookeeper WHERE path='/'")<line_sep>dummy_node.query("SYSTEM SYNC DATABASE REPLICA test_sync_database")<assert_stmt>dummy_node.query("SELECT count() FROM system.tables where database='test_sync_database'").strip()<eq>str(number_of_tables)<assert_stmt>main_node.query("SELECT count() FROM system.tables where database='test_sync_database'").strip()<eq>str(number_of_tables)<line_sep>engine_settings={"default_table_engine":"ReplicatedMergeTree"}<line_sep>dummy_node.query("CREATE TABLE test_sync_database.table (n int, primary key n) partition by n" settings=engine_settings )<line_sep>main_node.query("INSERT INTO test_sync_database.table SELECT * FROM numbers(10)")<line_sep>dummy_node.query("TRUNCATE TABLE test_sync_database.table" settings=settings)<line_sep>dummy_node.query("ALTER TABLE test_sync_database.table ADD COLUMN m int" settings=settings)<line_sep>main_node.query("SYSTEM SYNC DATABASE REPLICA ON CLUSTER test_sync_database test_sync_database")<line_sep>lp1=main_node.query("select value from system.zookeeper where path='/clickhouse/databases/test1/replicas/shard1|replica1' and name='log_ptr'")<line_sep>lp2=main_node.query("select value from system.zookeeper where path='/clickhouse/databases/test1/replicas/shard1|replica2' and name='log_ptr'")<line_sep>max_lp=main_node.query("select value from system.zookeeper where path='/clickhouse/databases/test1/' and name='max_log_ptr'")<assert_stmt>lp1<eq>max_lp<assert_stmt>lp2<eq>max_lp<block_end>
<import_from_stmt>torch Tensor<import_from_stmt>torch_geometric.typing OptTensor<import_from_stmt>.max_pool max_pool max_pool_x max_pool_neighbor_x<import_from_stmt>.avg_pool avg_pool avg_pool_x avg_pool_neighbor_x<import_from_stmt>.graclus graclus<import_from_stmt>.voxel_grid voxel_grid<import_from_stmt>.topk_pool TopKPooling<import_from_stmt>.sag_pool SAGPooling<import_from_stmt>.edge_pool EdgePooling<import_from_stmt>.asap ASAPooling<import_from_stmt>.pan_pool PANPooling<import_from_stmt>.mem_pool MemPooling<try_stmt><block_start><import_stmt>torch_cluster<block_end><except_stmt>ImportError<block_start>torch_cluster=<none><block_end><def_stmt>fps x:Tensor batch:OptTensor=<none> ratio:float=0.5 random_start:bool=<true><arrow>Tensor<block_start>r"""A sampling algorithm from the `"PointNet++: Deep Hierarchical Feature Learning on Point Sets in a Metric Space" <https://arxiv.org/abs/1706.02413>`_ paper, which iteratively samples the most distant point with regard to the rest points. Args: x (Tensor): Node feature matrix :math:`\mathbf{X} \in \mathbb{R}^{N \times F}`. batch (LongTensor, optional): Batch vector :math:`\mathbf{b} \in {\{ 0, \ldots, B-1\}}^N`, which assigns each node to a specific example. (default: :obj:`None`) ratio (float, optional): Sampling ratio. (default: :obj:`0.5`) random_start (bool, optional): If set to :obj:`False`, use the first node in :math:`\mathbf{X}` as starting node. (default: obj:`True`) :rtype: :class:`LongTensor` .. code-block:: python import torch from torch_geometric.nn import fps x = torch.Tensor([[-1, -1], [-1, 1], [1, -1], [1, 1]]) batch = torch.tensor([0, 0, 0, 0]) index = fps(x, batch, ratio=0.5) """<line_sep><return>torch_cluster.fps(x batch ratio random_start)<block_end><def_stmt>knn x:Tensor y:Tensor k:int batch_x:OptTensor=<none> batch_y:OptTensor=<none> cosine:bool=<false> num_workers:int=1<arrow>Tensor<block_start>r"""Finds for each element in :obj:`y` the :obj:`k` nearest points in :obj:`x`. Args: x (Tensor): Node feature matrix :math:`\mathbf{X} \in \mathbb{R}^{N \times F}`. y (Tensor): Node feature matrix :math:`\mathbf{X} \in \mathbb{R}^{M \times F}`. k (int): The number of neighbors. batch_x (LongTensor, optional): Batch vector :math:`\mathbf{b} \in {\{ 0, \ldots, B-1\}}^N`, which assigns each node to a specific example. (default: :obj:`None`) batch_y (LongTensor, optional): Batch vector :math:`\mathbf{b} \in {\{ 0, \ldots, B-1\}}^M`, which assigns each node to a specific example. (default: :obj:`None`) cosine (boolean, optional): If :obj:`True`, will use the cosine distance instead of euclidean distance to find nearest neighbors. (default: :obj:`False`) num_workers (int): Number of workers to use for computation. Has no effect in case :obj:`batch_x` or :obj:`batch_y` is not :obj:`None`, or the input lies on the GPU. (default: :obj:`1`) :rtype: :class:`LongTensor` .. code-block:: python import torch from torch_geometric.nn import knn x = torch.Tensor([[-1, -1], [-1, 1], [1, -1], [1, 1]]) batch_x = torch.tensor([0, 0, 0, 0]) y = torch.Tensor([[-1, 0], [1, 0]]) batch_y = torch.tensor([0, 0]) assign_index = knn(x, y, 2, batch_x, batch_y) """<line_sep><return>torch_cluster.knn(x y k batch_x batch_y cosine num_workers)<block_end><def_stmt>knn_graph x:Tensor k:int batch:OptTensor=<none> loop:bool=<false> flow:str='source_to_target' cosine:bool=<false> num_workers:int=1<arrow>Tensor<block_start>r"""Computes graph edges to the nearest :obj:`k` points. Args: x (Tensor): Node feature matrix :math:`\mathbf{X} \in \mathbb{R}^{N \times F}`. 
k (int): The number of neighbors. batch (LongTensor, optional): Batch vector :math:`\mathbf{b} \in {\{ 0, \ldots, B-1\}}^N`, which assigns each node to a specific example. (default: :obj:`None`) loop (bool, optional): If :obj:`True`, the graph will contain self-loops. (default: :obj:`False`) flow (string, optional): The flow direction when using in combination with message passing (:obj:`"source_to_target"` or :obj:`"target_to_source"`). (default: :obj:`"source_to_target"`) cosine (boolean, optional): If :obj:`True`, will use the cosine distance instead of euclidean distance to find nearest neighbors. (default: :obj:`False`) num_workers (int): Number of workers to use for computation. Has no effect in case :obj:`batch` is not :obj:`None`, or the input lies on the GPU. (default: :obj:`1`) :rtype: :class:`LongTensor` .. code-block:: python import torch from torch_geometric.nn import knn_graph x = torch.Tensor([[-1, -1], [-1, 1], [1, -1], [1, 1]]) batch = torch.tensor([0, 0, 0, 0]) edge_index = knn_graph(x, k=2, batch=batch, loop=False) """<line_sep><return>torch_cluster.knn_graph(x k batch loop flow cosine num_workers)<block_end><def_stmt>radius x:Tensor y:Tensor r:float batch_x:OptTensor=<none> batch_y:OptTensor=<none> max_num_neighbors:int=32 num_workers:int=1<arrow>Tensor<block_start>r"""Finds for each element in :obj:`y` all points in :obj:`x` within distance :obj:`r`. Args: x (Tensor): Node feature matrix :math:`\mathbf{X} \in \mathbb{R}^{N \times F}`. y (Tensor): Node feature matrix :math:`\mathbf{Y} \in \mathbb{R}^{M \times F}`. r (float): The radius. batch_x (LongTensor, optional): Batch vector :math:`\mathbf{b} \in {\{ 0, \ldots, B-1\}}^N`, which assigns each node to a specific example. (default: :obj:`None`) batch_y (LongTensor, optional): Batch vector :math:`\mathbf{b} \in {\{ 0, \ldots, B-1\}}^M`, which assigns each node to a specific example. (default: :obj:`None`) max_num_neighbors (int, optional): The maximum number of neighbors to return for each element in :obj:`y`. (default: :obj:`32`) num_workers (int): Number of workers to use for computation. Has no effect in case :obj:`batch_x` or :obj:`batch_y` is not :obj:`None`, or the input lies on the GPU. (default: :obj:`1`) :rtype: :class:`LongTensor` .. code-block:: python import torch from torch_geometric.nn import radius x = torch.Tensor([[-1, -1], [-1, 1], [1, -1], [1, 1]]) batch_x = torch.tensor([0, 0, 0, 0]) y = torch.Tensor([[-1, 0], [1, 0]]) batch_y = torch.tensor([0, 0]) assign_index = radius(x, y, 1.5, batch_x, batch_y) """<line_sep><return>torch_cluster.radius(x y r batch_x batch_y max_num_neighbors num_workers)<block_end><def_stmt>radius_graph x:Tensor r:float batch:OptTensor=<none> loop:bool=<false> max_num_neighbors:int=32 flow:str='source_to_target' num_workers:int=1<arrow>Tensor<block_start>r"""Computes graph edges to all points within a given distance. Args: x (Tensor): Node feature matrix :math:`\mathbf{X} \in \mathbb{R}^{N \times F}`. r (float): The radius. batch (LongTensor, optional): Batch vector :math:`\mathbf{b} \in {\{ 0, \ldots, B-1\}}^N`, which assigns each node to a specific example. (default: :obj:`None`) loop (bool, optional): If :obj:`True`, the graph will contain self-loops. (default: :obj:`False`) max_num_neighbors (int, optional): The maximum number of neighbors to return for each element in :obj:`y`. (default: :obj:`32`) flow (string, optional): The flow direction when using in combination with message passing (:obj:`"source_to_target"` or :obj:`"target_to_source"`). 
(default: :obj:`"source_to_target"`) num_workers (int): Number of workers to use for computation. Has no effect in case :obj:`batch` is not :obj:`None`, or the input lies on the GPU. (default: :obj:`1`) :rtype: :class:`LongTensor` .. code-block:: python import torch from torch_geometric.nn import radius_graph x = torch.Tensor([[-1, -1], [-1, 1], [1, -1], [1, 1]]) batch = torch.tensor([0, 0, 0, 0]) edge_index = radius_graph(x, r=1.5, batch=batch, loop=False) """<line_sep><return>torch_cluster.radius_graph(x r batch loop max_num_neighbors flow num_workers)<block_end><def_stmt>nearest x:Tensor y:Tensor batch_x:OptTensor=<none> batch_y:OptTensor=<none><arrow>Tensor<block_start>r"""Clusters points in :obj:`x` together which are nearest to a given query point in :obj:`y`. Args: x (Tensor): Node feature matrix :math:`\mathbf{X} \in \mathbb{R}^{N \times F}`. y (Tensor): Node feature matrix :math:`\mathbf{Y} \in \mathbb{R}^{M \times F}`. batch_x (LongTensor, optional): Batch vector :math:`\mathbf{b} \in {\{ 0, \ldots, B-1\}}^N`, which assigns each node to a specific example. (default: :obj:`None`) batch_y (LongTensor, optional): Batch vector :math:`\mathbf{b} \in {\{ 0, \ldots, B-1\}}^M`, which assigns each node to a specific example. (default: :obj:`None`) :rtype: :class:`LongTensor` .. code-block:: python import torch from torch_geometric.nn import nearest x = torch.Tensor([[-1, -1], [-1, 1], [1, -1], [1, 1]]) batch_x = torch.tensor([0, 0, 0, 0]) y = torch.Tensor([[-1, 0], [1, 0]]) batch_y = torch.tensor([0, 0]) cluster = nearest(x, y, batch_x, batch_y) """<line_sep><return>torch_cluster.nearest(x y batch_x batch_y)<block_end>__all__=['TopKPooling' 'SAGPooling' 'EdgePooling' 'ASAPooling' 'PANPooling' 'MemPooling' 'max_pool' 'avg_pool' 'max_pool_x' 'max_pool_neighbor_x' 'avg_pool_x' 'avg_pool_neighbor_x' 'graclus' 'voxel_grid' 'fps' 'knn' 'knn_graph' 'radius' 'radius_graph' 'nearest' ]<line_sep>classes=__all__<line_sep>
''' Determine whether an integer is a palindrome. Do this without extra space. Could negative integers be palindromes? (ie, -1) If you are thinking of converting the integer to string, note the restriction of using extra space. You could also try reversing an integer. However, if you have solved the problem "Reverse Integer", you know that the reversed integer might overflow. How would you handle such a case? There is a more generic way of solving this problem. '''<class_stmt>Solution(object)<block_start><def_stmt>isPalindrome self x<block_start>""" :type x: int :rtype: bool """<if_stmt>x<l>0<block_start><return><false><block_end>div=1<while_stmt>x<floordiv>div<ge>10<block_start>div<augmul>10<block_end><while_stmt>x<g>0<block_start>l=x<floordiv>div<line_sep>r=x%10<if_stmt>l<ne>r<block_start><return><false><block_end>x<augmod>div<line_sep>x<augfloordiv>10<line_sep>div<augfloordiv>100<block_end><return><true><block_end><block_end><if_stmt>__name__<eq>"__main__"<block_start><assert_stmt>Solution().isPalindrome(123)<eq><false><assert_stmt>Solution().isPalindrome(12321)<eq><true><assert_stmt>Solution().isPalindrome(-121)<eq><false><block_end>
# -*- coding: utf-8 -*- <import_from_stmt>operator attrgetter<import_stmt>datetime<import_stmt>numpy<as>np<import_from_stmt>..distance DistanceHypothesiser<import_from_stmt>...types.detection Detection<import_from_stmt>...types.state GaussianState<import_from_stmt>...types.track Track<import_from_stmt>... measures<def_stmt>test_mahalanobis predictor updater<block_start>timestamp=datetime.datetime.now()<line_sep>track=Track([GaussianState(np.array([[0]]) np.array([[1]]) timestamp)])<line_sep>detection1=Detection(np.array([[2]]))<line_sep>detection2=Detection(np.array([[3]]))<line_sep>detection3=Detection(np.array([[10]]))<line_sep>detections={detection1 detection2 detection3}<line_sep>measure=measures.Mahalanobis()<line_sep>hypothesiser=DistanceHypothesiser(predictor updater measure=measure missed_distance=3)<line_sep>hypotheses=hypothesiser.hypothesise(track detections timestamp)<line_sep># There are 3 hypotheses - Detection 1, Detection 2, Missed Detection <assert_stmt>len(hypotheses)<eq>3<line_sep># And not detection3 <assert_stmt>detection3<not><in>{hypothesis.measurement<for>hypothesis hypotheses}<line_sep># There is a missed detection hypothesis <assert_stmt>any(<not>hypothesis.measurement<for>hypothesis hypotheses)<line_sep># Each hypothesis has a distance attribute <assert_stmt>all(hypothesis.distance<ge>0<for>hypothesis hypotheses)<line_sep># The hypotheses are sorted correctly <assert_stmt>min(hypotheses key=attrgetter('distance'))<is>hypotheses[0]<block_end><def_stmt>test_distance_include_all predictor updater<block_start>timestamp=datetime.datetime.now()<line_sep>track=Track([GaussianState(np.array([[0]]) np.array([[1]]) timestamp)])<line_sep>detection1=Detection(np.array([[2]]))<line_sep>detection2=Detection(np.array([[3]]))<line_sep>detection3=Detection(np.array([[10]]))<line_sep>detections={detection1 detection2 detection3}<line_sep>measure=measures.Mahalanobis()<line_sep>hypothesiser=DistanceHypothesiser(predictor updater measure=measure missed_distance=1 include_all=<true>)<line_sep>hypotheses=hypothesiser.hypothesise(track detections timestamp)<line_sep># There are 4 hypotheses - Detections and Missed Detection <assert_stmt>len(hypotheses)<eq>4<line_sep># detection3 is beyond missed distance and largest distance (last # hypothesis in list) last_hypothesis=hypotheses[-1]<assert_stmt>last_hypothesis.measurement<is>detection3<assert_stmt>last_hypothesis.distance<g>hypothesiser.missed_distance<block_end>
# Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the 'License'); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an 'AS IS' BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for nccl ops. See also the cc test for nccl_communicator."""<import_from_future_stmt> absolute_import<import_from_future_stmt> division<import_from_future_stmt> print_function<import_stmt>numpy<as>np<import_from_stmt>tensorflow.contrib nccl<import_from_stmt>tensorflow.python.framework ops<import_from_stmt>tensorflow.python.ops array_ops<import_from_stmt>tensorflow.python.platform test<class_stmt>AllReduceTest(test.TestCase)<block_start><def_stmt>testAllReduce self<block_start><if_stmt><not>test.is_gpu_available()<block_start><return><block_end># Test requires access to a GPU <for_stmt>dtype [np.float32 np.int32 np.int64 np.float64]# Create session inside outer loop to test use of # same communicator across multiple sessions. <block_start><with_stmt>self.test_session(use_gpu=<true>)<as>sess<block_start>self._testSingleAllReduce(sess dtype nccl.all_sum <lambda>x y:x+y)<line_sep>self._testSingleAllReduce(sess dtype nccl.all_prod <lambda>x y:x<times>y)<line_sep>self._testSingleAllReduce(sess dtype nccl.all_min np.minimum)<line_sep>self._testSingleAllReduce(sess dtype nccl.all_max np.maximum)<block_end><block_end><block_end><def_stmt>_testSingleAllReduce self sess np_type nccl_fn numpy_accumulation_fn<block_start><for_stmt>devices [['/gpu:0' '/gpu:0' '/gpu:0'] ['/gpu:0' '/gpu:0']]<block_start>shape=(3 4)<line_sep>np_ans=<none><line_sep>tensors=[]<for_stmt>d devices<block_start><with_stmt>ops.device(d)<block_start>t=((np.random.random_sample(shape)-.5)<times>1024).astype(np_type)<if_stmt>np_ans<is><none><block_start>np_ans=t<block_end><else_stmt><block_start>np_ans=numpy_accumulation_fn(np_ans t)<block_end>tensors.append(array_ops.identity(t))<block_end><block_end>all_reduce_tensors=nccl_fn(tensors)<line_sep># Test shape inference. <for_stmt>r all_reduce_tensors<block_start>self.assertEqual(shape r.get_shape())<block_end># Test execution and results. nccl_results=sess.run(all_reduce_tensors)<for_stmt>r nccl_results<block_start>self.assertAllClose(r np_ans)<block_end><block_end><block_end><def_stmt>testErrors self<block_start><with_stmt>self.assertRaisesRegexp(ValueError 'Device assignment required')<block_start>nccl.all_sum([array_ops.identity(np.random.random_sample((3 4)))])<block_end><with_stmt>self.assertRaisesRegexp(ValueError 'Must pass >0 tensors')<block_start>nccl.all_sum([])<block_end><block_end><block_end><class_stmt>BroadcastTest(test.TestCase)<block_start><def_stmt>testBroadcast self<block_start><if_stmt><not>test.is_gpu_available()<block_start><return><block_end># Test requires access to a GPU <for_stmt>dtype [np.float32 np.int32 np.int64 np.float64]# Create session inside outer loop to test use of # same communicator across multiple sessions. 
<block_start><with_stmt>self.test_session(use_gpu=<true>)<as>sess<block_start><for_stmt>devices [['/gpu:0' '/gpu:0' '/gpu:0'] ['/gpu:0' '/gpu:0']]<block_start>shape=(3 4)<line_sep>sender=np.random.randint(0 len(devices)-1)<with_stmt>ops.device(devices[sender])<block_start>np_ans=(((np.random.random_sample(shape)-.5)<times>1024).astype(dtype))<line_sep>t=array_ops.identity(np_ans)<block_end>other_devices=devices[:sender]+devices[sender+1:]<line_sep>send_op,received_tensors=nccl.broadcast(t other_devices)<line_sep># Verify shape inference. <for_stmt>r received_tensors<block_start>self.assertEqual(shape r.get_shape())<block_end># Run and verify results. nccl_results=sess.run(received_tensors+[send_op])<for_stmt>r nccl_results[:-1]<block_start>self.assertAllClose(r np_ans)<block_end><block_end><block_end><block_end><block_end><block_end><class_stmt>CombinedTest(test.TestCase)<block_start>"""Tests using a mix of all-reduce ops in one session.run call."""<def_stmt>testCombined self<block_start><if_stmt><not>test.is_gpu_available()<block_start><return><block_end># Test requires access to a GPU <for_stmt>dtype [np.float32 np.int32 np.int64 np.float64]# Create session inside outer loop to test use of # same communicator across multiple sessions. <block_start><with_stmt>self.test_session(use_gpu=<true>)<as>sess<block_start><for_stmt>devices [['/gpu:0' '/gpu:0' '/gpu:0'] ['/gpu:0' '/gpu:0']]<block_start>shape=(3 4)<line_sep># all-reduce np_ans=np.zeros(shape=shape dtype=dtype)<line_sep>tensors=[]<for_stmt>d devices<block_start><with_stmt>ops.device(d)<block_start>t=((np.random.random_sample(shape)-.5)<times>1024).astype(dtype)<line_sep>np_ans<augadd>t<line_sep>tensors.append(array_ops.identity(t))<block_end><block_end>all_reduce_tensors=nccl.all_sum(tensors)<line_sep>sender=np.random.randint(0 len(devices)-1)<line_sep>other_devices=devices[:sender]+devices[sender+1:]<line_sep>send_op,received_tensors=nccl.broadcast(all_reduce_tensors[sender] other_devices)<line_sep># sender doesn't need to be fetched as part of outputs of session.run. <del_stmt>all_reduce_tensors[sender]<line_sep># Verify shape inference. <for_stmt>r received_tensors<block_start>self.assertEqual(shape r.get_shape())<block_end># Run and verify results. nccl_results=sess.run(received_tensors+[send_op]+all_reduce_tensors)<for_stmt>r nccl_results[:len(received_tensors)]<block_start>self.assertAllClose(r np_ans)<block_end><block_end><block_end><block_end><block_end><block_end><if_stmt>__name__<eq>'__main__'<block_start>test.main()<block_end>
"""Sample agents that implements the process method, listen to ping messages and sends them back."""<import_stmt>datetime<import_stmt>logging<import_from_stmt>ostorlab.agent agent message<as>agent_message<line_sep>logger=logging.getLogger(__name__)<class_stmt>ProcessTestAgent(agent.Agent)<block_start>"""Custom agent implementation."""<line_sep>message=<none><def_stmt>process self message:agent_message.Message<arrow><none><block_start>"""Receives ping messages and sends new ones. Args: message: message from bus Returns: None """<line_sep>logger.info('received message')<line_sep>self.message=message<line_sep>self.emit('v3.healthcheck.ping' {'body':f'from test agent at {datetime.datetime.now()}'})<block_end><block_end># process_agent = ProcessTestAgent( # definitions.AgentDefinition(name='process_test_agent', in_selectors=['v3.healthcheck.ping'], # out_selectors=['v3.healthcheck.ping']), # definitions.AgentInstanceSettings( # bus_url='amqp://guest:guest@localhost:5672/', bus_exchange_topic='ostorlab_test', healthcheck_port=5302)) # # process_agent.run() ProcessTestAgent.main()<line_sep>
""" Tests for the print_name.py """<import_stmt>unittest<import_stmt>pybamm<class_stmt>TestPrintName(unittest.TestCase)<block_start><def_stmt>test_prettify_print_name self<block_start>param=pybamm.LithiumIonParameters()<line_sep>param1=pybamm.standard_variables<line_sep>param2=pybamm.LeadAcidParameters()<line_sep># Test PRINT_NAME_OVERRIDES self.assertEqual(param.timescale.print_name r"\tau")<line_sep># Test superscripts self.assertEqual(param.U_n_ref.print_name r"U_{n}^{ref}")<line_sep># Test subscripts self.assertEqual(param.a_R_p.print_name r"a_{R\,p}")<line_sep># Test dim and dimensional self.assertEqual(param.j0_n_ref_dimensional.print_name r"\hat{j0}_{n}^{ref}")<line_sep>self.assertEqual(param.C_dl_n_dimensional.print_name r"\hat{C}_{dl\,n}")<line_sep># Test bar self.assertEqual(param1.c_s_n_xav.print_name r"\bar{c}_{s\,n}")<line_sep># Test greek letters self.assertEqual(param2.delta.print_name r"\delta")<line_sep># Test new_copy() x_n=pybamm.standard_spatial_vars.x_n<line_sep>a_n=param2.a_n(x_n)<line_sep>a_n.new_copy()<line_sep># Test eps self.assertEqual(param1.eps_n.print_name r"\epsilon_n")<block_end><block_end><if_stmt>__name__<eq>"__main__"<block_start>print("Add -v for more debug output")<import_stmt>sys<if_stmt>"-v"<in>sys.argv<block_start>debug=<true><block_end>pybamm.settings.debug_mode=<true><line_sep>unittest.main()<block_end>
<import_stmt>cv2<import_stmt>numpy<as>np<import_stmt>matplotlib.pyplot<as>plt<import_from_stmt>glob glob<line_sep># Dicrease color <def_stmt>dic_color img<block_start>img<augfloordiv>63<line_sep>img=img<times>64+32<line_sep><return>img<block_end># Database <def_stmt>get_DB # get training image path <block_start>train=glob("dataset/train_*")<line_sep>train.sort()<line_sep># prepare database db=np.zeros((len(train) 13) dtype=np.int32)<line_sep># prepare path database pdb=[]<line_sep># each image <for_stmt>i,path enumerate(train)# read image <block_start>img=dic_color(cv2.imread(path))<line_sep>#get histogram <for_stmt>j range(4)<block_start>db[i j]=len(np.where(img[<ellipsis> 0]<eq>(64<times>j+32))[0])<line_sep>db[i j+4]=len(np.where(img[<ellipsis> 1]<eq>(64<times>j+32))[0])<line_sep>db[i j+8]=len(np.where(img[<ellipsis> 2]<eq>(64<times>j+32))[0])<block_end># get class <if_stmt>'akahara'<in>path<block_start>cls=0<block_end><elif_stmt>'madara'<in>path<block_start>cls=1<block_end># store class label db[i -1]=cls<line_sep># store image path pdb.append(path)<block_end><return>db pdb<block_end># test <def_stmt>test_DB db pdb# get test image path <block_start>test=glob("dataset/test_*")<line_sep>test.sort()<line_sep>success_num=0.<line_sep># each image <for_stmt>path test# read image <block_start>img=dic_color(cv2.imread(path))<line_sep># get histogram hist=np.zeros(12 dtype=np.int32)<for_stmt>j range(4)<block_start>hist[j]=len(np.where(img[<ellipsis> 0]<eq>(64<times>j+32))[0])<line_sep>hist[j+4]=len(np.where(img[<ellipsis> 1]<eq>(64<times>j+32))[0])<line_sep>hist[j+8]=len(np.where(img[<ellipsis> 2]<eq>(64<times>j+32))[0])<block_end># get histogram difference difs=np.abs(db[: :12]-hist)<line_sep>difs=np.sum(difs axis=1)<line_sep># get argmin of difference pred_i=np.argmin(difs)<line_sep># get prediction label pred=db[pred_i -1]<if_stmt>pred<eq>0<block_start>pl="akahara"<block_end><elif_stmt>pred<eq>1<block_start>pl="madara"<block_end>print(path "is similar >>" pdb[pred_i] " Pred >>" pl)<block_end><block_end>db,pdb=get_DB()<line_sep>test_DB(db pdb)<line_sep>
<import_from_stmt>typing List Dict Any# noqa: F401 <import_from_stmt>vint.linting.formatter.formatter Formatter<class_stmt>StatisticFormatter(Formatter)<block_start><def_stmt>format_violations self violations# type: (List[Dict[str, Any]]) -> str <block_start>violations_count=len(violations)<line_sep>output=super(StatisticFormatter self).format_violations(violations)+'\n'<line_sep><return>output+'Total violations: {count}'.format(count=violations_count)<block_end><block_end>
#encoding:utf-8 # Write the subreddit name here. Like this one for /r/BigAnimeTiddies. subreddit='BigAnimeTiddies'<line_sep># This is for your public Telegram channel. t_channel='@r_BigAnimeTiddies'<line_sep><def_stmt>send_post submission r2t<block_start><return>r2t.send_simple(submission)<block_end>
<import_stmt>json<import_from_stmt>collections OrderedDict<import_from_stmt>starlette.testclient TestClient<import_from_stmt>src.fastapi_quickcrud sqlalchemy_to_pydantic<import_from_stmt>src.fastapi_quickcrud.crud_router crud_router_builder<import_from_stmt>src.fastapi_quickcrud.misc.type CrudMethods<import_from_stmt>tests.test_implementations.test_sqlalchemy.api_test get_transaction_session app UntitledTable256<line_sep>UntitledTable256Model=sqlalchemy_to_pydantic(UntitledTable256 crud_methods=[CrudMethods.UPSERT_ONE] exclude_columns=['bytea_value' 'xml_value' 'box_valaue'])<line_sep>test_create_one=crud_router_builder(db_session=get_transaction_session db_model=UntitledTable256 crud_models=UntitledTable256Model prefix="/test_creation_one" tags=["test"])<line_sep>UntitledTable256Model=sqlalchemy_to_pydantic(UntitledTable256 crud_methods=[CrudMethods.UPSERT_MANY ] exclude_columns=['bytea_value' 'xml_value' 'box_valaue'])<line_sep>test_create_many=crud_router_builder(db_session=get_transaction_session db_model=UntitledTable256 crud_models=UntitledTable256Model prefix="/test_creation_many" tags=["test"])<line_sep># Response Mode Test # response_many = create_many_response_model['__root__'].sub_fields[0].outer_type_.__dict__['__fields__'] # for k, v in response_many.items(): # assert not v.required UntitledTable256Model=sqlalchemy_to_pydantic(UntitledTable256 crud_methods=[CrudMethods.POST_REDIRECT_GET] exclude_columns=['bytea_value' 'xml_value' 'box_valaue'])<line_sep># Model Test # api_model = UntitledTable256Model.__dict__['POST'] # assert api_model # post_redirect_get_model = api_model[CrudMethods.POST_REDIRECT_GET].__dict__ # assert post_redirect_get_model['requestModel'] or post_redirect_get_model['responseModel'] # post_redirect_get_request_model = deepcopy(post_redirect_get_model['requestModel'].__dict__['__fields__']) # post_redirect_get_response_model = deepcopy(post_redirect_get_model['responseModel'].__dict__['__fields__']) # Request Model Test # for k, v in post_redirect_get_request_model.items(): # sql_schema = UntitledTable256.__dict__[v.name].comparator # # if sql_schema.server_default or sql_schema.default: # assert not v.required # elif not sql_schema.nullable and sql_schema.server_default or sql_schema.default: # assert not v.required # elif sql_schema.nullable: # assert not v.required # elif not sql_schema.nullable: # assert v.required # elif not sql_schema.nullable and not sql_schema.server_default or not sql_schema.default: # assert v.required # else: # print(f"{v.name=}") # print(f"{v.required=}") # print(f"{v.default=}") # Response Model Test # for k, v in post_redirect_get_response_model.items(): # sql_schema = UntitledTable256.__dict__[v.name].comparator # # if sql_schema.server_default or sql_schema.default: # assert not v.required # elif not sql_schema.nullable and sql_schema.server_default or sql_schema.default: # assert not v.required # elif sql_schema.nullable: # assert not v.required # elif not sql_schema.nullable: # assert v.required # elif not sql_schema.nullable and not sql_schema.server_default or not sql_schema.default: # assert v.required # else: # print(f"{v.name=}") # print(f"{v.required=}") # print(f"{v.default=}") # for k, v in post_redirect_get_response_model.items(): # assert v.required test_post_and_redirect_get=crud_router_builder(db_session=get_transaction_session db_model=UntitledTable256 crud_models=UntitledTable256Model prefix="/test_post_direct_get" tags=["test"])<line_sep>UntitledTable256Model=sqlalchemy_to_pydantic(UntitledTable256 
crud_methods=[CrudMethods.FIND_ONE] exclude_columns=['bytea_value' 'xml_value' 'box_valaue'])<line_sep># # # Model Test # api_model = UntitledTable256Model.__dict__['GET'] # assert api_model # get_one_model = api_model[CrudMethods.FIND_ONE].__dict__ # assert get_one_model['requestModel'] or get_one_model['responseModel'] # get_one_request_model = deepcopy(get_one_model['requestModel'].__dict__['__fields__']) # get_one_response_model = deepcopy(get_one_model['responseModel'].__dict__['__fields__']) # primary_key_of_get_sql_schema = get_one_request_model[UntitledTable256.__dict__['primary_key_of_table']] # assert not primary_key_of_get_sql_schema.required # get_one_request_model.pop(UntitledTable256.__dict__['primary_key_of_table'], None) # for k, v in get_one_request_model.items(): # assert not v.required # # FIXME some thing may not require # for k, v in get_one_response_model.items(): # sql_schema = UntitledTable256.__dict__[v.name].comparator # # if sql_schema.server_default or sql_schema.default: # assert not v.required # elif not sql_schema.nullable and sql_schema.server_default or sql_schema.default: # assert not v.required # elif sql_schema.nullable: # assert not v.required # elif not sql_schema.nullable: # assert v.required # elif not sql_schema.nullable and not sql_schema.server_default or not sql_schema.default: # assert v.required # else: # print(f"{v.name=}") # print(f"{v.required=}") # print(f"{v.default=}") test_get_data=crud_router_builder(db_session=get_transaction_session db_model=UntitledTable256 crud_models=UntitledTable256Model prefix="/test" tags=["test"])<line_sep>UntitledTable256Model=sqlalchemy_to_pydantic(UntitledTable256 crud_methods=[CrudMethods.DELETE_MANY] exclude_columns=['bytea_value' 'xml_value' 'box_valaue'])<line_sep># # # Model Test # api_model = UntitledTable256Model.__dict__['GET'] # assert api_model # get_one_model = api_model[CrudMethods.FIND_ONE].__dict__ # assert get_one_model['requestModel'] or get_one_model['responseModel'] # get_one_request_model = deepcopy(get_one_model['requestModel'].__dict__['__fields__']) # get_one_response_model = deepcopy(get_one_model['responseModel'].__dict__['__fields__']) # primary_key_of_get_sql_schema = get_one_request_model[UntitledTable256.__dict__['primary_key_of_table']] # assert not primary_key_of_get_sql_schema.required # get_one_request_model.pop(UntitledTable256.__dict__['primary_key_of_table'], None) # for k, v in get_one_request_model.items(): # assert not v.required # # FIXME some thing may not require # for k, v in get_one_response_model.items(): # sql_schema = UntitledTable256.__dict__[v.name].comparator # # if sql_schema.server_default or sql_schema.default: # assert not v.required # elif not sql_schema.nullable and sql_schema.server_default or sql_schema.default: # assert not v.required # elif sql_schema.nullable: # assert not v.required # elif not sql_schema.nullable: # assert v.required # elif not sql_schema.nullable and not sql_schema.server_default or not sql_schema.default: # assert v.required # else: # print(f"{v.name=}") # print(f"{v.required=}") # print(f"{v.default=}") test_delete_data=crud_router_builder(db_session=get_transaction_session db_model=UntitledTable256 crud_models=UntitledTable256Model prefix="/test_delete_many" tags=["test"])<line_sep>[app.include_router(i)<for>i [test_post_and_redirect_get test_delete_data test_create_one test_create_many 
test_get_data]]<line_sep>client=TestClient(app)<line_sep>primary_key_name=UntitledTable256.primary_key_of_table<line_sep>unique_fields=UntitledTable256.unique_fields<def_stmt>test_create_many_and_delete_many <block_start>headers={'accept':'application/json' 'Content-Type':'application/json' }<line_sep>data={"insert":[{"bool_value":<true> "char_value":"string" "date_value":"2021-07-24" "float4_value":0 "float8_value":0 "int2_value":0 "int4_value":0 "int8_value":0 "interval_value":0 "json_value":{} "jsonb_value":{} "numeric_value":0 "text_value":"string" "timestamp_value":"2021-07-24T02:54:53.285Z" "timestamptz_value":"2021-07-24T02:54:53.285Z" "uuid_value":"3fa85f64-5717-4562-b3fc-2c963f66afa6" "varchar_value":"string" "array_value":[0] "array_str__value":["string"] "time_value":"18:18:18" "timetz_value":"18:18:18+00:00"} {"bool_value":<true> "char_value":"string" "date_value":"2021-07-24" "float4_value":0 "float8_value":0 "int2_value":0 "int4_value":0 "int8_value":0 "interval_value":0 "json_value":{} "jsonb_value":{} "numeric_value":0 "text_value":"string" "time_value":"18:18:18" "timestamp_value":"2021-07-24T02:54:53.285Z" "timestamptz_value":"2021-07-24T02:54:53.285Z" "uuid_value":"3fa85f64-5717-4562-b3fc-2c963f66afa6" "varchar_value":"string" "array_value":[0] "array_str__value":["string"] "timetz_value":"18:18:18+00:00"} {"bool_value":<true> "char_value":"string" "date_value":"2021-07-24" "float4_value":0 "float8_value":0 "int2_value":0 "int4_value":0 "int8_value":0 "interval_value":0 "json_value":{} "jsonb_value":{} "numeric_value":0 "text_value":"string" "timestamp_value":"2021-07-24T02:54:53.285Z" "timestamptz_value":"2021-07-24T02:54:53.285Z" "uuid_value":"3fa85f64-5717-4562-b3fc-2c963f66afa6" "varchar_value":"string" "array_value":[0] "array_str__value":["string"] "time_value":"18:18:18" "timetz_value":"18:18:18+00:00"} ]}<line_sep>response=client.post('/test_creation_many' headers=headers data=json.dumps(data))<assert_stmt>response.status_code<eq>201<line_sep>insert_response_data=response.json()<line_sep>primary_key_list=[i[primary_key_name]<for>i insert_response_data]<line_sep>min_key=min(primary_key_list)<line_sep>max_key=max(primary_key_list)<line_sep>params={"primary_key____from":min_key "primary_key____to":max_key "bool_value____list":<true> "char_value____str":'string%' "char_value____str_____matching_pattern":'case_sensitive' "date_value____from":"2021-07-22" "date_value____to":"2021-07-25" "float4_value____from":-1 "float4_value____to":2 "float4_value____list":0 "float8_value____from":-1 "float8_value____to":2 "float8_value____list":0 "int2_value____from":-1 "int2_value____to":9 "int2_value____list":0 "int4_value____from":-1 "int4_value____to":9 "int4_value____list":0 "int8_value____from":-1 "int8_value____to":9 "int8_value____list":0 "interval_value____from":-1 "interval_value____to":9 "interval_value____list":0 "numeric_value____from":-1 "numeric_value____to":9 "numeric_value____list":0 "text_value____list":"string" "time_value____from":'18:18:18' "time_value____to":'18:18:18' "time_value____list":'18:18:18' "timestamp_value_value____from":"2021-07-24T02:54:53.285" "timestamp_value_value____to":"2021-07-24T02:54:53.285" "timestamp_value_value____list":"2021-07-24T02:54:53.285" "timestamptz_value_value____from":"2021-07-24T02:54:53.285Z" "timestamptz_value_value____to":"2021-07-24T02:54:53.285Z" "timestamptz_value_value____list":"2021-07-24T02:54:53.285Z" "uuid_value_value____list":"3fa85f64-5717-4562-b3fc-2c963f66afa6" "time_value____from":'18:18:18+00:00' 
"time_value____to":'18:18:18+00:00' "time_value____list":'18:18:18+00:00' "varchar_value____str":'string' "varchar_value____str_____matching_pattern":'case_sensitive' "varchar_value____list":'string' }<import_from_stmt>urllib.parse urlencode<line_sep>query_string=urlencode(OrderedDict(**params))<line_sep>response=client.delete(f'/test_delete_many?{query_string}')<assert_stmt>response.status_code<eq>200<assert_stmt>response.headers['x-total-count']<eq>'3'<block_end><def_stmt>test_create_many_and_delete_many_but_not_found <block_start>headers={'accept':'application/json' 'Content-Type':'application/json' }<line_sep>data={"insert":[{"bool_value":<true> "char_value":"string" "date_value":"2021-07-24" "float4_value":0 "float8_value":0 "int2_value":0 "int4_value":0 "int8_value":0 "interval_value":0 "json_value":{} "jsonb_value":{} "numeric_value":0 "text_value":"string" "timestamp_value":"2021-07-24T02:54:53.285" "timestamptz_value":"2021-07-24T02:54:53.285Z" "uuid_value":"3fa85f64-5717-4562-b3fc-2c963f66afa6" "varchar_value":"string" "array_value":[0] "array_str__value":["string"] "time_value":"18:18:18" "timetz_value":"18:18:18+00:00"} {"bool_value":<true> "char_value":"string" "date_value":"2021-07-24" "float4_value":0 "float8_value":0 "int2_value":0 "int4_value":0 "int8_value":0 "interval_value":0 "json_value":{} "jsonb_value":{} "numeric_value":0 "text_value":"string" "time_value":"18:18:18" "timestamp_value":"2021-07-24T02:54:53.285" "timestamptz_value":"2021-07-24T02:54:53.285Z" "uuid_value":"3fa85f64-5717-4562-b3fc-2c963f66afa6" "varchar_value":"string" "array_value":[0] "array_str__value":["string"] "timetz_value":"18:18:18+00:00"} {"bool_value":<true> "char_value":"string" "date_value":"2021-07-24" "float4_value":0 "float8_value":0 "int2_value":0 "int4_value":0 "int8_value":0 "interval_value":0 "json_value":{} "jsonb_value":{} "numeric_value":0 "text_value":"string" "timestamp_value":"2021-07-24T02:54:53.285" "timestamptz_value":"2021-07-24T02:54:53.285Z" "uuid_value":"3fa85f64-5717-4562-b3fc-2c963f66afa6" "varchar_value":"string" "array_value":[0] "array_str__value":["string"] "time_value":"18:18:18" "timetz_value":"18:18:18+00:00"} ]}<line_sep>response=client.post('/test_creation_many' headers=headers data=json.dumps(data))<assert_stmt>response.status_code<eq>201<line_sep>insert_response_data=response.json()<line_sep>primary_key_list=[i[primary_key_name]<for>i insert_response_data]<line_sep>min_key=min(primary_key_list)<line_sep>max_key=max(primary_key_list)<line_sep>params={"primary_key____from":min_key "primary_key____to":max_key "bool_value____list":<true> "char_value____str":'string%' "char_value____str_____matching_pattern":'case_sensitive' "date_value____from":"2021-07-22" "date_value____to":"2021-07-25" "float4_value____from":-1 "float4_value____to":2 "float4_value____list":0 "float8_value____from":-1 "float8_value____to":2 "float8_value____list":0 "int2_value____from":-1 "int2_value____to":9 "int2_value____list":0 "int4_value____from":-1 "int4_value____to":9 "int4_value____list":0 "int8_value____from":-1 "int8_value____to":9 "int8_value____list":0 "interval_value____from":-1 "interval_value____to":9 "interval_value____list":0 "numeric_value____from":-1 "numeric_value____to":9 "numeric_value____list":0 "text_value____list":"string" "time_value____from":'10:18:18' "time_value____to":'12:18:18' "time_value____list":'12:18:18' "timestamp_value_value____from":"2021-07-24T02:54:53.285" "timestamp_value_value____to":"2021-07-24T02:54:53.285" 
"timestamp_value_value____list":"2021-07-24T02:54:53.285" "timestamptz_value_value____from":"2021-07-24T02:54:53.285Z" "timestamptz_value_value____to":"2021-07-24T02:54:53.285Z" "timestamptz_value_value____list":"2021-07-24T02:54:53.285Z" "uuid_value_value____list":"3fa85f64-5717-4562-b3fc-2c963f66afa6" "timez_value____from":'18:18:18+00:00' "timez_value____to":'18:18:18+00:00' "timez_value____list":'18:18:18+00:00' "varchar_value____str":'string' "varchar_value____str_____matching_pattern":'case_sensitive' "varchar_value____list":'string' }<import_from_stmt>urllib.parse urlencode<line_sep>query_string=urlencode(OrderedDict(**params))<line_sep>response=client.delete(f'/test_delete_many?{query_string}')<assert_stmt>response.status_code<eq>204<block_end>
<import_stmt>configparser<import_stmt>re<import_from_stmt>os path<try_stmt><block_start><import_stmt>light_the_torch<as>ltt<import_stmt>yaml<assert_stmt>ltt.__version__<ge>"0.2"<block_end><except_stmt>(ImportError AssertionError)<block_start>msg="Please install pyyaml and light-the-torch>=0.2 prior to running this."<line_sep><raise>RuntimeError(msg)<block_end>DEPS_SUBSTITUTION_PATTERN=re.compile(r"\{\[(?P<section>[a-zA-Z\-]+)\]deps\}")<def_stmt>main root="." file=path.join("docs" "requirements-rtd.txt") <block_start>python_version=extract_python_version_from_rtd_config(root)<line_sep>deps=extract_docs_deps_from_tox_config(root)<line_sep>deps.extend(find_pytorch_wheel_links(root python_version))<with_stmt>open(file "w")<as>fh<block_start>fh.write("\n".join(deps)+"\n")<block_end><block_end><def_stmt>extract_python_version_from_rtd_config root file=".readthedocs.yml"<block_start><with_stmt>open(path.join(root file))<as>fh<block_start>data=yaml.load(fh Loader=yaml.FullLoader)<block_end><return>str(data["python"]["version"])<block_end><def_stmt>extract_docs_deps_from_tox_config root file="tox.ini" section="docs-common"<block_start>config=configparser.ConfigParser()<line_sep>config.read(path.join(root file))<line_sep>deps=[]<line_sep>sections=[section]<for_stmt>section sections<block_start><for_stmt>dep config[section]["deps"].strip().split("\n")<block_start>match=DEPS_SUBSTITUTION_PATTERN.match(dep)<if_stmt>match<is><none><block_start>deps.append(dep)<block_end><else_stmt><block_start>sections.append(match.group("section"))<block_end><block_end><block_end><return>deps<block_end><def_stmt>find_pytorch_wheel_links root python_version computation_backend="cpu" platform="linux_x86_64" <block_start><return>ltt.find_links([root] computation_backend=computation_backend python_version=python_version platform=platform )<block_end><if_stmt>__name__<eq>"__main__"<block_start>project_root=path.abspath(path.join(path.dirname(__file__) ".."))<line_sep>main(project_root)<block_end>
<import_from_stmt>gryphon.execution.lib.exchange_color exchange_color<import_from_stmt>gryphon.lib.exchange.exchange_factory *<import_from_stmt>gryphon.lib.logger get_logger<import_from_stmt>gryphon.lib.models.exchange Balance<import_from_stmt>gryphon.lib session<line_sep>logger=get_logger(__name__)<def_stmt>balance_requests exchanges<block_start>balance_requests=[]<for_stmt>exchange exchanges<block_start>balance_requests.append(exchange.get_balance_req())<block_end><return>balance_requests<block_end><def_stmt>balance_responses exchanges balance_requests<block_start>""" This function uses environment variables to set a minimum balances for an exchange. Format:{{exchange.name}}_MINIMUM_USD Examples: BITSTAMP_MINIMUM_USD, CAVIRTEX_MINIMUM_BTC """<line_sep>balances={}<line_sep>balances['system']=Balance()<for_stmt>exchange exchanges<block_start>req=balance_requests.pop(0)<line_sep>balances[exchange.name]=exchange.get_balance_resp(req)<line_sep>balances['system']['USD']<augadd>balances[exchange.name].fiat().to('USD')<line_sep>balances['system']['BTC']<augadd>balances[exchange.name]['BTC']<block_end><return>balances<block_end><def_stmt>get_db_balances exchanges<block_start>db=session.get_a_trading_db_mysql_session()<line_sep>db_balances={}<line_sep>db_balances['system']=Balance()<try_stmt><block_start><for_stmt>exchange exchanges<block_start>exchange_data=exchange.exchange_account_db_object(db)<line_sep>db_balances[exchange.name]=exchange_data.balance<line_sep>db_balances['system']['USD']<augadd>db_balances[exchange.name].fiat().to('USD')<line_sep>db_balances['system']['BTC']<augadd>db_balances[exchange.name]['BTC']<block_end><block_end><finally_stmt><block_start>db.close()<block_end><return>db_balances<block_end><def_stmt>format_balances exchange_balances db_balances<block_start>output_string=u"\n{0:15} : {1:15} | {2:15} || {3:15} | {4:15}\n".format("Balances" "FIAT" "BTC" "dbFIAT" "dbBTC")<for_stmt>name,balance sorted(exchange_balances.iteritems())<block_start>db_balance=db_balances[name]<line_sep>chunk=u"{0:15} : {1:15} | {2:15.8f} || {3:15} | {4:15.8f}\n".format(name balance.fiat() balance['BTC'].amount db_balance.fiat() db_balance['BTC'].amount)<line_sep>chunk=exchange_color(chunk name)<line_sep>output_string<augadd>chunk<block_end><return>output_string<block_end><def_stmt>balance exchange_name<block_start><if_stmt>exchange_name<block_start>exchange=make_exchange_from_key(exchange_name)<line_sep>exchanges=[exchange]<block_end><else_stmt><block_start>exchanges=all_exchanges()<block_end>brs=balance_requests(exchanges)<line_sep>balances=balance_responses(exchanges brs)<line_sep>db_balances=get_db_balances(exchanges)<line_sep>print(format_balances(balances db_balances))<block_end>
# Copyright © 2020 <NAME> <<EMAIL>> # Licensed under Apache License 2.0 <http://www.apache.org/licenses/LICENSE-2.0> <import_from_stmt>.instrumentation PrometheusFastApiInstrumentator<line_sep>Instrumentator=PrometheusFastApiInstrumentator<line_sep>
""" Syndication feed generation library -- used for generating RSS, etc. Sample usage: >>> from django.utils import feedgenerator >>> feed = feedgenerator.Rss201rev2Feed( ... title="Poynter E-Media Tidbits", ... link="http://www.poynter.org/column.asp?id=31", ... description="A group Weblog by the sharpest minds in online media/journalism/publishing.", ... language="en", ... ) >>> feed.add_item( ... title="Hello", ... link="http://www.holovaty.com/test/", ... description="Testing." ... ) >>> with open('test.rss', 'w') as fp: ... feed.write(fp, 'utf-8') For definitions of the different versions of RSS, see: http://web.archive.org/web/20110718035220/http://diveintomark.org/archives/2004/02/04/incompatible-rss """<import_from_future_stmt> unicode_literals<import_stmt>datetime<import_from_stmt>django.utils.xmlutils SimplerXMLGenerator<import_from_stmt>django.utils.encoding force_text iri_to_uri<import_from_stmt>django.utils datetime_safe<import_from_stmt>django.utils six<import_from_stmt>django.utils.six StringIO<import_from_stmt>django.utils.six.moves.urllib.parse urlparse<import_from_stmt>django.utils.timezone is_aware<def_stmt>rfc2822_date date# We can't use strftime() because it produces locale-dependent results, so # we have to map english month and day names manually <block_start>months=('Jan' 'Feb' 'Mar' 'Apr' 'May' 'Jun' 'Jul' 'Aug' 'Sep' 'Oct' 'Nov' 'Dec' )<line_sep>days=('Mon' 'Tue' 'Wed' 'Thu' 'Fri' 'Sat' 'Sun')<line_sep># Support datetime objects older than 1900 date=datetime_safe.new_datetime(date)<line_sep># We do this ourselves to be timezone aware, email.Utils is not tz aware. dow=days[date.weekday()]<line_sep>month=months[date.month-1]<line_sep>time_str=date.strftime('%s, %%d %s %%Y %%H:%%M:%%S '%(dow month))<if_stmt>six.PY2# strftime returns a byte string in Python 2 <block_start>time_str=time_str.decode('utf-8')<block_end><if_stmt>is_aware(date)<block_start>offset=date.tzinfo.utcoffset(date)<line_sep>timezone=(offset.days<times>24<times>60)+(offset.seconds<floordiv>60)<line_sep>hour,minute=divmod(timezone 60)<line_sep><return>time_str+'%+03d%02d'%(hour minute)<block_end><else_stmt><block_start><return>time_str+'-0000'<block_end><block_end><def_stmt>rfc3339_date date# Support datetime objects older than 1900 <block_start>date=datetime_safe.new_datetime(date)<line_sep>time_str=date.strftime('%Y-%m-%dT%H:%M:%S')<if_stmt>six.PY2# strftime returns a byte string in Python 2 <block_start>time_str=time_str.decode('utf-8')<block_end><if_stmt>is_aware(date)<block_start>offset=date.tzinfo.utcoffset(date)<line_sep>timezone=(offset.days<times>24<times>60)+(offset.seconds<floordiv>60)<line_sep>hour,minute=divmod(timezone 60)<line_sep><return>time_str+'%+03d:%02d'%(hour minute)<block_end><else_stmt><block_start><return>time_str+'Z'<block_end><block_end><def_stmt>get_tag_uri url date<block_start>""" Creates a TagURI. See http://web.archive.org/web/20110514113830/http://diveintomark.org/archives/2004/05/28/howto-atom-id """<line_sep>bits=urlparse(url)<line_sep>d=''<if_stmt>date<is><not><none><block_start>d=',%s'%datetime_safe.new_datetime(date).strftime('%Y-%m-%d')<block_end><return>'tag:%s%s:%s/%s'%(bits.hostname d bits.path bits.fragment)<block_end><class_stmt>SyndicationFeed(object)<block_start>"Base class for all syndication feeds. 
Subclasses should provide write()"<def_stmt>__init__ self title link description language=<none> author_email=<none> author_name=<none> author_link=<none> subtitle=<none> categories=<none> feed_url=<none> feed_copyright=<none> feed_guid=<none> ttl=<none> **kwargs<block_start>to_unicode=<lambda>s:force_text(s strings_only=<true>)<if_stmt>categories<block_start>categories=[force_text(c)<for>c categories]<block_end><if_stmt>ttl<is><not><none># Force ints to unicode <block_start>ttl=force_text(ttl)<block_end>self.feed={'title':to_unicode(title) 'link':iri_to_uri(link) 'description':to_unicode(description) 'language':to_unicode(language) 'author_email':to_unicode(author_email) 'author_name':to_unicode(author_name) 'author_link':iri_to_uri(author_link) 'subtitle':to_unicode(subtitle) 'categories':categories<or>() 'feed_url':iri_to_uri(feed_url) 'feed_copyright':to_unicode(feed_copyright) 'id':feed_guid<or>link 'ttl':ttl }<line_sep>self.feed.update(kwargs)<line_sep>self.items=[]<block_end><def_stmt>add_item self title link description author_email=<none> author_name=<none> author_link=<none> pubdate=<none> comments=<none> unique_id=<none> unique_id_is_permalink=<none> enclosure=<none> categories=() item_copyright=<none> ttl=<none> **kwargs<block_start>""" Adds an item to the feed. All args are expected to be Python Unicode objects except pubdate, which is a datetime.datetime object, and enclosure, which is an instance of the Enclosure class. """<line_sep>to_unicode=<lambda>s:force_text(s strings_only=<true>)<if_stmt>categories<block_start>categories=[to_unicode(c)<for>c categories]<block_end><if_stmt>ttl<is><not><none># Force ints to unicode <block_start>ttl=force_text(ttl)<block_end>item={'title':to_unicode(title) 'link':iri_to_uri(link) 'description':to_unicode(description) 'author_email':to_unicode(author_email) 'author_name':to_unicode(author_name) 'author_link':iri_to_uri(author_link) 'pubdate':pubdate 'comments':to_unicode(comments) 'unique_id':to_unicode(unique_id) 'unique_id_is_permalink':unique_id_is_permalink 'enclosure':enclosure 'categories':categories<or>() 'item_copyright':to_unicode(item_copyright) 'ttl':ttl }<line_sep>item.update(kwargs)<line_sep>self.items.append(item)<block_end><def_stmt>num_items self<block_start><return>len(self.items)<block_end><def_stmt>root_attributes self<block_start>""" Return extra attributes to place on the root (i.e. feed/channel) element. Called from write(). """<line_sep><return>{}<block_end><def_stmt>add_root_elements self handler<block_start>""" Add elements in the root (i.e. feed/channel) element. Called from write(). """<line_sep><pass><block_end><def_stmt>item_attributes self item<block_start>""" Return extra attributes to place on each item (i.e. item/entry) element. """<line_sep><return>{}<block_end><def_stmt>add_item_elements self handler item<block_start>""" Add elements on each item (i.e. item/entry) element. """<line_sep><pass><block_end><def_stmt>write self outfile encoding<block_start>""" Outputs the feed in the given encoding to outfile, which is a file-like object. Subclasses should override this. """<line_sep><raise>NotImplementedError<block_end><def_stmt>writeString self encoding<block_start>""" Returns the feed in the given encoding as a string. """<line_sep>s=StringIO()<line_sep>self.write(s encoding)<line_sep><return>s.getvalue()<block_end><def_stmt>latest_post_date self<block_start>""" Returns the latest item's pubdate. If none of them have a pubdate, this returns the current date/time. 
"""<line_sep>updates=[i['pubdate']<for>i self.items<if>i['pubdate']<is><not><none>]<if_stmt>len(updates)<g>0<block_start>updates.sort()<line_sep><return>updates[-1]<block_end><else_stmt><block_start><return>datetime.datetime.now()<block_end><block_end><block_end><class_stmt>Enclosure(object)<block_start>"Represents an RSS enclosure"<def_stmt>__init__ self url length mime_type<block_start>"All args are expected to be Python Unicode objects"<line_sep>self.length,self.mime_type=length mime_type<line_sep>self.url=iri_to_uri(url)<block_end><block_end><class_stmt>RssFeed(SyndicationFeed)<block_start>mime_type='application/rss+xml; charset=utf-8'<def_stmt>write self outfile encoding<block_start>handler=SimplerXMLGenerator(outfile encoding)<line_sep>handler.startDocument()<line_sep>handler.startElement("rss" self.rss_attributes())<line_sep>handler.startElement("channel" self.root_attributes())<line_sep>self.add_root_elements(handler)<line_sep>self.write_items(handler)<line_sep>self.endChannelElement(handler)<line_sep>handler.endElement("rss")<block_end><def_stmt>rss_attributes self<block_start><return>{"version":self._version "xmlns:atom":"http://www.w3.org/2005/Atom"}<block_end><def_stmt>write_items self handler<block_start><for_stmt>item self.items<block_start>handler.startElement('item' self.item_attributes(item))<line_sep>self.add_item_elements(handler item)<line_sep>handler.endElement("item")<block_end><block_end><def_stmt>add_root_elements self handler<block_start>handler.addQuickElement("title" self.feed['title'])<line_sep>handler.addQuickElement("link" self.feed['link'])<line_sep>handler.addQuickElement("description" self.feed['description'])<if_stmt>self.feed['feed_url']<is><not><none><block_start>handler.addQuickElement("atom:link" <none> {"rel":"self" "href":self.feed['feed_url']})<block_end><if_stmt>self.feed['language']<is><not><none><block_start>handler.addQuickElement("language" self.feed['language'])<block_end><for_stmt>cat self.feed['categories']<block_start>handler.addQuickElement("category" cat)<block_end><if_stmt>self.feed['feed_copyright']<is><not><none><block_start>handler.addQuickElement("copyright" self.feed['feed_copyright'])<block_end>handler.addQuickElement("lastBuildDate" rfc2822_date(self.latest_post_date()))<if_stmt>self.feed['ttl']<is><not><none><block_start>handler.addQuickElement("ttl" self.feed['ttl'])<block_end><block_end><def_stmt>endChannelElement self handler<block_start>handler.endElement("channel")<block_end><block_end><class_stmt>RssUserland091Feed(RssFeed)<block_start>_version="0.91"<def_stmt>add_item_elements self handler item<block_start>handler.addQuickElement("title" item['title'])<line_sep>handler.addQuickElement("link" item['link'])<if_stmt>item['description']<is><not><none><block_start>handler.addQuickElement("description" item['description'])<block_end><block_end><block_end><class_stmt>Rss201rev2Feed(RssFeed)# Spec: http://blogs.law.harvard.edu/tech/rss <block_start>_version="2.0"<def_stmt>add_item_elements self handler item<block_start>handler.addQuickElement("title" item['title'])<line_sep>handler.addQuickElement("link" item['link'])<if_stmt>item['description']<is><not><none><block_start>handler.addQuickElement("description" item['description'])<block_end># Author information. 
<if_stmt>item["author_name"]<and>item["author_email"]<block_start>handler.addQuickElement("author" "%s (%s)"%(item['author_email'] item['author_name']))<block_end><elif_stmt>item["author_email"]<block_start>handler.addQuickElement("author" item["author_email"])<block_end><elif_stmt>item["author_name"]<block_start>handler.addQuickElement("dc:creator" item["author_name"] {"xmlns:dc":"http://purl.org/dc/elements/1.1/"})<block_end><if_stmt>item['pubdate']<is><not><none><block_start>handler.addQuickElement("pubDate" rfc2822_date(item['pubdate']))<block_end><if_stmt>item['comments']<is><not><none><block_start>handler.addQuickElement("comments" item['comments'])<block_end><if_stmt>item['unique_id']<is><not><none><block_start>guid_attrs={}<if_stmt>isinstance(item.get('unique_id_is_permalink') bool)<block_start>guid_attrs['isPermaLink']=str(item['unique_id_is_permalink']).lower()<block_end>handler.addQuickElement("guid" item['unique_id'] guid_attrs)<block_end><if_stmt>item['ttl']<is><not><none><block_start>handler.addQuickElement("ttl" item['ttl'])<block_end># Enclosure. <if_stmt>item['enclosure']<is><not><none><block_start>handler.addQuickElement("enclosure" '' {"url":item['enclosure'].url "length":item['enclosure'].length "type":item['enclosure'].mime_type})<block_end># Categories. <for_stmt>cat item['categories']<block_start>handler.addQuickElement("category" cat)<block_end><block_end><block_end><class_stmt>Atom1Feed(SyndicationFeed)# Spec: http://atompub.org/2005/07/11/draft-ietf-atompub-format-10.html <block_start>mime_type='application/atom+xml; charset=utf-8'<line_sep>ns="http://www.w3.org/2005/Atom"<def_stmt>write self outfile encoding<block_start>handler=SimplerXMLGenerator(outfile encoding)<line_sep>handler.startDocument()<line_sep>handler.startElement('feed' self.root_attributes())<line_sep>self.add_root_elements(handler)<line_sep>self.write_items(handler)<line_sep>handler.endElement("feed")<block_end><def_stmt>root_attributes self<block_start><if_stmt>self.feed['language']<is><not><none><block_start><return>{"xmlns":self.ns "xml:lang":self.feed['language']}<block_end><else_stmt><block_start><return>{"xmlns":self.ns}<block_end><block_end><def_stmt>add_root_elements self handler<block_start>handler.addQuickElement("title" self.feed['title'])<line_sep>handler.addQuickElement("link" "" {"rel":"alternate" "href":self.feed['link']})<if_stmt>self.feed['feed_url']<is><not><none><block_start>handler.addQuickElement("link" "" {"rel":"self" "href":self.feed['feed_url']})<block_end>handler.addQuickElement("id" self.feed['id'])<line_sep>handler.addQuickElement("updated" rfc3339_date(self.latest_post_date()))<if_stmt>self.feed['author_name']<is><not><none><block_start>handler.startElement("author" {})<line_sep>handler.addQuickElement("name" self.feed['author_name'])<if_stmt>self.feed['author_email']<is><not><none><block_start>handler.addQuickElement("email" self.feed['author_email'])<block_end><if_stmt>self.feed['author_link']<is><not><none><block_start>handler.addQuickElement("uri" self.feed['author_link'])<block_end>handler.endElement("author")<block_end><if_stmt>self.feed['subtitle']<is><not><none><block_start>handler.addQuickElement("subtitle" self.feed['subtitle'])<block_end><for_stmt>cat self.feed['categories']<block_start>handler.addQuickElement("category" "" {"term":cat})<block_end><if_stmt>self.feed['feed_copyright']<is><not><none><block_start>handler.addQuickElement("rights" self.feed['feed_copyright'])<block_end><block_end><def_stmt>write_items self handler<block_start><for_stmt>item 
self.items<block_start>handler.startElement("entry" self.item_attributes(item))<line_sep>self.add_item_elements(handler item)<line_sep>handler.endElement("entry")<block_end><block_end><def_stmt>add_item_elements self handler item<block_start>handler.addQuickElement("title" item['title'])<line_sep>handler.addQuickElement("link" "" {"href":item['link'] "rel":"alternate"})<if_stmt>item['pubdate']<is><not><none><block_start>handler.addQuickElement("updated" rfc3339_date(item['pubdate']))<block_end># Author information. <if_stmt>item['author_name']<is><not><none><block_start>handler.startElement("author" {})<line_sep>handler.addQuickElement("name" item['author_name'])<if_stmt>item['author_email']<is><not><none><block_start>handler.addQuickElement("email" item['author_email'])<block_end><if_stmt>item['author_link']<is><not><none><block_start>handler.addQuickElement("uri" item['author_link'])<block_end>handler.endElement("author")<block_end># Unique ID. <if_stmt>item['unique_id']<is><not><none><block_start>unique_id=item['unique_id']<block_end><else_stmt><block_start>unique_id=get_tag_uri(item['link'] item['pubdate'])<block_end>handler.addQuickElement("id" unique_id)<line_sep># Summary. <if_stmt>item['description']<is><not><none><block_start>handler.addQuickElement("summary" item['description'] {"type":"html"})<block_end># Enclosure. <if_stmt>item['enclosure']<is><not><none><block_start>handler.addQuickElement("link" '' {"rel":"enclosure" "href":item['enclosure'].url "length":item['enclosure'].length "type":item['enclosure'].mime_type})<block_end># Categories. <for_stmt>cat item['categories']<block_start>handler.addQuickElement("category" "" {"term":cat})<block_end># Rights. <if_stmt>item['item_copyright']<is><not><none><block_start>handler.addQuickElement("rights" item['item_copyright'])<block_end><block_end><block_end># This isolates the decision of what the system default is, so calling code can # do "feedgenerator.DefaultFeed" instead of "feedgenerator.Rss201rev2Feed". DefaultFeed=Rss201rev2Feed<line_sep>
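A minimal usage sketch for the Atom writer defined above, written as plain Python rather than the token-encoded form used in this corpus; it mirrors the RSS doctest from the module docstring and only uses classes and methods declared in the file (feed title, link and item values are illustrative):

>>> import datetime
>>> atom = Atom1Feed(
...     title="Example feed",
...     link="http://example.com/",
...     description="Demonstrates the Atom flavour of SyndicationFeed.",
...     language="en",
... )
>>> atom.add_item(
...     title="Hello",
...     link="http://example.com/hello/",
...     description="Testing.",
...     pubdate=datetime.datetime(2015, 1, 1),
... )
>>> xml = atom.writeString('utf-8')   # serialize to a string instead of a file

writeString() builds on write(), so the same call works for Rss201rev2Feed or any other subclass that implements write().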
# -*- coding: utf-8 -*- # Licensed under a 3-clause BSD style license, see LICENSE. """ Submodule for useful exceptions =============================== .. note:: not meant for user code in general, though possible. """<line_sep># Definition of handy colours for printing _default="\x1b[00m"<line_sep>_green="\x1b[01;32m"<line_sep>_red="\x1b[01;31m"<class_stmt>InvalidOperationError(Exception)<block_start>"""Exception class for meaningless operations."""<def_stmt>__init__ self *args **kwargs<block_start>Exception.__init__(self *args **kwargs)<block_end><def_stmt>__str__ self<block_start>"""String representation."""<line_sep><return>_red+self.message+_default<block_end><block_end><class_stmt>SkhepTypeError(Exception)<block_start>"""Exception class for non-instantiable classes."""<def_stmt>__init__ self name<block_start>Exception.__init__(self name)<line_sep>self.message="'{0}' is an abstract base class. Instantiate one of its subclasses instead.".format(name)<block_end><def_stmt>__str__ self<block_start>"""String representation."""<line_sep><return>_red+self.message+_default<block_end><block_end>
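A short, hedged illustration of how these exception classes behave; the class name 'Vector3D' is purely illustrative and not part of the module:

err = SkhepTypeError('Vector3D')   # stores a ready-made "abstract base class" message
print(err)                         # message wrapped in the red/default ANSI codes defined above

InvalidOperationError, by contrast, only forwards its arguments to Exception, and its __str__ relies on a message attribute, so it is normally raised with an explicit message string.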
<import_from_stmt>._version get_versions# type: ignore __version__=get_versions()["version"]<del_stmt>get_versions<line_sep>
<import_stmt>logging<import_from_stmt>djmoney.money Money<import_from_stmt>import_export fields<import_from_stmt>ralph.settings DEFAULT_CURRENCY_CODE<line_sep>logger=logging.getLogger(__name__)<class_stmt>ThroughField(fields.Field)<block_start><def_stmt>__init__ self through_model through_from_field_name through_to_field_name attribute=<none> column_name=<none> widget=<none> readonly=<false><block_start>""" Field for through django model import/export Args: through_model: Django through model for M2M relation through_from_field_name: field name model that is currently imported through_to_field_name: field name the model which is added as ManyToMany attribute: string of either an instance attribute or callable off the object column_name: let you provide how this field is named in datasource. widget: defines widget that will be used to represent field data in export readonly: boolean value defines that if this field will be assigned to object during import """<line_sep>self.through_model=through_model<line_sep>self.through_from_field_name=through_from_field_name<line_sep>self.through_to_field_name=through_to_field_name<line_sep>super().__init__(attribute column_name widget readonly)<block_end><def_stmt>save self obj data<block_start><if_stmt><not>self.readonly<block_start>value=data.get(self.column_name)<line_sep>current=set(self.widget.clean(value))<line_sep># filter old assignments to obj by through_model old_objs=set([getattr(i self.through_to_field_name)<for>i self.through_model.objects.filter(**{self.through_from_field_name:obj}).select_related(self.through_to_field_name)])<line_sep>to_add=current-old_objs<line_sep>to_remove=old_objs-current<line_sep>to_add_list=[]<for_stmt>i to_add<block_start>logger.info('Adding %s to %s/%s assignments' i.pk self.through_model obj.pk)<line_sep>to_add_list.append(self.through_model(**{self.through_from_field_name:obj self.through_to_field_name:i}))<block_end><if_stmt>to_add_list<block_start>self.through_model.objects.bulk_create(to_add_list)<block_end><if_stmt>to_remove<block_start>logger.warning('Removing assignments from %s/%s: %s' self.through_model obj.pk [i.pk<for>i to_remove])<line_sep>self.through_model.objects.filter(**{self.through_from_field_name:obj '{}__in'.format(self.through_to_field_name):to_remove}).delete()<block_end><block_end><block_end><block_end><class_stmt>PriceField(fields.Field)<block_start><def_stmt>save self obj data<block_start>price=Money(data['price'] data.get('price_currency' DEFAULT_CURRENCY_CODE))<line_sep>setattr(obj 'price' price)<block_end><block_end>
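PriceField.save() only needs a row dict and any object with a writable price attribute, which makes it easy to sketch in isolation; the row values below are made up, and ThroughField is omitted here because it additionally requires a concrete through model inside a full ModelResource:

from types import SimpleNamespace

row = {'price': '199.99', 'price_currency': 'USD'}   # illustrative import row
obj = SimpleNamespace()                              # stand-in for a model instance
PriceField().save(obj, row)
# obj.price is now Money('199.99', 'USD'); if 'price_currency' is missing,
# DEFAULT_CURRENCY_CODE from ralph.settings is used instead.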
# # Copyright (c) 2015, EURECOM (www.eurecom.fr) # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # 1. Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # 2. Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND # ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED # WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR # ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES # (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; # LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND # ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS # SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # # The views and conclusions contained in the software and documentation are those # of the authors and should not be interpreted as representing official policies, # either expressed or implied, of the FreeBSD Project. <import_stmt>datetime<import_stmt>getopt<import_stmt>getpass<import_stmt>os<import_stmt>re<import_stmt>string<import_stmt>sys<line_sep>version="1.0.2"<line_sep>lines=""<line_sep>iesDefs={}<line_sep>ieofielist={}<line_sep>choicelist={}<line_sep>choiceiesDefs={}<line_sep>outdir='./'<line_sep>filenames=[]<line_sep>verbosity=0<line_sep>prefix=""<line_sep>FAIL='\033[91m'<line_sep>WARN='\033[93m'<line_sep>ENDC='\033[0m'<line_sep>fileprefix=""<line_sep>fileprefix_first_upper=""<def_stmt>printFail string<block_start>sys.stderr.write(FAIL+string+ENDC+"\n")<block_end><def_stmt>printWarning string<block_start>print(WARN+string+ENDC)<block_end><def_stmt>printDebug string<block_start><if_stmt>verbosity<g>0<block_start>print(string)<block_end><block_end><def_stmt>outputHeaderToFile f filename<block_start>now=datetime.datetime.now()<line_sep>f.write("""/* * Copyright (c) 2015, EURECOM (www.eurecom.fr) * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * The views and conclusions contained in the software and documentation are those * of the authors and should not be interpreted as representing official policies, * either expressed or implied, of the FreeBSD Project. */ """)<line_sep>f.write("/*******************************************************************************\n")<line_sep>f.write(" * This file had been created by asn1tostruct.py script v%s\n"%(version))<line_sep>f.write(" * Please do not modify this file but regenerate it via script.\n")<line_sep>f.write(" * Created on: %s by %s\n * from %s\n"%(str(now) getpass.getuser() filenames))<line_sep>f.write(" ******************************************************************************/\n")<block_end><def_stmt>lowerFirstCamelWord word<block_start>""" puts the first word in a CamelCase Word in lowercase. I.e. CustomerID becomes customerID, XMLInfoTest becomes xmlInfoTest """<line_sep>newstr=''<line_sep>swapped=word.swapcase()<line_sep>idx=0<line_sep># if it's all-caps, return an all-lowered version lowered=word.lower()<if_stmt>swapped<eq>lowered<block_start><return>lowered<block_end><for_stmt>c swapped<block_start><if_stmt>c<in>string.ascii_lowercase<block_start>newstr<augadd>c<line_sep>idx<augadd>1<block_end><else_stmt><block_start><break><block_end><block_end><if_stmt>idx<l>2<block_start>newstr<augadd>word[idx:]<block_end><else_stmt><block_start>newstr=newstr[:-1]+word[idx-1:]<block_end><return>newstr<block_end><def_stmt>usage <block_start>print("Python parser for asn1 v%s"%(version))<line_sep>print("Usage: python asn1tostruct.py [options]")<line_sep>print("Available options:")<line_sep>print("-d Enable script debug")<line_sep>print("-f [file] Input file to parse")<line_sep>print("-o [dir] Output files to given directory")<line_sep>print("-h Print this help and return")<block_end><try_stmt><block_start>opts,args=getopt.getopt(sys.argv[1:] "df:ho:" ["debug" "file" "help" "outdir"])<block_end><except_stmt>getopt.GetoptError<as>err# print help information and exit: <block_start>usage()<line_sep>sys.exit(2)<block_end><for_stmt>o,a opts<block_start><if_stmt>o<in>("-f" "--file")<block_start>filenames.append(a)<block_end><if_stmt>o<in>("-d" "--debug")<block_start>verbosity=1<block_end><if_stmt>o<in>("-o" "--outdir")<block_start>outdir=a<if_stmt>outdir.rfind('/')<ne>len(outdir)<block_start>outdir<augadd>'/'<block_end><block_end><if_stmt>o<in>("-h" "--help")<block_start>usage()<line_sep>sys.exit(2)<block_end><block_end><for_stmt>filename filenames<block_start>file=open(filename 'r')<for_stmt>line file# Removing any comment <block_start><if_stmt>line.find('--')<ge>0<block_start>line=line[:line.find('--')]<block_end># Removing any carriage return lines<augadd>re.sub('\r' '' line)<block_end><for_stmt>m re.findall(r'([a-zA-Z0-9-]+)\s*::=\s+SEQUENCE\s+\(\s*SIZE\s*\(\s*\d+\s*\.\.\s*[0-9a-zA-Z-]+\s*\)\s*\)\s*OF\s+[a-zA-Z-]+\s*\{\s*\{\s*([0-9a-zA-Z-]+)\s*\}\s*\}' lines re.MULTILINE)<block_start>ieofielist[m[0]]=m[1]<block_end><for_stmt>m 
re.findall(r'([a-zA-Z0-9-]+)\s*::=\s+E-RAB-IE-ContainerList\s*\{\s*\{\s*([a-zA-Z0-9-]+)\s*\}\s*\}' lines re.MULTILINE)<block_start>ieofielist[m[0]]=m[1]<block_end><for_stmt>m re.findall(r'([a-zA-Z0-9-]+)\s*::=\s+CHOICE\s*\{' lines re.MULTILINE)<block_start>choicelist[m]=m<block_end><for_stmt>i re.findall(r'([a-zA-Z0-9-]+)\s+([A-Z0-9-]+)\s*::=\s*\{\s+([\,\|\{\}\t\n\.{3}\ \-a-zA-Z0-9]+)\s+}\n' lines re.MULTILINE)<block_start>ies=[]<line_sep>maxLength=0<line_sep># TODO: handle extensions <if_stmt>i[1].find('EXTENSION')<ge>0<block_start><continue><block_end><if_stmt>fileprefix<eq>""<block_start>fileprefix=i[1][:i[1].find('-')].lower()<block_end><for_stmt>j re.findall(r'\s*\{\s*([a-zA-Z0-9-\ \t]+)\s*\}\s*[\|,]*' i[2] re.MULTILINE)<block_start><for_stmt>k re.findall(r'ID\s*([a-zA-Z0-9\-]+)\s*CRITICALITY\s*([a-zA-Z0-9\-]+)\s+[A-Z]+\s+([a-zA-Z0-9\-]+)\s*PRESENCE\s*([a-zA-Z0-9\-]+)' j re.MULTILINE)<block_start>printDebug("Got new ie for message "+i[0]+": "+str(k))<if_stmt>len(k[2])<g>maxLength<block_start>maxLength=len(k[2])<block_end>ies.append(k)<block_end><block_end><if_stmt>len(ies)<g>0<block_start>iesDefs[i[0]]={"length":maxLength "ies":ies}<block_end><else_stmt><block_start>printWarning("Didn't find any information element for message: "+i[0])<block_end><block_end><for_stmt>i re.findall(r'([a-zA-Z0-9-]+)\s*::=\s*CHOICE\s*\{\s+([\,\|\t\n\.{3}\ \-a-zA-Z0-9]+)\s+}\n' lines re.MULTILINE)<block_start>choiceies=[]<for_stmt>j re.findall(r'\s*([a-zA-Z0-9-\ \t\n]+)\s*[\|,]*' i[1] re.MULTILINE)<block_start><for_stmt>k re.findall(r'([a-zA-Z0-9\-]+)\s*([a-zA-Z0-9\-]+)' j re.MULTILINE)<block_start>printDebug("Got new ie for message "+i[0]+": "+str(k))<line_sep>choiceies.append(k)<block_end><block_end><if_stmt>len(choiceies)<g>0<block_start>choiceiesDefs[i[0]]={"ies":choiceies}<block_end><else_stmt><block_start>printWarning("Didn't find any information element for message: "+i[0])<block_end><block_end><block_end><if_stmt>len(iesDefs)<eq>0<block_start>printFail("No Information Element parsed, exiting")<line_sep>sys.exit(0)<block_end>fileprefix_first_upper=fileprefix[0].upper()+fileprefix[1:]<line_sep>f=open(outdir+fileprefix+'_ies_defs.h' 'w')<line_sep>outputHeaderToFile(f filename)<line_sep>f.write("#include \"%s_common.h\"\n\n"%(fileprefix))<line_sep>f.write("#ifndef %s_IES_DEFS_H_\n#define %s_IES_DEFS_H_\n\n"%(fileprefix.upper() fileprefix.upper()))<line_sep>f.write("/* Define the version of script used to generate this file */\n")<line_sep>f.write("#define %s_SCRIPT_VERSION (%s)\n\n"%(fileprefix.upper() re.sub('\.' 
'' version)))<for_stmt>key iesDefs<block_start><if_stmt>key<not><in>ieofielist.values()<block_start><continue><block_end><for_stmt>(i j) ieofielist.items()<block_start><if_stmt>j<eq>key<block_start><break><block_end><block_end>f.write("typedef struct %sIEs_s {\n"%(re.sub('-' '_' i)))<line_sep>f.write(" A_SEQUENCE_OF(struct %s_s) %s;\n"%(re.sub('IEs' '' re.sub('-' '_' ieofielist[i])) lowerFirstCamelWord(re.sub('IEs' '' re.sub('-' '_' ieofielist[i])))))<line_sep>f.write("} %sIEs_t;\n\n"%(re.sub('-' '_' i)))<block_end><for_stmt>key iesDefs<block_start>keyupperunderscore=re.sub('-' '_' key.upper())<line_sep>keylowerunderscore=re.sub('-' '_' key.lower())<line_sep>shift=0<if_stmt>len(iesDefs[key]["ies"])<eq>0<block_start><continue><block_end># Presence mask <for_stmt>ie iesDefs[key]["ies"]<block_start>ieupperunderscore=re.sub('-' '_' re.sub('id-' '' ie[0])).upper()<if_stmt>ie[3]<eq>"optional"<or>ie[3]<eq>"conditional"<block_start>f.write("#define {0:<{pad}} {1}\n".format("%s_%s_PRESENT"%(keyupperunderscore ieupperunderscore) "(1 << %d)"%shift pad=iesDefs[key]["length"]+len(keyupperunderscore)+9 ) )<line_sep>shift<augadd>1<block_end><block_end><if_stmt>(shift<g>0)<block_start>f.write("\n")<block_end>f.write("typedef struct %s_s {\n"%(re.sub('-' '_' key)))<if_stmt>(shift<g>0)<block_start>f.write(" {0:<{pad}} {1};\n".format("uint16_t" "presenceMask" pad=iesDefs[key]["length"]+2))<block_end><for_stmt>ie iesDefs[key]["ies"]<block_start>ieunderscore=re.sub('-' '_' ie[2])<line_sep>iename=re.sub('id-' '' ie[0])<line_sep>ienameunderscore=lowerFirstCamelWord(re.sub('-' '_' iename))<if_stmt>ie[2]<in>ieofielist<block_start>f.write(" %sIEs_t %s;"%(re.sub('-' '_' ie[2]) ienameunderscore))<block_end><else_stmt><block_start>f.write(" {0:<{pad}} {1};".format("%s_t"%ieunderscore ienameunderscore pad=iesDefs[key]["length"]+2))<block_end><if_stmt>ie[3]<eq>"optional"<block_start>f.write(" ///< Optional field")<block_end><elif_stmt>ie[3]<eq>"conditional"<block_start>f.write(" ///< Conditional field")<block_end>f.write("\n")<block_end>f.write("} %s_t;\n\n"%(re.sub('-' '_' key)))<block_end>f.write("typedef struct %s_message_s {\n"%(fileprefix))<line_sep>f.write(" %s_ProcedureCode_t procedureCode;\n"%(fileprefix_first_upper))<line_sep>f.write(" %s_Criticality_t criticality;\n"%(fileprefix_first_upper))<line_sep>f.write(" uint8_t direction;\n")<line_sep>f.write(" union {\n")<line_sep>messageList=list(iesDefs.keys())<line_sep>messageList.sort()<for_stmt>message messageList<block_start><if_stmt>message<in>ieofielist.values()<block_start><continue><block_end><if_stmt>len(iesDefs[message]["ies"])<eq>0<block_start><continue><block_end>f.write(" %s_t %s;\n"%(re.sub('-' '_' message) lowerFirstCamelWord(re.sub('-' '_' message))))<block_end>f.write(" } msg;\n")<line_sep>f.write("} %s_message;\n\n"%(fileprefix))<for_stmt>key iesDefs<block_start><if_stmt>key<in>ieofielist.values()<block_start><continue><block_end>structName=re.sub('ies' '' key)<line_sep>asn1cStruct=re.sub('-' '_' re.sub('IEs' '' re.sub('-IEs' '' key)))<line_sep>asn1cStruct=re.sub('Item' 'List' asn1cStruct)<line_sep>keylowerunderscore=re.sub('-' '_' key.lower())<line_sep>firstlower=re.sub('Item' 'List' re.sub('enb' 'eNB' lowerFirstCamelWord(asn1cStruct)))<line_sep>f.write("/** \\brief Decode function for %s ies.\n"%(key))<if_stmt>len(iesDefs[key]["ies"])<ne>0<block_start>f.write(" * \\param %s Pointer to ASN1 structure in which data will be stored\n"%(lowerFirstCamelWord(re.sub('-' '_' key))))<block_end>f.write(" * \\param any_p Pointer to the ANY value to 
decode.\n")<line_sep>f.write(" **/\n")<line_sep>f.write("int %s_decode_%s(\n"%(fileprefix keylowerunderscore))<if_stmt>len(iesDefs[key]["ies"])<ne>0<block_start>f.write(" %s_t *%s,\n"%(re.sub('-' '_' key) lowerFirstCamelWord(re.sub('-' '_' key))))<block_end>f.write(" ANY_t *any_p);\n\n")<if_stmt>len(iesDefs[key]["ies"])<eq>0<block_start><continue><block_end>f.write("/** \\brief Encode function for %s ies.\n"%(key))<line_sep>f.write(" * \\param %s Pointer to the ASN1 structure.\n"%(firstlower))<line_sep>f.write(" * \\param %s Pointer to the IES structure.\n"%(lowerFirstCamelWord(re.sub('-' '_' key))))<line_sep>f.write(" **/\n")<line_sep>f.write("int %s_encode_%s(\n"%(fileprefix re.sub('-' '_' structName.lower())))<line_sep>f.write(" %s_t *%s,\n"%(asn1cStruct firstlower))<line_sep>f.write(" %s_t *%s);\n\n"%(re.sub('-' '_' key) lowerFirstCamelWord(re.sub('-' '_' key))))<block_end><for_stmt>key iesDefs<block_start><if_stmt>key<not><in>ieofielist.values()<block_start><continue><block_end>asn1cStruct=re.sub('-' '_' re.sub('IEs' '' key))<line_sep>asn1cStruct=re.sub('Item' 'List' asn1cStruct)<line_sep>firstlower=re.sub('Item' 'List' re.sub('enb' 'eNB' lowerFirstCamelWord(asn1cStruct)))<line_sep>f.write("/** \\brief Encode function for %s ies.\n"%(key))<line_sep>f.write(" * \\param %s Pointer to the ASN1 structure.\n"%(firstlower))<line_sep>f.write(" * \\param %s Pointer to the IES structure.\n"%(lowerFirstCamelWord(re.sub('-' '_' key))))<line_sep>f.write(" **/\n")<line_sep>f.write("int %s_encode_%s(\n"%(fileprefix firstlower.lower()))<line_sep>f.write(" %s_t *%s,\n"%(asn1cStruct firstlower))<line_sep>f.write(" %sIEs_t *%sIEs);\n\n"%(asn1cStruct firstlower))<line_sep>f.write("/** \\brief Decode function for %s ies.\n"%(key))<line_sep>f.write(" * \\param any_p Pointer to the ANY value to decode.\n")<line_sep>f.write(" * \\param callback Callback function called when any_p is successfully decoded.\n")<line_sep>f.write(" **/\n")<line_sep>f.write("int %s_decode_%s(\n"%(fileprefix firstlower.lower()))<line_sep>f.write(" %sIEs_t *%sIEs,\n"%(asn1cStruct firstlower))<line_sep>f.write(" %s_t *%s);\n\n"%(asn1cStruct lowerFirstCamelWord(asn1cStruct)))<block_end><for_stmt>key choiceiesDefs<block_start><if_stmt>key<not><in>choicelist.values()<block_start><continue><block_end>keyname=re.sub('IEs' '' key)<line_sep>f.write("/** \\brief Decode function for %s ies.\n"%(key))<line_sep>f.write(" * \\param %s_p pointer to buffer to decode.\n"%(lowerFirstCamelWord(re.sub('-' '' keyname))))<line_sep>f.write(" * \\param %s pointer to store the value after decode.\n"%(lowerFirstCamelWord(re.sub('-' '' keyname))))<line_sep>f.write(" **/\n")<line_sep>f.write("int %s_decode_%s(\n"%(fileprefix re.sub('-' '_' keyname).lower()))<line_sep>f.write(" %s_t *%s,\n"%(re.sub('-' '_' keyname) lowerFirstCamelWord(re.sub('-' '' keyname))))<line_sep>f.write(" %s_t *%s_p);\n\n"%(re.sub('-' '_' keyname) lowerFirstCamelWord(re.sub('-' '' keyname))))<block_end><for_stmt>key iesDefs<block_start>asn1cStruct=re.sub('-' '_' re.sub('IEs' '' key))<line_sep>asn1cStruct=re.sub('Item' 'List' asn1cStruct)<line_sep>firstlower=re.sub('Item' 'List' re.sub('enb' 'eNB' lowerFirstCamelWord(asn1cStruct)))<if_stmt>key<in>ieofielist.values()<block_start>f.write("/** \\brief Display %s encapsulated IE using XER encoding.\n"%(asn1cStruct))<line_sep>f.write(" * \\param %s Pointer to the IES structure.\n"%(lowerFirstCamelWord(re.sub('-' '_' key))))<line_sep>f.write(" * \\param file File descriptor to write output.\n")<line_sep>f.write(" 
**/\n")<line_sep>f.write("asn_enc_rval_t %s_xer_print_%s(\n"%(fileprefix re.sub('item' 'list' firstlower.lower())))<line_sep>f.write(" asn_app_consume_bytes_f *cb,\n")<line_sep>f.write(" void *app_key,\n")<line_sep>f.write(" %sIEs_t *%sIEs);\n\n"%(re.sub('item' 'list' asn1cStruct) firstlower))<block_end><else_stmt><block_start>f.write("/** \\brief Display %s message using XER encoding.\n"%(asn1cStruct))<line_sep>f.write(" * \\param message_p Pointer to root message.\n")<line_sep>f.write(" * \\param file File descriptor to write output.\n")<line_sep>f.write(" **/\n")<line_sep>f.write("asn_enc_rval_t %s_xer_print_%s(\n"%(fileprefix firstlower.lower()))<line_sep>f.write(" asn_app_consume_bytes_f *cb,\n")<line_sep>f.write(" void *app_key,\n")<line_sep>f.write(" %s_message *message_p);\n\n"%(fileprefix))<block_end><block_end>f.write("int %s_xer__print2sp(const void *buffer, size_t size, void *app_key);\n\n"%(fileprefix.lower()))<line_sep>f.write("int %s_xer__print2fp(const void *buffer, size_t size, void *app_key);\n\n"%(fileprefix.lower()))<line_sep>f.write("extern size_t %s_string_total_size;\n\n"%(fileprefix.lower()))<for_stmt>key iesDefs<block_start><if_stmt>len(iesDefs[key]["ies"])<eq>0<block_start><continue><block_end>keyupperunderscore=re.sub('-' '_' key.upper())<line_sep>keylowerunderscore=re.sub('-' '_' key.lower())<line_sep>structName=re.sub('ies' '' key flags=re.IGNORECASE)<line_sep>f.write("int free_%s(\n"%(re.sub('-' '_' structName.lower())))<line_sep>f.write(" %s_t *%s);\n\n"%(prefix+re.sub('-' '_' key) lowerFirstCamelWord(re.sub('-' '_' key))))<block_end>f.write("#endif /* %s_IES_DEFS_H_ */\n\n"%(fileprefix.upper()))<line_sep># Generate Decode functions f=open(outdir+fileprefix+'_decoder.c' 'w')<line_sep>outputHeaderToFile(f filename)<line_sep>f.write("#include \"%s_common.h\"\n#include \"%s_ies_defs.h\"\n#include \"log.h\"\n\n"%(fileprefix fileprefix))<for_stmt>key iesDefs<block_start><if_stmt>key<in>ieofielist.values()<block_start><continue><block_end>structName=re.sub('ies' '' key)<line_sep>asn1cStruct=re.sub('-' '_' re.sub('IEs' '' key))<if_stmt>asn1cStruct.rfind('_')<eq>len(asn1cStruct)-1<block_start>asn1cStruct=asn1cStruct[:-1]<block_end>asn1cStruct=re.sub('Item' 'List' asn1cStruct)<line_sep>ielistname=re.sub('UE' 'ue' asn1cStruct)<line_sep>ielistnamefirstlower=ielistname[:1].lower()+ielistname[1:]<line_sep>asn1cStructfirstlower=asn1cStruct[:1].lower()+asn1cStruct[1:]<line_sep>keyName=re.sub('-' '_' key)<line_sep>keyupperunderscore=keyName.upper()<line_sep>firstlower=re.sub('Item' 'List' re.sub('enb' 'eNB' lowerFirstCamelWord(asn1cStruct)))<line_sep>iesaccess=""<if_stmt>key<not><in>ieofielist.values()<block_start>iesaccess="%s_ies."%(firstlower)<block_end>f.write("int %s_decode_%s(\n"%(fileprefix re.sub('-' '_' structName.lower())))<if_stmt>len(iesDefs[key]["ies"])<ne>0<block_start>f.write(" %s_t *%s,\n"%(re.sub('-' '_' key) lowerFirstCamelWord(re.sub('-' '_' key))))<block_end>f.write(" ANY_t *any_p) {\n\n")<line_sep>f.write(" %s_t %s;\n %s_t *%s_p = &%s;\n"%(asn1cStruct asn1cStructfirstlower asn1cStruct asn1cStructfirstlower asn1cStructfirstlower))<line_sep>f.write(" int i, decoded = 0;\n")<if_stmt>len(iesDefs[key]["ies"])<ne>0<block_start>f.write(" int tempDecoded = 0;\n")<block_end>f.write(" assert(any_p != NULL);\n")<if_stmt>len(iesDefs[key]["ies"])<ne>0<block_start>f.write(" assert(%s != NULL);\n\n"%(lowerFirstCamelWord(re.sub('-' '_' key))))<line_sep>f.write(" memset(%s, 0, sizeof(%s_t));\n"%(lowerFirstCamelWord(re.sub('-' '_' key)) prefix+re.sub('-' '_' 
key)))<block_end>f.write(" OAILOG_DEBUG (LOG_%s, \"Decoding message %s (%%s:%%d)\\n\", __FILE__, __LINE__);\n\n"%(fileprefix.upper() re.sub('-' '_' keyName)))<line_sep>f.write(" ANY_to_type_aper(any_p, &asn_DEF_%s, (void**)&%s_p);\n\n"%(asn1cStruct asn1cStructfirstlower))<line_sep>f.write(" for (i = 0; i < %s_p->%slist.count; i++) {\n"%(asn1cStructfirstlower iesaccess))<line_sep>f.write(" %s_IE_t *ie_p;\n"%(fileprefix[0].upper()+fileprefix[1:]))<line_sep>f.write(" ie_p = %s_p->%slist.array[i];\n"%(asn1cStructfirstlower iesaccess))<line_sep>f.write(" switch(ie_p->id) {\n")<for_stmt>ie iesDefs[key]["ies"]<block_start>iename=re.sub('id-' '' ie[0])<line_sep>ienameunderscore=lowerFirstCamelWord(re.sub('-' '_' iename))<line_sep>ienameunderscorefirstlower=lowerFirstCamelWord(ienameunderscore)<line_sep>ietypesubst=re.sub('-' '' ie[2])<line_sep>ietypeunderscore=re.sub('-' '_' ie[2])<line_sep>ieupperunderscore=re.sub('-' '_' re.sub('id-' '' ie[0])).upper()<if_stmt>ie[3]<eq>"optional"<block_start>f.write(" /* Optional field */\n")<block_end><elif_stmt>ie[3]<eq>"conditional"<block_start>f.write(" /* Conditional field */\n")<block_end>f.write(" case %s_ProtocolIE_ID_%s:\n"%(fileprefix_first_upper re.sub('-' '_' ie[0])))<line_sep>f.write(" {\n")<line_sep>f.write(" %s_t *%s_p = NULL;\n"%(ietypeunderscore lowerFirstCamelWord(ietypesubst)))<if_stmt>ie[3]<ne>"mandatory"<block_start>f.write(" %s->presenceMask |= %s_%s_PRESENT;\n"%(lowerFirstCamelWord(re.sub('-' '_' key)) keyupperunderscore ieupperunderscore))<block_end>f.write(" tempDecoded = ANY_to_type_aper(&ie_p->value, &asn_DEF_%s, (void**)&%s_p);\n"%(ietypeunderscore lowerFirstCamelWord(ietypesubst)))<line_sep>f.write(" if (tempDecoded < 0 || %s_p == NULL) {\n"%(lowerFirstCamelWord(ietypesubst)))<line_sep>f.write(" OAILOG_ERROR (LOG_%s, \"Decoding of IE %s failed\\n\");\n"%(fileprefix.upper() ienameunderscore))<line_sep>f.write(" if (%s_p)\n"%(lowerFirstCamelWord(ietypesubst)))<line_sep>f.write(" ASN_STRUCT_FREE(asn_DEF_%s, %s_p);\n"%(ietypeunderscore lowerFirstCamelWord(ietypesubst)))<line_sep>f.write(" return -1;\n")<line_sep>f.write(" }\n")<line_sep>f.write(" decoded += tempDecoded;\n")<line_sep>f.write(" if (asn1_xer_print)\n")<line_sep>f.write(" xer_fprint(stdout, &asn_DEF_%s, %s_p);\n"%(ietypeunderscore lowerFirstCamelWord(ietypesubst)))<if_stmt>ie[2]<in>(list(ieofielist.keys())+list(choicelist.keys()))<block_start><if_stmt>ie[2]<in>choicelist.keys()<block_start>f.write(" if (%s_decode_%s(&%s->%s, %s_p) < 0) {\n"%(fileprefix ietypeunderscore.lower() lowerFirstCamelWord(re.sub('-' '_' key)) ienameunderscore lowerFirstCamelWord(ietypesubst)))<line_sep>f.write(" OAILOG_ERROR (LOG_%s, \"Decoding of encapsulated IE %s failed\\n\");\n"%(fileprefix.upper() lowerFirstCamelWord(ietypesubst)))<line_sep>f.write(" }\n")<line_sep>f.write(" ASN_STRUCT_FREE(asn_DEF_%s, %s_p);\n"%(ietypeunderscore lowerFirstCamelWord(ietypesubst)) )<block_end><elif_stmt>ie[2]<in>ieofielist.keys()<block_start>f.write(" if (%s_decode_%s(&%s->%s, %s_p) < 0) {\n"%(fileprefix ietypeunderscore.lower() lowerFirstCamelWord(re.sub('-' '_' key)) ienameunderscore lowerFirstCamelWord(ietypesubst)))<line_sep>f.write(" OAILOG_ERROR (LOG_%s, \"Decoding of encapsulated IE %s failed\\n\");\n"%(fileprefix.upper() lowerFirstCamelWord(ietypesubst)))<line_sep>f.write(" }\n")<line_sep>f.write(" ASN_STRUCT_FREE(asn_DEF_%s, %s_p);\n"%(ietypeunderscore lowerFirstCamelWord(ietypesubst)) )<block_end><block_end><else_stmt><block_start>f.write(" memcpy(&%s->%s, %s_p, 
sizeof(%s_t));\n"%(lowerFirstCamelWord(re.sub('-' '_' key)) ienameunderscore lowerFirstCamelWord(ietypesubst) ietypeunderscore))<line_sep>f.write(" FREEMEM(%s_p);\n"%(lowerFirstCamelWord(ietypesubst)))<line_sep>f.write(" %s_p = NULL;\n"%(lowerFirstCamelWord(ietypesubst)))<block_end>f.write(" } break;\n")<block_end>f.write(" default:\n")<line_sep>f.write(" OAILOG_ERROR (LOG_%s, \"Unknown protocol IE id (%%d) for message %s\\n\", (int)ie_p->id);\n"%(fileprefix.upper() re.sub('-' '_' structName.lower())))<line_sep>f.write(" }\n")<line_sep>f.write(" }\n")<line_sep>f.write(" ASN_STRUCT_FREE(asn_DEF_%s, %s_p);\n"%(asn1cStruct asn1cStructfirstlower))<line_sep>f.write(" return decoded;\n")<line_sep>f.write("}\n\n")<block_end># Generate free functions for encapsulated IEs <for_stmt>key iesDefs<block_start><if_stmt>key<not><in>ieofielist.values()<block_start><continue><block_end><if_stmt>len(iesDefs[key]["ies"])<eq>0<block_start><continue><block_end># TODO: Check if the encapsulated IE also contains further encap. ie=iesDefs[key]["ies"][0]<line_sep>ietypeunderscore=prefix+re.sub('-' '_' ie[2])<line_sep>keyname=re.sub('IEs' '' re.sub('Item' 'List' key))<line_sep>iesStructName=lowerFirstCamelWord(re.sub('Item' 'List' re.sub('-' '_' key)))<line_sep>f.write("int free_%s(\n"%(re.sub('-' '_' keyname).lower()))<line_sep>f.write(" %sIEs_t *%s) {\n\n"%(re.sub('-' '_' keyname) iesStructName))<line_sep>f.write(" assert(%s != NULL);\n\n"%(iesStructName))<line_sep>f.write(" for (int i = 0; i < %s->%s.count; i++) {\n"%(iesStructName re.sub('IEs' '' lowerFirstCamelWord(re.sub('-' '_' key))) ) )<line_sep>f.write(" ASN_STRUCT_FREE(asn_DEF_%s, %s->%s.array[i]);\n"%(ietypeunderscore iesStructName re.sub('IEs' '' lowerFirstCamelWord(re.sub('-' '_' key))) ) )<line_sep>f.write(" }\n")<line_sep>f.write(" free(%s->%s.array);\n"%(iesStructName re.sub('IEs' '' lowerFirstCamelWord(re.sub('-' '_' key)))) )<line_sep>f.write(" return 0;\n")<line_sep>f.write("}\n\n")<block_end><for_stmt>key iesDefs<block_start><if_stmt>len(iesDefs[key]["ies"])<eq>0<block_start><continue><block_end>keyupperunderscore=re.sub('-' '_' key.upper())<line_sep>keylowerunderscore=re.sub('-' '_' key.lower())<line_sep>structName=re.sub('ies' '' key flags=re.IGNORECASE)<line_sep>f.write("int free_%s(\n"%(re.sub('-' '_' structName.lower())))<line_sep>f.write(" %s_t *%s) {\n\n"%(prefix+re.sub('-' '_' key) lowerFirstCamelWord(re.sub('-' '_' key)) ) )<for_stmt>ie iesDefs[key]["ies"]<block_start>ietypeunderscore=prefix+re.sub('-' '_' ie[2])<line_sep>ieupperunderscore=re.sub('-' '_' re.sub('id-' '' ie[0])).upper()<if_stmt>ie[3]<ne>"mandatory"<block_start><if_stmt>ie[3]<eq>"optional"<block_start>f.write(" /* Optional field */\n")<block_end><elif_stmt>ie[3]<eq>"conditional"<block_start>f.write(" /* Conditional field */\n")<block_end>f.write(" if ((%s->presenceMask & %s_%s_PRESENT)\n"%(lowerFirstCamelWord(re.sub('-' '_' key)) keyupperunderscore ieupperunderscore))<line_sep>f.write(" == %s_%s_PRESENT) \n "%(keyupperunderscore ieupperunderscore))<block_end>iename=re.sub('id-' '' ie[0])<line_sep>ienameunderscore=lowerFirstCamelWord(re.sub('-' '_' iename))<line_sep># Check if this is an encapsulated IE, if so call the free function. 
<if_stmt>ie[2]<in>ieofielist.keys()<block_start>keyname=re.sub('IEs' '' re.sub('Item' 'List' ie[2]))<line_sep>f.write(" free_%s(&%s->%s);\n"%(re.sub('-' '_' keyname).lower() lowerFirstCamelWord(re.sub('-' '_' key)) ienameunderscore ) )<block_end><else_stmt><block_start>f.write(" ASN_STRUCT_FREE_CONTENTS_ONLY(asn_DEF_%s, &%s->%s);\n"%(ietypeunderscore lowerFirstCamelWord(re.sub('-' '_' key)) ienameunderscore))<block_end><block_end>f.write(" return 0;\n")<line_sep>f.write("}\n\n")<block_end><for_stmt>key iesDefs<block_start><if_stmt>key<not><in>ieofielist.values()<block_start><continue><block_end>keyname=re.sub('IEs' '' re.sub('Item' 'List' key))<line_sep>f.write("int %s_decode_%s(\n"%(fileprefix re.sub('-' '_' keyname).lower()))<line_sep>f.write(" %sIEs_t *%sIEs,\n"%(re.sub('-' '_' keyname) lowerFirstCamelWord(re.sub('-' '_' keyname))))<line_sep>f.write(" %s_t *%s) {\n\n"%(re.sub('-' '_' keyname) lowerFirstCamelWord(re.sub('-' '_' keyname))))<line_sep>f.write(" int i, decoded = 0;\n")<line_sep>f.write(" int tempDecoded = 0;\n\n")<line_sep>f.write(" assert(%s != NULL);\n"%(lowerFirstCamelWord(re.sub('-' '_' keyname))))<line_sep>f.write(" assert(%sIEs != NULL);\n\n"%(lowerFirstCamelWord(re.sub('-' '_' keyname))))<line_sep>f.write(" for (i = 0; i < %s->list.count; i++) {\n"%(lowerFirstCamelWord(re.sub('-' '_' keyname))))<line_sep>f.write(" %s_IE_t *ie_p = %s->list.array[i];\n"%(fileprefix[0].upper()+fileprefix[1:] lowerFirstCamelWord(re.sub('-' '_' keyname))))<line_sep>f.write(" switch (ie_p->id) {\n")<for_stmt>ie iesDefs[key]["ies"]<block_start>iename=re.sub('id-' '' ie[0])<line_sep>ienameunderscore=lowerFirstCamelWord(re.sub('-' '_' iename))<line_sep>f.write(" case %s_ProtocolIE_ID_%s:\n"%(fileprefix_first_upper re.sub('-' '_' ie[0])))<line_sep>f.write(" {\n")<line_sep>f.write(" %s_t *%s_p = NULL;\n"%(re.sub('-' '_' ie[2]) lowerFirstCamelWord(re.sub('-' '' ie[2]))))<line_sep>f.write(" tempDecoded = ANY_to_type_aper(&ie_p->value, &asn_DEF_%s, (void**)&%s_p);\n"%(re.sub('-' '_' ie[2]) lowerFirstCamelWord(re.sub('-' '' ie[2]))))<line_sep>f.write(" if (tempDecoded < 0 || %s_p == NULL) {\n"%(lowerFirstCamelWord(re.sub('-' '' ie[2]))))<line_sep>f.write(" OAILOG_ERROR (LOG_%s, \"Decoding of IE %s for message %s failed\\n\");\n"%(fileprefix.upper() ienameunderscore re.sub('-' '_' keyname)))<line_sep>f.write(" if (%s_p)\n"%(lowerFirstCamelWord(re.sub('-' '' ie[2]))))<line_sep>f.write(" ASN_STRUCT_FREE(asn_DEF_%s, %s_p);\n"%(re.sub('-' '_' ie[2]) lowerFirstCamelWord(re.sub('-' '' ie[2]))))<line_sep>f.write(" return -1;\n")<line_sep>f.write(" }\n")<line_sep>f.write(" decoded += tempDecoded;\n")<line_sep>f.write(" if (asn1_xer_print)\n")<line_sep>f.write(" xer_fprint(stdout, &asn_DEF_%s, %s_p);\n"%(re.sub('-' '_' ie[2]) lowerFirstCamelWord(re.sub('-' '' ie[2]))))<line_sep>f.write(" ASN_SEQUENCE_ADD(&%sIEs->%s, %s_p);\n"%(lowerFirstCamelWord(re.sub('-' '_' keyname)) re.sub('IEs' '' lowerFirstCamelWord(re.sub('-' '_' key))) lowerFirstCamelWord(re.sub('-' '' ie[2])) ) )<line_sep>f.write(" } break;\n")<block_end>f.write(" default:\n")<line_sep>f.write(" OAILOG_ERROR (LOG_%s, \"Unknown protocol IE id (%%d) for message %s\\n\", (int)ie_p->id);\n"%(fileprefix.upper() re.sub('-' '_' structName.lower())))<line_sep>f.write(" return -1;\n")<line_sep>f.write(" }\n")<line_sep>f.write(" }\n")<line_sep>f.write(" return decoded;\n")<line_sep>f.write("}\n\n")<block_end><for_stmt>key choiceiesDefs<block_start><if_stmt>key<not><in>choicelist.values()<block_start><continue><block_end>keyname=re.sub('IEs' '' 
key)<line_sep>f.write("int %s_decode_%s(\n"%(fileprefix re.sub('-' '_' keyname).lower()))<line_sep>f.write(" %s_t *%s,\n"%(re.sub('-' '_' keyname) lowerFirstCamelWord(re.sub('-' '' keyname))))<line_sep>f.write(" %s_t *%s_p) {\n\n"%(re.sub('-' '_' keyname) lowerFirstCamelWord(re.sub('-' '' keyname))))<line_sep>f.write(" assert(%s_p != NULL);\n"%(lowerFirstCamelWord(re.sub('-' '' keyname))))<line_sep>f.write(" assert(%s != NULL);\n\n"%(lowerFirstCamelWord(re.sub('-' '' keyname))))<line_sep>f.write(" OAILOG_DEBUG (LOG_%s, \"Decoding choice %s (%%s:%%d)\\n\", __FILE__, __LINE__);\n"%(fileprefix.upper() re.sub('-' '_' keyname)))<line_sep>f.write(" %s->present = %s_p->present;\n\n"%(lowerFirstCamelWord(re.sub('-' '' keyname)) lowerFirstCamelWord(re.sub('-' '' keyname))))<line_sep>f.write(" switch (%s_p->present) {\n"%(lowerFirstCamelWord(re.sub('-' '' keyname))))<for_stmt>ie choiceiesDefs[key]["ies"]<block_start>iename=re.sub('-' '_' ie[0])<line_sep>f.write(" case %s_PR_%s:\n"%(re.sub('-' '_' keyname) iename))<line_sep>f.write(" {\n")<if_stmt>ie[1]<in>ieofielist.keys()<block_start>ienameunderscore=re.sub('-' '_' ie[1])<line_sep>f.write(" if (%s_decode_%s((%sIEs_t *)&%s->choice.%s, &%s_p->choice.%s) < 0) {\n"%(fileprefix ienameunderscore.lower() ienameunderscore lowerFirstCamelWord(re.sub('-' '' keyname)) iename lowerFirstCamelWord(re.sub('-' '' keyname)) iename))<line_sep>f.write(" OAILOG_ERROR (LOG_%s, \"Decoding of encapsulated IE %s failed\\n\");\n"%(fileprefix.upper() lowerFirstCamelWord(ienameunderscore)))<line_sep>f.write(" return -1;\n")<line_sep>f.write(" }\n")<block_end><else_stmt><block_start>f.write(" OAILOG_DEBUG (LOG_%s, \"Decoding %s (%%s:%%d)\\n\", __FILE__, __LINE__);\n"%(fileprefix.upper() re.sub('-' '_' iename)))<line_sep>f.write(" memcpy(%s, %s_p, sizeof(%s_t));\n"%(lowerFirstCamelWord(re.sub('-' '' keyname)) lowerFirstCamelWord(re.sub('-' '' keyname)) re.sub('-' '_' keyname)))<block_end>f.write(" } break;\n")<block_end>f.write(" default:\n")<line_sep>f.write(" OAILOG_ERROR (LOG_%s, \"Unknown choice type (%%d) for %s\\n\", (int)%s_p->present);\n"%(fileprefix.upper() re.sub('-' '_' keyname) lowerFirstCamelWord(re.sub('-' '' keyname))))<line_sep>f.write(" return -1;\n")<line_sep>f.write(" }\n")<line_sep>f.write(" return 0;\n")<line_sep>f.write("}\n\n")<block_end># Generate IES Encode functions f=open(outdir+fileprefix+'_encoder.c' 'w')<line_sep>outputHeaderToFile(f filename)<line_sep>f.write("#include \"%s_common.h\"\n"%(fileprefix))<line_sep>f.write("#include \"%s_ies_defs.h\"\n\n"%(fileprefix))<for_stmt>key iesDefs<block_start><if_stmt>key<in>ieofielist.values()<block_start><continue><block_end>structName=re.sub('ies' '' key)<line_sep>asn1cStruct=re.sub('-' '_' re.sub('IEs' '' key))<line_sep>asn1cStruct=re.sub('Item' 'List' asn1cStruct)<if_stmt>asn1cStruct.rfind('_')<eq>len(asn1cStruct)-1<block_start>asn1cStruct=asn1cStruct[:-1]<block_end>asn1cStructfirstlower=asn1cStruct[:1].lower()+asn1cStruct[1:]<line_sep>firstwordlower=re.sub('Item' 'List' re.sub('enb' 'eNB' lowerFirstCamelWord(asn1cStruct)))<line_sep>iesaccess=""<if_stmt>key<not><in>ieofielist.values()<block_start>iesaccess="%s_ies."%(firstwordlower)<block_end>keyName=re.sub('-' '_' key)<line_sep>keyupperunderscore=keyName.upper()<line_sep># No IE to encode... 
<if_stmt>len(iesDefs[key]["ies"])<eq>0<block_start><continue><block_end>f.write("int %s_encode_%s(\n"%(fileprefix re.sub('-' '_' structName.lower())))<line_sep>f.write(" %s_t *%s,\n"%(asn1cStruct firstwordlower))<line_sep>f.write(" %s_t *%s) {\n\n"%(re.sub('-' '_' key) lowerFirstCamelWord(re.sub('-' '_' key))))<line_sep>f.write(" %s_IE_t *ie;\n\n"%(fileprefix_first_upper))<line_sep>f.write(" assert(%s != NULL);\n"%(firstwordlower))<line_sep>f.write(" assert(%s != NULL);\n\n"%(lowerFirstCamelWord(re.sub('-' '_' key))))<for_stmt>ie iesDefs[key]["ies"]<block_start>iename=re.sub('-' '_' re.sub('id-' '' ie[0]))<line_sep>ienameunderscore=re.sub('-' '_' iename)<line_sep>ienamefirstwordlower=lowerFirstCamelWord(iename)<line_sep>ieupperunderscore=re.sub('-' '_' re.sub('id-' '' ie[0])).upper()<line_sep>ietypeunderscore=re.sub('-' '_' ie[2])<if_stmt>ie[3]<ne>"mandatory"<block_start><if_stmt>ie[3]<eq>"optional"<block_start>f.write(" /* Optional field */\n")<block_end><elif_stmt>ie[3]<eq>"conditional"<block_start>f.write(" /* Conditional field */\n")<block_end>f.write(" if (%s->presenceMask & %s_%s_PRESENT) {\n\n"%(lowerFirstCamelWord(re.sub('-' '_' key)) keyupperunderscore ieupperunderscore))<line_sep>#f.write(" == %s_%s_PRESENT) {\n" % (keyupperunderscore, ieupperunderscore)) <if_stmt>ie[2]<in>ieofielist.keys()<block_start>f.write(" %s_t %s;\n"%(ietypeunderscore ienamefirstwordlower))<line_sep>f.write(" memset(&%s, 0, sizeof(%s_t));\n"%(ienamefirstwordlower ietypeunderscore))<line_sep>f.write("\n")<line_sep>f.write(" if (%s_encode_%s(&%s, &%s->%s) < 0) return -1;\n"%(fileprefix ietypeunderscore.lower() ienamefirstwordlower lowerFirstCamelWord(re.sub('-' '_' key)) ienamefirstwordlower))<block_end>f.write(" if ((ie = %s_new_ie(%s_ProtocolIE_ID_%s,\n"%(fileprefix fileprefix_first_upper re.sub('-' '_' ie[0])))<line_sep>f.write(" %s_Criticality_%s,\n"%(fileprefix_first_upper ie[1]))<line_sep>f.write(" &asn_DEF_%s,\n"%(ietypeunderscore))<line_sep>#f.write(" &%s->%s)) == NULL) {\n" % (lowerFirstCamelWord(re.sub('-', '_', key)), ienamefirstwordlower)) <if_stmt>ie[2]<in>ieofielist.keys()<block_start>f.write(" &%s)) == NULL) {\n"%(ienamefirstwordlower))<block_end><else_stmt><block_start>f.write(" &%s->%s)) == NULL) {\n"%(lowerFirstCamelWord(re.sub('-' '_' key)) ienamefirstwordlower))<block_end>f.write(" return -1;\n")<line_sep>f.write(" }\n")<line_sep>f.write(" ASN_SEQUENCE_ADD(&%s->%slist, ie);\n"%(firstwordlower iesaccess))<if_stmt>ie[2]<in>ieofielist.keys()<block_start>f.write(" /* Free any dynamic allocation that is no more used */\n")<line_sep>f.write(" ASN_STRUCT_FREE_CONTENTS_ONLY(asn_DEF_%s, &%s);\n"%(ietypeunderscore ienamefirstwordlower))<block_end>f.write(" }\n\n")<block_end><else_stmt><block_start><if_stmt>ie[2]<in>ieofielist.keys()<block_start>f.write(" %s_t %s;\n\n"%(ietypeunderscore ienamefirstwordlower))<line_sep>f.write(" memset(&%s, 0, sizeof(%s_t));\n"%(ienamefirstwordlower ietypeunderscore))<line_sep>f.write("\n")<line_sep>f.write(" if (%s_encode_%s(&%s, &%s->%s) < 0) return -1;\n"%(fileprefix ietypeunderscore.lower() ienamefirstwordlower lowerFirstCamelWord(re.sub('-' '_' key)) ienamefirstwordlower))<block_end>f.write(" if ((ie = %s_new_ie(%s_ProtocolIE_ID_%s,\n"%(fileprefix fileprefix_first_upper re.sub('-' '_' ie[0])))<line_sep>f.write(" %s_Criticality_%s,\n"%(fileprefix_first_upper ie[1]))<line_sep>f.write(" &asn_DEF_%s,\n"%(ietypeunderscore))<if_stmt>ie[2]<in>ieofielist.keys()<block_start>f.write(" &%s)) == NULL) {\n"%(ienamefirstwordlower))<block_end><else_stmt><block_start>f.write(" 
&%s->%s)) == NULL) {\n"%(lowerFirstCamelWord(re.sub('-' '_' key)) ienamefirstwordlower))<block_end>f.write(" return -1;\n")<line_sep>f.write(" }\n")<line_sep>f.write(" ASN_SEQUENCE_ADD(&%s->%slist, ie);\n\n"%(firstwordlower iesaccess))<if_stmt>ie[2]<in>ieofielist.keys()<block_start>f.write(" /* Free any dynamic allocation that is no more used */\n")<line_sep>f.write(" ASN_STRUCT_FREE_CONTENTS_ONLY(asn_DEF_%s, &%s);\n\n"%(ietypeunderscore ienamefirstwordlower))<block_end><block_end><block_end>f.write(" return 0;\n")<line_sep>f.write("}\n\n")<block_end><for_stmt>(key value) iesDefs.items()<block_start><if_stmt>key<not><in>ieofielist.values()<block_start><continue><block_end>ie=value["ies"][0]<line_sep>ietypeunderscore=re.sub('-' '_' ie[2])<line_sep>asn1cStruct=re.sub('-' '_' re.sub('IEs' '' re.sub('-IEs' '' key)))<line_sep>asn1cStruct=re.sub('Item' 'List' asn1cStruct)<line_sep>firstwordlower=re.sub('Item' 'List' re.sub('enb' 'eNB' lowerFirstCamelWord(asn1cStruct)))<for_stmt>(i j) ieofielist.items()<block_start><if_stmt>j<eq>key<block_start><break><block_end><block_end>f.write("int %s_encode_%s(\n"%(fileprefix re.sub('-' '_' i).lower()))<line_sep>f.write(" %s_t *%s,\n"%(asn1cStruct firstwordlower))<line_sep>f.write(" %sIEs_t *%sIEs) {\n\n"%(re.sub('-' '_' i) lowerFirstCamelWord(re.sub('-' '_' i))))<line_sep>f.write(" int i;\n")<line_sep>f.write(" %s_IE_t *ie;\n\n"%(fileprefix_first_upper))<line_sep>f.write(" assert(%s != NULL);\n"%(firstwordlower))<line_sep>f.write(" assert(%sIEs != NULL);\n\n"%(lowerFirstCamelWord(re.sub('-' '_' i))))<line_sep>f.write(" for (i = 0; i < %sIEs->%s.count; i++) {\n"%(firstwordlower re.sub('IEs' '' lowerFirstCamelWord(re.sub('-' '_' key)))))<line_sep>f.write(" if ((ie = %s_new_ie(%s_ProtocolIE_ID_%s,\n"%(fileprefix fileprefix_first_upper re.sub('-' '_' ie[0])))<line_sep>f.write(" %s_Criticality_%s,\n"%(fileprefix_first_upper ie[1]))<line_sep>f.write(" &asn_DEF_%s,\n"%(ietypeunderscore))<line_sep>f.write(" %sIEs->%s.array[i])) == NULL) {\n"%(firstwordlower re.sub('IEs' '' lowerFirstCamelWord(re.sub('-' '_' key)))))<line_sep>f.write(" return -1;\n")<line_sep>f.write(" }\n")<line_sep>f.write(" ASN_SEQUENCE_ADD(&%s->list, ie);\n"%(firstwordlower))<line_sep>f.write(" }\n")<line_sep>f.write(" return 0;\n")<line_sep>f.write("}\n\n")<block_end># Generate xer print functions f=open(outdir+fileprefix+'_xer_print.c' 'w')<line_sep>outputHeaderToFile(f filename)<line_sep>f.write("#include <stdlib.h>\n")<line_sep>f.write("#include <stdio.h>\n\n")<line_sep>f.write("#include <asn_application.h>\n#include <asn_internal.h>\n\n")<line_sep>f.write("#include \"%s_common.h\"\n#include \"%s_ies_defs.h\"\n\n"%(fileprefix fileprefix))<line_sep>f.write("size_t %s_string_total_size = 0;\n\n"%(fileprefix.lower()))<line_sep>f.write("""int %s_xer__print2fp(const void *buffer, size_t size, void *app_key) { FILE *stream = (FILE *)app_key; if(fwrite(buffer, 1, size, stream) != size) return -1; return 0; } """%(fileprefix.lower()) )<line_sep>f.write("""int %s_xer__print2sp(const void *buffer, size_t size, void *app_key) { char *string = (char *)app_key; /* Copy buffer to the formatted string */ memcpy(&string[%s_string_total_size], buffer, size); %s_string_total_size += size; return 0; } """%(fileprefix.lower() fileprefix.lower() fileprefix.lower()) )<line_sep>f.write("""static asn_enc_rval_t xer_encode_local(asn_TYPE_descriptor_t *td, void *sptr, asn_app_consume_bytes_f *cb, void *app_key, int indent) { asn_enc_rval_t er, tmper; const char *mname; size_t mlen; int xcan = 2; if(!td || !sptr) goto 
cb_failed; mname = td->xml_tag; mlen = strlen(mname); _i_ASN_TEXT_INDENT(0, indent); _ASN_CALLBACK3("<", 1, mname, mlen, ">", 1); tmper = td->xer_encoder(td, sptr, indent + 1, XER_F_BASIC, cb, app_key); if(tmper.encoded == -1) return tmper; _ASN_CALLBACK3("</", 2, mname, mlen, ">\\n", xcan); er.encoded = 4 + xcan + (2 * mlen) + tmper.encoded; _ASN_ENCODED_OK(er); cb_failed: _ASN_ENCODE_FAILED; } """)<for_stmt>(key value) iesDefs.items()<block_start>keyName=re.sub('-' '_' key)<line_sep>keyupperunderscore=keyName.upper()<line_sep>iesStructName=lowerFirstCamelWord(re.sub('-' '_' key))<line_sep>ie=value["ies"][0]<line_sep>ietypeunderscore=re.sub('-' '_' ie[2])<if_stmt>key<in>ieofielist.values()<block_start>f.write("asn_enc_rval_t %s_xer_print_%s(\n"%(fileprefix re.sub('ies' '' re.sub('item' 'list' re.sub('-' '_' key).lower()))))<block_end><else_stmt><block_start>f.write("asn_enc_rval_t %s_xer_print_%s(\n"%(fileprefix re.sub('ies' '' re.sub('-' '_' key).lower())))<block_end>#f.write(" FILE *file,\n") f.write(" asn_app_consume_bytes_f *cb,\n")<line_sep>f.write(" void *app_key,\n")<if_stmt>key<in>ieofielist.values()<block_start>iesStructName=lowerFirstCamelWord(re.sub('Item' 'List' re.sub('-' '_' key)))<line_sep>f.write(" %sIEs_t *%s) {\n\n"%(re.sub('IEs' '' re.sub('Item' 'List' re.sub('-' '_' key))) iesStructName))<line_sep>f.write(" int i;\n")<line_sep>f.write(" asn_enc_rval_t er;\n")<block_end><else_stmt><block_start>f.write(" %s_message *message_p)\n{\n"%(fileprefix))<line_sep>f.write(" %s_t *%s;\n"%(re.sub('-' '_' key) iesStructName))<line_sep>f.write(" asn_enc_rval_t er;\n")<line_sep>#f.write(" void *app_key = (void *)file;\n") #f.write(" asn_app_consume_bytes_f *cb = %s_xer__print2fp;\n\n" % (fileprefix.lower())) f.write(" %s = &message_p->msg.%s;\n\n"%(iesStructName iesStructName))<block_end><if_stmt>key<in>ieofielist.values()# Increase indentation level <block_start>f.write(" for (i = 0; i < %s->%s.count; i++) {\n"%(iesStructName re.sub('IEs' '' lowerFirstCamelWord(re.sub('-' '_' key)))))<line_sep>#f.write(" xer_fprint(file, &asn_DEF_%s, %s->%s.array[i]);\n" % (ietypeunderscore, iesStructName, re.sub('IEs', '', lowerFirstCamelWord(re.sub('-', '_', key))))) f.write(" er = xer_encode(&asn_DEF_%s, %s->%s.array[i], XER_F_BASIC, cb, app_key);\n"%(ietypeunderscore iesStructName re.sub('IEs' '' lowerFirstCamelWord(re.sub('-' '_' key)))))<line_sep>f.write(" }\n")<block_end><else_stmt><block_start>f.write(" cb(\"<%s-PDU>\\n\", %d, app_key);\n"%(key len("<%s-PDU>\n"%(key))))<line_sep>f.write(" xer_encode_local(&asn_DEF_%s_Criticality, &message_p->criticality, cb, app_key, 1);\n"%fileprefix_first_upper)<line_sep>f.write(" xer_encode_local(&asn_DEF_%s_ProcedureCode, &message_p->procedureCode, cb, app_key, 1);\n"%fileprefix_first_upper)<line_sep>f.write(" cb(\" <%s>\\n\", %d, app_key);\n"%(key len(" <%s>\n"%(key))))<for_stmt>ie iesDefs[key]["ies"]<block_start>iename=re.sub('-' '_' re.sub('id-' '' ie[0]))<line_sep>ienameunderscore=re.sub('-' '_' iename)<line_sep>ienamefirstwordlower=lowerFirstCamelWord(iename)<line_sep>ietypeunderscore=re.sub('-' '_' ie[2])<line_sep>ieupperunderscore=re.sub('-' '_' re.sub('id-' '' ie[0])).upper()<if_stmt>ie[3]<ne>"mandatory"<block_start><if_stmt>ie[3]<eq>"optional"<block_start>f.write(" /* Optional field */\n")<block_end><elif_stmt>ie[3]<eq>"conditional"<block_start>f.write(" /* Conditional field */\n")<block_end>f.write(" if (%s->presenceMask & %s_%s_PRESENT)\n "%(iesStructName keyupperunderscore ieupperunderscore))<block_end># Is it an encapsulated IE ? 
<if_stmt>ie[2]<in>ieofielist.keys()<block_start>f.write(" %s_xer_print_%s(cb, app_key, &%s->%s);\n"%(fileprefix re.sub('ies' '' re.sub('-' '_' ie[2]).lower()) iesStructName ienamefirstwordlower))<block_end><else_stmt><block_start>f.write(" xer_encode_local(&asn_DEF_%s, &%s->%s, cb, app_key, 2);\n"%(ietypeunderscore iesStructName ienamefirstwordlower))<block_end><block_end>f.write(" cb(\" </%s>\\n\", %d, app_key);\n"%(key len(" </%s>\n"%(key))))<line_sep>f.write(" cb(\"</%s-PDU>\\n\", %d, app_key);\n"%(key len("</%s-PDU>\n"%(key))))<block_end>f.write(" _ASN_ENCODED_OK(er);\n")<line_sep># if key not in ieofielist.values(): # f.write("cb_failed:\n") #f.write(" return er;\n") f.write("}\n\n")<block_end>
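The script above is meant to be run from the command line (per its usage() text, something like: python asn1tostruct.py -f <asn1-file> -o <output-dir>), and many of the generated identifiers pass through lowerFirstCamelWord(); a few worked examples of that helper, checked against its implementation above:

lowerFirstCamelWord('CustomerID')    # -> 'customerID'
lowerFirstCamelWord('XMLInfoTest')   # -> 'xmlInfoTest'
lowerFirstCamelWord('MME')           # -> 'mme'   (all-caps words are lowered entirely)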
<import_from_stmt>.lunarlander_env LunarLanderEnv<line_sep>
# # Monte Carlo Simulation of BCC97 Model # 12_val/BCC97_simulation.py # # (c) Dr. <NAME> # Derivatives Analytics with Python # <import_stmt>sys<import_stmt>math<import_stmt>numpy<as>np<import_stmt>matplotlib<as>mpl<import_stmt>matplotlib.pyplot<as>plt<line_sep>sys.path.append('11_cal')<import_from_stmt>H93_calibration S0 kappa_r theta_r sigma_r r0<line_sep>mpl.rcParams['font.family']='serif'<line_sep># # Model Parameters # opt=np.load('11_cal/opt_full.npy')<line_sep>kappa_v,theta_v,sigma_v,rho,v0,lamb,mu,delta=opt<line_sep># # Simulation Parameters # T=1.0# time horizon M=25# time steps I=10000# number of replications per valuation anti_paths=<true># antithetic paths for variance reduction moment_matching=<true># moment matching for variance reduction np.random.seed(100000)# seed value for random number generator # # Random Number Generation # <def_stmt>generate_cholesky rho<block_start>''' Function to generate Cholesky matrix. Parameters ========== rho: float correlation between index level and variance Returns ======= matrix: NumPy array Cholesky matrix '''<line_sep>rho_rs=0# correlation between index level and short rate covariance=np.zeros((4 4) dtype=np.float)<line_sep>covariance[0]=[1.0 rho_rs 0.0 0.0]<line_sep>covariance[1]=[rho_rs 1.0 rho 0.0]<line_sep>covariance[2]=[0.0 rho 1.0 0.0]<line_sep>covariance[3]=[0.0 0.0 0.0 1.0]<line_sep>cho_matrix=np.linalg.cholesky(covariance)<line_sep><return>cho_matrix<block_end><def_stmt>random_number_generator M I anti_paths moment_matching<block_start>''' Function to generate pseudo-random numbers. Parameters ========== M: int time steps I: int number of simulation paths anti_paths: bool flag for antithetic paths moment_matching: bool flag for moment matching Returns ======= rand: NumPy array random number array '''<if_stmt>anti_paths<block_start>rand=np.random.standard_normal((4 M+1 int(I/2)))<line_sep>rand=np.concatenate((rand -rand) 2)<block_end><else_stmt><block_start>rand=np.random.standard_normal((4 M+1 I))<block_end><if_stmt>moment_matching<block_start><for_stmt>a range(4)<block_start>rand[a]=rand[a]/np.std(rand[a])<line_sep>rand[a]=rand[a]-np.mean(rand[a])<block_end><block_end><return>rand<block_end># # Function for Short Rate and Volatility Processes # <def_stmt>SRD_generate_paths x0 kappa theta sigma T M I rand row cho_matrix<block_start>''' Function to simulate Square-Root Difussion (SRD/CIR) process. Parameters ========== x0: float initial value kappa: float mean-reversion factor theta: float long-run mean sigma: float volatility factor T: float final date/time horizon M: int number of time steps I: int number of paths row: int row number for random numbers cho_matrix: NumPy array cholesky matrix Returns ======= x: NumPy array simulated variance paths '''<line_sep>dt=T/M<line_sep>x=np.zeros((M+1 I) dtype=np.float)<line_sep>x[0]=x0<line_sep>xh=np.zeros_like(x)<line_sep>xh[0]=x0<line_sep>sdt=math.sqrt(dt)<for_stmt>t range(1 M+1)<block_start>ran=np.dot(cho_matrix rand[: t])<line_sep>xh[t]=(xh[t-1]+kappa<times>(theta-np.maximum(0 xh[t-1]))<times>dt+np.sqrt(np.maximum(0 xh[t-1]))<times>sigma<times>ran[row]<times>sdt)<line_sep>x[t]=np.maximum(0 xh[t])<block_end><return>x<block_end># # Function for B96 Index Process # <def_stmt>B96_generate_paths S0 r v lamb mu delta rand row1 row2 cho_matrix T M I moment_matching<block_start>''' Simulation of Bates (1996) index process. 
Parameters ========== S0: float initial value r: NumPy array simulated short rate paths v: NumPy array simulated variance paths lamb: float jump intensity mu: float expected jump size delta: float standard deviation of jump rand: NumPy array random number array row1, row2: int rows/matrices of random number array to use cho_matrix: NumPy array Cholesky matrix T: float time horizon, maturity M: int number of time intervals, steps I: int number of paths to simulate moment_matching: bool flag for moment matching Returns ======= S: NumPy array simulated index level paths '''<line_sep>S=np.zeros((M+1 I) dtype=np.float)<line_sep>S[0]=S0<line_sep>dt=T/M<line_sep>sdt=math.sqrt(dt)<line_sep>ranp=np.random.poisson(lamb<times>dt (M+1 I))<line_sep>bias=0.0<for_stmt>t range(1 M+1 1)<block_start>ran=np.dot(cho_matrix rand[: t :])<if_stmt>moment_matching<block_start>bias=np.mean(np.sqrt(v[t])<times>ran[row1]<times>sdt)<block_end>S[t]=S[t-1]<times>(np.exp(((r[t]+r[t-1])/2-0.5<times>v[t])<times>dt+np.sqrt(v[t])<times>ran[row1]<times>sdt-bias)+(np.exp(mu+delta<times>ran[row2])-1)<times>ranp[t])<block_end><return>S<block_end><if_stmt>__name__<eq>'__main__'# # Simulation # <block_start>cho_matrix=generate_cholesky(rho)<line_sep>rand=random_number_generator(M I anti_paths moment_matching)<line_sep>r=SRD_generate_paths(r0 kappa_r theta_r sigma_r T M I rand 0 cho_matrix)<line_sep>v=SRD_generate_paths(v0 kappa_v theta_v sigma_v T M I rand 2 cho_matrix)<line_sep>S=B96_generate_paths(S0 r v lamb mu delta rand 1 3 cho_matrix T M I moment_matching)<block_end><def_stmt>plot_rate_paths r<block_start>plt.figure(figsize=(10 6))<line_sep>plt.plot(r[: :10])<line_sep>plt.xlabel('time step')<line_sep>plt.ylabel('short rate level')<line_sep>plt.title('Short Rate Simulated Paths')<block_end><def_stmt>plot_volatility_paths v<block_start>plt.figure(figsize=(10 6))<line_sep>plt.plot(np.sqrt(v[: :10]))<line_sep>plt.xlabel('time step')<line_sep>plt.ylabel('volatility level')<line_sep>plt.title('Volatility Simulated Paths')<block_end><def_stmt>plot_index_paths S<block_start>plt.figure(figsize=(10 6))<line_sep>plt.plot(S[: :10])<line_sep>plt.xlabel('time step')<line_sep>plt.ylabel('index level')<line_sep>plt.title('EURO STOXX 50 Simulated Paths')<block_end><def_stmt>plot_index_histogram S<block_start>plt.figure(figsize=(10 6))<line_sep>plt.hist(S[-1] bins=30)<line_sep>plt.xlabel('index level')<line_sep>plt.ylabel('frequency')<line_sep>plt.title('EURO STOXX 50 Values after 1 Year')<block_end>
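# A minimal follow-up sketch (not part of the original script): with the index
# paths S and short-rate paths r simulated in the __main__ block above, a
# European call can be valued by Monte Carlo as the discounted mean payoff.
# The strike K is a hypothetical parameter chosen only for illustration.
def value_european_call(S, r, K, T, M):
    ''' Discount the terminal payoff max(S_T - K, 0) path by path, using the
    trapezoidal integral of the simulated short rate as the discount factor. '''
    dt = T / M
    integral_r = np.sum((r[1:] + r[:-1]) / 2.0, axis=0) * dt
    discount = np.exp(-integral_r)
    payoff = np.maximum(S[-1] - K, 0)
    return np.mean(discount * payoff)

# example call (only meaningful after the simulation in __main__ has run),
# e.g. an at-the-money strike: value_european_call(S, r, K=S0, T=T, M=M)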
""" Implementation of the CNN Decoder part of "Convolutional Sequence to Sequence Learning" """<import_stmt>torch<import_stmt>torch.nn<as>nn<import_stmt>onmt.modules<import_from_stmt>onmt.decoders.decoder DecoderState<import_from_stmt>onmt.utils.misc aeq<import_from_stmt>onmt.utils.cnn_factory shape_transform GatedConv<line_sep>SCALE_WEIGHT=0.5<power>0.5<class_stmt>CNNDecoder(nn.Module)<block_start>""" Decoder built on CNN, based on :cite:`DBLP:journals/corr/GehringAGYD17`. Consists of residual convolutional layers, with ConvMultiStepAttention. """<def_stmt>__init__ self num_layers hidden_size attn_type copy_attn cnn_kernel_width dropout embeddings<block_start>super(CNNDecoder self).__init__()<line_sep># Basic attributes. self.decoder_type='cnn'<line_sep>self.num_layers=num_layers<line_sep>self.hidden_size=hidden_size<line_sep>self.cnn_kernel_width=cnn_kernel_width<line_sep>self.embeddings=embeddings<line_sep>self.dropout=dropout<line_sep># Build the CNN. input_size=self.embeddings.embedding_size<line_sep>self.linear=nn.Linear(input_size self.hidden_size)<line_sep>self.conv_layers=nn.ModuleList()<for_stmt>_ range(self.num_layers)<block_start>self.conv_layers.append(GatedConv(self.hidden_size self.cnn_kernel_width self.dropout <true>))<block_end>self.attn_layers=nn.ModuleList()<for_stmt>_ range(self.num_layers)<block_start>self.attn_layers.append(onmt.modules.ConvMultiStepAttention(self.hidden_size))<block_end># CNNDecoder has its own attention mechanism. # Set up a separated copy attention layer, if needed. self._copy=<false><if_stmt>copy_attn<block_start>self.copy_attn=onmt.modules.GlobalAttention(hidden_size attn_type=attn_type)<line_sep>self._copy=<true><block_end><block_end><def_stmt>forward self tgt memory_bank state memory_lengths=<none> step=<none><block_start>""" See :obj:`onmt.modules.RNNDecoderBase.forward()`"""<line_sep># NOTE: memory_lengths is only here for compatibility reasons # with onmt.modules.RNNDecoderBase.forward() # CHECKS <assert_stmt>isinstance(state CNNDecoderState)<line_sep>_,tgt_batch,_=tgt.size()<line_sep>_,contxt_batch,_=memory_bank.size()<line_sep>aeq(tgt_batch contxt_batch)<line_sep># END CHECKS <if_stmt>state.previous_input<is><not><none><block_start>tgt=torch.cat([state.previous_input tgt] 0)<block_end># Initialize return variables. outputs=[]<line_sep>attns={"std":[]}<assert_stmt><not>self._copy "Copy mechanism not yet tested in conv2conv"<if_stmt>self._copy<block_start>attns["copy"]=[]<block_end>emb=self.embeddings(tgt)<assert_stmt>emb.dim()<eq>3# len x batch x embedding_dim tgt_emb=emb.transpose(0 1).contiguous()<line_sep># The output of CNNEncoder. src_memory_bank_t=memory_bank.transpose(0 1).contiguous()<line_sep># The combination of output of CNNEncoder and source embeddings. src_memory_bank_c=state.init_src.transpose(0 1).contiguous()<line_sep># Run the forward pass of the CNNDecoder. 
emb_reshape=tgt_emb.contiguous().view(tgt_emb.size(0)<times>tgt_emb.size(1) -1)<line_sep>linear_out=self.linear(emb_reshape)<line_sep>x=linear_out.view(tgt_emb.size(0) tgt_emb.size(1) -1)<line_sep>x=shape_transform(x)<line_sep>pad=torch.zeros(x.size(0) x.size(1) self.cnn_kernel_width-1 1)<line_sep>pad=pad.type_as(x)<line_sep>base_target_emb=x<for_stmt>conv,attention zip(self.conv_layers self.attn_layers)<block_start>new_target_input=torch.cat([pad x] 2)<line_sep>out=conv(new_target_input)<line_sep>c,attn=attention(base_target_emb out src_memory_bank_t src_memory_bank_c)<line_sep>x=(x+(c+out)<times>SCALE_WEIGHT)<times>SCALE_WEIGHT<block_end>output=x.squeeze(3).transpose(1 2)<line_sep># Process the result and update the attentions. outputs=output.transpose(0 1).contiguous()<if_stmt>state.previous_input<is><not><none><block_start>outputs=outputs[state.previous_input.size(0):]<line_sep>attn=attn[: state.previous_input.size(0):].squeeze()<line_sep>attn=torch.stack([attn])<block_end>attns["std"]=attn<if_stmt>self._copy<block_start>attns["copy"]=attn<block_end># Update the state. state.update_state(tgt)<line_sep><return>outputs state attns<block_end><def_stmt>init_decoder_state self _ memory_bank enc_hidden with_cache=<false><block_start>""" Init decoder state. """<line_sep><return>CNNDecoderState(memory_bank enc_hidden)<block_end><block_end><class_stmt>CNNDecoderState(DecoderState)<block_start>""" Init CNN decoder state. """<def_stmt>__init__ self memory_bank enc_hidden<block_start>self.init_src=(memory_bank+enc_hidden)<times>SCALE_WEIGHT<line_sep>self.previous_input=<none><block_end>@property<def_stmt>_all self<block_start>""" Contains attributes that need to be updated in self.beam_update(). """<line_sep><return>(self.previous_input )<block_end><def_stmt>detach self<block_start>self.previous_input=self.previous_input.detach()<block_end><def_stmt>update_state self new_input<block_start>""" Called for every decoder forward pass. """<line_sep>self.previous_input=new_input<block_end><def_stmt>repeat_beam_size_times self beam_size<block_start>""" Repeat beam_size times along batch dimension. """<line_sep>self.init_src=self.init_src.data.repeat(1 beam_size 1)<block_end><block_end>
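# Illustrative construction sketch (an assumption, not part of OpenNMT): the
# decoder only needs an embeddings module exposing `embedding_size`, so a
# hypothetical stand-in is used here; torch/nn are already imported above.
class _DummyEmbeddings(nn.Module):
    def __init__(self, vocab_size, embedding_size):
        super(_DummyEmbeddings, self).__init__()
        self.embedding_size = embedding_size
        self.lut = nn.Embedding(vocab_size, embedding_size)

    def forward(self, tgt):
        # tgt: (len, batch, 1) token ids -> (len, batch, embedding_size)
        return self.lut(tgt.squeeze(2))


if __name__ == '__main__':
    decoder = CNNDecoder(num_layers=2, hidden_size=256, attn_type='general',
                         copy_attn=False, cnn_kernel_width=3, dropout=0.1,
                         embeddings=_DummyEmbeddings(vocab_size=10000,
                                                     embedding_size=256))
    # toy encoder outputs: (src_len, batch, hidden_size)
    memory_bank = torch.zeros(7, 4, 256)
    enc_hidden = torch.zeros(7, 4, 256)
    state = decoder.init_decoder_state(None, memory_bank, enc_hidden)
    print(decoder)
    print(type(state).__name__)  # CNNDecoderState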
# -*- coding: utf-8 -*- <import_stmt>factory<import_from_stmt>koalixcrm.crm.models TaskLinkType<class_stmt>RelatedToTaskLinkTypeFactory(factory.django.DjangoModelFactory)<block_start><class_stmt>Meta<block_start>model=TaskLinkType<line_sep>django_get_or_create=('title' )<block_end>title="Is related to"<line_sep>description="This task is related with ...."<block_end><class_stmt>RequiresLinkTypeFactory(factory.django.DjangoModelFactory)<block_start><class_stmt>Meta<block_start>model=TaskLinkType<line_sep>django_get_or_create=('title' )<block_end>title="This task requires"<line_sep>description="This task requires the completion or the existence of ...."<block_end>
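# Hypothetical usage sketch (assumes pytest-django; not part of this module):
# because of django_get_or_create=('title',), repeated factory calls reuse the
# same TaskLinkType rows instead of creating duplicates.
import pytest


@pytest.mark.django_db
def test_task_link_type_factories():
    related = RelatedToTaskLinkTypeFactory()
    requires = RequiresLinkTypeFactory()
    assert related.title == "Is related to"
    assert requires.title == "This task requires"
    assert RelatedToTaskLinkTypeFactory().pk == related.pk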
<import_from_stmt>collections namedtuple<import_from_stmt>datetime datetime time<import_from_stmt>django forms<import_from_stmt>django.utils.dateparse parse_datetime<import_from_stmt>django.utils.encoding force_str<import_from_stmt>django.utils.translation gettext_lazy<as>_<import_from_stmt>.conf settings<import_from_stmt>.constants EMPTY_VALUES<import_from_stmt>.utils handle_timezone<import_from_stmt>.widgets BaseCSVWidget CSVWidget DateRangeWidget LookupChoiceWidget RangeWidget <class_stmt>RangeField(forms.MultiValueField)<block_start>widget=RangeWidget<def_stmt>__init__ self fields=<none> *args **kwargs<block_start><if_stmt>fields<is><none><block_start>fields=(forms.DecimalField() forms.DecimalField())<block_end>super().__init__(fields *args **kwargs)<block_end><def_stmt>compress self data_list<block_start><if_stmt>data_list<block_start><return>slice(*data_list)<block_end><return><none><block_end><block_end><class_stmt>DateRangeField(RangeField)<block_start>widget=DateRangeWidget<def_stmt>__init__ self *args **kwargs<block_start>fields=(forms.DateField() forms.DateField())<line_sep>super().__init__(fields *args **kwargs)<block_end><def_stmt>compress self data_list<block_start><if_stmt>data_list<block_start>start_date,stop_date=data_list<if_stmt>start_date<block_start>start_date=handle_timezone(datetime.combine(start_date time.min) <false>)<block_end><if_stmt>stop_date<block_start>stop_date=handle_timezone(datetime.combine(stop_date time.max) <false>)<block_end><return>slice(start_date stop_date)<block_end><return><none><block_end><block_end><class_stmt>DateTimeRangeField(RangeField)<block_start>widget=DateRangeWidget<def_stmt>__init__ self *args **kwargs<block_start>fields=(forms.DateTimeField() forms.DateTimeField())<line_sep>super().__init__(fields *args **kwargs)<block_end><block_end><class_stmt>IsoDateTimeRangeField(RangeField)<block_start>widget=DateRangeWidget<def_stmt>__init__ self *args **kwargs<block_start>fields=(IsoDateTimeField() IsoDateTimeField())<line_sep>super().__init__(fields *args **kwargs)<block_end><block_end><class_stmt>TimeRangeField(RangeField)<block_start>widget=DateRangeWidget<def_stmt>__init__ self *args **kwargs<block_start>fields=(forms.TimeField() forms.TimeField())<line_sep>super().__init__(fields *args **kwargs)<block_end><block_end><class_stmt>Lookup(namedtuple('Lookup' ('value' 'lookup_expr')))<block_start><def_stmt>__new__ cls value lookup_expr<block_start><if_stmt>value<in>EMPTY_VALUES<or>lookup_expr<in>EMPTY_VALUES<block_start><raise>ValueError("Empty values ([], (), {}, '', None) are not "<concat>"valid Lookup arguments. 
Return None instead.")<block_end><return>super().__new__(cls value lookup_expr)<block_end><block_end><class_stmt>LookupChoiceField(forms.MultiValueField)<block_start>default_error_messages={'lookup_required':_('Select a lookup.') }<def_stmt>__init__ self field lookup_choices *args **kwargs<block_start>empty_label=kwargs.pop('empty_label' settings.EMPTY_CHOICE_LABEL)<line_sep>fields=(field ChoiceField(choices=lookup_choices empty_label=empty_label))<line_sep>widget=LookupChoiceWidget(widgets=[f.widget<for>f fields])<line_sep>kwargs['widget']=widget<line_sep>kwargs['help_text']=field.help_text<line_sep>super().__init__(fields *args **kwargs)<block_end><def_stmt>compress self data_list<block_start><if_stmt>len(data_list)<eq>2<block_start>value,lookup_expr=data_list<if_stmt>value<not><in>EMPTY_VALUES<block_start><if_stmt>lookup_expr<not><in>EMPTY_VALUES<block_start><return>Lookup(value=value lookup_expr=lookup_expr)<block_end><else_stmt><block_start><raise>forms.ValidationError(self.error_messages['lookup_required'] code='lookup_required')<block_end><block_end><block_end><return><none><block_end><block_end><class_stmt>IsoDateTimeField(forms.DateTimeField)<block_start>""" Supports 'iso-8601' date format too which is out the scope of the ``datetime.strptime`` standard library # ISO 8601: ``http://www.w3.org/TR/NOTE-datetime`` Based on Gist example by <NAME> https://gist.github.com/copitux/5773821 """<line_sep>ISO_8601='iso-8601'<line_sep>input_formats=[ISO_8601]<def_stmt>strptime self value format<block_start>value=force_str(value)<if_stmt>format<eq>self.ISO_8601<block_start>parsed=parse_datetime(value)<if_stmt>parsed<is><none># Continue with other formats if doesn't match <block_start><raise>ValueError<block_end><return>handle_timezone(parsed)<block_end><return>super().strptime(value format)<block_end><block_end><class_stmt>BaseCSVField(forms.Field)<block_start>""" Base field for validating CSV types. Value validation is performed by secondary base classes. ex:: class IntegerCSVField(BaseCSVField, filters.IntegerField): pass """<line_sep>base_widget_class=BaseCSVWidget<def_stmt>__init__ self *args **kwargs<block_start>widget=kwargs.get('widget')<or>self.widget<line_sep>kwargs['widget']=self._get_widget_class(widget)<line_sep>super().__init__(*args **kwargs)<block_end><def_stmt>_get_widget_class self widget# passthrough, allows for override <block_start><if_stmt>isinstance(widget BaseCSVWidget)<or>(isinstance(widget type)<and>issubclass(widget BaseCSVWidget))<block_start><return>widget<block_end># complain since we are unable to reconstruct widget instances <assert_stmt>isinstance(widget type) "'%s.widget' must be a widget class, not %s."%(self.__class__.__name__ repr(widget))<line_sep>bases=(self.base_widget_class widget )<line_sep><return>type(str('CSV%s'%widget.__name__) bases {})<block_end><def_stmt>clean self value<block_start><if_stmt>value<in>self.empty_values<and>self.required<block_start><raise>forms.ValidationError(self.error_messages['required'] code='required')<block_end><if_stmt>value<is><none><block_start><return><none><block_end><return>[super(BaseCSVField self).clean(v)<for>v value]<block_end><block_end><class_stmt>BaseRangeField(BaseCSVField)# Force use of text input, as range must always have two inputs. A date # input would only allow a user to input one value and would always fail. 
<block_start>widget=CSVWidget<line_sep>default_error_messages={'invalid_values':_('Range query expects two values.')}<def_stmt>clean self value<block_start>value=super().clean(value)<assert_stmt>value<is><none><or>isinstance(value list)<if_stmt>value<and>len(value)<ne>2<block_start><raise>forms.ValidationError(self.error_messages['invalid_values'] code='invalid_values')<block_end><return>value<block_end><block_end><class_stmt>ChoiceIterator# Emulates the behavior of ModelChoiceIterator, but instead wraps # the field's _choices iterable. <block_start><def_stmt>__init__ self field choices<block_start>self.field=field<line_sep>self.choices=choices<block_end><def_stmt>__iter__ self<block_start><if_stmt>self.field.empty_label<is><not><none><block_start><yield>("" self.field.empty_label)<block_end><if_stmt>self.field.null_label<is><not><none><block_start><yield>(self.field.null_value self.field.null_label)<block_end><yield><from>self.choices<block_end><def_stmt>__len__ self<block_start>add=1<if>self.field.empty_label<is><not><none><else>0<line_sep>add<augadd>1<if>self.field.null_label<is><not><none><else>0<line_sep><return>len(self.choices)+add<block_end><block_end><class_stmt>ModelChoiceIterator(forms.models.ModelChoiceIterator)# Extends the base ModelChoiceIterator to add in 'null' choice handling. # This is a bit verbose since we have to insert the null choice after the # empty choice, but before the remainder of the choices. <block_start><def_stmt>__iter__ self<block_start>iterable=super().__iter__()<if_stmt>self.field.empty_label<is><not><none><block_start><yield>next(iterable)<block_end><if_stmt>self.field.null_label<is><not><none><block_start><yield>(self.field.null_value self.field.null_label)<block_end><yield><from>iterable<block_end><def_stmt>__len__ self<block_start>add=1<if>self.field.null_label<is><not><none><else>0<line_sep><return>super().__len__()+add<block_end><block_end><class_stmt>ChoiceIteratorMixin<block_start><def_stmt>__init__ self *args **kwargs<block_start>self.null_label=kwargs.pop('null_label' settings.NULL_CHOICE_LABEL)<line_sep>self.null_value=kwargs.pop('null_value' settings.NULL_CHOICE_VALUE)<line_sep>super().__init__(*args **kwargs)<block_end><def_stmt>_get_choices self<block_start><return>super()._get_choices()<block_end><def_stmt>_set_choices self value<block_start>super()._set_choices(value)<line_sep>value=self.iterator(self self._choices)<line_sep>self._choices=self.widget.choices=value<block_end>choices=property(_get_choices _set_choices)<block_end># Unlike their Model* counterparts, forms.ChoiceField and forms.MultipleChoiceField do not set empty_label <class_stmt>ChoiceField(ChoiceIteratorMixin forms.ChoiceField)<block_start>iterator=ChoiceIterator<def_stmt>__init__ self *args **kwargs<block_start>self.empty_label=kwargs.pop('empty_label' settings.EMPTY_CHOICE_LABEL)<line_sep>super().__init__(*args **kwargs)<block_end><block_end><class_stmt>MultipleChoiceField(ChoiceIteratorMixin forms.MultipleChoiceField)<block_start>iterator=ChoiceIterator<def_stmt>__init__ self *args **kwargs<block_start>self.empty_label=<none><line_sep>super().__init__(*args **kwargs)<block_end><block_end><class_stmt>ModelChoiceField(ChoiceIteratorMixin forms.ModelChoiceField)<block_start>iterator=ModelChoiceIterator<def_stmt>to_python self value# bypass the queryset value check 
<block_start><if_stmt>self.null_label<is><not><none><and>value<eq>self.null_value<block_start><return>value<block_end><return>super().to_python(value)<block_end><block_end><class_stmt>ModelMultipleChoiceField(ChoiceIteratorMixin forms.ModelMultipleChoiceField)<block_start>iterator=ModelChoiceIterator<def_stmt>_check_values self value<block_start>null=self.null_label<is><not><none><and>value<and>self.null_value<in>value<if_stmt>null# remove the null value and any potential duplicates <block_start>value=[v<for>v value<if>v<ne>self.null_value]<block_end>result=list(super()._check_values(value))<line_sep>result<augadd>[self.null_value]<if>null<else>[]<line_sep><return>result<block_end><block_end>
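# Illustrative usage sketch (an addition for clarity, not part of django-filter):
# LookupChoiceField cleans a [value, lookup] pair into the Lookup namedtuple.
# Calling it requires a configured Django settings module, since ChoiceField
# above reads EMPTY_CHOICE_LABEL from this package's settings.
def _demo_lookup_choice_field():
    field = LookupChoiceField(forms.IntegerField(),
                              [('exact', 'Equals'), ('gt', 'Greater than')])
    return field.clean(['5', 'gt'])  # expected: Lookup(value=5, lookup_expr='gt')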
# encoding: utf-8 <import_stmt>numpy<as>np<import_stmt>pandas<as>pd<import_stmt>scipy.stats<as>scst<import_stmt>statsmodels.api<as>sm<import_from_stmt>jaqs.trade.common CALENDAR_CONST<def_stmt>calc_signal_ic signal_data<block_start>""" Computes the Spearman Rank Correlation based Information Coefficient (IC) between signal values and N period forward returns for each period in the signal index. Parameters ---------- signal_data : pd.DataFrame - MultiIndex Index is pd.MultiIndex ['trade_date', 'symbol'], columns = ['signal', 'return', 'quantile'] Returns ------- ic : pd.DataFrame Spearman Rank correlation between signal and provided forward returns. """<def_stmt>src_ic df<block_start>_ic=scst.spearmanr(df['signal'] df['return'])[0]<line_sep><return>_ic<block_end>signal_data=signal_data.copy()<line_sep>grouper=['trade_date']<line_sep>ic=signal_data.groupby(grouper).apply(src_ic)<line_sep>ic=pd.DataFrame(ic)<line_sep>ic.columns=['ic']<line_sep><return>ic<block_end><def_stmt>calc_ic_stats_table ic_data<block_start>ic_summary_table=pd.DataFrame()<line_sep>ic_summary_table["IC Mean"]=ic_data.mean()<line_sep>ic_summary_table["IC Std."]=ic_data.std()<line_sep>t_stat,p_value=scst.ttest_1samp(ic_data 0)<line_sep>ic_summary_table["t-stat(IC)"]=t_stat<line_sep>ic_summary_table["p-value(IC)"]=p_value<line_sep>ic_summary_table["IC Skew"]=scst.skew(ic_data)<line_sep>ic_summary_table["IC Kurtosis"]=scst.kurtosis(ic_data)<line_sep>ic_summary_table["Ann. IR"]=ic_data.mean()/ic_data.std()<line_sep><return>ic_summary_table<block_end><def_stmt>mean_information_coefficient ic by_time=<none><block_start>""" Get the mean information coefficient of specified groups. Answers questions like: What is the mean IC for each month? What is the mean IC for each group for our whole timerange? What is the mean IC for for each group, each week? Parameters ---------- by_time : str (pd time_rule), optional Time window to use when taking mean IC. See http://pandas.pydata.org/pandas-docs/stable/timeseries.html for available options. Returns ------- ic : pd.DataFrame Mean Spearman Rank correlation between signal and provided forward price movement windows. """<line_sep>grouper=[]<if_stmt>by_time<is><not><none><block_start>grouper.append(pd.TimeGrouper(by_time))<block_end><if_stmt>len(grouper)<eq>0<block_start>ic=ic.mean()<block_end><else_stmt><block_start>ic.index=pd.to_datetime(ic.index format="%Y%m%d")<line_sep>ic=(ic.reset_index().set_index('trade_date').groupby(grouper).mean())<block_end><return>ic<block_end><def_stmt>calc_period_wise_weighted_signal_return signal_data weight_method<block_start>""" Computes period wise period_wise_returns for portfolio weighted by signal values. Weights are computed by demeaning signals and dividing by the sum of their absolute value (achieving gross leverage of 1). Parameters ---------- signal_data : pd.DataFrame - MultiIndex Index is pd.MultiIndex ['trade_date', 'symbol'], columns = ['signal', 'return', 'quantile'] weight_method : {'equal_weight', 'long_only', 'long_short'} Returns ------- res : pd.DataFrame Period wise period_wise_returns of dollar neutral portfolio weighted by signal value. """<def_stmt>calc_norm_weights ser method<block_start><if_stmt>method<eq>'equal_weight'<block_start>ser.loc[:]=1.0/len(ser)<block_end><elif_stmt>method<eq>'long_short'# TODO: do we need to de-mean? 
<block_start>ser=ser-ser.mean()<block_end><elif_stmt>method<eq>'long_only'<block_start>ser=(ser+ser.abs())/2.0<block_end><elif_stmt>method<eq>'short_only'<block_start>ser=(ser-ser.abs())/2.0<block_end><else_stmt><block_start><raise>ValueError("method can only be equal_weight, long_only or long_short,"<concat>"but [{}] is provided".format(method))<block_end><return>ser/ser.abs().sum()<block_end>grouper=['trade_date']<line_sep>weights=signal_data.groupby(grouper)['signal'].apply(calc_norm_weights weight_method)<line_sep># df_sig = signal_data['signal'].unstack(level='symbol') # weights = df_sig.apply(calc_norm_weights, axis=1, args=(weight_method, )) weighted_returns=signal_data['return'].multiply(weights axis=0)<line_sep>period_wise_returns=weighted_returns.groupby(level='trade_date').sum()<line_sep>res=pd.DataFrame(period_wise_returns)<line_sep>res.columns=['return']<line_sep><return>res<block_end><def_stmt>regress_period_wise_signal_return signal_data group=<false><block_start>""" Computes period wise period_wise_returns for portfolio weighted by signal values. Weights are computed by demeaning signals and dividing by the sum of their absolute value (achieving gross leverage of 1). Parameters ---------- signal_data : pd.DataFrame - MultiIndex Index is pd.MultiIndex ['trade_date', 'symbol'], columns = ['signal', 'return', 'quantile'] Returns ------- period_wise_returns : pd.DataFrame Period wise period_wise_returns of dollar neutral portfolio weighted by signal value. """<def_stmt>regress df<block_start>x=df['signal'].values<line_sep>y=df['return'].values<line_sep>x=sm.add_constant(x)<line_sep>mod=sm.OLS(y x).fit()<line_sep>idiosyncractic,signal_return=mod.params<line_sep># return pd.Series(index=['idio', 'signal_return'], data=[idiosyncractic, signal_return]) <return>signal_return<block_end>grouper=[signal_data.index.get_level_values('trade_date')]<if_stmt>group<block_start>grouper.append('group')<block_end>regress_res=signal_data.groupby(grouper).apply(regress)<line_sep><return>pd.DataFrame(regress_res)<block_end>''' def calc_alpha_beta(active_return, period, benchmark_return=None): if isinstance(active_return, pd.Series): active_return = pd.DataFrame(active_return) if isinstance(benchmark_return, pd.Series): benchmark_return = pd.DataFrame(benchmark_return) benchmark_return = benchmark_return.loc[active_return.index, :] alpha_beta = pd.DataFrame() x = benchmark_return.values y = active_return.values x = sm.add_constant(x) reg_fit = sm.OLS(y, x).fit() alpha, beta = reg_fit.params alpha_beta.loc['Ann. alpha', period] = \ (1 + alpha) ** (252.0 / period) - 1 alpha_beta.loc['beta', period] = beta return alpha_beta '''<def_stmt>calc_quantile_return_mean_std signal_data time_series=<false><block_start>""" Computes mean returns for signal quantiles across provided forward returns columns. Parameters ---------- signal_data : pd.DataFrame - MultiIndex Index is pd.MultiIndex ['trade_date', 'symbol'], columns = ['signal', 'return', 'quantile'] Returns ------- res : pd.DataFrame of dict """<line_sep>signal_data=signal_data.copy()<line_sep>grouper=['quantile']<if_stmt>time_series<block_start>grouper.append('trade_date')<block_end>group_mean_std=signal_data.groupby(grouper)['return'].agg(['mean' 'std' 'count'])<line_sep># TODO: why? 
''' std_error_ret = group_mean_std.loc[:, 'std'].copy() / np.sqrt(group_mean_std.loc[:, 'count'].copy()) '''<if_stmt>time_series<block_start>quantile_daily_mean_std_dic=dict()<line_sep>quantiles=np.unique(group_mean_std.index.get_level_values(level='quantile'))<for_stmt>q quantiles# loop for different quantiles <block_start>df_q=group_mean_std.loc[pd.IndexSlice[q :] :]# bug df_q.index=df_q.index.droplevel(level='quantile')<line_sep>quantile_daily_mean_std_dic[q]=df_q<block_end><return>quantile_daily_mean_std_dic<block_end><else_stmt><block_start><return>group_mean_std<block_end><block_end><def_stmt>calc_return_diff_mean_std q1 q2<block_start>""" Computes the difference between the mean returns of two quantiles. Optionally, computes the standard error of this difference. Parameters ---------- q1, q2 : pd.DataFrame DataFrame of mean period wise returns by quantile. Index is datet, columns = ['mean', 'std', 'count'] Returns ------- res : pd.DataFrame Difference of mean return and corresponding std. """<line_sep>res_raw=pd.merge(q1 q2 how='outer' suffixes=('_1' '_2') left_index=<true> right_index=<true>).fillna(0)<line_sep>res_raw['mean_diff']=res_raw['mean_1']-res_raw['mean_2']<line_sep>res_raw['std']=np.sqrt(res_raw['mean_1']<power>2+res_raw['mean_2']<power>2)<line_sep>res=res_raw[['mean_diff' 'std']]<line_sep><return>res<block_end>''' def period2daily(ser, period, do_roll_mean=False): if not period > 1: return ser if do_roll_mean: ser = ser.rolling(window=period, min_periods=1, axis=1).mean() ser_daily_pow = (ser + 1) ** (1. / period) return ser_daily_pow - 1.0 '''<def_stmt>calc_active_cum_return_way2 portfolio_ret benchmark_ret<block_start>benchmark_ret=benchmark_ret.loc[portfolio_ret.index]<line_sep>portfolio_cum=portfolio_ret.add(1.0).cumprod(axis=0)<line_sep>benchmark_cum=benchmark_ret.add(1.0).cumprod(axis=0)<line_sep>active_cum=portfolio_cum.sub(benchmark_cum.values.flatten() axis=0)+1.0<line_sep><return>active_cum<block_end><def_stmt>calc_active_cum_return portfolio_ret benchmark_ret<block_start>benchmark_ret=benchmark_ret.loc[portfolio_ret.index]<line_sep>active_ret=portfolio_ret.sub(benchmark_ret.values.flatten() axis=0)<line_sep>active_cum=active_ret.add(1.0).cumprod()<line_sep><return>active_cum<block_end><def_stmt>price2ret prices period=5 axis=<none><block_start>""" Parameters ---------- prices : pd.DataFrame or pd.Series Index is datetime. period : int axis : {0, 1, None} Returns ------- ret : pd.DataFrame or pd.Series """<line_sep>ret=prices.pct_change(periods=period axis=axis)<line_sep><return>ret<block_end><def_stmt>cum2ret cum period=1 axis=<none> compound=<false><block_start>""" Parameters ---------- cum : pd.Series Starts from zero. period : int axis : {0, 1, None} compound : bool Returns ------- ret : pd.Series """<if_stmt>axis<is><none><block_start>kwargs=dict()<block_end><else_stmt><block_start>kwargs={'axis':axis}<block_end><if_stmt>np.any(cum.min(**kwargs))<l>0<block_start><raise>ValueError("Minimum value of cumulative return is less than zero.")<block_end>cum=cum.add(1.0)<if_stmt>compound<block_start>ret=cum.pct_change(periods=period **kwargs)<block_end><else_stmt><block_start>ret=cum.diff(periods=period **kwargs)<block_end><return>ret<block_end><def_stmt>ret2cum ret compound=<false> axis=<none><block_start>""" Parameters ---------- ret : pd.Series Starts from zero. 
compound : bool axis : {0, 1, None} Returns ------- cum : pd.Series """<if_stmt>axis<is><none><block_start>kwargs=dict()<block_end><else_stmt><block_start>kwargs={'axis':axis}<block_end><if_stmt>compound# use log to avoid numerical problems <block_start>log_sum=np.log(ret.add(1.0)).cumsum(**kwargs)<line_sep>cum=np.exp(log_sum).sub(1.0)<block_end><else_stmt><block_start>cum=ret.cumsum(**kwargs)<block_end><return>cum<block_end><def_stmt>calc_performance_metrics ser cum_return=<false> compound=<false><block_start>""" Calculate annualized return, volatility and sharpe. We assumed data frequency to be day. Parameters ---------- ser : pd.DataFrame or pd.Series Index is int date, values are floats. ser should start from 0. cum_return : bool Whether ser is cumulative or daily return. compound Whether calculation of return is compound. Returns ------- res : dict """<if_stmt>isinstance(ser pd.DataFrame)<block_start>ser=ser.iloc[: 0]<block_end>idx=ser.index<if_stmt>cum_return<block_start>cum_ret=ser<line_sep>ret=cum2ret(cum_ret period=1 compound=compound)<block_end><else_stmt><block_start>ret=ser<line_sep>cum_ret=ret2cum(ret compound=compound)<block_end>n_trade_days=len(idx)<line_sep>n_years=n_trade_days<times>1./CALENDAR_CONST.TRADE_DAYS_PER_YEAR<line_sep>total_ret=cum_ret.iat[-1]<if_stmt>compound<block_start>ann_ret=np.power(cum_ret.iat[-1]+1.0 1./n_years)-1<block_end><else_stmt><block_start>ann_ret=total_ret/n_years<block_end>std=np.std(ret)# use std instead of np.sqrt( (ret**2).sum() / len(ret) ) ann_vol=std<times>np.sqrt(CALENDAR_CONST.TRADE_DAYS_PER_YEAR)<line_sep>sharpe=ann_ret/ann_vol<line_sep># print "ann. ret = {:.1f}%; ann. vol = {:.1f}%, sharpe = {:.2f}".format(ann_ret * 100, ann_vol * 100, sharpe) res={'ann_ret':ann_ret 'ann_vol':ann_vol 'sharpe':sharpe}<line_sep><return>res<block_end><def_stmt>period_wise_ret_to_cum ret period compound=<false><block_start>""" Calculate cumulative returns from N-periods returns, no compounding. When 'period' N is greater than 1 the cumulative returns plot is computed building and averaging the cumulative returns of N interleaved portfolios (started at subsequent periods 1,2,3,...,N) each one rebalancing every N periods. Parameters ---------- ret: pd.Series or pd.DataFrame pd.Series containing N-periods returns period: integer Period for which the returns are computed compound : bool Whether calculate using compound return. Returns ------- pd.Series Cumulative returns series starting from zero. """<if_stmt>isinstance(ret pd.DataFrame)# deal with each column recursively <block_start><return>ret.apply(period_wise_ret_to_cum axis=0 args=(period ))<block_end><elif_stmt>isinstance(ret pd.Series)<block_start><if_stmt>period<eq>1<block_start><return>ret.add(1).cumprod().sub(1.0)<block_end># invest in each portfolio separately periods_index=np.arange(len(ret.index))<floordiv>period<line_sep>period_portfolios=ret.groupby(by=periods_index axis=0).apply(<lambda>ser:pd.DataFrame(np.diag(ser)))<line_sep>period_portfolios.index=ret.index<line_sep># cumulate returns separately <if_stmt>compound<block_start>cum_returns=period_portfolios.add(1).cumprod().sub(1.0)<block_end><else_stmt><block_start>cum_returns=period_portfolios.cumsum()<block_end># since capital of all portfolios are the same, return in all equals average return res=cum_returns.mean(axis=1)<line_sep><return>res<block_end><else_stmt><block_start><raise>NotImplementedError("ret must be Series or DataFrame.")<block_end><block_end>
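# Quick illustrative check (toy data, not from the original library): build a
# small MultiIndex signal_data frame and compute the daily rank IC with
# calc_signal_ic; both dates are perfectly monotone, so each IC is 1.0.
if __name__ == '__main__':
    demo_index = pd.MultiIndex.from_product(
        [[20180102, 20180103], ['600030.SH', '000001.SZ', '600519.SH']],
        names=['trade_date', 'symbol'])
    demo_data = pd.DataFrame({'signal': [0.1, 0.5, 0.9, 0.2, 0.4, 0.8],
                              'return': [0.01, 0.02, 0.03, -0.01, 0.00, 0.02]},
                             index=demo_index)
    print(calc_signal_ic(demo_data))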
<import_stmt>unittest<import_from_stmt>test test_support<class_stmt>Empty<block_start><def_stmt>__repr__ self<block_start><return>'<Empty>'<block_end><block_end><class_stmt>Coerce<block_start><def_stmt>__init__ self arg<block_start>self.arg=arg<block_end><def_stmt>__repr__ self<block_start><return>'<Coerce %s>'%self.arg<block_end><def_stmt>__coerce__ self other<block_start><if_stmt>isinstance(other Coerce)<block_start><return>self.arg other.arg<block_end><else_stmt><block_start><return>self.arg other<block_end><block_end><block_end><class_stmt>Cmp<block_start><def_stmt>__init__ self arg<block_start>self.arg=arg<block_end><def_stmt>__repr__ self<block_start><return>'<Cmp %s>'%self.arg<block_end><def_stmt>__cmp__ self other<block_start><return>cmp(self.arg other)<block_end><block_end><class_stmt>ComparisonTest(unittest.TestCase)<block_start>set1=[2 2.0 2L 2+0j Coerce(2) Cmp(2.0)]<line_sep>set2=[[1] (3 ) <none> Empty()]<line_sep>candidates=set1+set2<def_stmt>test_comparisons self<block_start><for_stmt>a self.candidates<block_start><for_stmt>b self.candidates<block_start><if_stmt>((a<in>self.set1)<and>(b<in>self.set1))<or>a<is>b<block_start>self.assertEqual(a b)<block_end><else_stmt><block_start>self.assertNotEqual(a b)<block_end><block_end><block_end><block_end><def_stmt>test_id_comparisons self# Ensure default comparison compares id() of args <block_start>L=[]<for_stmt>i range(10)<block_start>L.insert(len(L)<floordiv>2 Empty())<block_end><for_stmt>a L<block_start><for_stmt>b L<block_start>self.assertEqual(cmp(a b) cmp(id(a) id(b)) 'a=%r, b=%r'%(a b))<block_end><block_end><block_end><block_end><def_stmt>test_main <block_start>test_support.run_unittest(ComparisonTest)<block_end><if_stmt>__name__<eq>'__main__'<block_start>test_main()<block_end>
<import_from_stmt>flask.signals Namespace<line_sep>__all__=['request_token_fetched']<line_sep>_signals=Namespace()<line_sep>request_token_fetched=_signals.signal('request-token-fetched')<line_sep>
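# Illustrative consumer sketch (an assumption about calling code, not part of
# this module): a receiver connects to the signal, and blinker invokes it with
# the sender plus whatever keyword arguments the emitting OAuth client passes.
def _print_request_token(sender, **extra):
    print('request token fetched by %r: %r' % (sender, extra))

# in application code: request_token_fetched.connect(_print_request_token)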
# coding=utf-8 <import_stmt>sys<import_stmt>os<if_stmt>__name__<eq>'__main__'<block_start>start_ip,end_ip=sys.argv[1] sys.argv[2]<line_sep>split_start_ip=start_ip.split('.')<line_sep>split_end_ip=end_ip.split('.')<line_sep>ip_list_str=""<line_sep>ip_base=split_start_ip[0]+'.'+split_start_ip[1]+'.'+split_start_ip[2]+'.'<line_sep>ip_count=int(split_end_ip[-1])-int(split_start_ip[-1])+1<for_stmt>ip_index range(ip_count)<block_start>ip_list_str<augadd>ip_base+str(int(split_start_ip[3])+ip_index)+" "<block_end>cmd_1="ssh-keyscan -t rsa %s"%ip_list_str<line_sep>os.system("%s > ~/.ssh/known_hosts"%cmd_1)<block_end>
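# Example invocation (hypothetical addresses; both arguments must share the
# same /24 prefix, since only the last octet is iterated above). The command
# overwrites ~/.ssh/known_hosts with the collected RSA host keys:
#   python <this_script>.py 192.168.1.10 192.168.1.20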
<import_stmt>os<import_stmt>subprocess<import_from_stmt>datetime date<import_from_stmt>mne __version__<as>release_version<line_sep># NOTE: ../codemeta.json should not be continuously updated. Run this script # only at release time. # add to these as necessary compound_surnames=('<NAME>' '<NAME>' '<NAME>' '<NAME>' '<NAME>' '<NAME>' '<NAME>' '<NAME>' '<NAME>' '<NAME>' '<NAME>')<def_stmt>parse_name name<block_start>"""Split name blobs from `git shortlog -nse` into first/last/email."""<line_sep># remove commit count _,name_and_email=name.strip().split('\t')<line_sep>name,email=name_and_email.split(' <')<line_sep>email=email.strip('>')<line_sep>email=''<if>'noreply'<in>email<else>email# ignore "noreply" emails name=' '.join(name.split('.'))# remove periods from initials # handle compound surnames <for_stmt>compound_surname compound_surnames<block_start><if_stmt>name.endswith(compound_surname)<block_start>ix=name.index(compound_surname)<line_sep>first=name[:ix].strip()<line_sep>last=compound_surname<line_sep><return>(first last email)<block_end><block_end># handle non-compound surnames name_elements=name.split()<if_stmt>len(name_elements)<eq>1# mononyms / usernames <block_start>first=''<line_sep>last=name<block_end><else_stmt><block_start>first=' '.join(name_elements[:-1])<line_sep>last=name_elements[-1]<block_end><return>(first last email)<block_end># MAKE SURE THE RELEASE STRING IS PROPERLY FORMATTED <try_stmt><block_start>split_version=list(map(int release_version.split('.')))<block_end><except_stmt>ValueError<block_start><raise><block_end>msg=f'version string must be X.Y.Z (all integers), got {release_version}'<assert_stmt>len(split_version)<eq>3 msg<line_sep># RUN GIT SHORTLOG TO GET ALL AUTHORS, SORTED BY NUMBER OF COMMITS args=['git' 'shortlog' '-nse']<line_sep>result=subprocess.run(args capture_output=<true> text=<true>)<line_sep>lines=result.stdout.strip().split('\n')<line_sep>all_names=[parse_name(line)<for>line lines]<line_sep># CONSTRUCT JSON AUTHORS LIST authors=[f'''{{ "@type":"Person", "email":"{email}", "givenName":"{first}", "familyName": "{last}" }}'''<for>(first last email) all_names]<line_sep># GET OUR DEPENDENCIES <with_stmt>open(os.path.join('..' 'setup.py') 'r')<as>fid<block_start><for_stmt>line fid<block_start><if_stmt>line.strip().startswith('python_requires=')<block_start>version=line.strip().split('=' maxsplit=1)[1].strip("'\",")<line_sep>dependencies=[f'python{version}']<line_sep><break><block_end><block_end><block_end>hard_dependencies=('numpy' 'scipy')<with_stmt>open(os.path.join('..' 
'requirements.txt') 'r')<as>fid<block_start><for_stmt>line fid<block_start>req=line.strip()<for_stmt>hard_dep hard_dependencies<block_start><if_stmt>req.startswith(hard_dep)<block_start>dependencies.append(req)<block_end><block_end><block_end><block_end># these must be done outside the boilerplate (no \n allowed in f-strings): authors=',\n '.join(authors)<line_sep>dependencies='",\n "'.join(dependencies)<line_sep># ASSEMBLE COMPLETE JSON codemeta_boilerplate=f'''{{ "@context": "https://doi.org/10.5063/schema/codemeta-2.0", "@type": "SoftwareSourceCode", "license": "https://spdx.org/licenses/BSD-3-Clause", "codeRepository": "git+https://github.com/mne-tools/mne-python.git", "dateCreated": "2010-12-26", "datePublished": "2014-08-04", "dateModified": "{str(date.today())}", "downloadUrl": "https://github.com/mne-tools/mne-python/archive/v{release_version}.zip", "issueTracker": "https://github.com/mne-tools/mne-python/issues", "name": "MNE-Python", "version": "{release_version}", "description": "MNE-Python is an open-source Python package for exploring, visualizing, and analyzing human neurophysiological data. It provides methods for data input/output, preprocessing, visualization, source estimation, time-frequency analysis, connectivity analysis, machine learning, and statistics.", "applicationCategory": "Neuroscience", "developmentStatus": "active", "referencePublication": "https://doi.org/10.3389/fnins.2013.00267", "keywords": [ "MEG", "EEG", "fNIRS", "ECoG", "sEEG", "DBS" ], "programmingLanguage": [ "Python" ], "operatingSystem": [ "Linux", "Windows", "macOS" ], "softwareRequirements": [ "{dependencies}" ], "author": [ {authors} ] }} '''<line_sep># noqa E501 # WRITE TO FILE <with_stmt>open(os.path.join('..' 'codemeta.json') 'w')<as>codemeta_file<block_start>codemeta_file.write(codemeta_boilerplate)<block_end>
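# Invocation note (illustrative): the relative paths above ('../setup.py',
# '../requirements.txt', '../codemeta.json') assume the script is executed from
# a directory one level below the repository root, e.g.
#   cd <repo>/<tools-dir> && python <this_script>.py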
<import_stmt>sys<import_from_stmt>_multiprocess.interp_semaphore RECURSIVE_MUTEX SEMAPHORE <class_stmt>AppTestSemaphore<block_start>spaceconfig=dict(usemodules=('_multiprocess' 'thread' 'signal' 'select' 'binascii' 'struct'))<if_stmt>sys.platform<eq>'win32'<block_start>spaceconfig['usemodules']<augadd>('_rawffi' '_cffi_backend')<block_end><else_stmt><block_start>spaceconfig['usemodules']<augadd>('fcntl' )<block_end><def_stmt>setup_class cls<block_start>cls.w_SEMAPHORE=cls.space.wrap(SEMAPHORE)<line_sep>cls.w_RECURSIVE=cls.space.wrap(RECURSIVE_MUTEX)<line_sep>cls.w_runappdirect=cls.space.wrap(cls.runappdirect)<line_sep># import here since importing _multiprocess imports multiprocess # (in interp_connection) to get the BufferTooShort exception, which on # win32 imports msvcrt which imports via cffi which allocates ccharp # that are never released. This trips up the LeakChecker if done in a # test function cls.w_multiprocessing=cls.space.appexec([] '(): import multiprocess as m; return m')<block_end><def_stmt>test_semaphore_basic self<block_start><import_from_stmt>_multiprocess SemLock<import_stmt>sys<assert_stmt>SemLock.SEM_VALUE_MAX<g>10<line_sep>kind=self.SEMAPHORE<line_sep>value=1<line_sep>maxvalue=1<line_sep># the following line gets OSError: [Errno 38] Function not implemented # if /dev/shm is not mounted on Linux sem=SemLock(kind value maxvalue)<assert_stmt>sem.kind<eq>kind<assert_stmt>sem.maxvalue<eq>maxvalue<assert_stmt>isinstance(sem.handle (int long))<assert_stmt>sem._count()<eq>0<if_stmt>sys.platform<eq>'darwin'<block_start>raises(NotImplementedError 'sem._get_value()')<block_end><else_stmt><block_start><assert_stmt>sem._get_value()<eq>1<block_end><assert_stmt>sem._is_zero()<eq><false><line_sep>sem.acquire()<assert_stmt>sem._is_mine()<assert_stmt>sem._count()<eq>1<if_stmt>sys.platform<eq>'darwin'<block_start>raises(NotImplementedError 'sem._get_value()')<block_end><else_stmt><block_start><assert_stmt>sem._get_value()<eq>0<block_end><assert_stmt>sem._is_zero()<eq><true><line_sep>sem.release()<assert_stmt>sem._count()<eq>0<line_sep>sem.acquire()<line_sep>sem._after_fork()<assert_stmt>sem._count()<eq>0<block_end><def_stmt>test_recursive self<block_start><import_from_stmt>_multiprocess SemLock<line_sep>kind=self.RECURSIVE<line_sep>value=1<line_sep>maxvalue=1<line_sep># the following line gets OSError: [Errno 38] Function not implemented # if /dev/shm is not mounted on Linux sem=SemLock(kind value maxvalue)<line_sep>sem.acquire()<line_sep>sem.release()<assert_stmt>sem._count()<eq>0<line_sep>sem.acquire()<line_sep>sem.release()<line_sep># now recursively sem.acquire()<line_sep>sem.acquire()<assert_stmt>sem._count()<eq>2<line_sep>sem.release()<line_sep>sem.release()<block_end><def_stmt>test_semaphore_maxvalue self<block_start><import_from_stmt>_multiprocess SemLock<import_stmt>sys<line_sep>kind=self.SEMAPHORE<line_sep>value=SemLock.SEM_VALUE_MAX<line_sep>maxvalue=SemLock.SEM_VALUE_MAX<line_sep>sem=SemLock(kind value maxvalue)<for_stmt>i range(10)<block_start>res=sem.acquire()<assert_stmt>res<eq><true><assert_stmt>sem._count()<eq>i+1<if_stmt>sys.platform<ne>'darwin'<block_start><assert_stmt>sem._get_value()<eq>maxvalue-(i+1)<block_end><block_end>value=0<line_sep>maxvalue=SemLock.SEM_VALUE_MAX<line_sep>sem=SemLock(kind value maxvalue)<for_stmt>i range(10)<block_start>sem.release()<assert_stmt>sem._count()<eq>-(i+1)<if_stmt>sys.platform<ne>'darwin'<block_start><assert_stmt>sem._get_value()<eq>i+1<block_end><block_end><block_end><def_stmt>test_semaphore_wait 
self<block_start><import_from_stmt>_multiprocess SemLock<line_sep>kind=self.SEMAPHORE<line_sep>value=1<line_sep>maxvalue=1<line_sep>sem=SemLock(kind value maxvalue)<line_sep>res=sem.acquire()<assert_stmt>res<eq><true><line_sep>res=sem.acquire(timeout=0.1)<assert_stmt>res<eq><false><block_end><def_stmt>test_semaphore_rebuild self<block_start><import_from_stmt>_multiprocess SemLock<line_sep>kind=self.SEMAPHORE<line_sep>value=1<line_sep>maxvalue=1<line_sep>sem=SemLock(kind value maxvalue)<line_sep>sem2=SemLock._rebuild(sem.handle kind value)<assert_stmt>sem.handle<eq>sem2.handle<block_end><def_stmt>test_semaphore_contextmanager self<block_start><import_from_stmt>_multiprocess SemLock<line_sep>kind=self.SEMAPHORE<line_sep>value=1<line_sep>maxvalue=1<line_sep>sem=SemLock(kind value maxvalue)<with_stmt>sem<block_start><assert_stmt>sem._count()<eq>1<block_end><assert_stmt>sem._count()<eq>0<block_end><def_stmt>test_in_threads self<block_start><import_from_stmt>_multiprocess SemLock<import_from_stmt>threading Thread<import_from_stmt>time sleep<line_sep>l=SemLock(0 1 1)<if_stmt>self.runappdirect<block_start><def_stmt>f id<block_start><for_stmt>i range(10000)<block_start><pass><block_end><block_end><block_end><else_stmt><block_start><def_stmt>f id<block_start><for_stmt>i range(1000)# reduce the probability of thread switching # at exactly the wrong time in semlock_acquire <block_start><for_stmt>j range(10)<block_start><pass><block_end><block_end><block_end><block_end>threads=[Thread(<none> f args=(i ))<for>i range(2)]<line_sep>[t.start()<for>t threads]<line_sep># if the RLock calls to sem_wait and sem_post do not match, # one of the threads will block and the call to join will fail [t.join()<for>t threads]<block_end><block_end>
<import_stmt>execjs<def_stmt>get_params data=<none><block_start><with_stmt>open('params.js' 'r')<as>f<block_start>content=f.read()<block_end>ctx=execjs.compile(content)<if_stmt>data<block_start>result=ctx.call('result' data)<block_end><else_stmt><block_start>result=ctx.call('result')<block_end><return>result<block_end><def_stmt>get_dv <block_start><with_stmt>open('dv.js' 'r')<as>f<block_start>content=f.read()<block_end>ctx=execjs.compile(content)<line_sep>result=ctx.call('f')<line_sep><return>result<block_end><def_stmt>get_fs nameL<block_start><with_stmt>open('fs.js' 'r')<as>f<block_start>content=f.read()<block_end>ctx=execjs.compile(content)<line_sep>result=ctx.call('result' nameL)<line_sep><return>result<block_end><def_stmt>get_traceid <block_start><with_stmt>open('traceid.js' 'r')<as>f<block_start>content=f.read()<block_end>ctx=execjs.compile(content)<line_sep>result=ctx.call('getid')<line_sep><return>result<block_end>
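# Illustrative usage (assumes params.js, dv.js, fs.js and traceid.js live next
# to this module and expose the JavaScript functions called above; the name
# list passed to get_fs is a placeholder):
if __name__ == '__main__':
    print(get_dv())
    print(get_traceid())
    print(get_fs(['example-name']))
    print(get_params())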
# Copyright 2020 The TensorFlow Probability Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================ """Tests for initializers."""<import_stmt>math<line_sep># Dependency imports <import_stmt>numpy<as>np<import_from_stmt>scipy stats<as>sp_stats<import_stmt>tensorflow.compat.v1<as>tf1<import_stmt>tensorflow.compat.v2<as>tf<import_stmt>tensorflow_probability<as>tfp<import_from_stmt>tensorflow_probability.python distributions<as>tfd<import_from_stmt>tensorflow_probability.python.internal test_util<line_sep>@test_util.test_all_tf_execution_regimes<class_stmt>JohnsonSUTest(test_util.TestCase)<block_start><def_stmt>setUp self<block_start>self._rng=np.random.RandomState(123)<line_sep>super(JohnsonSUTest self).setUp()<block_end><def_stmt>_testParamShapes self sample_shape expected<block_start>param_shapes=tfd.JohnsonSU.param_shapes(sample_shape)<line_sep>skewness_shape,tailweight_shape,mu_shape,sigma_shape=param_shapes['skewness'] param_shapes['tailweight'] param_shapes['loc'] param_shapes['scale']<line_sep>self.assertAllEqual(expected self.evaluate(skewness_shape))<line_sep>self.assertAllEqual(expected self.evaluate(tailweight_shape))<line_sep>self.assertAllEqual(expected self.evaluate(mu_shape))<line_sep>self.assertAllEqual(expected self.evaluate(sigma_shape))<line_sep>skewness=tf.zeros(skewness_shape)<line_sep>tailweight=tf.ones(tailweight_shape)<line_sep>mu=tf.zeros(mu_shape)<line_sep>sigma=tf.ones(sigma_shape)<line_sep>self.assertAllEqual(expected self.evaluate(tf.shape(tfd.JohnsonSU(skewness tailweight mu sigma validate_args=<true>).sample(seed=test_util.test_seed()))))<block_end><def_stmt>_testParamStaticShapes self sample_shape expected<block_start>param_shapes=tfd.JohnsonSU.param_static_shapes(sample_shape)<line_sep>mu_shape,sigma_shape=param_shapes['loc'] param_shapes['scale']<line_sep>self.assertEqual(expected mu_shape)<line_sep>self.assertEqual(expected sigma_shape)<block_end><def_stmt>testSampleLikeArgsGetDistDType self<block_start>dist=tfd.JohnsonSU(1. 2. 0. 
1.)<line_sep>self.assertEqual(tf.float32 dist.dtype)<for_stmt>method ('log_prob' 'prob' 'log_cdf' 'cdf' 'log_survival_function' 'survival_function' 'quantile')<block_start>self.assertEqual(tf.float32 getattr(dist method)(1).dtype method)<block_end><block_end><def_stmt>testParamShapes self<block_start>sample_shape=[10 3 4]<line_sep>self._testParamShapes(sample_shape sample_shape)<line_sep>self._testParamShapes(tf.constant(sample_shape) sample_shape)<block_end><def_stmt>testParamStaticShapes self<block_start>sample_shape=[10 3 4]<line_sep>self._testParamStaticShapes(sample_shape sample_shape)<line_sep>self._testParamStaticShapes(tf.TensorShape(sample_shape) sample_shape)<block_end><def_stmt>testJohnsonSULogPDF self<block_start>batch_size=6<line_sep>skewness=tf.constant([1.0]<times>batch_size)<line_sep>tailweight=tf.constant([2.0]<times>batch_size)<line_sep>mu=tf.constant([3.0]<times>batch_size)<line_sep>sigma=tf.constant([math.sqrt(10.0)]<times>batch_size)<line_sep>x=np.array([-2.5 2.5 4.0 0.0 -1.0 2.0] dtype=np.float32)<line_sep>johnson_su=tfd.JohnsonSU(skewness=skewness tailweight=tailweight loc=mu scale=sigma validate_args=<true>)<line_sep>log_pdf=johnson_su.log_prob(x)<line_sep>self.assertAllEqual(self.evaluate(johnson_su.batch_shape_tensor()) log_pdf.shape)<line_sep>self.assertAllEqual(self.evaluate(johnson_su.batch_shape_tensor()) self.evaluate(log_pdf).shape)<line_sep>self.assertAllEqual(johnson_su.batch_shape log_pdf.shape)<line_sep>self.assertAllEqual(johnson_su.batch_shape self.evaluate(log_pdf).shape)<line_sep>pdf=johnson_su.prob(x)<line_sep>self.assertAllEqual(self.evaluate(johnson_su.batch_shape_tensor()) pdf.shape)<line_sep>self.assertAllEqual(self.evaluate(johnson_su.batch_shape_tensor()) self.evaluate(pdf).shape)<line_sep>self.assertAllEqual(johnson_su.batch_shape pdf.shape)<line_sep>self.assertAllEqual(johnson_su.batch_shape self.evaluate(pdf).shape)<line_sep>expected_log_pdf=sp_stats.johnsonsu(self.evaluate(skewness) self.evaluate(tailweight) self.evaluate(mu) self.evaluate(sigma)).logpdf(x)<line_sep>self.assertAllClose(expected_log_pdf self.evaluate(log_pdf))<line_sep>self.assertAllClose(np.exp(expected_log_pdf) self.evaluate(pdf))<block_end><def_stmt>testJohnsonSULogPDFMultidimensional self<block_start>batch_size=6<line_sep>skewness=tf.constant([[1.0 -1.0]]<times>batch_size)<line_sep>tailweight=tf.constant([[1.0 2.0]]<times>batch_size)<line_sep>mu=tf.constant([[3.0 -3.0]]<times>batch_size)<line_sep>sigma=tf.constant([[math.sqrt(10.0) math.sqrt(15.0)]]<times>batch_size)<line_sep>x=np.array([[-2.5 2.5 4.0 0.0 -1.0 2.0]] dtype=np.float32).T<line_sep>johnson_su=tfd.JohnsonSU(skewness=skewness tailweight=tailweight loc=mu scale=sigma validate_args=<true>)<line_sep>log_pdf=johnson_su.log_prob(x)<line_sep>log_pdf_values=self.evaluate(log_pdf)<line_sep>self.assertEqual(log_pdf.shape (6 2))<line_sep>self.assertAllEqual(self.evaluate(johnson_su.batch_shape_tensor()) log_pdf.shape)<line_sep>self.assertAllEqual(self.evaluate(johnson_su.batch_shape_tensor()) self.evaluate(log_pdf).shape)<line_sep>self.assertAllEqual(johnson_su.batch_shape log_pdf.shape)<line_sep>self.assertAllEqual(johnson_su.batch_shape self.evaluate(log_pdf).shape)<line_sep>pdf=johnson_su.prob(x)<line_sep>pdf_values=self.evaluate(pdf)<line_sep>self.assertEqual(pdf.shape (6 2))<line_sep>self.assertAllEqual(self.evaluate(johnson_su.batch_shape_tensor()) pdf.shape)<line_sep>self.assertAllEqual(self.evaluate(johnson_su.batch_shape_tensor()) pdf_values.shape)<line_sep>self.assertAllEqual(johnson_su.batch_shape 
pdf.shape)<line_sep>self.assertAllEqual(johnson_su.batch_shape pdf_values.shape)<line_sep>expected_log_pdf=sp_stats.johnsonsu.logpdf(x self.evaluate(skewness) self.evaluate(tailweight) self.evaluate(mu) self.evaluate(sigma))<line_sep>self.assertAllClose(expected_log_pdf log_pdf_values)<line_sep>self.assertAllClose(np.exp(expected_log_pdf) pdf_values)<block_end><def_stmt>testJohnsonSUCDF self<block_start>batch_size=50<line_sep>skewness=self._rng.randn(batch_size)<line_sep>tailweight=self._rng.rand(batch_size)+1.0<line_sep>mu=self._rng.randn(batch_size)<line_sep>sigma=self._rng.rand(batch_size)+1.0<line_sep>x=np.linspace(-8.0 8.0 batch_size).astype(np.float64)<line_sep>johnson_su=tfd.JohnsonSU(skewness=skewness tailweight=tailweight loc=mu scale=sigma validate_args=<true>)<line_sep>cdf=johnson_su.cdf(x)<line_sep>self.assertAllEqual(self.evaluate(johnson_su.batch_shape_tensor()) cdf.shape)<line_sep>self.assertAllEqual(self.evaluate(johnson_su.batch_shape_tensor()) self.evaluate(cdf).shape)<line_sep>self.assertAllEqual(johnson_su.batch_shape cdf.shape)<line_sep>self.assertAllEqual(johnson_su.batch_shape self.evaluate(cdf).shape)<line_sep>expected_cdf=sp_stats.johnsonsu.cdf(x skewness tailweight mu sigma)<line_sep>self.assertAllClose(expected_cdf self.evaluate(cdf) atol=0)<block_end><def_stmt>testJohnsonSUSurvivalFunction self<block_start>batch_size=50<line_sep>skewness=self._rng.randn(batch_size)<line_sep>tailweight=self._rng.rand(batch_size)+1.0<line_sep>mu=self._rng.randn(batch_size)<line_sep>sigma=self._rng.rand(batch_size)+1.0<line_sep>x=np.linspace(-8.0 8.0 batch_size).astype(np.float64)<line_sep>johnson_su=tfd.JohnsonSU(skewness=skewness tailweight=tailweight loc=mu scale=sigma validate_args=<true>)<line_sep>sf=johnson_su.survival_function(x)<line_sep>self.assertAllEqual(self.evaluate(johnson_su.batch_shape_tensor()) sf.shape)<line_sep>self.assertAllEqual(self.evaluate(johnson_su.batch_shape_tensor()) self.evaluate(sf).shape)<line_sep>self.assertAllEqual(johnson_su.batch_shape sf.shape)<line_sep>self.assertAllEqual(johnson_su.batch_shape self.evaluate(sf).shape)<line_sep>expected_sf=sp_stats.johnsonsu.sf(x skewness tailweight mu sigma)<line_sep>self.assertAllClose(expected_sf self.evaluate(sf) atol=0)<block_end><def_stmt>testJohnsonSULogCDF self<block_start>batch_size=50<line_sep>skewness=self._rng.randn(batch_size)<line_sep>tailweight=self._rng.rand(batch_size)+1.0<line_sep>mu=self._rng.randn(batch_size)<line_sep>sigma=self._rng.rand(batch_size)+1.0<line_sep>x=np.linspace(-100.0 10.0 batch_size).astype(np.float64)<line_sep>johnson_su=tfd.JohnsonSU(skewness=skewness tailweight=tailweight loc=mu scale=sigma validate_args=<true>)<line_sep>cdf=johnson_su.log_cdf(x)<line_sep>self.assertAllEqual(self.evaluate(johnson_su.batch_shape_tensor()) cdf.shape)<line_sep>self.assertAllEqual(self.evaluate(johnson_su.batch_shape_tensor()) self.evaluate(cdf).shape)<line_sep>self.assertAllEqual(johnson_su.batch_shape cdf.shape)<line_sep>self.assertAllEqual(johnson_su.batch_shape self.evaluate(cdf).shape)<line_sep>expected_cdf=sp_stats.johnsonsu.logcdf(x skewness tailweight mu sigma)<line_sep>self.assertAllClose(expected_cdf self.evaluate(cdf) atol=0 rtol=1e-3)<block_end><def_stmt>testFiniteGradientAtDifficultPoints self<block_start><def_stmt>make_fn dtype attr<block_start>x=np.array([-100. -20. -5. 0. 5. 20. 
100.]).astype(dtype)<line_sep><return><lambda>g d m s:getattr(# pylint: disable=g-long-lambda tfd.JohnsonSU(skewness=g tailweight=d loc=m scale=s validate_args=<true>) attr)(x)<block_end><for_stmt>dtype np.float32 np.float64<block_start><for_stmt>attr ['cdf' 'log_cdf' 'survival_function' 'log_survival_function' 'log_prob' 'prob']<block_start>value,grads=self.evaluate(tfp.math.value_and_gradient(make_fn(dtype attr) [tf.constant(0 dtype) tf.constant(1 dtype) tf.constant(2 dtype) tf.constant(3 dtype)]))<line_sep>self.assertAllFinite(value)<line_sep>self.assertAllFinite(grads[0])<line_sep>self.assertAllFinite(grads[1])<line_sep>self.assertAllFinite(grads[2])<line_sep>self.assertAllFinite(grads[3])<block_end><block_end><block_end><def_stmt>testJohnsonSULogSurvivalFunction self<block_start>batch_size=50<line_sep>skewness=self._rng.randn(batch_size)<line_sep>tailweight=self._rng.rand(batch_size)+1.0<line_sep>mu=self._rng.randn(batch_size)<line_sep>sigma=self._rng.rand(batch_size)+1.0<line_sep>x=np.linspace(-10.0 10.0 batch_size).astype(np.float64)<line_sep>johnson_su=tfd.JohnsonSU(skewness=skewness tailweight=tailweight loc=mu scale=sigma validate_args=<true>)<line_sep>sf=johnson_su.log_survival_function(x)<line_sep>self.assertAllEqual(self.evaluate(johnson_su.batch_shape_tensor()) sf.shape)<line_sep>self.assertAllEqual(self.evaluate(johnson_su.batch_shape_tensor()) self.evaluate(sf).shape)<line_sep>self.assertAllEqual(johnson_su.batch_shape sf.shape)<line_sep>self.assertAllEqual(johnson_su.batch_shape self.evaluate(sf).shape)<line_sep>expected_sf=sp_stats.johnsonsu.logsf(x skewness tailweight mu sigma)<line_sep>self.assertAllClose(expected_sf self.evaluate(sf) atol=0 rtol=1e-5)<block_end><def_stmt>testJohnsonSUMean self<block_start>skewness=[1.]<line_sep>tailweight=[2.]<line_sep># Mu will be broadcast to [7, 7, 7]. mu=[7.]<line_sep>sigma=[11. 12. 13.]<line_sep>johnson_su=tfd.JohnsonSU(skewness=skewness tailweight=tailweight loc=mu scale=sigma validate_args=<true>)<line_sep>self.assertAllEqual((3 ) johnson_su.mean().shape)<line_sep># sp_stats doesn't work with array tailweight expected_mean=sp_stats.johnsonsu.mean(skewness tailweight[0] mu sigma)<line_sep>self.assertAllClose(expected_mean self.evaluate(johnson_su.mean()))<block_end><def_stmt>testJohnsonSUQuantile self<block_start>batch_size=52<line_sep>skewness=self._rng.randn(batch_size)<line_sep>tailweight=self._rng.rand(batch_size)+1.0<line_sep>mu=self._rng.randn(batch_size)<line_sep>sigma=self._rng.rand(batch_size)+1.0<line_sep>p=np.linspace(0. 1.0 batch_size-2).astype(np.float64)<line_sep># Quantile performs piecewise rational approximation so adding some # special input values to make sure we hit all the pieces. 
p=np.hstack((p np.exp(-33) 1.-np.exp(-33)))<line_sep>johnson_su=tfd.JohnsonSU(skewness=skewness tailweight=tailweight loc=mu scale=sigma validate_args=<true>)<line_sep>x=johnson_su.quantile(p)<line_sep>self.assertAllEqual(self.evaluate(johnson_su.batch_shape_tensor()) x.shape)<line_sep>self.assertAllEqual(self.evaluate(johnson_su.batch_shape_tensor()) self.evaluate(x).shape)<line_sep>self.assertAllEqual(johnson_su.batch_shape x.shape)<line_sep>self.assertAllEqual(johnson_su.batch_shape self.evaluate(x).shape)<line_sep>expected_x=sp_stats.johnsonsu.ppf(p skewness tailweight mu sigma)<line_sep>self.assertAllClose(expected_x self.evaluate(x) atol=0.)<block_end><def_stmt>_testQuantileFiniteGradientAtDifficultPoints self dtype<block_start>skewness=tf.constant(dtype(0))<line_sep>tailweight=tf.constant(dtype(1))<line_sep>mu=tf.constant(dtype(0))<line_sep>sigma=tf.constant(dtype(1))<line_sep>p=tf.constant(dtype([np.exp(-32.) np.exp(-2.) 1.-np.exp(-2.) 1.-np.exp(-8.)]))<line_sep>value,grads=tfp.math.value_and_gradient(<lambda>m p_:tfd.JohnsonSU(skewness=skewness tailweight=tailweight # pylint:disable=g-long-lambda loc=m scale=sigma validate_args=<true>).quantile(p_) [mu p])<line_sep>value,grads=self.evaluate([value grads])<line_sep>self.assertAllFinite(grads[0])<line_sep>self.assertAllFinite(grads[1])<block_end><def_stmt>testQuantileFiniteGradientAtDifficultPointsFloat32 self<block_start>self._testQuantileFiniteGradientAtDifficultPoints(np.float32)<block_end><def_stmt>testQuantileFiniteGradientAtDifficultPointsFloat64 self<block_start>self._testQuantileFiniteGradientAtDifficultPoints(np.float64)<block_end><def_stmt>testJohnsonSUVariance self<block_start>skewness=[1.]<line_sep>tailweight=[2.]<line_sep># sigma will be broadcast to [7, 7, 7] mu=[1. 2. 3.]<line_sep>sigma=[7.]<line_sep>johnson_su=tfd.JohnsonSU(skewness=skewness tailweight=tailweight loc=mu scale=sigma validate_args=<true>)<line_sep>self.assertAllEqual((3 ) johnson_su.variance().shape)<line_sep>expected_v=sp_stats.johnsonsu.var(skewness[0] tailweight[0] mu[0] sigma[0])<line_sep>self.assertAllClose([expected_v]<times>3 self.evaluate(johnson_su.variance()))<block_end><def_stmt>testJohnsonSUStandardDeviation self<block_start>skewness=[1.]<line_sep>tailweight=[2.]<line_sep># sigma will be broadcast to [7, 7, 7] mu=[1. 2. 3.]<line_sep>sigma=[7.]<line_sep>johnson_su=tfd.JohnsonSU(skewness=skewness tailweight=tailweight loc=mu scale=sigma validate_args=<true>)<line_sep>self.assertAllEqual((3 ) johnson_su.stddev().shape)<line_sep>expected_d=sp_stats.johnsonsu.std(skewness[0] tailweight[0] mu[0] sigma[0])<line_sep>self.assertAllClose([expected_d]<times>3 self.evaluate(johnson_su.stddev()))<block_end><def_stmt>testJohnsonSUSample self<block_start>skewness=tf.constant(1.0)<line_sep>tailweight=tf.constant(2.0)<line_sep>mu=tf.constant(3.0)<line_sep>sigma=tf.constant(math.sqrt(3.0))<line_sep>mu_v=sp_stats.johnsonsu.mean(1 2 3 math.sqrt(3.0))<line_sep>sigma_v=sp_stats.johnsonsu.std(1 2 3 math.sqrt(3.0))<line_sep>n=tf.constant(100000)<line_sep>johnson_su=tfd.JohnsonSU(skewness=skewness tailweight=tailweight loc=mu scale=sigma validate_args=<true>)<line_sep>samples=johnson_su.sample(n seed=test_util.test_seed())<line_sep>sample_values=self.evaluate(samples)<line_sep># Note that the standard error for the sample mean is ~ sigma / sqrt(n). # The sample variance similarly is dependent on sigma and n. # Thus, the tolerances below are very sensitive to number of samples # as well as the variances chosen. 
self.assertEqual(sample_values.shape (100000 ))<line_sep>self.assertAllClose(sample_values.mean() mu_v atol=1e-1)<line_sep>self.assertAllClose(sample_values.std() sigma_v atol=1e-1)<line_sep>expected_samples_shape=tf.TensorShape([self.evaluate(n)]).concatenate(tf.TensorShape(self.evaluate(johnson_su.batch_shape_tensor())))<line_sep>self.assertAllEqual(expected_samples_shape samples.shape)<line_sep>self.assertAllEqual(expected_samples_shape sample_values.shape)<line_sep>expected_samples_shape=(tf.TensorShape([self.evaluate(n)]).concatenate(johnson_su.batch_shape))<line_sep>self.assertAllEqual(expected_samples_shape samples.shape)<line_sep>self.assertAllEqual(expected_samples_shape sample_values.shape)<block_end><def_stmt>testJohnsonSUFullyReparameterized self<block_start>skewness=tf.constant(1.0)<line_sep>tailweight=tf.constant(2.0)<line_sep>mu=tf.constant(4.0)<line_sep>sigma=tf.constant(3.0)<line_sep>_,[grad_skewness grad_tailweight grad_mu grad_sigma]=(tfp.math.value_and_gradient(<lambda>g d m s:tfd.JohnsonSU(skewness=g tailweight=d loc=m # pylint:disable=g-long-lambda scale=s validate_args=<true>).sample(100 seed=test_util.test_seed()) [skewness tailweight mu sigma]))<line_sep>grad_skewness,grad_tailweight,grad_mu,grad_sigma=self.evaluate([grad_skewness grad_tailweight grad_mu grad_sigma])<line_sep>self.assertIsNotNone(grad_skewness)<line_sep>self.assertIsNotNone(grad_tailweight)<line_sep>self.assertIsNotNone(grad_mu)<line_sep>self.assertIsNotNone(grad_sigma)<block_end><def_stmt>testJohnsonSUSampleMultiDimensional self<block_start>batch_size=2<line_sep>skewness=tf.constant([[1.0 -1.0]]<times>batch_size)<line_sep>tailweight=tf.constant([[2.0 3.0]]<times>batch_size)<line_sep>mu=tf.constant([[3.0 -3.0]]<times>batch_size)<line_sep>sigma=tf.constant([[math.sqrt(2.0) math.sqrt(3.0)]]<times>batch_size)<line_sep>sp_stats_params=[(1 2 3 math.sqrt(2.)) (-1 3 -3 math.sqrt(3.))]<line_sep>mu_v=[sp_stats.johnsonsu.mean(*params)<for>params sp_stats_params]<line_sep>sigma_v=[sp_stats.johnsonsu.std(*params)<for>params sp_stats_params]<line_sep>n=tf.constant(100000)<line_sep>johnson_su=tfd.JohnsonSU(skewness=skewness tailweight=tailweight loc=mu scale=sigma validate_args=<true>)<line_sep>samples=johnson_su.sample(n seed=test_util.test_seed())<line_sep>sample_values=self.evaluate(samples)<line_sep># Note that the standard error for the sample mean is ~ sigma / sqrt(n). # The sample variance similarly is dependent on sigma and n. # Thus, the tolerances below are very sensitive to number of samples # as well as the variances chosen. 
self.assertEqual(samples.shape (100000 batch_size 2))<line_sep>self.assertAllClose(sample_values[: 0 0].mean() mu_v[0] atol=1e-1)<line_sep>self.assertAllClose(sample_values[: 0 0].std() sigma_v[0] atol=1e-1)<line_sep>self.assertAllClose(sample_values[: 0 1].mean() mu_v[1] atol=1e-1)<line_sep>self.assertAllClose(sample_values[: 0 1].std() sigma_v[1] atol=1e-1)<line_sep>expected_samples_shape=tf.TensorShape([self.evaluate(n)]).concatenate(tf.TensorShape(self.evaluate(johnson_su.batch_shape_tensor())))<line_sep>self.assertAllEqual(expected_samples_shape samples.shape)<line_sep>self.assertAllEqual(expected_samples_shape sample_values.shape)<line_sep>expected_samples_shape=(tf.TensorShape([self.evaluate(n)]).concatenate(johnson_su.batch_shape))<line_sep>self.assertAllEqual(expected_samples_shape samples.shape)<line_sep>self.assertAllEqual(expected_samples_shape sample_values.shape)<block_end><def_stmt>testNegativetailweightFails self<block_start><with_stmt>self.assertRaisesOpError('Argument `tailweight` must be positive.')<block_start>johnson_su=tfd.JohnsonSU(skewness=[1.] tailweight=[-1.] loc=[1.] scale=[5.] validate_args=<true> name='D')<line_sep>self.evaluate(johnson_su.mean())<block_end><block_end><def_stmt>testNegativeScaleFails self<block_start><with_stmt>self.assertRaisesOpError('Argument `scale` must be positive.')<block_start>johnson_su=tfd.JohnsonSU(skewness=[1.] tailweight=[1.] loc=[1.] scale=[-5.] validate_args=<true> name='S')<line_sep>self.evaluate(johnson_su.mean())<block_end><block_end><def_stmt>testJohnsonSUShape self<block_start>skewness=tf.constant(1.0)<line_sep>tailweight=tf.constant(2.0)<line_sep>mu=tf.constant([-3.0]<times>5)<line_sep>sigma=tf.constant(11.0)<line_sep>johnson_su=tfd.JohnsonSU(skewness=skewness tailweight=tailweight loc=mu scale=sigma validate_args=<true>)<line_sep>self.assertEqual(self.evaluate(johnson_su.batch_shape_tensor()) [5])<line_sep>self.assertEqual(johnson_su.batch_shape tf.TensorShape([5]))<line_sep>self.assertAllEqual(self.evaluate(johnson_su.event_shape_tensor()) [])<line_sep>self.assertEqual(johnson_su.event_shape tf.TensorShape([]))<block_end><def_stmt>testJohnsonSUShapeWithPlaceholders self<block_start>skewness=tf1.placeholder_with_default(np.float32(5) shape=<none>)<line_sep>tailweight=tf1.placeholder_with_default(np.float32(5) shape=<none>)<line_sep>mu=tf1.placeholder_with_default(np.float32(5) shape=<none>)<line_sep>sigma=tf1.placeholder_with_default(np.float32([1.0 2.0]) shape=<none>)<line_sep>johnson_su=tfd.JohnsonSU(skewness=skewness tailweight=tailweight loc=mu scale=sigma validate_args=<true>)<line_sep># get_batch_shape should return an '<unknown>' tensor (graph mode only). self.assertEqual(johnson_su.event_shape ())<line_sep>self.assertEqual(johnson_su.batch_shape tf.TensorShape([2]<if>tf.executing_eagerly()<else><none>))<line_sep>self.assertAllEqual(self.evaluate(johnson_su.event_shape_tensor()) [])<line_sep>self.assertAllEqual(self.evaluate(johnson_su.batch_shape_tensor()) [2])<block_end><def_stmt>testVariableScale self<block_start>x=tf.Variable(1.)<line_sep>d=tfd.JohnsonSU(skewness=0. tailweight=2. loc=0. scale=x validate_args=<true>)<line_sep>self.evaluate([v.initializer<for>v d.variables])<line_sep>self.assertIs(x d.scale)<line_sep>self.assertEqual(0. 
self.evaluate(d.mean()))<with_stmt>self.assertRaisesOpError('Argument `scale` must be positive.')<block_start><with_stmt>tf.control_dependencies([x.assign(-1.)])<block_start>self.evaluate(d.mean())<block_end><block_end><block_end><def_stmt>testIncompatibleArgShapes self<block_start>scale=tf1.placeholder_with_default(tf.ones([4 1]) shape=<none>)<with_stmt>self.assertRaisesRegexp(Exception r'Incompatible shapes')<block_start>d=tfd.JohnsonSU(skewness=1. tailweight=2. loc=tf.zeros([2 3]) scale=scale validate_args=<true>)<line_sep>self.evaluate(d.batch_shape_tensor())<block_end><block_end><def_stmt>testBatchSamplesAreIndependent self<block_start>num_samples=1000<line_sep>d=tfd.JohnsonSU(loc=[0. 0.] scale=1. skewness=0. tailweight=1.)<line_sep>xs=d.sample(num_samples seed=test_util.test_seed())<line_sep>cov=1./num_samples<times>tf.matmul(xs xs transpose_a=<true>)<line_sep>self.assertAllClose(cov/d.variance() tf.eye(2) atol=0.4)<block_end><block_end><if_stmt>__name__<eq>'__main__'<block_start>test_util.main()<block_end>
<class_stmt>BaseError(Exception)<block_start>"""Base package error."""<block_end><class_stmt>InvalidModelInputError(BaseError)<block_start>"""Model input contains an error."""<block_end>
<import_stmt>os<import_stmt>pickle<import_stmt>re<import_stmt>sys<import_from_stmt>typing Any Dict List<import_stmt>numpy<as>np<import_stmt>pandas<as>pd<import_from_stmt>argoverse.data_loading.argoverse_forecasting_loader ArgoverseForecastingLoader<import_from_stmt>argoverse.map_representation.map_api ArgoverseMap<import_from_stmt>tqdm tqdm<import_from_stmt>.preprocess_utils.feature_utils compute_feature_for_one_seq save_features<import_from_stmt>.preprocess_utils.map_utils_vec save_map<line_sep># vectorization <import_from_stmt>.vectorization VectorizedCase<class_stmt>ArgoverseConvertor(object)<block_start><def_stmt>__init__ self cfg<block_start>self.data_dir=cfg['DATA_DIR']<line_sep>self.obs_len=cfg['OBS_LEN']<line_sep>self.lane_radius=cfg['LANE_RADIUS']<line_sep>self.object_radius=cfg['OBJ_RADIUS']<line_sep>self.raw_dataformat=cfg['RAW_DATA_FORMAT']<line_sep>self.am=ArgoverseMap()<line_sep>self.Afl=ArgoverseForecastingLoader<line_sep>self.out_dir=cfg['INTERMEDIATE_DATA_DIR']<line_sep>self.save_dir_pretext=cfg['info_prefix']<line_sep>self.specific_data_fold_list=cfg['specific_data_fold_list']<line_sep># vectorization self.vec_processor=VectorizedCase(cfg['vectorization_cfg'])<block_end><def_stmt>preprocess_map self<block_start>os.makedirs(self.out_dir exist_ok=<true>)<if_stmt><not>os.path.exists(os.path.join(self.out_dir 'map.pkl'))<block_start>print("Processing maps ...")<line_sep>save_map(self.out_dir)<line_sep>print('Map is saved at '+os.path.join(self.out_dir 'map.pkl'))<block_end><block_end><def_stmt>process self # preprocess the map <block_start>self.preprocess_map()<line_sep># store the case information data_info={}<for_stmt>folder os.listdir(self.data_dir)<block_start><if_stmt>folder<not><in>self.specific_data_fold_list<block_start><continue><block_end>afl=self.Afl(os.path.join(self.data_dir folder 'data'))<line_sep>info_dict={}<line_sep>data_info[folder]={}<for_stmt>path_name_ext tqdm(afl.seq_list)<block_start>afl_=afl.get(path_name_ext)<line_sep>path,name_ext=os.path.split(path_name_ext)<line_sep>name,ext=os.path.splitext(name_ext)<line_sep>info_dict[name]=self.process_case(afl_.seq_df)<block_end>out_path=os.path.join(self.out_dir self.save_dir_pretext+f'{folder}.pkl')<with_stmt>open(out_path 'wb')<as>f<block_start>pickle.dump(info_dict f pickle.HIGHEST_PROTOCOL)<block_end>data_info[folder]['sample_num']=len(afl.seq_list)<line_sep>print('Data is saved at '+out_path)<block_end># print info print("Finished preprocessing.")<for_stmt>k data_info.keys()<block_start>print('dataset name: '+k+'\n sample num: {}'.format(data_info[k]['sample_num']))<block_end><block_end><def_stmt>preprocess_case self seq_df<block_start>''' Args: seq_df: sequence DataFrame loaded from a single Argoverse forecasting csv '''<line_sep># retrieve info from csv agent_feature,obj_feature_ls,nearby_lane_ids,norm_center,city_name=compute_feature_for_one_seq(seq_df self.am self.obs_len self.lane_radius self.object_radius self.raw_dataformat viz=<false> mode='nearby')<line_sep># pack as the output dic=save_features(agent_feature obj_feature_ls nearby_lane_ids norm_center city_name)<line_sep><return>dic<block_end><def_stmt>process_case self seq_df# tensorized <block_start>data=self.preprocess_case(seq_df)<line_sep># vectorized vec_dic=self.vec_processor.process_case(data)<line_sep><return>vec_dic<block_end><block_end><if_stmt>__name__<eq>'__main__'<block_start><import_stmt>argparse<line_sep>parser=argparse.ArgumentParser(description='Preprocess argoverse dataset')<line_sep>parser.add_argument('config' help='config file 
path')<line_sep>args=parser.parse_args()<import_from_stmt>config.Config Config<line_sep>cfg=Config.fromfile(args.config)<line_sep>preprocess_cfg=cfg.get('preprocess_dataset')<line_sep>processor=ArgoverseConvertor(preprocess_cfg)<line_sep>processor.process()<block_end>
<import_stmt>sys<import_stmt>requests<def_stmt>main <block_start><try_stmt><block_start>value=int(sys.argv[1])<block_end><except_stmt>(IndexError ValueError)<block_start>print(f'Usage: {sys.argv[0]} [integer]')<line_sep><return><block_end>payload={'vector':{'values':[{'value':value}]}}<line_sep>r=requests.post('http://localhost:9000/predict' json=payload)<line_sep>r.raise_for_status()<line_sep>print(r.json()['data']['values'][0]['value'])<block_end><if_stmt>__name__<eq>'__main__'<block_start>main()<block_end>
# coding: utf-8 # Copyright (c) 2016, 2021, Oracle and/or its affiliates. All rights reserved. # This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license. <import_from_stmt>.create_connection_details CreateConnectionDetails<import_from_stmt>oci.util formatted_flat_dict NONE_SENTINEL value_allowed_none_or_none_sentinel# noqa: F401 <import_from_stmt>oci.decorators init_model_state_from_kwargs<line_sep>@init_model_state_from_kwargs<class_stmt>CreateConnectionFromObjectStorage(CreateConnectionDetails)<block_start>""" The details to create an Oracle Object Storage data asset connection. """<def_stmt>__init__ self **kwargs<block_start>""" Initializes a new CreateConnectionFromObjectStorage object with values from keyword arguments. The default value of the :py:attr:`~oci.data_integration.models.CreateConnectionFromObjectStorage.model_type` attribute of this class is ``ORACLE_OBJECT_STORAGE_CONNECTION`` and it should not be changed. The following keyword arguments are supported (corresponding to the getters/setters of this class): :param model_type: The value to assign to the model_type property of this CreateConnectionFromObjectStorage. Allowed values for this property are: "ORACLE_ADWC_CONNECTION", "ORACLE_ATP_CONNECTION", "ORACLE_OBJECT_STORAGE_CONNECTION", "ORACLEDB_CONNECTION", "MYSQL_CONNECTION", "GENERIC_JDBC_CONNECTION", "BICC_CONNECTION", "AMAZON_S3_CONNECTION" :type model_type: str :param key: The value to assign to the key property of this CreateConnectionFromObjectStorage. :type key: str :param model_version: The value to assign to the model_version property of this CreateConnectionFromObjectStorage. :type model_version: str :param parent_ref: The value to assign to the parent_ref property of this CreateConnectionFromObjectStorage. :type parent_ref: oci.data_integration.models.ParentReference :param name: The value to assign to the name property of this CreateConnectionFromObjectStorage. :type name: str :param description: The value to assign to the description property of this CreateConnectionFromObjectStorage. :type description: str :param object_status: The value to assign to the object_status property of this CreateConnectionFromObjectStorage. :type object_status: int :param identifier: The value to assign to the identifier property of this CreateConnectionFromObjectStorage. :type identifier: str :param connection_properties: The value to assign to the connection_properties property of this CreateConnectionFromObjectStorage. :type connection_properties: list[oci.data_integration.models.ConnectionProperty] :param registry_metadata: The value to assign to the registry_metadata property of this CreateConnectionFromObjectStorage. :type registry_metadata: oci.data_integration.models.RegistryMetadata :param credential_file_content: The value to assign to the credential_file_content property of this CreateConnectionFromObjectStorage. :type credential_file_content: str :param user_id: The value to assign to the user_id property of this CreateConnectionFromObjectStorage. :type user_id: str :param finger_print: The value to assign to the finger_print property of this CreateConnectionFromObjectStorage. :type finger_print: str :param pass_phrase: The value to assign to the pass_phrase property of this CreateConnectionFromObjectStorage. 
:type pass_phrase: str """<line_sep>self.swagger_types={'model_type':'str' 'key':'str' 'model_version':'str' 'parent_ref':'ParentReference' 'name':'str' 'description':'str' 'object_status':'int' 'identifier':'str' 'connection_properties':'list[ConnectionProperty]' 'registry_metadata':'RegistryMetadata' 'credential_file_content':'str' 'user_id':'str' 'finger_print':'str' 'pass_phrase':'<PASSWORD>'}<line_sep>self.attribute_map={'model_type':'modelType' 'key':'key' 'model_version':'modelVersion' 'parent_ref':'parentRef' 'name':'name' 'description':'description' 'object_status':'objectStatus' 'identifier':'identifier' 'connection_properties':'connectionProperties' 'registry_metadata':'registryMetadata' 'credential_file_content':'credentialFileContent' 'user_id':'userId' 'finger_print':'fingerPrint' 'pass_phrase':'<PASSWORD>'}<line_sep>self._model_type=<none><line_sep>self._key=<none><line_sep>self._model_version=<none><line_sep>self._parent_ref=<none><line_sep>self._name=<none><line_sep>self._description=<none><line_sep>self._object_status=<none><line_sep>self._identifier=<none><line_sep>self._connection_properties=<none><line_sep>self._registry_metadata=<none><line_sep>self._credential_file_content=<none><line_sep>self._user_id=<none><line_sep>self._finger_print=<none><line_sep>self._pass_phrase=<none><line_sep>self._model_type='ORACLE_OBJECT_STORAGE_CONNECTION'<block_end>@property<def_stmt>credential_file_content self<block_start>""" Gets the credential_file_content of this CreateConnectionFromObjectStorage. The credential file content from an Oracle Object Storage wallet. :return: The credential_file_content of this CreateConnectionFromObjectStorage. :rtype: str """<line_sep><return>self._credential_file_content<block_end>@credential_file_content.setter<def_stmt>credential_file_content self credential_file_content<block_start>""" Sets the credential_file_content of this CreateConnectionFromObjectStorage. The credential file content from an Oracle Object Storage wallet. :param credential_file_content: The credential_file_content of this CreateConnectionFromObjectStorage. :type: str """<line_sep>self._credential_file_content=credential_file_content<block_end>@property<def_stmt>user_id self<block_start>""" Gets the user_id of this CreateConnectionFromObjectStorage. The OCI user OCID for the user to connect to. :return: The user_id of this CreateConnectionFromObjectStorage. :rtype: str """<line_sep><return>self._user_id<block_end>@user_id.setter<def_stmt>user_id self user_id<block_start>""" Sets the user_id of this CreateConnectionFromObjectStorage. The OCI user OCID for the user to connect to. :param user_id: The user_id of this CreateConnectionFromObjectStorage. :type: str """<line_sep>self._user_id=user_id<block_end>@property<def_stmt>finger_print self<block_start>""" Gets the finger_print of this CreateConnectionFromObjectStorage. The fingerprint for the user. :return: The finger_print of this CreateConnectionFromObjectStorage. :rtype: str """<line_sep><return>self._finger_print<block_end>@finger_print.setter<def_stmt>finger_print self finger_print<block_start>""" Sets the finger_print of this CreateConnectionFromObjectStorage. The fingerprint for the user. :param finger_print: The finger_print of this CreateConnectionFromObjectStorage. :type: str """<line_sep>self._finger_print=finger_print<block_end>@property<def_stmt>pass_phrase self<block_start>""" Gets the pass_phrase of this CreateConnectionFromObjectStorage. The passphrase for the connection. 
:return: The pass_phrase of this CreateConnectionFromObjectStorage. :rtype: str """<line_sep><return>self._pass_phrase<block_end>@pass_phrase.setter<def_stmt>pass_phrase self pass_phrase<block_start>""" Sets the pass_phrase of this CreateConnectionFromObjectStorage. The passphrase for the connection. :param pass_phrase: The pass_phrase of this CreateConnectionFromObjectStorage. :type: str """<line_sep>self._pass_phrase=pass_phrase<block_end><def_stmt>__repr__ self<block_start><return>formatted_flat_dict(self)<block_end><def_stmt>__eq__ self other<block_start><if_stmt>other<is><none><block_start><return><false><block_end><return>self.__dict__<eq>other.__dict__<block_end><def_stmt>__ne__ self other<block_start><return><not>self<eq>other<block_end><block_end>
<import_stmt>torch.nn<as>nn<import_from_stmt>. common<def_stmt>build_model args<block_start><return>ResNet(args)<block_end><class_stmt>ResNet(nn.Module)<block_start><def_stmt>__init__ self args in_channels=3 out_channels=3 n_feats=<none> kernel_size=<none> n_resblocks=<none> mean_shift=<true><block_start>super(ResNet self).__init__()<line_sep>self.in_channels=in_channels<line_sep>self.out_channels=out_channels<line_sep>self.n_feats=args.n_feats<if>n_feats<is><none><else>n_feats<line_sep>self.kernel_size=args.kernel_size<if>kernel_size<is><none><else>kernel_size<line_sep>self.n_resblocks=args.n_resblocks<if>n_resblocks<is><none><else>n_resblocks<line_sep>self.mean_shift=mean_shift<line_sep>self.rgb_range=args.rgb_range<line_sep>self.mean=self.rgb_range/2<line_sep>modules=[]<line_sep>modules.append(common.default_conv(self.in_channels self.n_feats self.kernel_size))<for_stmt>_ range(self.n_resblocks)<block_start>modules.append(common.ResBlock(self.n_feats self.kernel_size))<block_end>modules.append(common.default_conv(self.n_feats self.out_channels self.kernel_size))<line_sep>self.body=nn.Sequential(*modules)<block_end><def_stmt>forward self input<block_start><if_stmt>self.mean_shift<block_start>input=input-self.mean<block_end>output=self.body(input)<if_stmt>self.mean_shift<block_start>output=output+self.mean<block_end><return>output<block_end><block_end>
# -*- coding: utf-8 -*- u""" Created on 2015-8-19 @author: cheng.li """<import_from_stmt>PyFin.Analysis.DataProviders.DataProviders DataProvider<line_sep>__all__=['DataProvider']<line_sep>
<import_stmt>asyncio<import_stmt>pytest<import_from_stmt>kopf.engines.sleeping sleep_or_wait<async_keyword><def_stmt>test_the_only_delay_is_awaited timer<block_start><with_stmt>timer<block_start>unslept=<await>asyncio.wait_for(sleep_or_wait(0.10) timeout=1.0)<block_end><assert_stmt>0.10<le>timer.seconds<l>0.11<assert_stmt>unslept<is><none><block_end><async_keyword><def_stmt>test_the_shortest_delay_is_awaited timer<block_start><with_stmt>timer<block_start>unslept=<await>asyncio.wait_for(sleep_or_wait([0.10 0.20]) timeout=1.0)<block_end><assert_stmt>0.10<le>timer.seconds<l>0.11<assert_stmt>unslept<is><none><block_end><async_keyword><def_stmt>test_specific_delays_only_are_awaited timer<block_start><with_stmt>timer<block_start>unslept=<await>asyncio.wait_for(sleep_or_wait([0.10 <none>]) timeout=1.0)<block_end><assert_stmt>0.10<le>timer.seconds<l>0.11<assert_stmt>unslept<is><none><block_end><async_keyword><def_stmt>test_passed_delays_skip_sleeping timer<block_start><with_stmt>timer<block_start>unslept=<await>asyncio.wait_for(sleep_or_wait([0.10 -10]) timeout=1.0)<block_end><assert_stmt>timer.seconds<l>0.01<assert_stmt>unslept<is><none><block_end>@pytest.mark.parametrize('delays' [pytest.param([] id='empty-list') pytest.param([<none>] id='list-of-none') ])<async_keyword><def_stmt>test_no_delays_skip_sleeping timer delays<block_start><with_stmt>timer<block_start>unslept=<await>asyncio.wait_for(sleep_or_wait(delays) timeout=1.0)<block_end><assert_stmt>timer.seconds<l>0.01<assert_stmt>unslept<is><none><block_end><async_keyword><def_stmt>test_by_event_set_before_time_comes timer<block_start>event=asyncio.Event()<line_sep>asyncio.get_running_loop().call_later(0.07 event.set)<with_stmt>timer<block_start>unslept=<await>asyncio.wait_for(sleep_or_wait(0.10 event) timeout=1.0)<block_end><assert_stmt>unslept<is><not><none><assert_stmt>0.02<le>unslept<le>0.04<assert_stmt>0.06<le>timer.seconds<le>0.08<block_end><async_keyword><def_stmt>test_with_zero_time_and_event_initially_cleared timer<block_start>event=asyncio.Event()<line_sep>event.clear()<with_stmt>timer<block_start>unslept=<await>asyncio.wait_for(sleep_or_wait(0 event) timeout=1.0)<block_end><assert_stmt>timer.seconds<le>0.01<assert_stmt>unslept<is><none><block_end><async_keyword><def_stmt>test_with_zero_time_and_event_initially_set timer<block_start>event=asyncio.Event()<line_sep>event.set()<with_stmt>timer<block_start>unslept=<await>asyncio.wait_for(sleep_or_wait(0 event) timeout=1.0)<block_end><assert_stmt>timer.seconds<le>0.01<assert_stmt><not>unslept<block_end># 0/None; undefined for such case: both goals reached.
# Code Listing #1 """ Glossary models - Showing django admin view """<import_from_stmt>django.db models<class_stmt>GlossaryTerm(models.Model)<block_start>""" Model for describing a glossary word (term) """<line_sep>term=models.CharField(max_length=1024)<line_sep>meaning=models.CharField(max_length=1024)<line_sep>meaning_html=models.CharField('Meaning with HTML markup' max_length=4096 null=<true> blank=<true>)<line_sep>example=models.CharField(max_length=4096 null=<true> blank=<true>)<line_sep># can be a ManyToManyField? domains=models.CharField(max_length=128 null=<true> blank=<true>)<line_sep>notes=models.CharField(max_length=2048 null=<true> blank=<true>)<line_sep>url=models.CharField('URL' max_length=2048 null=<true> blank=<true>)<line_sep>name=models.ForeignKey('GlossarySource' verbose_name='Source' blank=<true>)<def_stmt>__unicode__ self<block_start><return>self.term<block_end><class_stmt>Meta<block_start>unique_together=('term' 'meaning' 'url')<block_end><block_end><class_stmt>GlossarySource(models.Model)<block_start>""" Model for describing a glossary source """<line_sep>name=models.CharField(max_length=256 primary_key=<true>)<line_sep>url=models.CharField(max_length=2048 blank=<true>)<line_sep>description=models.CharField(max_length=512)<line_sep># can be a ManyToManyField? tags=models.CharField(max_length=1024 blank=<true>)<line_sep>mainlang=models.CharField(max_length=8 default='en_US')<line_sep>singlepage=models.BooleanField(default=<true>)<line_sep>translations=models.BooleanField(default=<false>)<def_stmt>__unicode__ self<block_start><return>self.name<block_end><block_end>
<import_from_stmt>django forms<import_from_stmt>zentral.core.probes.forms BaseCreateProbeForm<import_from_stmt>zentral.utils.forms CommaSeparatedQuotedStringField<import_from_stmt>.models Configuration Enrollment PrincipalUserDetectionSource<import_from_stmt>.probes MunkiInstallProbe<class_stmt>PrincipalUserDetectionSourceWidget(forms.CheckboxSelectMultiple)<block_start><def_stmt>__init__ self attrs=<none> choices=()<block_start>super().__init__(attrs choices=PrincipalUserDetectionSource.choices())<block_end><def_stmt>format_value self value<block_start><if_stmt>isinstance(value str)<and>value<block_start>value=[v.strip()<for>v value.split(",")]<block_end><return>super().format_value(value)<block_end><block_end><class_stmt>ConfigurationForm(forms.ModelForm)<block_start><class_stmt>Meta<block_start>model=Configuration<line_sep>fields="__all__"<line_sep>widgets={"principal_user_detection_sources":PrincipalUserDetectionSourceWidget}<block_end><block_end><class_stmt>EnrollmentForm(forms.ModelForm)<block_start><class_stmt>Meta<block_start>model=Enrollment<line_sep>fields="__all__"<block_end><def_stmt>__init__ self *args **kwargs<block_start>self.configuration=kwargs.pop("configuration" <none>)<line_sep>kwargs.pop("enrollment_only" <none>)<line_sep>kwargs.pop("standalone" <none>)<line_sep>super().__init__(*args **kwargs)<if_stmt>self.configuration<block_start>self.fields["configuration"].widget=forms.HiddenInput()<block_end><block_end><block_end><class_stmt>UpdateInstallProbeForm(forms.Form)<block_start>installed_item_names=CommaSeparatedQuotedStringField(help_text="Comma separated names of the installed items" required=<false>)<line_sep>install_types=forms.ChoiceField(choices=(('install,removal' 'install & removal') ('install' 'install') ('removal' 'removal')) initial='install' widget=forms.RadioSelect)<line_sep>unattended_installs=forms.ChoiceField(choices=(('' 'yes & no') ('1' 'yes') ('0' 'no')) widget=forms.RadioSelect required=<false>)<def_stmt>get_body self<block_start>cleaned_data=self.cleaned_data<line_sep># install types body={'install_types':sorted(cleaned_data['install_types'].split(','))}<line_sep># installed item names installed_item_names=cleaned_data.get('installed_item_names')<if_stmt>installed_item_names<block_start>body['installed_item_names']=installed_item_names<block_end># unattended installs <try_stmt><block_start>unattended_installs=bool(int(cleaned_data.get('unattended_installs')))<block_end><except_stmt>ValueError<block_start><pass><block_end><else_stmt><block_start>body['unattended_installs']=unattended_installs<block_end><return>body<block_end>@staticmethod<def_stmt>get_probe_initial probe<block_start>initial={'installed_item_names':sorted(probe.installed_item_names) 'install_types':','.join(sorted(probe.install_types))}<if_stmt>probe.unattended_installs<is><none><block_start>initial['unattended_installs']=''<block_end><else_stmt><block_start>initial['unattended_installs']=str(int(probe.unattended_installs))<block_end><return>initial<block_end><block_end><class_stmt>CreateInstallProbeForm(BaseCreateProbeForm UpdateInstallProbeForm)<block_start>model=MunkiInstallProbe<line_sep>field_order=("name" "installed_item_names" "unattended_yes" "unattended_no")<block_end>
"""Result class definitions."""<class_stmt>_WriteResult(object)<block_start>"""Base class for write result classes."""<def_stmt>__init__ self acknowledged=<true><block_start>self.acknowledged=acknowledged<block_end><block_end># here only to PyMongo compat <class_stmt>InsertOneResult(_WriteResult)<block_start>"""The return type for :meth:`~tinymongo.TinyMongoCollection.insert_one`. """<line_sep>__slots__=("__inserted_id" "__acknowledged" "__eid")<def_stmt>__init__ self eid inserted_id acknowledged=<true><block_start>self.__eid=eid<line_sep>self.__inserted_id=inserted_id<line_sep>super(InsertOneResult self).__init__(acknowledged)<block_end>@property<def_stmt>inserted_id self<block_start>"""The inserted document's _id."""<line_sep><return>self.__inserted_id<block_end>@property<def_stmt>eid self<block_start>"""The inserted document's tinyDB eid."""<line_sep><return>self.__eid<block_end><block_end><class_stmt>InsertManyResult(_WriteResult)<block_start>"""The return type for :meth:`~tinymongo.TinyMongoCollection.insert_many`. """<line_sep>__slots__=("__inserted_ids" "__acknowledged" "__eids")<def_stmt>__init__ self eids inserted_ids acknowledged=<true><block_start>self.__eids=eids<line_sep>self.__inserted_ids=inserted_ids<line_sep>super(InsertManyResult self).__init__(acknowledged)<block_end>@property<def_stmt>inserted_ids self<block_start>"""A list of _ids of the inserted documents, in the order provided."""<line_sep><return>self.__inserted_ids<block_end>@property<def_stmt>eids self<block_start>"""A list of _ids of the inserted documents, in the order provided."""<line_sep><return>self.__eids<block_end><block_end><class_stmt>UpdateResult(_WriteResult)<block_start>"""The return type for :meth:`~tinymongo.TinyMongoCollection.update_one`, :meth:`~tinymongo.TinyMongoCollection.update_many`, and :meth:`~tinymongo.TinyMongoCollection.replace_one`. """<line_sep>__slots__=("__raw_result" "__acknowledged")<def_stmt>__init__ self raw_result acknowledged=<true><block_start>self.__raw_result=raw_result<line_sep>super(UpdateResult self).__init__(acknowledged)<block_end>@property<def_stmt>raw_result self<block_start>"""The raw result document returned by the server."""<line_sep><return>self.__raw_result<block_end>@property<def_stmt>matched_count self<block_start>"""The number of documents matched for this update."""<line_sep># TODO: Implement this <block_end>@property<def_stmt>modified_count self<block_start>"""The number of documents modified. """<line_sep># TODO: Implement this <block_end>@property<def_stmt>upserted_id self<block_start>"""The _id of the inserted document if an upsert took place. Otherwise ``None``. """<line_sep># TODO: Implement this <block_end><block_end><class_stmt>DeleteResult(_WriteResult)<block_start>"""The return type for :meth:`~tinymongo.TinyMongoCollection.delete_one` and :meth:`~tinymongo.TinyMongoCollection.delete_many`"""<line_sep>__slots__=("__raw_result" "__acknowledged")<def_stmt>__init__ self raw_result acknowledged=<true><block_start>self.__raw_result=raw_result<line_sep>super(DeleteResult self).__init__(acknowledged)<block_end>@property<def_stmt>raw_result self<block_start>"""The raw result document returned by the server."""<line_sep><return>self.__raw_result<block_end>@property<def_stmt>deleted_count self<block_start>"""The number of documents deleted."""<if_stmt>isinstance(self.raw_result list)<block_start><return>len(self.raw_result)<block_end><else_stmt><block_start><return>self.raw_result<block_end><block_end><block_end>
# Code derived from https://github.com/openai/improved-gan/tree/master/inception_score <import_from_future_stmt> absolute_import<import_from_future_stmt> division<import_from_future_stmt> print_function<import_stmt>os.path<import_stmt>sys<import_stmt>tarfile<import_stmt>numpy<as>np<import_from_stmt>six.moves urllib<import_stmt>tensorflow<as>tf<import_stmt>glob<import_stmt>scipy.misc<import_stmt>scipy.linalg<import_stmt>math<import_stmt>chainer<import_from_stmt>chainer functions<as>F<line_sep>MODEL_DIR='/tmp/imagenet'<line_sep>DATA_URL='http://download.tensorflow.org/models/image/imagenet/inception-2015-12-05.tgz'<line_sep>softmax=<none><line_sep>last_layer=<none><line_sep>config=tf.ConfigProto()<line_sep>config.gpu_options.per_process_gpu_memory_fraction=0.3<def_stmt>inception_forward images layer<block_start><assert_stmt>(type(images[0])<eq>np.ndarray)<assert_stmt>(len(images[0].shape)<eq>3)<assert_stmt>(np.max(images[0])<g>10)<assert_stmt>(np.min(images[0])<ge>0.0)<line_sep>bs=100<line_sep>images=images.transpose(0 2 3 1)<with_stmt>tf.Session(config=config)<as>sess<block_start>preds=[]<line_sep>n_batches=int(math.ceil(float(len(images))/float(bs)))<for_stmt>i range(n_batches)<block_start>sys.stdout.write(".")<line_sep>sys.stdout.flush()<line_sep>inp=images[(i<times>bs):min((i+1)<times>bs len(images))]<line_sep>pred=sess.run(layer {'ExpandDims:0':inp})<line_sep>preds.append(pred)<block_end>preds=np.concatenate(preds 0)<block_end><return>preds<block_end><def_stmt>get_mean_and_cov images<block_start>before_preds=inception_forward(images last_layer)<line_sep>m=np.mean(before_preds 0)<line_sep>cov=np.cov(before_preds rowvar=<false>)<line_sep><return>m cov<block_end><def_stmt>get_fid images ref_stats=<none> images_ref=<none> splits=10<block_start>before_preds=inception_forward(images last_layer)<if_stmt>ref_stats<is><none><block_start><if_stmt>images_ref<is><none><block_start><raise>ValueError('images_ref should be provided if ref_stats is None')<block_end>m_ref,cov_ref=get_mean_and_cov(images_ref)<block_end><else_stmt><block_start># ref_stats is expected to hold precomputed reference statistics as a (mean, covariance) pair m_ref,cov_ref=ref_stats<block_end>fids=[]<for_stmt>i range(splits)<block_start>part=before_preds[(i<times>before_preds.shape[0]<floordiv>splits):((i+1)<times>before_preds.shape[0]<floordiv>splits) :]<line_sep>m_gen=np.mean(part 0)<line_sep>cov_gen=np.cov(part rowvar=<false>)<line_sep>fid=np.sum((m_ref-m_gen)<power>2)+np.trace(cov_ref+cov_gen-2<times>scipy.linalg.sqrtm(np.dot(cov_ref cov_gen)))<line_sep>fids.append(fid)<block_end><return>np.mean(fids) np.std(fids)<block_end># Call this function with list of images. Each element should be a # numpy array with values ranging from 0 to 255. <def_stmt>get_inception_score images splits=10<block_start>preds=inception_forward(images softmax)<line_sep>scores=[]<for_stmt>i range(splits)<block_start>part=preds[(i<times>preds.shape[0]<floordiv>splits):((i+1)<times>preds.shape[0]<floordiv>splits) :]<line_sep>kl=part<times>(np.log(part)-np.log(np.expand_dims(np.mean(part 0) 0)))<line_sep>kl=np.mean(np.sum(kl 1))<line_sep>scores.append(np.exp(kl))<block_end><return>np.mean(scores) np.std(scores)<block_end># Call this function with list of images. Each element should be a # numpy array with values ranging from 0 to 255. 
<def_stmt>get_inception_accuracy images labels<block_start>batch_size=100<if_stmt>isinstance(images (list tuple))<block_start>ims_list=images<line_sep>ys_list=[]<for_stmt>ims ims_list<block_start>n,_,_,_=ims.shape<line_sep>n_batches=int(math.ceil(float(n)/float(batch_size)))<line_sep>print('batch_size:{}, n_ims{}, n_batches{}'.format(batch_size n n_batches))<line_sep>print('Calculating inception accuracy...')<line_sep>ys=inception_forward(ims softmax)[: 1:1001]<line_sep>ys_list.append(ys)<block_end>ys=sum(ys_list)/len(ys_list)<block_end><else_stmt><block_start>n,_,_,_,=images.shape<line_sep>n_batches=int(math.ceil(float(n)/float(batch_size)))<line_sep>print('batch_size:{}, n_ims{}, n_batches{}'.format(batch_size n n_batches))<line_sep>print('Calculating inception accuracy...')<line_sep>ys=inception_forward(images softmax)[: 1:1001]<block_end><return>F.accuracy(ys labels).data<block_end># This function is called automatically. <def_stmt>_init_inception <block_start><global>softmax<line_sep><global>last_layer<if_stmt><not>os.path.exists(MODEL_DIR)<block_start>os.makedirs(MODEL_DIR)<block_end>filename=DATA_URL.split('/')[-1]<line_sep>filepath=os.path.join(MODEL_DIR filename)<if_stmt><not>os.path.exists(filepath)<block_start><def_stmt>_progress count block_size total_size<block_start>sys.stdout.write('\r>> Downloading %s %.1f%%'%(filename float(count<times>block_size)/float(total_size)<times>100.0))<line_sep>sys.stdout.flush()<block_end>filepath,_=urllib.request.urlretrieve(DATA_URL filepath _progress)<line_sep>print()<line_sep>statinfo=os.stat(filepath)<line_sep>print('Succesfully downloaded' filename statinfo.st_size 'bytes.')<block_end>tarfile.open(filepath 'r:gz').extractall(MODEL_DIR)<with_stmt>tf.gfile.FastGFile(os.path.join(MODEL_DIR 'classify_image_graph_def.pb') 'rb')<as>f<block_start>graph_def=tf.GraphDef()<line_sep>graph_def.ParseFromString(f.read())<line_sep>_=tf.import_graph_def(graph_def name='')<block_end># Works with an arbitrary minibatch size. <with_stmt>tf.Session(config=config)<as>sess<block_start>pool3=sess.graph.get_tensor_by_name('pool_3:0')<line_sep>ops=pool3.graph.get_operations()<for_stmt>op_idx,op enumerate(ops)<block_start><for_stmt>o op.outputs<block_start>shape=o.get_shape()<line_sep>shape=[s.value<for>s shape]<line_sep>new_shape=[]<for_stmt>j,s enumerate(shape)<block_start><if_stmt>s<eq>1<and>j<eq>0<block_start>new_shape.append(<none>)<block_end><else_stmt><block_start>new_shape.append(s)<block_end><block_end>o._shape=tf.TensorShape(new_shape)<block_end><block_end>w=sess.graph.get_operation_by_name("softmax/logits/MatMul").inputs[1]<line_sep>last_layer=tf.squeeze(pool3)<line_sep>logits=tf.matmul(last_layer w)<line_sep>softmax=tf.nn.softmax(logits)<block_end><block_end><if_stmt>softmax<is><none><block_start>_init_inception()<block_end>
<import_from_stmt>gooey.gui.components.filtering.prefix_filter PrefixTokenizers<def_stmt>_include_layout_docs f<block_start>""" Combines the layout_options docstring with the wrapped function's docstring. """<line_sep>f.__doc__=(f.__doc__<or>'')+LayoutOptions.__doc__<line_sep><return>f<block_end><def_stmt>_include_global_option_docs f<block_start>""" Combines docstrings for options available to all widget types. """<line_sep>_doc=""":param initial_value: Sets the initial value in the UI. """<line_sep>f.__doc__=(f.__doc__<or>'')+_doc<line_sep><return>f<block_end><def_stmt>_include_chooser_msg_wildcard_docs f<block_start>""" Combines the basic Chooser options (wildcard, message) docstring with the wrapped function's docstring. """<line_sep>_doc=""":param wildcard: Sets the wildcard, which can contain multiple file types, for example: "BMP files (*.bmp)|*.bmp|GIF files (*.gif)|*.gif" :param message: Sets the message that will be displayed on the dialog. """<line_sep>f.__doc__=(f.__doc__<or>'')+_doc<line_sep><return>f<block_end><def_stmt>_include_choose_dir_file_docs f<block_start>""" Combines the directory/file chooser options (default_dir, default_file) docstring with the wrapped function's docstring. """<line_sep>_doc=""":param default_dir: The default directory selected when the dialog spawns :param default_file: The default filename used in the dialog """<line_sep>f.__doc__=(f.__doc__<or>'')+_doc<line_sep><return>f<block_end><def_stmt>LayoutOptions label_color=<none> label_bg_color=<none> help_color=<none> help_bg_color=<none> error_color=<none> error_bg_color=<none> show_label=<true> show_help=<true> visible=<true> full_width=<false><block_start>""" Layout Options: --------------- Color options can be passed either as a hex string ('#ff0000') or as a collection of RGB values (e.g. `[255, 0, 0]` or `(255, 0, 0)`) :param label_color: The foreground color of the label text :param label_bg_color: The background color of the label text. :param help_color: The foreground color of the help text. :param help_bg_color: The background color of the help text. :param error_color: The foreground color of the error text (when visible). :param error_bg_color: The background color of the error text (when visible). :param show_label: Toggles whether or not to display the label text :param show_help: Toggles whether or not to display the help text :param visible: Hides the entire widget when False. Note: the widget is still present in the UI and will still send along any default values that have been provided in code. This option is here for when you want to hide certain advanced / dangerous inputs from your GUI users. :param full_width: This is a layout hint for this widget. When True the widget will fill the entire available space within a given row. Otherwise, it will be sized based on the column rules provided elsewhere. 
"""<line_sep><return>_clean(locals())<block_end>@_include_layout_docs@_include_global_option_docs<def_stmt>TextField initial_value=<none> validator=<none> **layout_options<block_start><return>_clean(locals())<block_end>@_include_layout_docs@_include_global_option_docs<def_stmt>PasswordField initial_value=<none> validator=<none> **layout_options<block_start><return>_clean(locals())<block_end>@_include_layout_docs@_include_global_option_docs<def_stmt>IntegerField initial_value=<none> validator=<none> min=0 max=100 increment=1 **layout_options<block_start>""" :param min: The minimum value allowed :param max: The maximum value allowed :param increment: The step size of the spinner """<line_sep><return>_clean(locals())<block_end>@_include_layout_docs@_include_global_option_docs<def_stmt>Slider initial_value=<none> validator=<none> min=0 max=100 increment=1 **layout_options<block_start>""" :param min: The minimum value allowed :param max: The maximum value allowed :param increment: The step size of the slider """<line_sep><return>_clean(locals())<block_end>@_include_layout_docs@_include_global_option_docs<def_stmt>DecimalField validator=<none> initial_value=<none> min=0.0 max=1.0 increment=0.01 precision=2 **layout_options<block_start>""" :param min: The minimum value allowed :param max: The maximum value allowed :param increment: The step size of the spinner :param precision: The precision of the decimal (0-20) """<line_sep><return>_clean(locals())<block_end>@_include_layout_docs@_include_global_option_docs<def_stmt>TextArea initial_value=<none> height=<none> readonly=<false> validator=<none> **layout_options<block_start>""" :param height: The height of the TextArea. :param readonly: Controls whether or not user's may modify the contents """<line_sep><return>_clean(locals())<block_end>@_include_layout_docs@_include_global_option_docs<def_stmt>RichTextConsole **layout_options<block_start><return>_clean(locals())<block_end>@_include_layout_docs@_include_global_option_docs<def_stmt>ListBox initial_value=<none> height=<none> **layout_options<block_start>""" :param height: The height of the ListBox """<line_sep><return>_clean(locals())<block_end># TODO: what are this guy's layout options..? <def_stmt>MutexGroup initial_selection=<none> title=<none> **layout_options<block_start>""" :param initial_selection: The index of the option which should be initially selected. 
:param title: Adds the supplied title above the RadioGroup options (when present) """<line_sep><return>_clean(locals())<block_end>@_include_layout_docs@_include_global_option_docs<def_stmt>Dropdown initial_value=<none> **layout_options<block_start><return>_clean(locals())<block_end>@_include_layout_docs@_include_global_option_docs<def_stmt>Counter initial_value=<none> **layout_options<block_start><return>_clean(locals())<block_end>@_include_layout_docs@_include_global_option_docs<def_stmt>CheckBox initial_value=<none> **layout_options<block_start><return>_clean(locals())<block_end>@_include_layout_docs@_include_global_option_docs<def_stmt>BlockCheckBox initial_value=<none> checkbox_label=<none> **layout_options<block_start><return>_clean(locals())<block_end>@_include_layout_docs@_include_global_option_docs<def_stmt>FilterableDropdown placeholder=<none> empty_message=<none> max_size=80 search_strategy=<none> initial_value=<none> **layout_options<block_start>""" :param placeholder: Text to display when the user has provided no input :param empty_message: Text to display if the user's query doesn't match anything :param max_size: maximum height of the dropdown :param search_strategy: see: PrefixSearchStrategy """<line_sep><return>_clean(locals())<block_end><def_stmt>PrefixSearchStrategy choice_tokenizer=PrefixTokenizers.WORDS input_tokenizer=PrefixTokenizers.REGEX('\s') ignore_case=<true> operator='AND' index_suffix=<false><block_start>""" :param choice_tokenizer: See: PrefixTokenizers - sets the tokenization strategy for the `choices` :param input_tokenizer: See: PrefixTokenizers sets how the users's `input` get tokenized. :param ignore_case: Controls whether or not to honor case while searching :param operator: see: `OperatorType` - controls whether or not individual search tokens get `AND`ed or `OR`d together when evaluating a match. :param index_suffix: When enabled, generates a suffix-tree to enable efficient partial-matching against any of the tokens. """<line_sep><return>{**_clean(locals()) 'type':'PrefixFilter'}<block_end>@_include_layout_docs@_include_global_option_docs@_include_choose_dir_file_docs@_include_chooser_msg_wildcard_docs<def_stmt>FileChooser wildcard=<none> default_dir=<none> default_file=<none> message=<none> initial_value=<none> **layout_options<block_start><return>_clean(locals())<block_end>@_include_layout_docs@_include_global_option_docs@_include_chooser_msg_wildcard_docs<def_stmt>DirectoryChooser wildcard=<none> default_path=<none> message=<none> initial_value=<none> **layout_options<block_start>""" :param default_path: The default path selected when the dialog spawns """<line_sep><return>_clean(locals())<block_end>@_include_layout_docs@_include_global_option_docs@_include_choose_dir_file_docs@_include_chooser_msg_wildcard_docs<def_stmt>FileSaver wildcard=<none> default_dir=<none> default_file=<none> message=<none> initial_value=<none> **layout_options<block_start><return>_clean(locals())<block_end>@_include_layout_docs@_include_global_option_docs@_include_choose_dir_file_docs@_include_chooser_msg_wildcard_docs<def_stmt>MultiFileSaver wildcard=<none> default_dir=<none> default_file=<none> message=<none> initial_value=<none> **layout_options<block_start><return>_clean(locals())<block_end><def_stmt>ExpressionValidator test=<none> message=<none><block_start>""" Creates the data for a basic expression validator. Your test function can be made up of any valid Python expression. It receives the variable user_input as an argument against which to perform its validation. 
Note that all values coming from Gooey are in the form of a string, so you'll have to cast as needed in order to perform your validation. """<line_sep><return>{**_clean(locals()) 'type':'ExpressionValidator'}<block_end><def_stmt>RegexValidator test=<none> message=<none><block_start>""" Creates the data for a basic RegexValidator. :param test: the regex expression. This should be the expression directly (i.e. `test='\d+'`). Gooey will test that the user's input satisfies this expression. :param message: The message to display if the input doesn't match the regex """<line_sep><return>{**_clean(locals()) 'type':'RegexValidator'}<block_end><def_stmt>ArgumentGroup show_border=<false> show_underline=<true> label_color=<none> columns=<none> margin_top=<none><block_start>""" :param show_border: When True a labeled border will surround all widgets added to this group. :param show_underline: Controls whether or not to display the underline when using the default border style :param label_color: The foreground color for the group name :param columns: Controls the number of widgets on each row :param margin_top: specifies the top margin in pixels for this group """<line_sep><return>_clean(locals())<block_end><def_stmt>_clean options<block_start>cleaned={k:v<for>k,v options.items()<if>v<is><not><none><and>k<ne>"layout_options"}<line_sep><return>{**options.get('layout_options' {}) **cleaned}<block_end>
# -*- coding: utf-8 -*- # MegEngine is Licensed under the Apache License, Version 2.0 (the "License") # # Copyright (c) 2014-2021 Megvii Inc. All rights reserved. # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. <import_stmt>numpy<as>np<import_stmt>pytest<import_stmt>megengine<as>mge<import_stmt>megengine.distributed<as>dist<import_from_stmt>megengine tensor<import_from_stmt>megengine.distributed.functional all_gather all_to_all gather reduce_scatter_sum scatter <import_from_stmt>megengine.jit trace<line_sep>@pytest.mark.require_ngpu(2)@pytest.mark.parametrize("shape" [(2 3) (8 10) (99 77) (2 2 2 2)] ids=str)@pytest.mark.parametrize("symbolic" [<false> <true>] ids=str)@pytest.mark.parametrize("axis" [0 1] ids=str)@pytest.mark.isolated_distributed<def_stmt>test_all_gather shape symbolic axis<block_start>@dist.launcher(n_gpus=2)<def_stmt>worker data expect<block_start>rank=dist.get_rank()<line_sep>inp=tensor(data[rank])<def_stmt>func <block_start>output=all_gather(inp axis=axis)<line_sep><return>output<block_end>func=trace(symbolic=symbolic)(func)<line_sep>output=func()<assert_stmt>np.allclose(output.numpy() expect[rank])<block_end>x=np.random.random_sample(shape).astype("float32")<line_sep>y=np.random.random_sample(shape).astype("float32")<line_sep>z=np.concatenate((x y) axis=axis)<line_sep>data=(x y)<line_sep>expect=(z z)<line_sep>worker(data expect)<block_end>@pytest.mark.require_ngpu(2)@pytest.mark.parametrize("shape,symbolic" [((2 4 6 8) <false>) ((2 4 6 8) <true>)] ids=str)@pytest.mark.parametrize("axis" [1 0 2 3] ids=str)@pytest.mark.isolated_distributed<def_stmt>test_reduce_scatter_sum shape symbolic axis<block_start>@dist.launcher(n_gpus=2)<def_stmt>worker data expect<block_start>rank=dist.get_rank()<line_sep>inp=tensor(data[rank])<def_stmt>func <block_start>output=reduce_scatter_sum(inp axis=axis)<line_sep><return>output<block_end>func=trace(symbolic=symbolic)(func)<line_sep>output=func()<assert_stmt>np.allclose(output.numpy() expect[rank])<block_end>x=np.random.random_sample(shape).astype("float32")<line_sep>y=np.random.random_sample(shape).astype("float32")<line_sep>z=x+y<line_sep>data=(x y)<line_sep>z=np.split(z 2 axis=axis)<line_sep>z=np.concatenate(z axis=0)<line_sep>expect=(z[:z.shape[0]<floordiv>2] z[z.shape[0]<floordiv>2:])<line_sep>worker(data expect)<block_end>@pytest.mark.require_ngpu(2)@pytest.mark.parametrize("shape,symbolic" [((2 4 6 8) <true>) ((2 4 6 8) <false>)] ids=str)@pytest.mark.parametrize("axis" [1 0 2 3] ids=str)@pytest.mark.isolated_distributed<def_stmt>test_scatter shape symbolic axis<block_start>@dist.launcher(n_gpus=2)<def_stmt>worker data expect<block_start>rank=dist.get_rank()<line_sep>inp=tensor(data[rank])<def_stmt>func <block_start>output=scatter(inp axis=axis)<line_sep><return>output<block_end>func=trace(symbolic=symbolic)(func)<line_sep>output=func()<assert_stmt>np.allclose(output.numpy() expect[rank])<block_end>x=np.random.random_sample(shape).astype("float32")<line_sep>y=x+1<line_sep>data=(x y)<line_sep>_x=np.split(x 2 axis=axis)<line_sep>_x=np.concatenate(_x axis=0)<line_sep>expect=(_x[:_x.shape[0]<floordiv>2] _x[_x.shape[0]<floordiv>2:])<line_sep>worker(data expect)<block_end>@pytest.mark.require_ngpu(2)@pytest.mark.parametrize("shape" [(2 4 6 8)] ids=str)@pytest.mark.parametrize("symbolic" [<false> <true>] ids=str)@pytest.mark.parametrize("split_axis,concat_axis" [(0 1) (1 0) (2 
0) (0 2) (2 3)] ids=str)@pytest.mark.isolated_distributed<def_stmt>test_all_to_all shape symbolic split_axis concat_axis<block_start>@dist.launcher(n_gpus=2)<def_stmt>worker data<block_start>rank=dist.get_rank()<line_sep>inp=tensor(data[rank])<def_stmt>func <block_start>all_to_all_output=all_to_all(inp split_axis=split_axis concat_axis=concat_axis)<line_sep>gather_C=gather(inp axis=concat_axis)<line_sep>gather_B=gather(all_to_all_output axis=split_axis)<if_stmt>rank<eq>0<block_start><return>gather_B gather_C<block_end><return>all_to_all_output<block_end>func=trace(symbolic=symbolic)(func)<line_sep>ret=func()<if_stmt>rank<eq>0<block_start><assert_stmt>np.allclose(ret[0] ret[1])<block_end><block_end>x=np.random.random_sample(shape).astype("float32")<line_sep>y=np.random.random_sample(shape).astype("float32")<line_sep>data=(x y)<line_sep>worker(data)<block_end>
# Copyright 2012 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. <import_from_stmt>oslo_log log<as>logging<import_from_stmt>oslo_utils importutils<line_sep>LOG=logging.getLogger(__name__)<class_stmt>VFS(object)<block_start>"""Interface for manipulating disk image. The VFS class defines an interface for manipulating files within a virtual disk image filesystem. This allows file injection code to avoid the assumption that the virtual disk image can be mounted in the host filesystem. All paths provided to the APIs in this class should be relative to the root of the virtual disk image filesystem. Subclasses will translate paths as required by their implementation. """<line_sep># Class level flag to indicate whether we can consider # that guestfs is ready to be used. guestfs_ready=<false><line_sep>@staticmethod<def_stmt>instance_for_image image partition<block_start>"""Get a VFS instance for the image :param image: instance of nova.virt.image.model.Image :param partition: the partition number to access """<line_sep>LOG.debug("Instance for image image=%(image)s "<concat>"partition=%(partition)s" {'image':image 'partition':partition})<line_sep>LOG.debug("Using primary VFSGuestFS")<line_sep>vfs=importutils.import_object("nova.virt.disk.vfs.guestfs.VFSGuestFS" image partition)<if_stmt><not>VFS.guestfs_ready# Inspect for capabilities and keep # track of the result only if succeeded. <block_start>vfs.inspect_capabilities()<line_sep>VFS.guestfs_ready=<true><block_end><return>vfs<block_end><def_stmt>__init__ self image partition<block_start>"""Create a new local VFS instance :param image: instance of nova.virt.image.model.Image :param partition: the partition number to access """<line_sep>self.image=image<line_sep>self.partition=partition<block_end><def_stmt>setup self mount=<true><block_start>"""Performs any one-time setup. Perform any one-time setup tasks to make the virtual filesystem available to future API calls. """<line_sep><pass><block_end><def_stmt>teardown self<block_start>"""Releases all resources initialized in the setup method."""<line_sep><pass><block_end><def_stmt>make_path self path<block_start>"""Creates a directory @path. Create a directory @path, including all intermedia path components if they do not already exist. """<line_sep><pass><block_end><def_stmt>append_file self path content<block_start>"""Appends @content to the end of the file. Append @content to the end of the file identified by @path, creating the file if it does not already exist. """<line_sep><pass><block_end><def_stmt>replace_file self path content<block_start>"""Replaces contents of the file. Replace the entire contents of the file identified by @path, with @content, creating the file if it does not already exist. 
"""<line_sep><pass><block_end><def_stmt>read_file self path<block_start>"""Returns the entire contents of the file identified by @path."""<line_sep><pass><block_end><def_stmt>has_file self path<block_start>"""Returns a True if the file identified by @path exists."""<line_sep><pass><block_end><def_stmt>set_permissions self path mode<block_start>"""Sets the permissions on the file. Set the permissions on the file identified by @path to @mode. The file must exist prior to this call. """<line_sep><pass><block_end><def_stmt>set_ownership self path user group<block_start>"""Sets the ownership on the file. Set the ownership on the file identified by @path to the username @user and groupname @group. Either of @user or @group may be None, in which case the current ownership will be left unchanged. The ownership must be passed in string form, allowing subclasses to translate to uid/gid form as required. The file must exist prior to this call. """<line_sep><pass><block_end><def_stmt>get_image_fs self<block_start>"""Returns the filesystem type or an empty string. Determine the filesystem type whether the disk image is partition less. """<line_sep><pass><block_end><block_end>
# -*- coding: utf-8 -*- # Copyright 2016 Yelp Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. <import_from_future_stmt> absolute_import<import_from_future_stmt> unicode_literals<import_stmt>optparse<import_stmt>re<import_stmt>simplejson<as>json<import_stmt>yelp_batch<import_from_stmt>cached_property cached_property<import_from_stmt>yelp_batch.batch batch_command_line_options<import_from_stmt>yelp_batch.batch os<import_from_stmt>data_pipeline.tools._glob_util get_file_paths_from_glob_patterns<line_sep># See https://regex101.com/r/kC0kZ1/2 CREATE_TABLE_REGEX=re.compile('^create(\s*table)?\s*((.+)\.)?(\w+)\s*\(?')<line_sep># See https://regex101.com/r/zG9kV1/2 PRIMARY_KEY_REGEX=re.compile('^primary\s*key\s*\((.+)?\)')<line_sep># See https://regex101.com/r/kD8iN5/17 FIELD_LINE_REGEX=re.compile('^(\w+)\s*(\w+)\s*(\(\s*(\d+|\d+\s*\,\s*\d+)\s*\))?\s*(?P<pk>primary\s+key)?\s*(not\s+null|null)?\s*((default)\s+(\"|\')?(null|false|true|\d+\.\d+|\d+|[\w\s]*)(\"|\')?)?(\"|\')?.*,'# noqa )<line_sep># See https://regex101.com/r/bN3xL0 START_FIELDS_REGEX=re.compile('^.*\(')<line_sep># See https://regex101.com/r/bR7bH2 STOP_FIELDS_REGEX=re.compile('^\)')<line_sep>REDSHIFT_SQL_TO_AVRO_TYPE_MAPPING={'bigint':'long' 'bool':'boolean' 'boolean':'boolean' 'bpchar':'string' 'char':'string' 'character':'string' 'date':'string' 'decimal':'double' 'numeric':'double' 'double':'double' 'float':'double' 'float4':'float' 'float8':'double' 'int':'int' 'int2':'int' 'int4':'int' 'int8':'long' 'integer':'int' 'nchar':'string' 'nvarchar':'string' 'real':'float' 'smallint':'int' 'text':'string' 'timestamp':'long' 'varchar':'string'}<def_stmt>_sanitize_line line<block_start><return>line.strip().lower()<block_end><class_stmt>RedshiftFieldLineToAvroFieldConverter(object)<block_start>""" Converter for a single redshift column definition line in a `CREATE TABLE` statement. This should eventually be replaced by DATAPIPE-353. 
"""<def_stmt>__init__ self field_line pkeys<block_start>""" Args: field_line(string): Content of a column definition line from a redshift *.sql file pkeys([string]): A list of the primary keys, used for determining the meta attribute of "pkey" """<line_sep>self.field_line=_sanitize_line(field_line)<line_sep>self.pkeys=pkeys<block_end>@cached_property<def_stmt>avro_field self<block_start>field={"name":self.name "type":self.avro_type "doc":""}<line_sep>field.update(self.avro_meta_attributes)<line_sep><return>field<block_end>@cached_property<def_stmt>name self<block_start><return>self._regex_matcher.group(1)<block_end>@cached_property<def_stmt>avro_core_type self<block_start><return>REDSHIFT_SQL_TO_AVRO_TYPE_MAPPING[self.sql_type]<block_end>@cached_property<def_stmt>avro_type self<block_start>avro_type=self.avro_core_type<if_stmt>self.nullable<block_start><if_stmt>self.default_null<block_start><return>['null' avro_type]<block_end><else_stmt><block_start><return>[avro_type 'null']<block_end><block_end><else_stmt><block_start><return>avro_type<block_end><block_end>@cached_property<def_stmt>sql_type self<block_start><return>self._regex_matcher.group(2)<block_end>@cached_property<def_stmt>sql_default self<block_start>""" Return the default value defined for the column, if any. Note: This will succeed only if the 'default' follows the 'NOT NULL'/ 'NULL' on the column line. I've reached the limits of what black magic I'm willing to deal with in this regex and DATAPIPE-353 should be replacing this eventually anyway. :) """<line_sep><return>self._regex_matcher.group(10)<block_end>@cached_property<def_stmt>nullable self<block_start>nullable_str=self._regex_matcher.group(6)<line_sep><return><not>(nullable_str<and>re.search('^(not\s+null)' nullable_str))<block_end>@cached_property<def_stmt>default_null self<block_start><return>self.nullable<and>self.sql_default<in>['null' <none>]<block_end>@cached_property<def_stmt>avro_meta_attributes self<block_start>meta={}<line_sep>field_name=self.name<for_stmt>index,pkey_name enumerate(self.pkeys)<block_start><if_stmt>pkey_name<eq>field_name<block_start>meta['pkey']=index+1<line_sep><break><block_end><block_end><if_stmt>self.sql_type<in>['varchar' 'nvarchar' 'text']<block_start>meta['maxlen']=self.sql_type_width<block_end><if_stmt>self.sql_type<in>['char' 'character' 'nchar' 'bpchar']<block_start>meta['fixlen']=self.sql_type_width<block_end><if_stmt>self.sql_type<in>['date' 'timestamp']<block_start>meta[self.sql_type]=<true><block_end><if_stmt>self.sql_type<in>['decimal' 'numeric']<block_start>meta['fixed_pt']=<true><line_sep>meta['precision']=self.sql_type_width[0]<line_sep>meta['scale']=self.sql_type_width[1]<block_end><if_stmt>self.default_null<block_start>meta['default']=<none><block_end><elif_stmt>self.sql_default<is><not><none><block_start><if_stmt>self.avro_core_type<eq>'boolean'<block_start><if_stmt>self.sql_default<eq>'true'<block_start>meta['default']=<true><block_end><elif_stmt>self.sql_default<eq>'false'<block_start>meta['default']=<false><block_end><else_stmt><block_start><try_stmt><block_start>meta['default']=bool(int(self.sql_default))<block_end><except_stmt>ValueError# suppress the exception <block_start><pass><block_end><block_end><block_end><elif_stmt>self.avro_core_type<in>['long' 'int']<block_start><try_stmt><block_start>meta['default']=int(self.sql_default)<block_end><except_stmt>ValueError# suppress the exception. 
This can be thrown when the # default is something like 'getdate()' <block_start><pass><block_end><block_end><elif_stmt>self.avro_core_type<in>['double' 'float']<block_start><try_stmt><block_start>meta['default']=float(self.sql_default)<block_end><except_stmt>ValueError# suppress the exception. <block_start><pass><block_end><block_end><else_stmt><block_start>meta['default']=self.sql_default<block_end><block_end><return>meta<block_end>@cached_property<def_stmt>sql_type_width self<block_start>""" Return the sql type width, which is an int defining the the maximum size for character types and a (presumably two element) list of ints (the precision and scale) for the decimal type. Note: Some redshift sql types have default widths associated to them, see http://docs.aws.amazon.com/redshift/latest/dg/r_Character_types.html for more details """<line_sep>width=self._regex_matcher.group(4)<if_stmt>width<block_start><if_stmt>','<in>width<block_start><return>[int(part.strip())<for>part width.split(',')]<block_end><else_stmt><block_start><return>int(width)<block_end><block_end><else_stmt><block_start><if_stmt>self.sql_type<in>['text' 'bpchar' 'varchar' 'nvarchar']<block_start><return>256<block_end><if_stmt>self.sql_type<in>['char' 'character' 'nchar']<block_start><return>1<block_end><return><none><block_end><block_end>@cached_property<def_stmt>_regex_matcher self<block_start><return>FIELD_LINE_REGEX.search(self.field_line)<block_end><block_end><class_stmt>RedshiftSQLToAVSCConverter(object)<block_start>""" Simple converter from redshift *.sql CREATE TABLE definitions (such as those in yelp-main/schema/yelp_dw_redshift/tables) to data pipeline format Avro *.avsc schemas. This should eventually be replaced by DATAPIPE-353. Notes: This makes a number of assumptions about the input content, namely that there is a column definition per line, that is followed by convention in all yelp *.sql files - however this is NOT a general purpose parser/converter. """<def_stmt>__init__ self sql_content base_namespace default_schema='public'<block_start>""" Args: sql_content(string): Content of a redshift *.sql file base_namespace(string): The base namespace (the namespace will be a combination of "{base_namespace}.{schema}" default_schema(string): The default schema, for any tables encountered which do not specify a schema. """<line_sep>self.sql_content=sql_content<line_sep>self.base_namespace=base_namespace<line_sep>self.default_schema=default_schema<block_end>@cached_property<def_stmt>avro_record self<block_start>""" Get the data pipeline format Avro representation of self.sql_content. 
"""<line_sep><return>{'type':'record' 'namespace':self.namespace 'name':self.table 'doc':'' 'pkey':self.pkeys 'fields':[field_line_converter.avro_field<for>field_line_converter self.field_line_converters]}<block_end>@cached_property<def_stmt>namespace self<block_start><return>'{0}.{1}'.format(self.base_namespace self.schema)<block_end>@cached_property<def_stmt>schema self<block_start>m=CREATE_TABLE_REGEX.search(self.create_table_line)<line_sep><return>m.group(3)<if>m.group(3)<else>self.default_schema<block_end>@cached_property<def_stmt>table self<block_start>m=CREATE_TABLE_REGEX.search(self.create_table_line)<if_stmt>m.group(4)<block_start><return>m.group(4)<block_end><else_stmt><block_start><raise>ValueError("Could not locate the table name")<block_end><block_end>@cached_property<def_stmt>sql_lines self<block_start><return>[_sanitize_line(line)<for>line self.sql_content.split('\n')]<block_end>@cached_property<def_stmt>create_table_line self<block_start><for_stmt>line self.sql_lines<block_start><if_stmt>CREATE_TABLE_REGEX.search(line)<block_start><return>line<block_end><block_end><raise>ValueError("Could not locate a 'CREATE TABLE' statement!")<block_end>@cached_property<def_stmt>pkeys self<block_start>pkeys=[]<line_sep># loop through field lines to extract primary keys <for_stmt>line self.sql_lines<block_start><if_stmt>self._get_primary_key_in_field_line(line)<block_start>pkeys.append(self._get_primary_key_in_field_line(line))<block_end><block_end><if_stmt>self.primary_key_line<block_start>pkeys.extend([pkey.strip()<for>pkey PRIMARY_KEY_REGEX.search(self.primary_key_line).group(1).split(',')])<block_end><return>pkeys<block_end>@cached_property<def_stmt>primary_key_line self<block_start><for_stmt>line self.sql_lines<block_start><if_stmt>self._is_primary_key_line(line)<block_start><return>line<block_end><block_end><block_end><def_stmt>_is_primary_key_line self line<block_start><return>bool(PRIMARY_KEY_REGEX.search(line))<block_end><def_stmt>_get_primary_key_in_field_line self line<block_start>field_line=FIELD_LINE_REGEX.search(line)<if_stmt>field_line<and>field_line.group(5)<is><not><none># if primary key present in sql field line return field name <block_start><return>field_line.group(1)<block_end><block_end>@cached_property<def_stmt>field_line_converters self<block_start><return>[RedshiftFieldLineToAvroFieldConverter(field_line=line pkeys=self.pkeys)<for>line self._raw_field_lines]<block_end>@cached_property<def_stmt>_raw_field_lines self<block_start>raw_field_lines=[]<for_stmt>line self.sql_lines[self._find_field_lines_start_index():]<block_start>line=_sanitize_line(line=line)<if_stmt>self._is_stop_line(line=line)<block_start><break><block_end><elif_stmt>FIELD_LINE_REGEX.search(line)<block_start>raw_field_lines.append(line)<block_end><block_end><return>raw_field_lines<block_end><def_stmt>_find_field_lines_start_index self<block_start><for_stmt>index,line enumerate(self.sql_lines)<block_start>line=_sanitize_line(line=line)<if_stmt>self._is_start_line(line=line)<block_start><return>index<block_end><block_end><block_end><def_stmt>_is_start_line self line<block_start><return>bool(START_FIELDS_REGEX.search(line))<block_end><def_stmt>_is_stop_line self line<block_start><return>STOP_FIELDS_REGEX.search(line)<or>self._is_primary_key_line(line)<block_end><block_end><class_stmt>RedshiftSQLToAVSCBatch(yelp_batch.batch.Batch)<block_start>notify_emails=['<EMAIL>']<line_sep>@batch_command_line_options<def_stmt>parse_options self option_parser<block_start>opt_group=optparse.OptionGroup(option_parser 
"RedshiftSQLToAVSC Options")<line_sep>opt_group.add_option('--glob' action='append' type='string' default=[] dest='globs' help='[REQUIRED] Either a path to a specific CREATE TABLE redshift'<concat>' *.sql file, or a glob pattern for a directory containing '<concat>'such files. (For example: '<concat>'"/nail/home/USER/some_dw_redshift_tables/*.sql") '<concat>'Note --glob may be provided multiple times.')<line_sep>opt_group.add_option('--base-namespace' type='string' default='yelp_dw_redshift' help='[REQUIRED] Base of the namespace. The namespace will be a '<concat>'combination of "{base-namespace}.{schema}" and it is best to '<concat>'choose a base-namespace which reflects the data store '<concat>'associated with the table (such as yelp_dw_redshift for the '<concat>'yelp datawarehouse redshift tables). '<concat>'Default is "%default"')<line_sep>opt_group.add_option('--default-schema' type='string' default='public' help='[REQUIRED] default schema for tables without any specified. '<concat>'The namespace will be a combination of '<concat>'"{base-namespace}.{schema}". '<concat>'Default is "%default"')<line_sep>opt_group.add_option('--overwrite' action="store_true" default=<false> help='Overwrite existing *.avsc files with new output from the '<concat>'conversion run. '<concat>'Default is "%default"')<line_sep><return>opt_group<block_end><def_stmt>run self<block_start>""" Primary entry point for the batch """<line_sep>sql_file_paths=get_file_paths_from_glob_patterns(glob_patterns=self.options.globs)<for_stmt>sql_file_path sql_file_paths<block_start>avsc_file_path=sql_file_path.replace('.sql' '.avsc')<line_sep>self.log.info('Converting "{0}" to "{1}"'.format(sql_file_path avsc_file_path))<if_stmt>os.path.exists(avsc_file_path)<and><not>self.options.overwrite<block_start>self.log.info('Skipping "{0}", use "--overwrite" to overwrite existing '<concat>'*.avsc files.'.format(avsc_file_path))<line_sep><continue><block_end>self.convert_sql_to_avsc(avsc_file_path=avsc_file_path sql_file_path=sql_file_path)<block_end><block_end><def_stmt>convert_sql_to_avsc self avsc_file_path sql_file_path<block_start><with_stmt>open(sql_file_path)<as>sql_file<block_start>sql_content=sql_file.read()<block_end>converter=RedshiftSQLToAVSCConverter(sql_content=sql_content base_namespace=self.options.base_namespace default_schema=self.options.default_schema)<line_sep>avro=converter.avro_record<with_stmt>open(avsc_file_path 'w')<as>avsc_file<block_start>self.log.info('Writing "{0}"'.format(avsc_file_path))<line_sep>json.dump(obj=avro fp=avsc_file indent=' ' sort_keys=<true>)<block_end><block_end><block_end><if_stmt>__name__<eq>"__main__"<block_start>RedshiftSQLToAVSCBatch().start()<block_end>
<import_stmt>uuid<import_from_stmt>flask_login login_user<import_from_stmt>app LoginWrappedDBUser<import_from_stmt>data model<import_from_stmt>auth.cookie validate_session_cookie<import_from_stmt>test.fixtures *<def_stmt>test_anonymous_cookie app<block_start><assert_stmt>validate_session_cookie().missing<block_end><def_stmt>test_invalidformatted_cookie app# "Login" with a non-UUID reference. <block_start>someuser=model.user.get_user("devtable")<line_sep>login_user(LoginWrappedDBUser("somenonuuid" someuser))<line_sep># Ensure we get an invalid session cookie format error. result=validate_session_cookie()<assert_stmt>result.authed_user<is><none><assert_stmt>result.context.identity<is><none><assert_stmt><not>result.has_nonrobot_user<assert_stmt>result.error_message<eq>"Invalid session cookie format"<block_end><def_stmt>test_disabled_user app# "Login" with a disabled user. <block_start>someuser=model.user.get_user("disabled")<line_sep>login_user(LoginWrappedDBUser(someuser.uuid someuser))<line_sep># Ensure we get an invalid session cookie format error. result=validate_session_cookie()<assert_stmt>result.authed_user<is><none><assert_stmt>result.context.identity<is><none><assert_stmt><not>result.has_nonrobot_user<assert_stmt>result.error_message<eq>"User account is disabled"<block_end><def_stmt>test_valid_user app# Login with a valid user. <block_start>someuser=model.user.get_user("devtable")<line_sep>login_user(LoginWrappedDBUser(someuser.uuid someuser))<line_sep>result=validate_session_cookie()<assert_stmt>result.authed_user<eq>someuser<assert_stmt>result.context.identity<is><not><none><assert_stmt>result.has_nonrobot_user<assert_stmt>result.error_message<is><none><block_end><def_stmt>test_valid_organization app# "Login" with a valid organization. <block_start>someorg=model.user.get_namespace_user("buynlarge")<line_sep>someorg.uuid=str(uuid.uuid4())<line_sep>someorg.verified=<true><line_sep>someorg.save()<line_sep>login_user(LoginWrappedDBUser(someorg.uuid someorg))<line_sep>result=validate_session_cookie()<assert_stmt>result.authed_user<is><none><assert_stmt>result.context.identity<is><none><assert_stmt><not>result.has_nonrobot_user<assert_stmt>result.error_message<eq>"Cannot login to organization"<block_end>
<import_stmt>copy<import_from_stmt>pinout.core SvgShape Group Rect Text BoundingCoords Coords<import_from_stmt>pinout.components leaderline<as>lline<import_from_stmt>pinout config<class_stmt>Body(SvgShape)<block_start>"""Graphical shape that makes up the body of a pinlabel."""<def_stmt>__init__ self x y width height corner_radius=0 **kwargs<block_start>self.corner_radius=corner_radius<line_sep>super().__init__(x=x y=y width=width height=height **kwargs)<block_end><def_stmt>bounding_coords self# PinLabelBody origin is vertically centered <block_start><return>BoundingCoords(self.x self.y-(self.height/2) self.x+self.width self.y+(self.height/2) )<block_end><def_stmt>render self<block_start>body=Rect(x=self.x y=self.y-(self.height/2) width=self.width height=self.height corner_radius=self.corner_radius )<line_sep>body.add_tag(config.pinlabel["body"]["tag"])<line_sep><return>body.render()<block_end><block_end><class_stmt>Leaderline(lline.Curved)<block_start>"""Graphical line joining the label origin coordinates to the label body."""<line_sep><pass><block_end><class_stmt>Base(Group)<block_start>"""Label component designed specifically for labelling pins."""<def_stmt>__init__ self content="" x=0 y=0 tag=<none> body=<none> leaderline=<none> **kwargs <block_start>self.content=content<line_sep>self._leaderline=<none><line_sep>self._body=<none><line_sep>super().__init__(x y tag=tag **kwargs)<line_sep>self.update_config(config.pinlabel)<line_sep>self.body=body<line_sep>self.leaderline=leaderline<line_sep># Add leaderline and body reference into children self.add(self._body)<line_sep># Add leaderline at render as it is replaced by pinlabelGroup!!! # Add SvgShape so pin label reports correct dimensions. self.add(SvgShape(x=self.leaderline.x y=self.leaderline.x))<line_sep>self.add_tag(config.pinlabel["tag"])<block_end>@property<def_stmt>body self<block_start><return>self._body<block_end>@body.setter<def_stmt>body self body# ensure instance data is unique <block_start>body=copy.deepcopy(body<or>self.config["body"])<line_sep># Convert dict into body object <if_stmt>isinstance(body dict)<block_start>body_config=self.config["body"]<line_sep>body_config.update(body)<line_sep>body=Body(**body_config)<line_sep># Add body config tag if not there <block_end>body.add_tag(self.config["body"]["tag"])<line_sep>self._body=body<block_end>@property<def_stmt>leaderline self<block_start><return>self._leaderline<block_end>@leaderline.setter<def_stmt>leaderline self leaderline# ensure instance data is unique <block_start>leaderline=copy.deepcopy(leaderline<or>self.config["leaderline"])<line_sep># Convert dict into leaderline object <if_stmt>isinstance(leaderline dict)<block_start>leaderline_config=self.config["leaderline"]<line_sep>leaderline_config.update(leaderline)<line_sep>leaderline=Leaderline(**leaderline_config)<block_end># Add leaderline config tag if not there leaderline.add_tag(self.config["leaderline"]["tag"])<line_sep>self._leaderline=leaderline<block_end><def_stmt>render self# Add text content <block_start>x=self.body.width/2+self.body.x<line_sep>y=self.body.y<line_sep>self.add(Text(self.content x=x y=y tag=config.pinlabel["text"]["tag"] scale=self.scale ))<line_sep># Route leaderline self.leaderline.route(Rect() self._body)<line_sep>self.add(self.leaderline)<line_sep><return>super().render()<block_end><block_end><class_stmt>PinLabel(Base)<block_start><pass><block_end><class_stmt>PinLabelGroup(Group)<block_start>"""Convenience class to place multiple rows of pin-labels on a pin-header."""<def_stmt>__init__ self 
x y pin_pitch label_start label_pitch labels leaderline=<none> body=<none> **kwargs <block_start>scale=Coords(*kwargs.pop("scale" (1 1)))<line_sep>super().__init__(x=x y=y **kwargs)<line_sep># Setup generators for row locations pin_coords=config.pitch_generator((0 0) pin_pitch)<line_sep>label_coords=config.pitch_generator(label_start label_pitch)<for_stmt>row labels<block_start>row_group=self.add(Group())<for_stmt>label row# If data is supplied convert to Label <block_start><if_stmt>type(label)<is>tuple<block_start>content,tag,*args=label<line_sep>attrs=args[0]<if>len(args)<g>0<else>{}<line_sep># Set leaderline and body in attrs if supplied in either: # 1. data # 2. PinlabelGroup attrs["leaderline"]=attrs.get("leaderline" <none>)<or>leaderline<line_sep>attrs["body"]=attrs.get("body" <none>)<or>body<line_sep>label=PinLabel(content=content scale=scale **attrs )<block_end># -- label now exists -- # label.add_tag(tag)<line_sep># Label follows another label in the row <try_stmt><block_start>prev_label=row_group.children[-1]<line_sep>label.x=prev_label.x+prev_label.width<times>scale.x<line_sep>label.y=prev_label.y+prev_label.body.y<times>scale.y<line_sep>label.leaderline=lline.Straight(direction="hh")<block_end># Start of a new row <except_stmt>IndexError<block_start>label.x,label.y=next(pin_coords)<line_sep>x,y=next(label_coords)<line_sep>label.body.x<augadd>x-label.x<times>scale.x<line_sep>label.body.y<augadd>y-label.y<times>scale.y<block_end>row_group.add(label)<block_end><block_end><block_end><block_end>
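# Hedged usage sketch (coordinates, pitches and tags are illustrative, not from the original module): rows of
# (content, tag) tuples are laid out starting at label_start and stepped by pin_pitch/label_pitch, e.g.
#   labels = [[("GPIO0", "gpio"), ("I2C SDA", "i2c")], [("GND", "pwr")]]
#   group = PinLabelGroup(x=50, y=50, pin_pitch=(0, 30), label_start=(80, 0), label_pitch=(0, 30), labels=labels)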
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other # Spack Project Developers. See the top-level COPYRIGHT file for details. # # SPDX-License-Identifier: (Apache-2.0 OR MIT) <import_from_stmt>spack *<class_stmt>Ck(MavenPackage)<block_start>"""CK calculates class-level and metric-level code metrics in Java projects by means of static analysis (i.e. no need for compiled code)."""<line_sep>homepage="https://github.com/mauricioaniche/ck"<line_sep>url="https://github.com/mauricioaniche/ck/archive/ck-0.6.2.tar.gz"<line_sep>version('0.6.2' sha256='ee16d209f05852230504dea1af39cdb1cfc8e9b56f4708ed1afcd5ce44af76eb')<line_sep>version('0.6.1' sha256='1db1fef7111bb485d5554d5927611761a102133a41b88e8fb20cd44494411ac4')<line_sep>version('0.6.0' sha256='8a1affad047fbefda5d2dad1a795204ffd06c50e2fba830f87cf6c7518423137')<line_sep>version('0.5.2' sha256='35f610f5d97ca31a62903ba368be7e0b74764daccd95afa3eb9ff04e0326a7ca')<line_sep>version('0.5.1' sha256='732849ae7b26d01ee082283396a6fdd7823282c368ae6fd05966acb4598ccebe')<line_sep>version('0.5.0' sha256='3923d25ff4941a6207d644fd1ba3115b5ad303ef953285610e836bc59a4cbcb7')<block_end>
""" @Author : Ailitonia @Date : 2021/08/27 0:48 @FileName : utils.py @Project : nonebot2_miya @Description : 签到素材合成工具 @GitHub : https://github.com/Ailitonia @Software : PyCharm """<import_stmt>os<import_stmt>random<import_stmt>asyncio<import_stmt>aiofiles.os<import_from_stmt>typing Optional<import_from_stmt>datetime datetime<import_from_stmt>PIL Image ImageDraw ImageFont<import_from_stmt>nonebot get_driver require logger<import_from_stmt>omega_miya.database DBPixivillust Result<import_from_stmt>omega_miya.utils.pixiv_utils PixivIllust<import_from_stmt>omega_miya.utils.omega_plugin_utils HttpFetcher ProcessUtils TextUtils<import_from_stmt>.config Config<import_from_stmt>.fortune get_fortune<line_sep>global_config=get_driver().config<line_sep>plugin_config=Config(**global_config.dict())<line_sep>TMP_PATH=global_config.tmp_path_<line_sep>RESOURCES_PATH=global_config.resources_path_<line_sep>SIGN_IN_PIC_PATH=os.path.abspath(os.path.join(TMP_PATH 'sign_in_pic'))<line_sep>SIGN_IN_CARD_PATH=os.path.abspath(os.path.join(TMP_PATH 'sign_in_card'))<line_sep>ENABLE_PIC_PREPARING_SCHEDULER=plugin_config.enable_pic_preparing_scheduler<line_sep>CACHE_PIC_LIMIT=plugin_config.cache_pic_limit<async_keyword><def_stmt>__pre_download_sign_in_pic pid:int * pic_size:str='regular'<arrow>Result.IntResult<block_start>illust_info_result=<await>PixivIllust(pid=pid).get_illust_data()<if_stmt>illust_info_result.error<block_start><return>Result.IntResult(error=<true> info=illust_info_result.info result=-1)<block_end>pic_url=illust_info_result.result.get('illust_pages' {}).get(0 {}).get(pic_size)<if_stmt><not>pic_url<block_start><return>Result.IntResult(error=<true> info='Small illust pages url not found' result=-1)<block_end>fetcher=HttpFetcher(timeout=30 attempt_limit=2 flag='pre_download_sign_in_pic' headers=PixivIllust.HEADERS)<line_sep>download_result=<await>fetcher.download_file(url=pic_url path=SIGN_IN_PIC_PATH)<if_stmt>download_result.error<block_start><return>Result.IntResult(error=<true> info=download_result.info result=-1)<block_end><else_stmt><block_start><return>Result.IntResult(error=<false> info='Success' result=0)<block_end><block_end><async_keyword><def_stmt>__prepare_sign_in_pic <arrow>Result.TextResult# 检查当前缓存目录里面的图片是不是超出数量限制 是的话就删除超出的部分 <block_start><if_stmt><not>os.path.exists(SIGN_IN_PIC_PATH)<block_start>os.makedirs(SIGN_IN_PIC_PATH)<block_end>pic_file_list=os.listdir(SIGN_IN_PIC_PATH)<if_stmt>len(pic_file_list)<g>CACHE_PIC_LIMIT<block_start>del_pic_file_list=random.sample(pic_file_list k=(len(pic_file_list)-CACHE_PIC_LIMIT))<for_stmt>pic_file del_pic_file_list<block_start><await>aiofiles.os.remove(os.path.abspath(os.path.join(SIGN_IN_PIC_PATH pic_file)))<block_end>logger.info(f'Preparing sign in pic processing, '<concat>f'removed pic "{"/".join(del_pic_file_list)}" exceed the limit of cache')<block_end># 获取图片信息并下载图片 pic_list_result=<await>DBPixivillust.rand_illust(num=100 nsfw_tag=0 ratio=1)<if_stmt>pic_list_result.error<or><not>pic_list_result.result<block_start>logger.error(f'Preparing sign in pic failed, DB Error or not result, result: {pic_list_result}')<line_sep><return>Result.TextResult(error=<true> info=pic_list_result.info result='DB Error or not result')<block_end>tasks=[__pre_download_sign_in_pic(pid=pid)<for>pid pic_list_result.result]<line_sep>pre_download_result=<await>ProcessUtils.fragment_process(tasks=tasks fragment_size=20 log_flag='pre_download_sign_in_pic')<line_sep>success_count=0<line_sep>failed_count=0<for_stmt>result 
pre_download_result<block_start><if_stmt>result.success()<block_start>success_count<augadd>1<block_end><else_stmt><block_start>failed_count<augadd>1<block_end><block_end>result_text=f'Completed with {success_count} Success, {failed_count} Failed'<line_sep>logger.info(f'Preparing sign in pic completed, {result_text}')<line_sep><return>Result.TextResult(error=<true> info=f'Completed' result=result_text)<block_end># 下载签到图片的定时任务 <if_stmt>ENABLE_PIC_PREPARING_SCHEDULER<block_start>scheduler=require("nonebot_plugin_apscheduler").scheduler<line_sep>scheduler.add_job(__prepare_sign_in_pic 'cron' # year=None, # month=None, # day='*/1', # week=None, # day_of_week=None, hour='*/6' # minute=None, # second=None, # start_date=None, # end_date=None, # timezone=None, id='prepare_sign_in_pic' coalesce=<true> misfire_grace_time=120)<block_end><async_keyword><def_stmt>__get_reand_sign_in_pic <arrow>Result.TextResult<block_start>try_count=0<if_stmt><not>os.path.exists(SIGN_IN_PIC_PATH)<block_start>os.makedirs(SIGN_IN_PIC_PATH)<block_end>pic_file_list=os.listdir(SIGN_IN_PIC_PATH)<while_stmt><not>pic_file_list<and>try_count<l>2<block_start><await>__prepare_sign_in_pic()<line_sep>pic_file_list=os.listdir(SIGN_IN_PIC_PATH)<line_sep>try_count<augadd>1<block_end><if_stmt><not>pic_file_list<block_start><return>Result.TextResult(error=<true> info='Can not pre-download sign in pic' result='')<block_end># 重置随机种子 random.seed()<line_sep>rand_file=random.choice(pic_file_list)<line_sep>file_path=os.path.abspath(os.path.join(SIGN_IN_PIC_PATH rand_file))<line_sep><return>Result.TextResult(error=<false> info='Success' result=file_path)<block_end><def_stmt>__get_level favorability:float<arrow>tuple[int int int]<block_start>""" 根据好感度获取等级及当前等级好感度 :param favorability: 总好感度 :return: (等级, 当前等级好感度, 当前等级好感度上限) """<if_stmt>favorability<le>0<block_start><return>0 0 1<block_end><elif_stmt>favorability<l>10000<block_start><return>1 int(favorability) 10000<block_end><elif_stmt>favorability<l>36000<block_start><return>2 int(favorability-10000) 26000<block_end><elif_stmt>favorability<l>78000<block_start><return>3 int(favorability-36000) 42000<block_end><elif_stmt>favorability<l>136000<block_start><return>4 int(favorability-78000) 58000<block_end><elif_stmt>favorability<l>210000<block_start><return>5 int(favorability-136000) 74000<block_end><elif_stmt>favorability<l>300000<block_start><return>6 int(favorability-210000) 90000<block_end><elif_stmt>favorability<l>406000<block_start><return>7 int(favorability-300000) 106000<block_end><else_stmt><block_start><return>8 int(favorability-406000) 122000<block_end><block_end><def_stmt>__get_level_color level:int<arrow>tuple[int int int]<block_start>""" 根据等级获取相应等级颜色 :param level: 等级 :return: (int, int, int): RGB 颜色 """<line_sep>level_color:dict[int tuple[int int int]]={0:(136 136 136) 1:(102 102 102) 2:(153 204 153) 3:(221 204 136) 4:(255 204 51) 5:(255 204 204) 6:(247 119 127) 7:(102 204 255) 8:(175 136 250) }<line_sep><return>level_color.get(level (136 136 136))<block_end><async_keyword><def_stmt>get_hitokoto * c:Optional[str]=<none><arrow>Result.TextResult<block_start>"""获取一言"""<line_sep>url='https://v1.hitokoto.cn'<line_sep>params={'encode':'json' 'charset':'utf-8'}<if_stmt>c<is><not><none><block_start>params.update({'c':c})<block_end>headers=HttpFetcher.DEFAULT_HEADERS.update({'accept':'application/json'})<line_sep>hitokoto_result=<await>HttpFetcher(flag='sign_hitokoto' headers=headers).get_json(url=url params=params)<if_stmt>hitokoto_result.error<block_start><return>Result.TextResult(error=<true> 
info=hitokoto_result.info result='')<block_end>text=f'{hitokoto_result.result.get("hitokoto")}\n——《{hitokoto_result.result.get("from")}》'<if_stmt>hitokoto_result.result.get("from_who")<block_start>text<augadd>f' {hitokoto_result.result.get("from_who")}'<block_end><return>Result.TextResult(error=<false> info='Success' result=text)<block_end><async_keyword><def_stmt>generate_sign_in_card user_id:int user_text:str fav:float * width:int=1024 fortune_do:bool=<true><arrow>Result.TextResult<block_start>""" 生成卡片 :param user_id: 用户id :param user_text: 头部自定义文本 :param fav: 用户好感度 用户计算等级 :param width: 生成图片宽度 自适应排版 :param fortune_do: 是否绘制老黄历当日宜与不宜 :return: 生成图片地址 """<line_sep># 获取头图 sign_pic_path_result=<await>__get_reand_sign_in_pic()<if_stmt>sign_pic_path_result.error<block_start><return>Result.TextResult(error=<true> info=sign_pic_path_result.info result='')<block_end>sign_pic_path=sign_pic_path_result.result<def_stmt>__handle # 生成用户当天老黄历 <block_start>user_fortune=get_fortune(user_id=user_id)<line_sep>fortune_star=user_fortune.get('fortune_star')<line_sep>fortune_text=user_fortune.get('fortune_text')<line_sep>fortune_do_1=user_fortune.get('do_1')<line_sep>fortune_do_2=user_fortune.get('do_2')<line_sep>fortune_not_do_1=user_fortune.get('not_do_1')<line_sep>fortune_not_do_2=user_fortune.get('not_do_2')<line_sep># 加载头图 draw_top_img:Image.Image=Image.open(sign_pic_path)<line_sep># 调整头图宽度 top_img_height=int(width<times>draw_top_img.height/draw_top_img.width)<line_sep>draw_top_img=draw_top_img.resize((width top_img_height))<line_sep># 字体 bd_font_path=os.path.abspath(os.path.join(RESOURCES_PATH 'fonts' 'SourceHanSans_Heavy.otf'))<line_sep>bd_font=ImageFont.truetype(bd_font_path width<floordiv>10)<line_sep>bd_title_font=ImageFont.truetype(bd_font_path width<floordiv>12)<line_sep>bd_text_font=ImageFont.truetype(bd_font_path width<floordiv>18)<line_sep>main_font_path=os.path.abspath(os.path.join(RESOURCES_PATH 'fonts' 'SourceHanSans_Regular.otf'))<line_sep>text_font=ImageFont.truetype(main_font_path width<floordiv>28)<line_sep>level_font_path=os.path.abspath(os.path.join(RESOURCES_PATH 'fonts' 'pixel.ttf'))<line_sep>level_font=ImageFont.truetype(level_font_path width<floordiv>20)<line_sep>bottom_font_path=os.path.abspath(os.path.join(RESOURCES_PATH 'fonts' 'fzzxhk.ttf'))<line_sep>bottom_text_font=ImageFont.truetype(bottom_font_path width<floordiv>40)<line_sep># 打招呼 <if_stmt>4<le>datetime.now().hour<l>11<block_start>top_text='早上好'<block_end><elif_stmt>11<le>datetime.now().hour<l>14<block_start>top_text='中午好'<block_end><elif_stmt>14<le>datetime.now().hour<l>19<block_start>top_text='下午好'<block_end><elif_stmt>19<le>datetime.now().hour<l>22<block_start>top_text='晚上好'<block_end><else_stmt><block_start>top_text='晚安'<block_end>top_text_width,top_text_height=bd_font.getsize(top_text)<line_sep># 计算好感度等级条 level=__get_level(favorability=fav)<line_sep>level_text=f'Level {level[0]}'<line_sep>level_text_width,level_text_height=level_font.getsize(level_text)<line_sep>fav_text=f'{level[1]}/{level[2]}'<line_sep>fav_rat=level[1]/level[2]<if>level[1]<l>level[2]<else>1<line_sep>fav_text_width,fav_text_height=text_font.getsize(fav_text)<line_sep># 日期 date_text=datetime.now().strftime('%m/%d')<line_sep># 昵称、好感度、积分 # 首先要对文本进行分割 user_text_=TextUtils(text=user_text).split_multiline(width=(width-int(width<times>0.125)) font=text_font)<line_sep>user_text_width,user_text_height=text_font.getsize_multiline(user_text_)<line_sep># 今日运势 
fortune_text_width,fortune_text_height=bd_text_font.getsize(fortune_text)<line_sep>fortune_star_width,fortune_star_height=text_font.getsize(fortune_star)<line_sep># 底部文字 bottom_text_width,bottom_text_height=bottom_text_font.getsize(f'{"@@##"<times>4}\n'<times>4)<line_sep># 总高度 <if_stmt>fortune_do<block_start>height=(top_img_height+top_text_height+user_text_height+level_text_height+fortune_text_height<times>3+fortune_star_height<times>6+bottom_text_height<times>4+int(0.25<times>width))<block_end><else_stmt><block_start>height=(top_img_height+top_text_height+user_text_height+level_text_height+fortune_text_height<times>1+fortune_star_height<times>2+bottom_text_height<times>4+int(0.1875<times>width))<block_end># 生成背景 background=Image.new(mode="RGB" size=(width height) color=(255 255 255))<line_sep># 开始往背景上绘制各个元素 # 以下排列从上到下绘制 请勿变换顺序 否则导致位置错乱 background.paste(draw_top_img box=(0 0))# 背景 this_height=top_img_height+int(0.0625<times>width)<line_sep>ImageDraw.Draw(background).text(xy=(int(width<times>0.0625) this_height) text=top_text font=bd_font align='left' anchor='lt' fill=(0 0 0))<line_sep># 打招呼 ImageDraw.Draw(background).text(xy=(width-int(width<times>0.0625) this_height) text=date_text font=bd_title_font align='right' anchor='rt' fill=__get_level_color(level=level[0]))<line_sep># 日期 this_height<augadd>top_text_height<line_sep>ImageDraw.Draw(background).multiline_text(xy=(int(width<times>0.0625) this_height) text=user_text_ font=text_font align='left' fill=(128 128 128))<line_sep># 昵称、好感度、积分 this_height<augadd>user_text_height+int(0.046875<times>width)<line_sep>ImageDraw.Draw(background).text(xy=(int(width<times>0.065) this_height) text=level_text font=level_font align='left' anchor='lt' fill=__get_level_color(level=level[0]))<line_sep># 等级 this_height<augadd>level_text_height+int(0.03125<times>width)<line_sep>ImageDraw.Draw(background).text(xy=(width-int(width<times>0.0625) this_height) text=fav_text font=text_font align='right' anchor='rm' fill=(208 208 208))<line_sep># 经验条数值 ImageDraw.Draw(background).line(xy=[(int(width<times>0.0625) this_height) (width-int(width<times>0.09375+fav_text_width) this_height)] fill=(224 224 224) width=int(0.03125<times>width))<line_sep># 经验条底 ImageDraw.Draw(background).line(xy=[(int(width<times>0.0625) this_height) (int(width<times>0.0625+(width<times>0.84375-fav_text_width)<times>fav_rat) this_height)] fill=__get_level_color(level=level[0]) width=int(0.03125<times>width))<line_sep># 经验条内 this_height<augadd>fortune_star_height+int(0.015625<times>width)<line_sep>ImageDraw.Draw(background).text(xy=(int(width<times>0.0625) this_height) text=f'今日运势: {fortune_text}' font=bd_text_font align='left' anchor='lt' fill=(0 0 0))<line_sep># 今日运势 this_height<augadd>fortune_text_height<line_sep>ImageDraw.Draw(background).text(xy=(int(width<times>0.0625) this_height) text=fortune_star font=text_font align='left' anchor='lt' fill=(128 128 128))<line_sep># 运势星星 <if_stmt>fortune_do<block_start>this_height<augadd>fortune_star_height+int(0.03125<times>width)<line_sep>ImageDraw.Draw(background).text(xy=(int(width<times>0.0625) this_height) text=f'宜' font=bd_text_font align='left' anchor='lt' fill=(0 0 0))<line_sep># 宜 this_height<augadd>fortune_text_height<line_sep>ImageDraw.Draw(background).text(xy=(int(width<times>0.0625) this_height) text=fortune_do_1 font=text_font align='left' anchor='lt' fill=(128 128 128))<line_sep># 今日宜1 this_height<augadd>fortune_star_height# 反正这两字体都一样大 ImageDraw.Draw(background).text(xy=(int(width<times>0.0625) this_height) text=fortune_do_2 
font=text_font align='left' anchor='lt' fill=(128 128 128))<line_sep># 今日宜2 this_height<augadd>fortune_star_height+int(0.03125<times>width)<line_sep>ImageDraw.Draw(background).text(xy=(int(width<times>0.0625) this_height) text=f'不宜' font=bd_text_font align='left' anchor='lt' fill=(0 0 0))<line_sep># 不宜 this_height<augadd>fortune_text_height<line_sep>ImageDraw.Draw(background).text(xy=(int(width<times>0.0625) this_height) text=fortune_not_do_1 font=text_font align='left' anchor='lt' fill=(128 128 128))<line_sep># 今日不宜1 this_height<augadd>fortune_star_height<line_sep>ImageDraw.Draw(background).text(xy=(int(width<times>0.0625) this_height) text=fortune_not_do_2 font=text_font align='left' anchor='lt' fill=(128 128 128))<block_end># 今日不宜2 this_height<augadd>fortune_star_height+bottom_text_height<times>2<line_sep>ImageDraw.Draw(background).text(xy=(width-int(width<times>0.0625) this_height) text='随机生成 请勿迷信' font=bottom_text_font align='right' anchor='rt' fill=(128 128 128))<line_sep>this_height<augadd>bottom_text_height<line_sep>ImageDraw.Draw(background).text(xy=(width-int(width<times>0.0625) this_height) text=f'Omega Miya @ {datetime.now().year}' font=bottom_text_font align='right' anchor='rt' fill=(128 128 128))<if_stmt><not>os.path.exists(SIGN_IN_CARD_PATH)<block_start>os.makedirs(SIGN_IN_CARD_PATH)<block_end><if_stmt>fortune_do<block_start>name_prefix='fortune_sign_in'<block_end><else_stmt><block_start>name_prefix='fortune'<block_end>save_path=os.path.abspath(os.path.join(SIGN_IN_CARD_PATH f"{name_prefix}_card_{user_id}_{datetime.now().strftime('%Y-%m-%d-%H-%M-%S')}.jpg"))<line_sep>background.save(save_path 'JPEG')<line_sep><return>save_path<block_end><try_stmt><block_start>loop=asyncio.get_running_loop()<line_sep>result=<await>loop.run_in_executor(<none> __handle)<line_sep><return>Result.TextResult(error=<false> info='Success' result=result)<block_end><except_stmt>Exception<as>e<block_start><return>Result.TextResult(error=<true> info=repr(e) result='')<block_end><block_end>__all__=['scheduler' 'get_hitokoto' 'generate_sign_in_card']<line_sep>
# Imports added so the snippet is self-contained; the standalone Keras API is assumed here (tensorflow.keras exposes the same names) <import_from_stmt>keras.models Sequential<import_from_stmt>keras.layers Conv2D MaxPooling2D Activation Flatten Dense<line_sep># input_shape was undefined in the original snippet; 150x150 RGB images are an assumed placeholder input_shape=(150 150 3)<line_sep>model=Sequential()<line_sep>model.add(Conv2D(32 (3 3) input_shape=input_shape))<line_sep>model.add(Activation('relu'))<line_sep>model.add(MaxPooling2D(pool_size=(2 2)))<line_sep>model.add(Conv2D(64 (3 3)))<line_sep>model.add(Activation('relu'))<line_sep>model.add(MaxPooling2D(pool_size=(2 2)))<line_sep>model.add(Conv2D(128 (3 3)))<line_sep>model.add(Activation('relu'))<line_sep>model.add(MaxPooling2D(pool_size=(2 2)))<line_sep>model.add(Flatten())<line_sep>model.add(Dense(64))<line_sep>model.add(Activation('relu'))<line_sep>model.add(Dense(1))<line_sep>model.add(Activation('sigmoid'))<line_sep>model.compile(loss='binary_crossentropy' optimizer='rmsprop' metrics=['accuracy'])<line_sep>
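# Hedged follow-up (hypothetical training arrays and hyper-parameters, not part of the original snippet): once the
# binary classifier above is compiled it would typically be trained and scored along these lines
#   model.fit(train_images, train_labels, epochs=10, batch_size=32, validation_split=0.1)
#   loss, acc = model.evaluate(test_images, test_labels)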
<import_stmt>tensorflow<as>tf<import_from_stmt>embeddings load_sentence_embeddings<import_from_stmt>preprocess_data preprocess_batch<import_from_stmt>six.moves input<import_from_stmt>lstm_model lstm_model<import_stmt>numpy<as>np<import_from_stmt>pprint pprint<as>pp<class_stmt>Paraphraser(object)<block_start>'''Heart of the paraphraser model. This class loads the checkpoint into the Tensorflow runtime environment and is responsible for inference. Greedy and sampling based approaches are supported '''<def_stmt>__init__ self checkpoint<block_start>"""Constructor. Load vocabulary index, start token, end token, unk id, mask_id. Restore checkpoint. Args: checkpoint: A path to the checkpoint """<line_sep>self.word_to_id,self.idx_to_word,self.embedding,self.start_id,self.end_id,self.unk_id,self.mask_id=load_sentence_embeddings()<line_sep>self.checkpoint=checkpoint<line_sep>gpu_options=tf.GPUOptions(per_process_gpu_memory_fraction=0.5)<line_sep>self.sess=tf.Session(config=tf.ConfigProto(gpu_options=gpu_options))<line_sep>self.model=lstm_model(self.sess 'infer' 300 self.embedding self.start_id self.end_id self.mask_id)<line_sep>saver=tf.train.Saver()<line_sep>saver.restore(self.sess checkpoint)<block_end><def_stmt>sample_paraphrase self sentence sampling_temp=1.0 how_many=1<block_start>"""Paraphrase by sampling a distribution Args: sentence (str): A sentence input that will be paraphrased by sampling from distribution. sampling_temp (int) : A number between 0 an 1 Returns: str: a candidate paraphrase of the `sentence` """<line_sep><return>self.infer(1 sentence self.idx_to_word sampling_temp how_many)<block_end><def_stmt>greedy_paraphrase self sentence<block_start>"""Paraphrase using greedy sampler Args: sentence : The source sentence to be paraphrased. Returns: str : a candidate paraphrase of the `sentence` """<line_sep><return>self.infer(0 sentence self.idx_to_word 0. 1)<block_end><def_stmt>infer self decoder source_sent id_to_vocab temp how_many<block_start>""" Perform inferencing. In other words, generate a paraphrase for the source sentence. Args: decoder : 0 for greedy, 1 for sampling source_sent : source sentence to generate a paraphrase for id_to_vocab : dict of vocabulary index to word end_id : the end token temp : the sampling temperature to use when `decoder` is 1 Returns: str : for the generated paraphrase """<line_sep>seq_source_words,seq_source_ids=preprocess_batch([source_sent]<times>how_many)<line_sep>#print(seq_source_words) #print(seq_source_ids) seq_source_len=[len(seq_source)<for>seq_source seq_source_ids]<line_sep>#print(seq_source_len) feed_dict={self.model['seq_source_ids']:seq_source_ids self.model['seq_source_lengths']:seq_source_len self.model['decoder_technique']:decoder self.model['sampling_temperature']:temp}<line_sep>feeds=[self.model['predictions']#model['final_sequence_lengths'] ]<line_sep>predictions=self.sess.run(feeds feed_dict)[0]<line_sep>#print(predictions) <return>self.translate(predictions decoder id_to_vocab seq_source_words[0])<block_end><def_stmt>translate self predictions decoder id_to_vocab seq_source_words<block_start>""" Translate the vocabulary ids in `predictions` to actual words that compose the paraphrase. 
Args: predictions : arrays of vocabulary ids decoder : 0 for greedy, 1 for sample, 2 for beam id_to_vocab : dict of vocabulary index to word Returns: str : the paraphrase """<line_sep>translated_predictions=[]<line_sep>#np_end = np.where(translated_predictions == end_id) <for_stmt>sent_pred predictions<block_start>translated=[]<for_stmt>pred sent_pred<block_start>word='UUNNKK'<if_stmt>pred<eq>self.end_id<block_start><break><block_end><if_stmt>pred<eq>self.unk_id# Search for rare word <block_start><for_stmt>seq_source_word seq_source_words<block_start><if_stmt>seq_source_word<not><in>self.word_to_id<block_start>word=seq_source_word<block_end><block_end><block_end><else_stmt><block_start>word=id_to_vocab[pred]<block_end>translated.append(word)<block_end>translated_predictions.append(' '.join(translated))<block_end><return>translated_predictions<block_end><block_end><def_stmt>main <block_start><import_stmt>argparse<line_sep>parser=argparse.ArgumentParser()<line_sep>parser.add_argument('--checkpoint' type=str help='Checkpoint path')<line_sep>args=parser.parse_args()<line_sep>paraphraser=Paraphraser(args.checkpoint)<while_stmt>1<block_start>source_sentence=input("Source: ")<line_sep>#p = paraphraser.greedy_paraphrase(source_sentence) #print(p) paraphrases=paraphraser.sample_paraphrase(source_sentence sampling_temp=0.75 how_many=10)<for_stmt>i,paraphrase enumerate(paraphrases)<block_start>print("Paraph #{}: {}".format(i paraphrase))<block_end><block_end><block_end><if_stmt>__name__<eq>'__main__'<block_start>main()<block_end>
<import_stmt>sys<import_stmt>site<import_stmt>shutil<import_stmt>hashlib<import_stmt>base64<import_from_stmt>pathlib Path<import_stmt>configparser<import_stmt>urllib.request<import_stmt>urllib.error<def_stmt>_install files url=<none><block_start>""" install one file package from GitHub or current directory Parameters ---------- files : list files to be installed the first item (files[0]) will be used as the name of the package'' optional files should be preceded with an exclamation mark (!) url : str url of the location of the GitHub repository this will start usually with https://raw.githubusercontent.com/ and end with /master/ if omitted, the files will be copied from the current directory (not GitHub) Returns ------- info : Info instance info.package : name of the package installed info.path : name where the package is installed in the site-packages info.version : version of the package (obtained from <package>.py) info.files_copied : list of copied files Notes ----- The program automatically makes the required __init__.py file (unless given in files) and <package><version>.dist-info folder with the usual files METADATA, INSTALLER and RECORDS. As the setup.py is not run, the METADATA is very limited, i.e. is contains just name and version. If a __init__.py is in files that file will be used. Otherwise, an __init__/py file will be generated. In thet case, if a __version__ = statement is found in the source file, the __version__ will be included in that __init__.py file. Version history --------------- version 1.0.5 2020-06-24 Bug with removing the dist-info of packages starting with the same name fixed. version 1.0.4 2020-03-29 Linux and ios versions now search in sys.path for site-packages, whereas other platforms now use site.getsitepackages(). This is to avoid installation in a roaming directory on Windows. version 1.0.2 2020-03-07 modified several open calls to be compatible with Python < 3.6 multipe installation for Pythonista removed. Now installs only in site-packages version 1.0.1 2020-03-06 now uses urllib instead of requests to avoid non standard libraries installation for Pythonista improved version 1.0.0 2020-03-04 initial version (c)2020 <NAME> - www.salabim.org """<class_stmt>Info<block_start>version="?"<line_sep>package="?"<line_sep>path="?"<line_sep>files_copied=[]<block_end>info=Info()<line_sep>Pythonista=sys.platform<eq>"ios"<if_stmt><not>files<block_start><raise>ValueError("no files specified")<block_end><if_stmt>files[0][0]<eq>"!"<block_start><raise>ValueError("first item in files (sourcefile) may not be optional")<block_end>package=Path(files[0]).stem<line_sep>sourcefile=files[0]<line_sep>file_contents={}<for_stmt>file files<block_start>optional=file[0]<eq>"!"<if_stmt>optional<block_start>file=file[1:]<block_end><if_stmt>url<block_start><try_stmt><block_start><with_stmt>urllib.request.urlopen(url+file)<as>response<block_start>page=response.read()<block_end>file_contents[file]=page<line_sep>exists=<true><block_end><except_stmt>urllib.error.URLError<block_start>exists=<false><block_end><block_end><else_stmt><block_start>exists=Path(file).is_file()<if_stmt>exists<block_start><with_stmt>open(file "rb")<as>f<block_start>file_contents[file]=f.read()<block_end><block_end><block_end><if_stmt>(<not>exists)<and>(<not>optional)<block_start><raise>FileNotFoundError(file+" not found. 
Nothing installed.")<block_end><block_end>version="unknown"<for_stmt>line file_contents[sourcefile].decode("utf-8").split("\n")<block_start>line_split=line.split("__version__ =")<if_stmt>len(line_split)<g>1<block_start>raw_version=line_split[-1].strip(" '\"")<line_sep>version=""<for_stmt>c raw_version<block_start><if_stmt>c<in>"0123456789-."<block_start>version<augadd>c<block_end><else_stmt><block_start><break><block_end><block_end><break><block_end><block_end>info.files_copied=list(file_contents.keys())<line_sep>info.package=package<line_sep>info.version=version<line_sep>file="__init__.py"<if_stmt>file<not><in>file_contents<block_start>file_contents[file]=("from ."+package+" import *\n").encode()<if_stmt>version<ne>"unknown"<block_start>file_contents[file]<augadd>("from ."+package+" import __version__\n").encode()<block_end><block_end><if_stmt>sys.platform.startswith("linux")<or>(sys.platform<eq>"ios")<block_start>search_in=sys.path<block_end><else_stmt><block_start>search_in=site.getsitepackages()<block_end><for_stmt>f search_in<block_start>sitepackages_path=Path(f)<if_stmt>sitepackages_path.name<eq>"site-packages"<and>sitepackages_path.is_dir()<block_start><break><block_end><block_end><else_stmt><block_start><raise>ModuleNotFoundError("can't find the site-packages folder")<block_end>path=sitepackages_path/package<line_sep>info.path=str(path)<if_stmt>path.is_file()<block_start>path.unlink()<block_end><if_stmt><not>path.is_dir()<block_start>path.mkdir()<block_end><for_stmt>file,contents file_contents.items()<block_start><with_stmt>(path/file).open("wb")<as>f<block_start>f.write(contents)<block_end><block_end><if_stmt>Pythonista<block_start>pypi_packages=sitepackages_path/".pypi_packages"<line_sep>config=configparser.ConfigParser()<line_sep>config.read(pypi_packages)<line_sep>config[package]={}<line_sep>config[package]["url"]="github"<line_sep>config[package]["version"]=version<line_sep>config[package]["summary"]=""<line_sep>config[package]["files"]=path.as_posix()<line_sep>config[package]["dependency"]=""<with_stmt>pypi_packages.open("w")<as>f<block_start>config.write(f)<block_end><block_end><else_stmt><block_start><for_stmt>entry sitepackages_path.glob("*")<block_start><if_stmt>entry.is_dir()<block_start><if_stmt>entry.stem.startswith(package+"-")<and>entry.suffix<eq>".dist-info"<block_start>shutil.rmtree(entry)<block_end><block_end><block_end>path_distinfo=Path(str(path)+"-"+version+".dist-info")<if_stmt><not>path_distinfo.is_dir()<block_start>path_distinfo.mkdir()<block_end><with_stmt>(path_distinfo/"METADATA").open("w")<as>f# make a dummy METADATA file <block_start>f.write("Name: "+package+"\n")<line_sep>f.write("Version: "+version+"\n")<block_end><with_stmt>(path_distinfo/"INSTALLER").open("w")<as>f# make a dummy METADATA file <block_start>f.write("github\n")<block_end><with_stmt>(path_distinfo/"RECORD").open("w")<as>f<block_start><pass><block_end># just to create the file to be recorded <with_stmt>(path_distinfo/"RECORD").open("w")<as>record_file<block_start><for_stmt>p (path path_distinfo)<block_start><for_stmt>file p.glob("**/*")<block_start><if_stmt>file.is_file()<block_start>name=file.relative_to(sitepackages_path).as_posix()# make sure we have slashes 
record_file.write(name+",")<if_stmt>(file.stem<eq>"RECORD"<and>p<eq>path_distinfo)<or>("__pycache__"<in>name.lower())<block_start>record_file.write(",")<block_end><else_stmt><block_start><with_stmt>file.open("rb")<as>f<block_start>file_contents=f.read()<line_sep>hash="sha256="+base64.urlsafe_b64encode(hashlib.sha256(file_contents).digest()).decode("latin1").rstrip("=")<line_sep># hash calculation derived from wheel.py in pip length=str(len(file_contents))<line_sep>record_file.write(hash+","+length)<block_end><block_end>record_file.write("\n")<block_end><block_end><block_end><block_end><block_end><return>info<block_end><if_stmt>__name__<eq>"__main__"<block_start>info=_install(files="salabim.py !calibri.ttf !mplus-1m-regular.ttf !license.txt !DejaVuSansMono.ttf !changelog.txt".split() url="https://raw.githubusercontent.com/salabim/salabim/master/" )<line_sep>print(info.package+" "+info.version+" successfully installed in "+info.path)<line_sep>print("files copied: " ", ".join(info.files_copied))<block_end>
<import_from_stmt>apps.workflow.models WorkflowUserPermission<import_from_stmt>service.account.account_base_service account_base_service_ins<import_from_stmt>service.base_service BaseService<import_from_stmt>service.common.common_service common_service_ins<import_from_stmt>service.common.constant_service constant_service_ins<class_stmt>WorkflowPermissionService(BaseService)<block_start>""" 流程服务 """<def_stmt>__init__ self<block_start><pass><block_end><def_stmt>get_workflow_id_list_by_permission self permission user_type user<block_start>""" 获取操作权限 :param permission: :param user_type: :param user: :return: """<if_stmt>user_type<not><in>['app' 'user' 'department']<block_start><return><false> 'user type is invalid'<block_end><if_stmt><not>user<block_start><if_stmt>user_type<eq>'app'<block_start><return><false> 'app_name is not provided'<block_end><if_stmt>user_type<eq>'user'<block_start><return><false> 'user is not provided'<block_end><if_stmt>user_type<eq>'department'<block_start><return><false> 'department is not provided'<block_end><block_end><if_stmt>user<eq>'loonflow'<block_start><import_from_stmt>apps.workflow.models Workflow<line_sep>workflow_query_set=Workflow.objects.filter(is_deleted=0).all()<line_sep>workflow_id_list=[]<for_stmt>workflow_obj workflow_query_set<block_start>workflow_id_list.append(workflow_obj.id)<block_end><return><true> dict(workflow_id_list=workflow_id_list)<block_end>result_queryset=WorkflowUserPermission.objects.filter(permission=permission user_type=user_type user=user is_deleted=0).all()<line_sep>workflow_id_list=[result.workflow_id<for>result result_queryset]<line_sep>workflow_id_list=list(set(workflow_id_list))<line_sep><return><true> dict(workflow_id_list=workflow_id_list)<block_end><def_stmt>workflow_id_permission_check self workflow_id permission user_type user<block_start>""" 检查是否有某workflow_id的权限 :param workflow_id: :param permission: :param user_type: :param user: :return: """<if_stmt>user_type<eq>'app'<and>user<eq>'loonflow'<block_start><return><true> ''<block_end>workflow_query_set=WorkflowUserPermission.objects.filter(is_deleted=0 workflow_id=workflow_id permission=permission user_type=user_type user=user).first()<if_stmt>workflow_query_set<block_start><return><true> ''<block_end><else_stmt><block_start><if_stmt>permission<eq>'api'<block_start><return><false> 'app: {} has no api permission for workflow_id: {}'.format(user workflow_id)<block_end><if_stmt>permission<eq>'admin'<block_start><return><false> 'user: {} has no admin permission for workflow_id:{}'.format(user workflow_id)<block_end><if_stmt>permission<eq>'intervene'<block_start><return><false> 'user: {} has no intervene permission for workflow_id:{}'.format(user workflow_id)<block_end><if_stmt>permission<eq>'view'<block_start><if_stmt>user_type<eq>'user'<block_start><return><false> 'user: {} has no view permission for workflow_id:{}'.format(user workflow_id)<block_end><if_stmt>user_type<eq>'department'<block_start><return><false> 'department: {} has no view permission for workflow_id:{}'.format(user workflow_id)<block_end><block_end><return><false> 'no permission'<block_end><block_end><def_stmt>get_record_list_by_app_list self app_list<block_start>""" 批量获取应用的workflow权限 :param app_list: :return: """<line_sep>permission_query_set=WorkflowUserPermission.objects.filter(is_deleted=0 permission='api' user_type='app' user__in=app_list).all()<line_sep><return><true> dict(permission_query_set=permission_query_set)<block_end><def_stmt>update_app_permission self app_name workflow_ids<block_start>""" 
update an app's workflow permissions :param app_name: :param workflow_ids: :return: """<if_stmt>workflow_ids<block_start>workflow_id_list=[int(workflow_id)<for>workflow_id workflow_ids.split(',')]<block_end><else_stmt><block_start>workflow_id_list=[]<block_end>permission_query_set=WorkflowUserPermission.objects.filter(is_deleted=0 permission='api' user_type='app' user=app_name).all()<line_sep>exist_workflow_id_list=[permission_query.workflow_id<for>permission_query permission_query_set]<line_sep>flag,need_add_workflow_list=common_service_ins.list_difference(workflow_id_list exist_workflow_id_list)<if_stmt>flag<is><false><block_start><return><false> need_add_workflow_list<block_end>flag,need_del_workflow_list=common_service_ins.list_difference(exist_workflow_id_list workflow_id_list)<if_stmt>flag<is><false><block_start><return><false> need_del_workflow_list<block_end>add_permission_query_list=[]<for_stmt>workflow_id need_add_workflow_list<block_start>add_permission_query_list.append(WorkflowUserPermission(permission='api' user_type='app' user=app_name workflow_id=workflow_id))<block_end>WorkflowUserPermission.objects.bulk_create(add_permission_query_list)<line_sep>WorkflowUserPermission.objects.filter(is_deleted=0 permission='api' user_type='app' user=app_name workflow_id__in=need_del_workflow_list).update(is_deleted=1)<line_sep><return><true> ''<block_end><def_stmt>del_app_permission self app_name workflow_ids=<none><block_start>""" delete an app's workflow permissions :param app_name: :param workflow_ids: :return: """<if_stmt>workflow_ids<is><none><block_start>WorkflowUserPermission.objects.filter(is_deleted=0 permission='api' user_type='app' user=app_name).update(is_deleted=1)<block_end><else_stmt><block_start>WorkflowUserPermission.objects.filter(is_deleted=0 permission='api' user_type='app' user=app_name workflow_id__in=workflow_ids.split(',')).update(is_deleted=1)<block_end><return><true> ''<block_end><def_stmt>manage_workflow_permission_check self workflow_id username app_name<block_start>""" check whether the user has permission to manage the workflow :param workflow_id: :param username: :param app_name: :return: """<line_sep># check whether the app has permission for this workflow flag,msg=self.workflow_id_permission_check(workflow_id 'api' 'app' app_name)<if_stmt>flag<is><false><block_start><return>flag msg<block_end># the workflow creator has manage permission <import_from_stmt>service.workflow.workflow_base_service workflow_base_service_ins<line_sep>flag,workflow_obj=workflow_base_service_ins.get_by_id(workflow_id)<if_stmt>workflow_obj.creator<eq>username<block_start><return><true> "creator has workflow's manage permission"<block_end># super admins have manage permission for all workflows flag,user_obj=account_base_service_ins.get_user_by_username(username)<if_stmt>flag<is><false><block_start><return>flag user_obj<block_end><if_stmt>user_obj.type_id<eq>constant_service_ins.ACCOUNT_TYPE_SUPER_ADMIN<block_start><return><true> "superuser has all workflow's manage permission"<block_end>flag,msg=self.workflow_id_permission_check(workflow_id 'admin' 'user' username)<line_sep><return>flag msg<block_end><block_end>workflow_permission_service_ins=WorkflowPermissionService()<line_sep>
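A minimal usage sketch for the service defined above (the calling context, the app name 'ops-bot' and workflow id 1 are assumptions for illustration only):
# Hypothetical caller: both methods return a (flag, result) tuple as defined in the class above.
flag, msg = workflow_permission_service_ins.workflow_id_permission_check(1, 'api', 'app', 'ops-bot')
if flag is False:
    print(msg)  # e.g. "app: ops-bot has no api permission for workflow_id: 1"
else:
    flag, result = workflow_permission_service_ins.get_workflow_id_list_by_permission('api', 'app', 'ops-bot')
    print(result['workflow_id_list'])  # workflow ids the app may call through the API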
<import_from_stmt>..base ShopifyResource<class_stmt>Report(ShopifyResource)<block_start><pass><block_end>
""" CODE ADAPTED FROM: https://github.com/sjblim/rmsn_nips_2018 Implementation of Recurrent Marginal Structural Networks (R-MSNs): <NAME>, <NAME>, <NAME>, "Forecasting Treatment Responses Over Time Using Recurrent Marginal Structural Networks", Advances in Neural Information Processing Systems, 2018. """<import_stmt>tensorflow<as>tf<import_stmt>numpy<as>np<import_stmt>pandas<as>pd<import_stmt>rmsn.libs.net_helpers<as>helpers<line_sep>_ACTIVATION_MAP={'sigmoid':tf.nn.sigmoid 'elu':tf.nn.elu 'tanh':tf.nn.tanh 'linear':<lambda>x:x}<class_stmt>StateDumpingRNN(tf.contrib.rnn.RNNCell)<block_start>""" This RNNCell dumps out internal states for lstms"""<def_stmt>__init__ self lstm<block_start>super(StateDumpingRNN self).__init__()<line_sep># Check that outputs self.lstm_cell=lstm<block_end>@property<def_stmt>state_size self<block_start><return>self.lstm_cell.state_size<block_end>@property<def_stmt>output_size self<block_start><return>self.lstm_cell.state_size<block_end><def_stmt>call self inputs state<block_start>output,state=self.lstm_cell(inputs state)<line_sep><return>state state<block_end><block_end><class_stmt>Seq2SeqDecoderCell(tf.contrib.rnn.RNNCell)<block_start>""" Decoder cell which allows for feedback, and external inputs during training """<def_stmt>__init__ self lstm W b b_training_mode=<false><block_start>super(Seq2SeqDecoderCell self).__init__()<line_sep>self.lstm_cell=lstm<line_sep>self.W=W<line_sep>self.b=b<line_sep>self._output_size=self.W.get_shape().as_list()[-1]<line_sep>self.b_training_mode=b_training_mode<block_end>@property<def_stmt>state_size self<block_start><if_stmt>self.b_training_mode# use actual inputs <block_start><return>self.lstm_cell.state_size<block_end><else_stmt><block_start><return>self.lstm_cell.state_size+self._output_size<block_end><block_end>@property<def_stmt>output_size self<block_start><return>self._output_size<block_end><def_stmt>call self inputs state# During training time, we assume that the previous input shape is [batch_size, action_vector + output_vector] # Output vectors are assumed to be at the end of the input or state vector (depending on train/test mode respectively) <block_start><if_stmt>self.b_training_mode<block_start>actual_states=state<line_sep>combined_inputs=inputs<block_end><else_stmt><block_start>actual_states,prev_outputs=tf.split(state [self.lstm_cell.state_size self._output_size] axis=-1)<line_sep>combined_inputs=tf.concat([inputs prev_outputs] axis=-1)<block_end># TODO: FIX HACK! 
THis forces this lstm to be in a different scope <with_stmt>tf.variable_scope("seq2seq")<block_start>output,state=self.lstm_cell(combined_inputs actual_states)<line_sep>output=tf.matmul(output self.W)+self.b<block_end><if_stmt><not>self.b_training_mode<block_start>state=tf.concat([state output] axis=-1)<block_end><return>output state<block_end><block_end><class_stmt>RnnModel<block_start><def_stmt>__init__ self params# Generic params <block_start>self.net_name=params['net_name']<line_sep>self.experiment_name=params['experiment_name']<line_sep># Data params self.training_data=params['training_dataset']<line_sep>self.validation_data=params['validation_dataset']<line_sep>self.test_data=params['test_dataset']<line_sep>self.input_size=params['input_size']<line_sep>self.output_size=params['output_size']<line_sep># Network params self.softmax_size=params['softmax_size']<line_sep>self.dropout_rate=params['dropout_rate']<line_sep>self.hidden_layer_size=params['hidden_layer_size']<line_sep>self.memory_activation_type=params['hidden_activation']<line_sep>self.output_activation_type=params['output_activation']<line_sep>self.b_use_seq2seq_feedback=params['use_seq2seq_feedback']<line_sep>self.b_use_seq2seq_training_mode=params['use_seq2seq_training_mode']<line_sep># Memory Adapter params self.b_use_memory_adapter=<false><if>'use_memory_adapter'<not><in>params<else>params['use_memory_adapter']<line_sep>self.memory_adapter_size=0<if>'memory_adapter_size'<not><in>params<else>params['memory_adapter_size']<line_sep>self.encoder_state_size=<none><line_sep># TODO: FIX THIS HACK FOR LOADING # Change scope for seq2seq network - so weights can be loaded later... variable_scope_name="seq2seq"<if>"seq2seq"<in>self.net_name<else>"network"<with_stmt>tf.variable_scope(variable_scope_name)<block_start>self.rnn_cell=tf.contrib.rnn.BasicLSTMCell(self.hidden_layer_size activation=_ACTIVATION_MAP[self.memory_activation_type] state_is_tuple=<false> name=variable_scope_name<if>variable_scope_name<ne>"network"<else><none>)<line_sep>self.output_activation=_ACTIVATION_MAP[self.output_activation_type]<line_sep>self.output_w=tf.get_variable("Output_W" [self.hidden_layer_size self.output_size] dtype=tf.float32 initializer=tf.contrib.layers.xavier_initializer())<line_sep>self.output_b=tf.get_variable("Output_b" [self.output_size] dtype=tf.float32 initializer=tf.contrib.layers.xavier_initializer())<block_end># Training params self.performance_metric=params['performance_metric']<line_sep>self.epochs=params['num_epochs']<line_sep>self.minibatch_size=params['minibatch_size']<line_sep>self.learning_rate=params['learning_rate']<line_sep>self.max_global_norm=params['max_norm']<line_sep>self.backprop_length=params['backprop_length']<line_sep>self.global_step=tf.get_variable('global_step_tfrnn' initializer=0 dtype=np.int32 trainable=<false>)<line_sep># Test params self.num_prediction_samples=500<line_sep># Saving params self.model_folder=params['model_folder']<line_sep>relevant_name_parts=[self.experiment_name #self.net_name, self.dropout_rate self.hidden_layer_size self.epochs self.minibatch_size self.learning_rate self.max_global_norm self.backprop_length]<line_sep># Check <if_stmt><not>(self.memory_activation_type<eq>"elu"<and>self.output_activation_type<eq>"linear")<block_start>relevant_name_parts<augadd>[self.memory_activation_type self.output_activation_type]<block_end><if_stmt>self.memory_adapter_size<g>0<block_start>relevant_name_parts<augadd>[self.memory_adapter_size]<block_end>self.serialisation_name="_".join([str(s)<for>s 
relevant_name_parts])<block_end><def_stmt>_apply_memory_adapter self encoder_states<block_start>b_single_layer=self.memory_adapter_size<eq>0# since externally checked that memory adapter should be applied <if_stmt>self.encoder_state_size<is><none><block_start>encoder_size=encoder_states.get_shape().as_list()[-1]<line_sep>self.encoder_state_size=encoder_size<if_stmt>b_single_layer<block_start>self.memory_adapter_layer={'W1':tf.get_variable("Adapter_Layer1_W" [self.encoder_state_size self.hidden_layer_size<times>2] dtype=tf.float32 initializer=tf.contrib.layers.xavier_initializer()) 'b1':tf.get_variable("Adapter_Layer1_b" [self.hidden_layer_size<times>2] dtype=tf.float32 initializer=tf.contrib.layers.xavier_initializer()) }<block_end><else_stmt><block_start>self.memory_adapter_layer={'W1':tf.get_variable("Adapter_Layer1_W" [self.encoder_state_size self.memory_adapter_size] dtype=tf.float32 initializer=tf.contrib.layers.xavier_initializer()) 'b1':tf.get_variable("Adapter_Layer1_b" [self.memory_adapter_size] dtype=tf.float32 initializer=tf.contrib.layers.xavier_initializer()) 'W2':tf.get_variable("Adapter_Layer2_W" [self.memory_adapter_size self.hidden_layer_size<times>2] dtype=tf.float32 initializer=tf.contrib.layers.xavier_initializer()) 'b2':tf.get_variable("Adapter_Layer2_b" [self.hidden_layer_size<times>2] # LSTM memory is double concated dtype=tf.float32 initializer=tf.contrib.layers.xavier_initializer())}<block_end><block_end># Use elu and linear to avoid placing any restrictions on the range of internal activations memory_activation_fxn=_ACTIVATION_MAP[self.memory_activation_type]<line_sep>decoder_states=memory_activation_fxn(tf.matmul(encoder_states self.memory_adapter_layer['W1'])+self.memory_adapter_layer['b1'])<if_stmt><not>b_single_layer<block_start>decoder_states=memory_activation_fxn(tf.matmul(decoder_states self.memory_adapter_layer['W2'])+self.memory_adapter_layer['b2'])<block_end><return>decoder_states<block_end><def_stmt>get_prediction_graph self use_validation_set with_dropout=<true> placeholder_time_steps=<none> b_use_state_initialisation=<false> b_dump_all_states=<false><block_start><if_stmt>placeholder_time_steps<block_start>data_chunk={}<line_sep>data_chunk['inputs']=tf.placeholder(tf.float32 [<none> placeholder_time_steps self.input_size])<line_sep>data_chunk['sequence_lengths']=tf.placeholder(tf.float32 [<none>])# Length <block_end><else_stmt><block_start><if_stmt>use_validation_set<is><none><block_start>dataset=self.training_data.batch(self.minibatch_size)<block_end><elif_stmt>use_validation_set<block_start>dataset=self.validation_data.batch(self.minibatch_size)<block_end><else_stmt><block_start>dataset=self.test_data.batch(self.minibatch_size)<block_end>iterator=tf.data.Iterator.from_structure(dataset.output_types dataset.output_shapes)<line_sep>initializer=iterator.make_initializer(dataset)<line_sep>data_chunk=iterator.get_next()<block_end><if_stmt>b_use_state_initialisation<block_start><if_stmt>'initial_states'<not><in>data_chunk<block_start><raise>ValueError("State initialisations not present!")<block_end>initial_states=tf.cast(data_chunk['initial_states'] tf.float32)<block_end><else_stmt><block_start>initial_states=<none><block_end>output=self._build_prediction_graph(data_chunk with_dropout=with_dropout initial_states=initial_states 
b_dump_all_states=b_dump_all_states)<if_stmt>placeholder_time_steps<block_start>output['input_holder']=data_chunk['inputs']<line_sep>output['sequence_length_holder']=data_chunk['sequence_lengths']<block_end><else_stmt><block_start>output['initializer']=initializer<block_end><return>output<block_end><def_stmt>_build_prediction_graph self data_chunk with_dropout=<true> initial_states=<none> b_dump_all_states=<false># output_minibatch = tf.cast(data_chunk['outputs'], tf.float32) # active_entries = tf.cast(data_chunk['active_entries'], tf.float32) <block_start>input_minibatch=tf.cast(data_chunk['inputs'] tf.float32)<line_sep>sequence_lengths=tf.cast(data_chunk['sequence_lengths'] tf.int32)<line_sep>time_steps=input_minibatch.get_shape().as_list()[1]<line_sep># Setup graph now outputs=[]<line_sep>states_list=[]<if_stmt>with_dropout<block_start>num_samples=self.num_prediction_samples<line_sep>keep_probs=(1-self.dropout_rate)<block_end><else_stmt><block_start>num_samples=1<line_sep>keep_probs=1.0<block_end>lstm_additional_size=self.output_size<if><not>self.b_use_seq2seq_training_mode<and>self.b_use_seq2seq_feedback<else>0<line_sep>cell=tf.nn.rnn_cell.DropoutWrapper(self.rnn_cell input_keep_prob=keep_probs output_keep_prob=keep_probs state_keep_prob=keep_probs variational_recurrent=<true> input_size=input_minibatch.shape[2]+lstm_additional_size dtype=tf.float32)<line_sep># Extension for feedback loops in seq2seq architecture <if_stmt>self.b_use_seq2seq_feedback<block_start>cell=Seq2SeqDecoderCell(cell self.output_w self.output_b b_training_mode=<false>)<block_end># Extension for memory adapter <if_stmt>self.b_use_memory_adapter<block_start><if_stmt>initial_states<is><none><block_start><raise>ValueError("Memory adapter requires initial states!")<block_end>initial_states=self._apply_memory_adapter(initial_states)<block_end><for_stmt>i range(num_samples)<block_start>val,states=tf.nn.dynamic_rnn(cell input_minibatch initial_state=initial_states # None for default dtype=tf.float32 sequence_length=sequence_lengths)<if_stmt>b_dump_all_states<block_start>state_dumping_cell=StateDumpingRNN(cell)<line_sep>all_states,dumped_states=tf.nn.dynamic_rnn(state_dumping_cell input_minibatch initial_state=initial_states # None for default dtype=tf.float32 sequence_length=sequence_lengths)<block_end><else_stmt><block_start>all_states=states<block_end># just dump one state - used to speed up training while enforcing function params # Linear output layer flattened_val=tf.reshape(val [-1 self.hidden_layer_size])<if_stmt>self.b_use_seq2seq_feedback<block_start>logits=flattened_val<block_end><else_stmt><block_start>logits=tf.matmul(flattened_val self.output_w)+self.output_b<block_end><if_stmt>self.softmax_size<ne>0<block_start>logits=tf.reshape(logits [-1 time_steps self.output_size])<line_sep>core_outputs,softmax_outputs=tf.split(logits [self.output_size-self.softmax_size self.softmax_size] axis=2)<line_sep>output=tf.concat([self.output_activation(core_outputs) tf.nn.softmax(softmax_outputs axis=2)] axis=2)<block_end><else_stmt><block_start>output=self.output_activation(logits)<line_sep>output=tf.reshape(output [-1 time_steps self.output_size])<block_end>outputs.append(tf.expand_dims(output 0))<line_sep>states_list.append(tf.expand_dims(all_states 0))<block_end># Dumping output samples=tf.concat(outputs axis=0)<line_sep>mean_estimate=tf.reduce_mean(samples axis=0)<line_sep>upper_bound=tf.contrib.distributions.percentile(samples q=95.0 axis=0)<line_sep>lower_bound=tf.contrib.distributions.percentile(samples q=5.0 
axis=0)<line_sep># Averages across all samples - no difference for single sample ave_state=tf.reduce_mean(tf.concat(states_list axis=0) axis=0)<line_sep><return>{'mean':mean_estimate 'upper_bound':upper_bound 'lower_bound':lower_bound 'ave_states':ave_state}<block_end><def_stmt>get_training_graph self use_truncated_bptt=<true> b_stub_front=<true> b_use_state_initialisation=<true><block_start>training_dataset=self.training_data.shuffle(buffer_size=10000).batch(self.minibatch_size).repeat(self.epochs)<line_sep>iterator=training_dataset.make_one_shot_iterator()<line_sep>data_chunk=iterator.get_next()<line_sep>input_minibatch=tf.cast(data_chunk['inputs'] tf.float32)<line_sep>output_minibatch=tf.cast(data_chunk['outputs'] tf.float32)<line_sep>active_entries=tf.cast(data_chunk['active_entries'] tf.float32)<line_sep>sequence_lengths=tf.cast(data_chunk['sequence_lengths'] tf.int32)<if_stmt>b_use_state_initialisation<block_start><if_stmt>'initial_states'<not><in>data_chunk<block_start><raise>ValueError("State initialisations not present!")<block_end>initial_states=tf.cast(data_chunk['initial_states'] tf.float32)<line_sep># Extension for memory adapter <if_stmt>self.b_use_memory_adapter<block_start><if_stmt>initial_states<is><none><block_start><raise>ValueError("Memory adapter requires initial states!")<block_end>initial_states=self._apply_memory_adapter(initial_states)<block_end><block_end><else_stmt><block_start>initial_states=<none><block_end><if_stmt>'propensity_weights'<in>data_chunk<block_start>weights=tf.cast(data_chunk['propensity_weights'] tf.float32)<block_end><else_stmt><block_start>weights=1<block_end>keep_probs=(1-self.dropout_rate)<line_sep># Setup graph now lstm_additional_size=self.output_size<if><not>self.b_use_seq2seq_training_mode<and>self.b_use_seq2seq_feedback<else>0<line_sep>cell=tf.nn.rnn_cell.DropoutWrapper(self.rnn_cell input_keep_prob=keep_probs output_keep_prob=keep_probs state_keep_prob=keep_probs variational_recurrent=<true> input_size=input_minibatch.shape[2]+lstm_additional_size dtype=tf.float32)<if_stmt>self.b_use_seq2seq_feedback<block_start>cell=Seq2SeqDecoderCell(cell self.output_w self.output_b b_training_mode=self.b_use_seq2seq_training_mode)<block_end># Stack up the dynamic RNNs for T-BPTT. 
# Splitting it up total_timesteps=input_minibatch.get_shape().as_list()[1]<line_sep>num_slices=int(total_timesteps/self.backprop_length)<line_sep>chunk_sizes=[self.backprop_length<for>i range(num_slices)]<line_sep>odd_size=total_timesteps-self.backprop_length<times>num_slices<line_sep># get all the chunks <if_stmt>odd_size<g>0<block_start><if_stmt>b_stub_front<block_start>chunk_sizes=[odd_size]+chunk_sizes<block_end><else_stmt><block_start>chunk_sizes=chunk_sizes+[odd_size]<block_end><block_end># Implement TF style Truncated-backprop through time outputs=[]<line_sep>start=0<line_sep>states=initial_states<for_stmt>chunk_size chunk_sizes<block_start>input_chunk=tf.slice(input_minibatch [0 start 0] [-1 chunk_size self.input_size])<if_stmt>states<is><not><none><and>use_truncated_bptt<block_start>val,states=tf.nn.dynamic_rnn(cell input_chunk sequence_length=sequence_lengths dtype=tf.float32 initial_state=states)<block_end><else_stmt><block_start>val,states=tf.nn.dynamic_rnn(cell input_chunk sequence_length=sequence_lengths dtype=tf.float32)<block_end># Linear output layer flattened_val=tf.reshape(val [-1 self.hidden_layer_size])<if_stmt>self.b_use_seq2seq_feedback<block_start>logits=flattened_val<block_end><else_stmt><block_start>logits=tf.matmul(flattened_val self.output_w)+self.output_b<block_end><if_stmt>self.softmax_size<ne>0<block_start>logits=tf.reshape(logits [-1 chunk_size self.output_size])<line_sep>core_outputs,softmax_outputs=tf.split(logits [self.output_size-self.softmax_size self.softmax_size] axis=2)<line_sep>output=tf.concat([self.output_activation(core_outputs) tf.nn.softmax(softmax_outputs axis=2)] axis=2)<block_end><else_stmt><block_start>output=self.output_activation(logits)<line_sep>output=tf.reshape(output [-1 chunk_size self.output_size])<block_end>outputs.append(output)<line_sep># break links between states for truncated bptt states=tf.identity(states)<line_sep># Starting point start<augadd>chunk_size<block_end># Dumping output predictions=tf.concat(outputs axis=1)<line_sep># Split out the softmax components <if_stmt>self.softmax_size<g>0<block_start>original_vs_softmax_size=[self.output_size-self.softmax_size self.softmax_size]<line_sep>predictions,softmax_predictions=tf.split(predictions original_vs_softmax_size axis=2)<line_sep>output_minibatch,softmax_output_minibatch=tf.split(output_minibatch original_vs_softmax_size axis=2)<line_sep>active_entries,softmax_active=tf.split(active_entries original_vs_softmax_size axis=2)<block_end># Compute loss function <if_stmt>self.performance_metric<eq>"mse"<block_start>loss=tf.reduce_sum(tf.square(predictions-output_minibatch)<times>active_entries<times>weights)/tf.reduce_sum(active_entries)<block_end># cos some zero entires <elif_stmt>self.performance_metric<eq>"xentropy"<block_start>loss=tf.reduce_sum((output_minibatch<times>-tf.log(predictions+1e-8)+(1-output_minibatch)<times>-tf.log(1-predictions+1e-8))<times>active_entries<times>weights)/tf.reduce_sum(active_entries)<block_end><else_stmt><block_start><raise>ValueError("Unknown performance metric {}".format(self.performance_metric))<block_end><if_stmt>self.softmax_size<g>0<block_start>loss<augadd>tf.reduce_sum(softmax_output_minibatch<times>-tf.log(softmax_predictions+1e-8)<times>softmax_active<times>weights)/tf.reduce_sum(softmax_active)<block_end>optimiser=helpers.get_optimization_graph(loss learning_rate=self.learning_rate max_global_norm=self.max_global_norm global_step=self.global_step)<line_sep># Parcel outputs handles={'loss':loss 
'optimiser':optimiser}<line_sep><return>handles<block_end><block_end>
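RnnModel.__init__ above pulls its whole configuration out of one flat params dict; the sketch below enumerates the keys it reads, with placeholder values (the dataset objects and the sizes are assumptions for illustration, not values from the original experiments):
# Hypothetical configuration covering the keys read by RnnModel.__init__;
# training_dataset / validation_dataset / test_dataset are assumed to be tf.data.Dataset
# objects of dict elements ('inputs', 'outputs', 'active_entries', 'sequence_lengths', ...).
params = {
    'net_name': 'demo_rnn', 'experiment_name': 'demo',
    'training_dataset': training_dataset, 'validation_dataset': validation_dataset,
    'test_dataset': test_dataset,
    'input_size': 10, 'output_size': 1, 'softmax_size': 0,
    'dropout_rate': 0.1, 'hidden_layer_size': 64,
    'hidden_activation': 'elu', 'output_activation': 'linear',
    'use_seq2seq_feedback': False, 'use_seq2seq_training_mode': False,
    'performance_metric': 'mse', 'num_epochs': 100, 'minibatch_size': 64,
    'learning_rate': 0.01, 'max_norm': 1.0, 'backprop_length': 60,
    'model_folder': './models',
    # optional: 'use_memory_adapter': False, 'memory_adapter_size': 0,
}
model = RnnModel(params)
# b_use_state_initialisation=False because this sketch assumes no 'initial_states' in the dataset.
handles = model.get_training_graph(b_use_state_initialisation=False)  # {'loss': ..., 'optimiser': ...}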
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # SPDX-License-Identifier: Apache-2.0 #!/usr/bin/env python3 # Modules importing <import_from_future_stmt> print_function<import_stmt>os sys<import_stmt>waffun<as>function<import_stmt>boto3<import_stmt>zipfile<line_sep># Global Constants limitWebAcl='10'<def_stmt>stageFile fileName# Trying to 'touch' (create) the provided file <block_start>dummyFile=""<try_stmt><block_start>dummyFile=open(fileName 'w')<block_end><except_stmt><block_start>print("*** Unable to create the file "+fileName+"! ***\n" file=sys.stderr)<line_sep>sys.exit(-1)<block_end><else_stmt><block_start><return>(dummyFile)<block_end><block_end><def_stmt>getWaf arguments<block_start>''' Prints customer account and calls the right WAF function to get customer's resources. The arguments are a list with the following values: [wafType to be considered (1 = global, 2 = regional), region name, Web ACL ID] '''<line_sep># Staging all files. The first one is the log file. The second one is the Terraform template file. # The third one is the zip file containing the two previous ones. listLogTemplate=function.getHomeConfig()<line_sep>log=stageFile(listLogTemplate[0])<line_sep>template=stageFile(listLogTemplate[1])<line_sep>package=listLogTemplate[2]<line_sep>print("Your WAFER log file is "+listLogTemplate[0])<line_sep>print("Your Terraform template file is "+listLogTemplate[1])<line_sep># Populating first lines of the log file log.write("*************************************************************************\n")<line_sep>log.write("WAFER - AWS WAF Enhanced Repicator - Version "+function.getVersion()+"\n")<line_sep>log.write("*************************************************************************\n")<line_sep>webAclId=arguments[2]<line_sep>isRegional=<false><line_sep>suffix="_"<line_sep>region="us-east-1"<if_stmt>arguments[0]<eq>2# This indicates that it will be regional WAF <block_start>isRegional=<true><line_sep>suffix="regional_"<line_sep>region=arguments[1]<block_end><if_stmt>isRegional<block_start>print("Considering WAF regional resources on "+region+".\n")<line_sep>log.write(function.getFormattedDateTime()+"Region: "+region+"\n")<line_sep>client=boto3.setup_default_session(region_name=region)<line_sep>client=boto3.client('waf-regional')<block_end><else_stmt><block_start>print("Considering WAF global resources.\n")<line_sep>log.write(function.getFormattedDateTime()+"Global WAF\n")<line_sep>client=boto3.client('waf')<block_end><if_stmt>len(webAclId)<eq>0<block_start><try_stmt><block_start>response=client.list_web_acls()<block_end><except_stmt><block_start>function.abortMission(log template "list_web_acls()")<block_end><else_stmt># In case query is ok, proceed with the code <block_start><if_stmt>len(response)<eq>0<block_start><if_stmt>isRegional<block_start>print("You have no Web ACLs on region {}. 
Exiting...\n".format(region) file=sys.stderr)<block_end><else_stmt><block_start>print("You have no global Web ACLs.\n" file=sys.stderr)<block_end>log.write(function.getFormattedDateTime()+"End of Log.")<line_sep>function.abortMission(log template)<block_end><else_stmt><block_start>print("Choose which Web ACL you want to consider: ")<for_stmt>i range(len(response['WebACLs']))<block_start>print("[{}] Id: {}, Name: {}".format(str(i+1) response['WebACLs'][i]['WebACLId'] response['WebACLs'][i]['Name']))<block_end>print("[0] Abort")<line_sep>choice=-1<while_stmt>(choice<l>0<or>choice<g>len(response))<block_start>choice=input("Your choice: ")<if_stmt><not>choice.isdigit()<block_start>choice=-1<block_end><else_stmt><block_start>choice=int(choice)<block_end><block_end><if_stmt>choice<eq>0<block_start>print("Aborting execution.\n" file=sys.stderr)<line_sep>log.write(function.getFormattedDateTime()+"End of Log.")<line_sep>function.abortMission(log template "")<block_end>webAclId=response['WebACLs'][choice-1]['WebACLId']<line_sep>webAclName=response['WebACLs'][choice-1]['Name']<block_end><block_end><block_end><else_stmt><block_start><try_stmt><block_start>response=client.get_web_acl(WebACLId=webAclId)<block_end><except_stmt><block_start><if_stmt>isRegional<block_start>print("Unable to find the provided Web ACL ID {} on the provided region {}.".format(webAclId region) file=sys.stderr)<line_sep>log.write(function.getFormattedDateTime()+"Unable to find the provided Web ACL "+webAclId+" on the provided region "+region+".\n")<block_end><else_stmt><block_start>print("Unable to find the provided global Web ACL ID {}.".format(webAclId) file=sys.stderr)<line_sep>log.write(function.getFormattedDateTime()+"Unable to find the provided global Web ACL "+webAclId+".\n")<block_end>function.abortMission(log template "")<block_end>webAclName=response['WebACL']['Name']<block_end>log.write(function.getFormattedDateTime()+"Web ACL (ID): "+webAclName+" ("+webAclId+")\n")<line_sep>print("Grabbing resources for Web ACL {} (ID: {})...".format(webAclName webAclId))<try_stmt><block_start>response1=client.get_web_acl(WebACLId=webAclId)<block_end><except_stmt><block_start>function.abortMission(log template "get_web_acl()")<block_end>metricName=response1['WebACL']['MetricName']<line_sep>defaultAction=response1['WebACL']['DefaultAction']['Type']<line_sep># Starting the template writing. template.write('provider "aws" {\n')<if_stmt>isRegional<block_start>template.write(' region = "'+region+'"\n')<block_end><else_stmt><block_start>template.write(' region = "us-east-1"\n')<block_end>template.write('}\n\n')<line_sep># Getting all conditions. 
conditionsResult=crawlConditions(client log template suffix)<line_sep>template.write(conditionsResult[1])<line_sep>template.write("\n\n")<line_sep>rules={}<for_stmt>i range(len(response1['WebACL']['Rules']))<block_start>finalString=""<line_sep>ruleId=response1['WebACL']['Rules'][i]['RuleId']<line_sep>ruleType=response1['WebACL']['Rules'][i]['Type']<if_stmt>ruleType<eq>'GROUP'<block_start><try_stmt><block_start>groupTemp=client.get_rule_group(RuleGroupId=ruleId)<block_end><except_stmt><block_start>function.abortMission(log template "get_rule_group()")<block_end>groupName=groupTemp['RuleGroup']['Name']<line_sep>print("Rule Group (Id): {} ({})".format(groupName ruleId))<line_sep>log.write(function.getFormattedDateTime()+"Group Name: "+groupName+" / Group Id: "+ruleId+"\n")<try_stmt><block_start>loopGroup=client.list_activated_rules_in_rule_group(RuleGroupId=ruleId)<block_end><except_stmt><block_start>function.abortMission(log template "list_activated_rules_in_rule_group()")<block_end><for_stmt>j range(len(loopGroup['ActivatedRules']))<block_start>idTemp=loopGroup['ActivatedRules'][j]['RuleId']<try_stmt><block_start>rTemp=client.get_rule(RuleId=idTemp)<block_end><except_stmt><block_start>function.abortMission(log template "get_rule()")<block_end># Checking if the rule was not already recorded <if_stmt><not>idTemp<in>rules<block_start>index=0<for_stmt>key,value rules.items()<block_start><if_stmt>rules[key][:5]<eq>"rule_"<block_start>index<augadd>1<block_end><block_end>rules[idTemp]="rule_"+str(index)<line_sep>nameTemp=rTemp['Rule']['Name']<line_sep>print(" Rule Name: {} / Rule ID: {}".format(nameTemp idTemp))<line_sep>log.write(function.getFormattedDateTime()+" Rule Name: "+nameTemp+" / Rule ID: "+ruleId+"\n")<line_sep>finalString<augadd>"resource \"aws_waf"+suffix+"rule\" \"rule_"+str(index)+"\" {\n"<line_sep>finalString<augadd>" name = \""+rTemp['Rule']['Name']+"\"\n"<line_sep>finalString<augadd>" metric_name = \""+rTemp['Rule']['MetricName']+"\"\n\n"<for_stmt>k range(len(rTemp['Rule']['Predicates']))<block_start><if_stmt>isRegional<block_start>finalString<augadd>" predicate {\n"<block_end><else_stmt><block_start>finalString<augadd>" predicates {\n"<block_end>finalString<augadd>" type = \""+rTemp['Rule']['Predicates'][k]['Type']+"\"\n"<line_sep>finalString<augadd>" negated = "+str(rTemp['Rule']['Predicates'][k]['Negated']).lower()+"\n"<line_sep>conditionId=rTemp['Rule']['Predicates'][k]['DataId']<line_sep>finalString<augadd>" data_id = \"${aws_waf"+suffix+conditionsResult[0][conditionId][:-2]+"."+conditionsResult[0][conditionId]+".id}\"\n"<line_sep>finalString<augadd>" }\n"<block_end>finalString<augadd>"}\n\n"<block_end><block_end>finalString<augadd>"resource \"aws_waf"+suffix+"rule_group\" \"rule_group_"+str(i)+"\" {\n"<line_sep>rules[ruleId]="rule_group_"+str(i)<line_sep>finalString<augadd>" name = \""+groupName+"\"\n"<line_sep>finalString<augadd>" metric_name = \""+groupTemp['RuleGroup']['MetricName']+"\"\n\n"<for_stmt>j range(len(loopGroup['ActivatedRules']))<block_start>finalString<augadd>" activated_rule {\n"<line_sep>finalString<augadd>" action {\n"<line_sep>finalString<augadd>" type = \""+loopGroup['ActivatedRules'][j]['Action']['Type']+"\"\n"<line_sep>finalString<augadd>" }\n\n"<line_sep>finalString<augadd>" priority = "+str(loopGroup['ActivatedRules'][j]['Priority'])+"\n"<line_sep>finalString<augadd>" rule_id = \"${aws_waf"+suffix+"rule."+rules[loopGroup['ActivatedRules'][j]['RuleId']]+".id}\"\n"<line_sep>finalString<augadd>" 
}\n\n"<block_end>finalString<augadd>"}\n\n"<line_sep>template.write(finalString)<block_end><elif_stmt>ruleType<eq>"RATE_BASED"<block_start><try_stmt><block_start>rTemp=client.get_rate_based_rule(RuleId=ruleId)<block_end><except_stmt><block_start>function.abortMission(log template "get_rate_based_rule()")<block_end>ruleName=rTemp['Rule']['Name']<line_sep>ruleAction=response1['WebACL']['Rules'][i]['Action']['Type']<line_sep>log.write(function.getFormattedDateTime()+"Rule Name: "+ruleName+" / Rule Id: "+ruleId+"\n")<line_sep>print("Rule Name: {} / Rule Id: {}".format(ruleName ruleId))<line_sep>idTemp=rTemp['Rule']['RuleId']<if_stmt><not>idTemp<in>rules<block_start>index=0<for_stmt>key,value rules.items()<block_start><if_stmt>rules[key][:5]<eq>"rule_"<block_start>index<augadd>1<block_end><block_end>rules[idTemp]="rule_"+str(index)<line_sep>finalString<augadd>"resource \"aws_waf"+suffix+"rate_based_rule\" \"rule_"+str(index)+"\" {\n"<line_sep>finalString<augadd>" name = \""+rTemp['Rule']['Name']+"\"\n"<line_sep>finalString<augadd>" metric_name = \""+rTemp['Rule']['MetricName']+"\"\n\n"<line_sep>finalString<augadd>" rate_key = \""+rTemp['Rule']['RateKey']+"\"\n"<line_sep>finalString<augadd>" rate_limit = "+str(rTemp['Rule']['RateLimit'])+"\n\n"<for_stmt>j range(len(rTemp['Rule']['MatchPredicates']))<block_start><if_stmt>isRegional<block_start>finalString<augadd>" predicate {\n"<block_end><else_stmt><block_start>finalString<augadd>" predicates {\n"<block_end>conditionId=rTemp['Rule']['MatchPredicates'][j]['DataId']<line_sep>finalString<augadd>" data_id = \"${aws_waf"+suffix+conditionsResult[0][conditionId][:-2]+"."+conditionsResult[0][conditionId]+".id}\"\n"<line_sep>finalString<augadd>" negated = "+str(rTemp['Rule']['MatchPredicates'][j]['Negated']).lower()+"\n"<line_sep>finalString<augadd>" type = \""+rTemp['Rule']['MatchPredicates'][j]['Type']+"\"\n"<line_sep>finalString<augadd>" }\n\n"<block_end>finalString<augadd>"}\n\n"<line_sep>template.write(finalString)<block_end><block_end><elif_stmt>ruleType<eq>"REGULAR"<block_start><try_stmt><block_start>rTemp=client.get_rule(RuleId=ruleId)<block_end><except_stmt><block_start>function.abortMission(log template "get_rule()")<block_end>ruleName=rTemp['Rule']['Name']<line_sep>ruleAction=response1['WebACL']['Rules'][i]['Action']['Type']<line_sep>log.write(function.getFormattedDateTime()+"Rule Name: "+ruleName+" / Rule Id: "+ruleId+"\n")<line_sep>print("Rule Name: {} / Rule Id: {}".format(ruleName ruleId))<line_sep>idTemp=rTemp['Rule']['RuleId']<if_stmt><not>idTemp<in>rules<block_start>index=0<for_stmt>key,value rules.items()<block_start><if_stmt>rules[key][:5]<eq>"rule_"<block_start>index<augadd>1<block_end><block_end>rules[idTemp]="rule_"+str(index)<line_sep>finalString<augadd>"resource \"aws_waf"+suffix+"rule\" \"rule_"+str(index)+"\" {\n"<line_sep>finalString<augadd>" name = \""+rTemp['Rule']['Name']+"\"\n"<line_sep>finalString<augadd>" metric_name = \""+rTemp['Rule']['MetricName']+"\"\n\n"<for_stmt>j range(len(rTemp['Rule']['Predicates']))<block_start><if_stmt>isRegional<block_start>finalString<augadd>" predicate {\n"<block_end><else_stmt><block_start>finalString<augadd>" predicates {\n"<block_end>conditionId=rTemp['Rule']['Predicates'][j]['DataId']<line_sep>finalString<augadd>" data_id = \"${aws_waf"+suffix+conditionsResult[0][conditionId][:-2]+"."+conditionsResult[0][conditionId]+".id}\"\n"<line_sep>finalString<augadd>" negated = "+str(rTemp['Rule']['Predicates'][j]['Negated']).lower()+"\n"<line_sep>finalString<augadd>" type = 
\""+rTemp['Rule']['Predicates'][j]['Type']+"\"\n"<line_sep>finalString<augadd>" }\n\n"<block_end>finalString<augadd>"}\n\n"<line_sep>template.write(finalString)<block_end><block_end><block_end># Getting all associated resources for the Web ACL. resourcesResult=getAssociatedResources(client webAclId region log template isRegional)<line_sep>template.write(resourcesResult[1])<line_sep>finalString=""<line_sep>finalString<augadd>"resource \"aws_waf"+suffix+"web_acl\" \"web_acl\" {\n"<line_sep>finalString<augadd>' name = "'+webAclName+'"\n'<line_sep>finalString<augadd>' metric_name = "'+metricName+'"\n\n'<line_sep>finalString<augadd>' default_action {\n'<line_sep>finalString<augadd>' type = "'+defaultAction+'"\n'<line_sep>finalString<augadd>' }\n\n'<for_stmt>i range(len(response1['WebACL']['Rules']))<block_start>ruleType=response1['WebACL']['Rules'][i]['Type']<if_stmt>isRegional<block_start>finalString<augadd>" rule {\n"<block_end><else_stmt><block_start>finalString<augadd>" rules {\n"<block_end>finalString<augadd>" priority = "+str(response1['WebACL']['Rules'][i]['Priority'])+"\n"<line_sep>finalString<augadd>" type = \""+ruleType+"\"\n"<if_stmt>ruleType<eq>"GROUP"<block_start>finalString<augadd>" rule_id = \"${aws_waf"+suffix+"rule_group."+rules[response1['WebACL']['Rules'][i]['RuleId']]+".id}\"\n\n"<line_sep>finalString<augadd>" override_action {\n"<line_sep>finalString<augadd>" type = \""+response1['WebACL']['Rules'][i]['OverrideAction']['Type']+"\"\n"<block_end><elif_stmt>ruleType<eq>"REGULAR"<block_start>finalString<augadd>" rule_id = \"${aws_waf"+suffix+"rule."+rules[response1['WebACL']['Rules'][i]['RuleId']]+".id}\"\n\n"<line_sep>finalString<augadd>" action {\n"<line_sep>finalString<augadd>" type = \""+response1['WebACL']['Rules'][i]['Action']['Type']+"\"\n"<block_end><elif_stmt>ruleType<eq>"RATE_BASED"<block_start>finalString<augadd>" rule_id = \"${aws_waf"+suffix+"rate_based_rule."+rules[response1['WebACL']['Rules'][i]['RuleId']]+".id}\"\n\n"<line_sep>finalString<augadd>" action {\n"<line_sep>finalString<augadd>" type = \""+response1['WebACL']['Rules'][i]['Action']['Type']+"\"\n"<block_end>finalString<augadd>" }\n"<line_sep>finalString<augadd>" }\n\n"<block_end>finalString<augadd>"}\n\n"<line_sep># This means there are regional resources associated with the Web ACL. In case it's a Global WAF Web ACL, # and there is at least one CloudFront distribution associated with it, this was already covered in the # the corresponding CloudFront block while running the getAssociatedResources() function. <if_stmt>len(resourcesResult[0])<g>0<and>isRegional<block_start><for_stmt>z range(len(resourcesResult[0]))<block_start>finalString<augadd>"resource \"aws_wafregional_web_acl_association\" \"web_acl_association_"+str(z)+"\" {\n"<line_sep>finalString<augadd>" web_acl_id = \"${aws_wafregional_web_acl.web_acl.id}\"\n"<if_stmt>"alb_dns_name"<in>resourcesResult[0][z]<block_start>finalString<augadd>" resource_arn = \"${aws_lb.waferALB.arn}\"\n"# This means an ALB needs to be associated with the Web ACL <block_end><else_stmt># This means an API Gateway needs to be associated with the Web ACL <block_start>finalString<augadd>" resource_arn = \"arn:aws:apigateway:"+region+"::/restapis/${aws_api_gateway_rest_api.waferAPI.id}/stages/waferStage\"\n"<block_end>finalString<augadd>"}\n\n"<block_end><block_end># This is the real final part of the template file (the outputs). 
finalString<augadd>"output \"Web_ACL_Name\" {\n"<line_sep>finalString<augadd>" description = \"Please refer to this Web ACL\"\n"<line_sep>finalString<augadd>" value = \""+webAclName+"\"\n"<line_sep>finalString<augadd>"}\n\n"<for_stmt>z range(len(resourcesResult[0]))<block_start>finalString<augadd>"output \""+resourcesResult[0][z][0]+"\" {\n"<line_sep>finalString<augadd>" description = \""+resourcesResult[0][z][1]+"\"\n"<line_sep>tail=""<if_stmt>"api_gateway_invoke_url"<in>resourcesResult[0][z]<block_start>tail="/WAFER"# Adding the stage nane to the final URL. <block_end>finalString<augadd>" value = "+resourcesResult[0][z][2]+tail+"\n"<line_sep>finalString<augadd>"}\n\n"<block_end>template.write(finalString)<line_sep>log.write(function.getFormattedDateTime()+"End of Log.")<line_sep>print("All done.")<line_sep>log.close()<line_sep>template.close()<line_sep># Zipping files. <try_stmt><block_start><import_stmt>zlib<line_sep>compression=zipfile.ZIP_DEFLATED<block_end><except_stmt><block_start>compression=zipfile.ZIP_STORED<block_end>zf=zipfile.ZipFile(package mode="w")<try_stmt><block_start>zf.write(listLogTemplate[0] compress_type=compression)<block_end><except_stmt><block_start>print("Unable to add {} to the zip file!".format(listLogTemplate[0]))<block_end><try_stmt><block_start>zf.write(listLogTemplate[1] compress_type=compression)<block_end><except_stmt><block_start>print("Unable to add {} to the zip file!".format(listLogTemplate[1]))<block_end>zf.close()<line_sep>print("\nGenerated ZIP file: {}.".format(package))<block_end><def_stmt>crawlConditions botoClient log template suffix<block_start>''' This function crawls all conditions from the provided Boto3 object and returns them in a form of a conditions list and a template string. '''<line_sep>returnString=""<line_sep>conditionsDict={}<line_sep># Getting the String Match Conditions <try_stmt><block_start>test=botoClient.list_byte_match_sets()<block_end><except_stmt><block_start>function.abortMission(log template "list_byte_match_sets()")<block_end><for_stmt>k range(len(test['ByteMatchSets']))<block_start><try_stmt><block_start>condition=botoClient.get_byte_match_set(ByteMatchSetId=test['ByteMatchSets'][k]['ByteMatchSetId'])<block_end><except_stmt><block_start>function.abortMission(log template "get_byte_match_set()")<block_end>namePrefix="byte_match_set_"+str(k)<line_sep>returnString<augadd>"resource \"aws_waf"+suffix+"byte_match_set\" \""+namePrefix+"\" {\n"<line_sep>returnString<augadd>" name = \""+condition['ByteMatchSet']['Name']+"\"\n\n"<for_stmt>l range(len(condition['ByteMatchSet']['ByteMatchTuples']))<block_start>returnString<augadd>" byte_match_tuples {\n"<line_sep>returnString<augadd>" text_transformation = \""+condition['ByteMatchSet']['ByteMatchTuples'][l]['TextTransformation']+"\"\n"<line_sep>returnString<augadd>" target_string = \""+str(condition['ByteMatchSet']['ByteMatchTuples'][l]['TargetString'])[2:-1]+"\"\n"<line_sep>returnString<augadd>" positional_constraint = \""+condition['ByteMatchSet']['ByteMatchTuples'][l]['PositionalConstraint']+"\"\n\n"<line_sep>returnString<augadd>" field_to_match {\n"<line_sep>returnString<augadd>" type = \""+condition['ByteMatchSet']['ByteMatchTuples'][l]['FieldToMatch']['Type']+"\"\n"<if_stmt>len(condition['ByteMatchSet']['ByteMatchTuples'][l]['FieldToMatch'])<g>1<block_start>returnString<augadd>" data = \""+condition['ByteMatchSet']['ByteMatchTuples'][l]['FieldToMatch']['Data']+"\"\n"<block_end>returnString<augadd>" }\n"<line_sep>returnString<augadd>" 
}"<if_stmt>l<ne>len(condition['ByteMatchSet']['ByteMatchTuples'])-1<block_start>returnString<augadd>"\n\n"<block_end><else_stmt><block_start>returnString<augadd>"\n"<block_end><block_end>conditionsDict[test['ByteMatchSets'][k]['ByteMatchSetId']]=namePrefix<line_sep>returnString<augadd>"}\n\n"<block_end>returnString<augadd>"\n\n"<line_sep># Getting the Regex Pattern Sets <try_stmt><block_start>test=botoClient.list_regex_pattern_sets()<block_end><except_stmt><block_start>function.abortMission(log template "list_regex_pattern_sets()")<block_end><for_stmt>k range(len(test['RegexPatternSets']))<block_start><try_stmt><block_start>condition=botoClient.get_regex_pattern_set(RegexPatternSetId=test['RegexPatternSets'][k]['RegexPatternSetId'])<block_end><except_stmt><block_start>function.abortMission(log template "get_regex_pattern_set()")<block_end>namePrefix="regex_pattern_set_"+str(k)<line_sep>returnString<augadd>"resource \"aws_waf"+suffix+"regex_pattern_set\" \""+namePrefix+"\" {\n"<line_sep>returnString<augadd>" name = \""+condition['RegexPatternSet']['Name']+"\"\n"<line_sep>returnString<augadd>" regex_pattern_strings = [ "<for_stmt>l range(len(condition['RegexPatternSet']['RegexPatternStrings']))# The following loop is to insert another "\" for all Regex pattern sets that have "\", as Terraform may not originally understand them. <block_start>cadTemp=""<for_stmt>m range(len(condition['RegexPatternSet']['RegexPatternStrings'][l]))<block_start><if_stmt>condition['RegexPatternSet']['RegexPatternStrings'][l][m]<eq>"\\"<block_start>cadTemp<augadd>"\\\\"+condition['RegexPatternSet']['RegexPatternStrings'][l][m+1:]<line_sep>m<augadd>1<block_end><block_end><if_stmt>len(cadTemp)<eq>0<block_start>cadTemp=condition['RegexPatternSet']['RegexPatternStrings'][l]<block_end>returnString<augadd>"\""+cadTemp+"\""<if_stmt>l<ne>len(condition['RegexPatternSet']['RegexPatternStrings'])-1<block_start>returnString<augadd>", "<block_end><block_end>returnString<augadd>" ]\n"<line_sep>conditionsDict[test['RegexPatternSets'][k]['RegexPatternSetId']]=namePrefix<line_sep>returnString<augadd>"}\n\n"<block_end># Getting the Regex Match Conditions <try_stmt><block_start>test=botoClient.list_regex_match_sets()<block_end><except_stmt><block_start>function.abortMission(log template "list_regex_match_sets()")<block_end><for_stmt>k range(len(test['RegexMatchSets']))<block_start><try_stmt><block_start>condition=botoClient.get_regex_match_set(RegexMatchSetId=test['RegexMatchSets'][k]['RegexMatchSetId'])<block_end><except_stmt><block_start>function.abortMission(log template "get_regex_match_set()")<block_end>namePrefix="regex_match_set_"+str(k)<line_sep>returnString<augadd>"resource \"aws_waf"+suffix+"regex_match_set\" \""+namePrefix+"\" {\n"<line_sep>returnString<augadd>" name = \""+condition['RegexMatchSet']['Name']+"\"\n\n"<for_stmt>l range(len(condition['RegexMatchSet']['RegexMatchTuples']))<block_start>returnString<augadd>" regex_match_tuple {\n"<line_sep>returnString<augadd>" field_to_match {\n"<line_sep>returnString<augadd>" type = \""+condition['RegexMatchSet']['RegexMatchTuples'][l]['FieldToMatch']['Type']+"\"\n"<if_stmt>len(condition['RegexMatchSet']['RegexMatchTuples'][l]['FieldToMatch'])<g>1<block_start>returnString<augadd>" data = \""+condition['RegexMatchSet']['RegexMatchTuples'][l]['FieldToMatch']['Data']+"\"\n"<block_end>returnString<augadd>" }\n\n"<line_sep>returnString<augadd>" text_transformation = \""+condition['RegexMatchSet']['RegexMatchTuples'][l]['TextTransformation']+"\"\n"<line_sep>returnString<augadd>" 
regex_pattern_set_id = \"${aws_waf"+suffix+"regex_pattern_set."+conditionsDict[condition['RegexMatchSet']['RegexMatchTuples'][l]['RegexPatternSetId']]+".id}\"\n"<line_sep>returnString<augadd>" }"<if_stmt>l<ne>len(condition['RegexMatchSet']['RegexMatchTuples'])-1<block_start>returnString<augadd>"\n\n"<block_end><else_stmt><block_start>returnString<augadd>"\n"<block_end><block_end>conditionsDict[test['RegexMatchSets'][k]['RegexMatchSetId']]=namePrefix<line_sep>returnString<augadd>"}\n\n"<block_end># Getting the SQL Injection Conditions <try_stmt><block_start>test=botoClient.list_sql_injection_match_sets()<block_end><except_stmt><block_start>function.abortMission(log template "list_sql_injection_match_sets()")<block_end><for_stmt>k range(len(test['SqlInjectionMatchSets']))<block_start><try_stmt><block_start>condition=botoClient.get_sql_injection_match_set(SqlInjectionMatchSetId=test['SqlInjectionMatchSets'][k]['SqlInjectionMatchSetId'])<block_end><except_stmt><block_start>function.abortMission(log template "get_sql_injection_match_set()")<block_end>namePrefix="sql_injection_match_set_"+str(k)<line_sep>returnString<augadd>"resource \"aws_waf"+suffix+"sql_injection_match_set\" \""+namePrefix+"\" {\n"<line_sep>returnString<augadd>" name = \""+condition['SqlInjectionMatchSet']['Name']+"\"\n\n"<for_stmt>l range(len(condition['SqlInjectionMatchSet']['SqlInjectionMatchTuples']))<block_start><if_stmt>len(suffix)<eq>1# This means it's global WAF (suffix == '_'). Terraaform expects 'tuples' (plural). <block_start>returnString<augadd>" sql_injection_match_tuples {\n"<block_end><else_stmt><block_start>returnString<augadd>" sql_injection_match_tuple {\n"<block_end>returnString<augadd>" text_transformation = \""+condition['SqlInjectionMatchSet']['SqlInjectionMatchTuples'][l]['TextTransformation']+"\"\n"<line_sep>returnString<augadd>" field_to_match {\n"<line_sep>returnString<augadd>" type = \""+condition['SqlInjectionMatchSet']['SqlInjectionMatchTuples'][l]['FieldToMatch']['Type']+"\"\n"<if_stmt>len(condition['SqlInjectionMatchSet']['SqlInjectionMatchTuples'][l]['FieldToMatch'])<g>1<block_start>returnString<augadd>" data = \""+condition['SqlInjectionMatchSet']['SqlInjectionMatchTuples'][l]['FieldToMatch']['Data']+"\"\n"<block_end>returnString<augadd>" }\n"<line_sep>returnString<augadd>" }"<if_stmt>l<ne>len(condition['SqlInjectionMatchSet']['SqlInjectionMatchTuples'])-1<block_start>returnString<augadd>"\n\n"<block_end><else_stmt><block_start>returnString<augadd>"\n"<block_end><block_end>conditionsDict[test['SqlInjectionMatchSets'][k]['SqlInjectionMatchSetId']]=namePrefix<line_sep>returnString<augadd>"}"<block_end>returnString<augadd>"\n\n"<line_sep># Getting the Size Constraint Set Conditions <try_stmt><block_start>test=botoClient.list_size_constraint_sets()<block_end><except_stmt><block_start>function.abortMission(log template "list_size_constraint_sets()")<block_end><for_stmt>k range(len(test['SizeConstraintSets']))<block_start><try_stmt><block_start>condition=botoClient.get_size_constraint_set(SizeConstraintSetId=test['SizeConstraintSets'][k]['SizeConstraintSetId'])<block_end><except_stmt><block_start>function.abortMission(log template "get_size_constraint_set())")<block_end>namePrefix="size_constraint_set_"+str(k)<line_sep>returnString<augadd>"resource \"aws_waf"+suffix+"size_constraint_set\" \""+namePrefix+"\" {\n"<line_sep>returnString<augadd>" name = \""+condition['SizeConstraintSet']['Name']+"\"\n\n"<for_stmt>l 
range(len(condition['SizeConstraintSet']['SizeConstraints']))<block_start>returnString<augadd>" size_constraints {\n"<line_sep>returnString<augadd>" text_transformation = \""+condition['SizeConstraintSet']['SizeConstraints'][l]['TextTransformation']+"\"\n"<line_sep>returnString<augadd>" comparison_operator = \""+condition['SizeConstraintSet']['SizeConstraints'][l]['ComparisonOperator']+"\"\n"<line_sep>returnString<augadd>" size = \""+str(condition['SizeConstraintSet']['SizeConstraints'][l]['Size'])+"\"\n\n"<line_sep>returnString<augadd>" field_to_match {\n"<line_sep>returnString<augadd>" type = \""+condition['SizeConstraintSet']['SizeConstraints'][l]['FieldToMatch']['Type']+"\"\n"<if_stmt>len(condition['SizeConstraintSet']['SizeConstraints'][l]['FieldToMatch'])<g>1<block_start>returnString<augadd>" data = \""+condition['SizeConstraintSet']['SizeConstraints'][l]['FieldToMatch']['Data']+"\"\n"<block_end>returnString<augadd>" }\n"<line_sep>returnString<augadd>" }"<if_stmt>l<ne>len(condition['SizeConstraintSet']['SizeConstraints'])-1<block_start>returnString<augadd>"\n\n"<block_end><else_stmt><block_start>returnString<augadd>"\n"<block_end><block_end>conditionsDict[test['SizeConstraintSets'][k]['SizeConstraintSetId']]=namePrefix<line_sep>returnString<augadd>"}"<block_end>returnString<augadd>"\n\n"<line_sep># Getting the IP Set Conditions <try_stmt><block_start>test=botoClient.list_ip_sets()<block_end><except_stmt><block_start>function.abortMission(log template "list_ip_sets()")<block_end><for_stmt>k range(len(test['IPSets']))<block_start><try_stmt><block_start>condition=botoClient.get_ip_set(IPSetId=test['IPSets'][k]['IPSetId'])<block_end><except_stmt><block_start>function.abortMission(log template "get_ip_set()")<block_end>namePrefix="ipset_"+str(k)<line_sep>returnString<augadd>"resource \"aws_waf"+suffix+"ipset\" \""+namePrefix+"\" {\n"<line_sep>returnString<augadd>" name = \""+condition['IPSet']['Name']+"\"\n\n"<for_stmt>l range(len(condition['IPSet']['IPSetDescriptors']))<block_start><if_stmt>len(suffix)<eq>1# This means it's global WAF (suffix == '_'). Terraaform expects 'descriptors' (plural). 
<block_start>returnString<augadd>" ip_set_descriptors {\n"<block_end><else_stmt><block_start>returnString<augadd>" ip_set_descriptor {\n"<block_end>returnString<augadd>" type = \""+condition['IPSet']['IPSetDescriptors'][l]['Type']+"\"\n"<line_sep>returnString<augadd>" value = \""+condition['IPSet']['IPSetDescriptors'][l]['Value']+"\"\n"<line_sep>returnString<augadd>" }"<if_stmt>l<ne>len(condition['IPSet']['IPSetDescriptors'])-1<block_start>returnString<augadd>"\n\n"<block_end><else_stmt><block_start>returnString<augadd>"\n"<block_end><block_end>conditionsDict[test['IPSets'][k]['IPSetId']]=namePrefix<line_sep>returnString<augadd>"}\n\n"<block_end># Getting the Geo Conditions <try_stmt><block_start>test=botoClient.list_geo_match_sets()<block_end><except_stmt><block_start>function.abortMission(log template "list_geo_match_sets()")<block_end><for_stmt>k range(len(test['GeoMatchSets']))<block_start><try_stmt><block_start>condition=botoClient.get_geo_match_set(GeoMatchSetId=test['GeoMatchSets'][k]['GeoMatchSetId'])<block_end><except_stmt><block_start>function.abortMission(log template "get_geo_match_set()")<block_end>namePrefix="geo_match_set_"+str(k)<line_sep>returnString<augadd>"resource \"aws_waf"+suffix+"geo_match_set\" \""+namePrefix+"\" {\n"<line_sep>returnString<augadd>" name = \""+condition['GeoMatchSet']['Name']+"\"\n\n"<for_stmt>l range(len(condition['GeoMatchSet']['GeoMatchConstraints']))<block_start>returnString<augadd>" geo_match_constraint {\n"<line_sep>returnString<augadd>" type = \""+condition['GeoMatchSet']['GeoMatchConstraints'][l]['Type']+"\"\n"<line_sep>returnString<augadd>" value = \""+condition['GeoMatchSet']['GeoMatchConstraints'][l]['Value']+"\"\n"<line_sep>returnString<augadd>" }"<if_stmt>l<ne>len(condition['GeoMatchSet']['GeoMatchConstraints'])-1<block_start>returnString<augadd>"\n\n"<block_end><else_stmt><block_start>returnString<augadd>"\n"<block_end><block_end>conditionsDict[test['GeoMatchSets'][k]['GeoMatchSetId']]=namePrefix<line_sep>returnString<augadd>"}\n\n"<block_end># Getting the XSS Conditions <try_stmt><block_start>test=botoClient.list_xss_match_sets()<block_end><except_stmt><block_start>function.abortMission(log template "list_xss_match_sets()")<block_end><for_stmt>k range(len(test['XssMatchSets']))<block_start><try_stmt><block_start>condition=botoClient.get_xss_match_set(XssMatchSetId=test['XssMatchSets'][k]['XssMatchSetId'])<block_end><except_stmt><block_start>function.abortMission(log template "get_xss_match_set()")<block_end>namePrefix="xss_match_set_"+str(k)<line_sep>returnString<augadd>"resource \"aws_waf"+suffix+"xss_match_set\" \""+namePrefix+"\" {\n"<line_sep>returnString<augadd>" name = \""+condition['XssMatchSet']['Name']+"\"\n\n"<for_stmt>l range(len(condition['XssMatchSet']['XssMatchTuples']))<block_start><if_stmt>len(suffix)<eq>1# This means it's global WAF (suffix == '_'). Terraform expects 'tuples' (plural). 
<block_start>returnString<augadd>" xss_match_tuples {\n"<block_end><else_stmt><block_start>returnString<augadd>" xss_match_tuple {\n"<block_end>returnString<augadd>" text_transformation = \""+condition['XssMatchSet']['XssMatchTuples'][l]['TextTransformation']+"\"\n"<line_sep>returnString<augadd>" field_to_match {\n"<line_sep>returnString<augadd>" type = \""+condition['XssMatchSet']['XssMatchTuples'][l]['FieldToMatch']['Type']+"\"\n"<if_stmt>len(condition['XssMatchSet']['XssMatchTuples'][l]['FieldToMatch'])<g>1<block_start>returnString<augadd>" data = \""+condition['XssMatchSet']['XssMatchTuples'][l]['FieldToMatch']['Data']+"\"\n"<block_end>returnString<augadd>" }\n"<line_sep>returnString<augadd>" }"<if_stmt>l<ne>len(condition['XssMatchSet']['XssMatchTuples'])-1<block_start>returnString<augadd>"\n\n"<block_end><else_stmt><block_start>returnString<augadd>"\n"<block_end><block_end>conditionsDict[test['XssMatchSets'][k]['XssMatchSetId']]=namePrefix<line_sep>returnString<augadd>"}"<block_end><return>([conditionsDict returnString])<block_end><def_stmt>getAssociatedResources wafClient AclId region log template isRegional<block_start>''' Looks into the customer's WebACL and looks for associated resources. Returns a list of resources' names in case any is found. '''<line_sep>resourceString=""<line_sep>resourcesList=[]<line_sep># Checking if the Web ACL is associated with any resource. If the resulting array las a length greater than zero, # it means there is at least one resource of that type associated with the Web ACL. # Looking for ALBs first. If at least one ALB is associated, we need to create all resources to support it: # VPC, Subnet, Route Table, Internet Gateway, Target Group and Security Group. <if_stmt>isRegional<block_start><try_stmt><block_start>rAlb=wafClient.list_resources_for_web_acl(WebACLId=AclId ResourceType="APPLICATION_LOAD_BALANCER")<block_end><except_stmt><block_start>function.abort(log template "list_resources_for_web_acl(ALB)")<block_end><if_stmt>len(rAlb['ResourceArns'])<g>0<block_start>log.write(function.getFormattedDateTime()+"Found at least one ALB associated with this Web ACL. Creating equivalent resource...\n")<line_sep>print("Found at least one ALB associated with this Web ACL. 
Creating equivalent resource...")<line_sep>resourceString<augadd>"resource \"aws_vpc\" \"waferVPC\" {\n"<line_sep>resourceString<augadd>" cidr_block = \"10.10.0.0/16\"\n\n"<line_sep>resourceString<augadd>" tags = {\n"<line_sep>resourceString<augadd>" Name = \"WAFER\"\n"<line_sep>resourceString<augadd>" }\n"<line_sep>resourceString<augadd>"}\n\n"<line_sep>resourceString<augadd>"resource \"aws_subnet\" \"waferSubnet1\" {\n"<line_sep>resourceString<augadd>" vpc_id = \"${aws_vpc.waferVPC.id}\"\n"<line_sep>resourceString<augadd>" availability_zone = \""+region+"a\"\n"<line_sep>resourceString<augadd>" cidr_block = \"10.10.1.0/24\"\n\n"<line_sep>resourceString<augadd>" tags = {\n"<line_sep>resourceString<augadd>" Name = \"WAFER\"\n"<line_sep>resourceString<augadd>" }\n"<line_sep>resourceString<augadd>"}\n\n"<line_sep>resourceString<augadd>"resource \"aws_subnet\" \"waferSubnet2\" {\n"<line_sep>resourceString<augadd>" vpc_id = \"${aws_vpc.waferVPC.id}\"\n"<line_sep>resourceString<augadd>" availability_zone = \""+region+"b\"\n"<line_sep>resourceString<augadd>" cidr_block = \"10.10.2.0/24\"\n\n"<line_sep>resourceString<augadd>" tags = {\n"<line_sep>resourceString<augadd>" Name = \"WAFER\"\n"<line_sep>resourceString<augadd>" }\n"<line_sep>resourceString<augadd>"}\n\n"<line_sep>resourceString<augadd>"resource \"aws_internet_gateway\" \"waferIGW\" {\n"<line_sep>resourceString<augadd>" vpc_id = \"${aws_vpc.waferVPC.id}\"\n\n"<line_sep>resourceString<augadd>" tags = {\n"<line_sep>resourceString<augadd>" Name = \"WAFER\"\n"<line_sep>resourceString<augadd>" }\n"<line_sep>resourceString<augadd>"}\n\n"<line_sep>resourceString<augadd>"resource \"aws_route_table\" \"waferRT\" {\n"<line_sep>resourceString<augadd>" vpc_id = \"${aws_vpc.waferVPC.id}\"\n\n"<line_sep>resourceString<augadd>" route {\n"<line_sep>resourceString<augadd>" cidr_block = \"0.0.0.0/0\"\n"<line_sep>resourceString<augadd>" gateway_id = \"${aws_internet_gateway.waferIGW.id}\"\n"<line_sep>resourceString<augadd>" }\n\n"<line_sep>resourceString<augadd>" tags = {\n"<line_sep>resourceString<augadd>" Name = \"WAFER\"\n"<line_sep>resourceString<augadd>" }\n"<line_sep>resourceString<augadd>"}\n\n"<line_sep>resourceString<augadd>"resource \"aws_route_table_association\" \"waferRTAssociation1\" {\n"<line_sep>resourceString<augadd>" subnet_id = \"${aws_subnet.waferSubnet1.id}\"\n"<line_sep>resourceString<augadd>" route_table_id = \"${aws_route_table.waferRT.id}\"\n"<line_sep>resourceString<augadd>"}\n\n"<line_sep>resourceString<augadd>"resource \"aws_route_table_association\" \"waferRTAssociation2\" {\n"<line_sep>resourceString<augadd>" subnet_id = \"${aws_subnet.waferSubnet2.id}\"\n"<line_sep>resourceString<augadd>" route_table_id = \"${aws_route_table.waferRT.id}\"\n"<line_sep>resourceString<augadd>"}\n\n"<line_sep>resourceString<augadd>"resource \"aws_security_group\" \"waferALBSG\" {\n"<line_sep>resourceString<augadd>" name = \"waferALBSG\"\n"<line_sep>resourceString<augadd>" description = \"Allow HTTP inbound traffic\"\n"<line_sep>resourceString<augadd>" vpc_id = \"${aws_vpc.waferVPC.id}\"\n"<line_sep>resourceString<augadd>" ingress {\n"<line_sep>resourceString<augadd>" from_port = 80\n"<line_sep>resourceString<augadd>" to_port = 80\n"<line_sep>resourceString<augadd>" protocol = \"tcp\"\n"<line_sep>resourceString<augadd>" cidr_blocks = [ \"0.0.0.0/0\" ]\n"<line_sep>resourceString<augadd>" }\n\n"<line_sep>resourceString<augadd>" egress {\n"<line_sep>resourceString<augadd>" from_port = 0\n"<line_sep>resourceString<augadd>" to_port = 
0\n"<line_sep>resourceString<augadd>" protocol = \"-1\"\n"<line_sep>resourceString<augadd>" cidr_blocks = [ \"0.0.0.0/0\" ]\n"<line_sep>resourceString<augadd>" }\n\n"<line_sep>resourceString<augadd>" tags = {\n"<line_sep>resourceString<augadd>" Name = \"WAFER\"\n"<line_sep>resourceString<augadd>" }\n"<line_sep>resourceString<augadd>"}\n\n"<line_sep>resourceString<augadd>"resource \"aws_lb\" \"waferALB\" {\n"<line_sep>resourceString<augadd>" name = \"waferALB\"\n"<line_sep>resourceString<augadd>" internal = false\n"<line_sep>resourceString<augadd>" load_balancer_type = \"application\"\n"<line_sep>resourceString<augadd>" security_groups = [\"${aws_security_group.waferALBSG.id}\"]\n"<line_sep>resourceString<augadd>" subnets = [\"${aws_subnet.waferSubnet1.id}\", \"${aws_subnet.waferSubnet2.id}\"]\n\n"<line_sep>resourceString<augadd>" enable_cross_zone_load_balancing = true\n\n"<line_sep>resourceString<augadd>" tags = {\n"<line_sep>resourceString<augadd>" Name = \"WAFER\"\n"<line_sep>resourceString<augadd>" }\n"<line_sep>resourceString<augadd>"}\n\n"<line_sep>resourceString<augadd>"resource \"aws_lb_target_group\" \"waferALBTG\" {\n"<line_sep>resourceString<augadd>" name = \"waferALBTG\"\n"<line_sep>resourceString<augadd>" port = 80\n"<line_sep>resourceString<augadd>" protocol = \"HTTP\"\n"<line_sep>resourceString<augadd>" vpc_id = \"${aws_vpc.waferVPC.id}\"\n"<line_sep>resourceString<augadd>"}\n\n"<line_sep>resourceString<augadd>"resource \"aws_lb_listener\" \"waferALBListener\" {\n"<line_sep>resourceString<augadd>" load_balancer_arn = \"${aws_lb.waferALB.arn}\"\n"<line_sep>resourceString<augadd>" port = \"80\"\n"<line_sep>resourceString<augadd>" protocol = \"HTTP\"\n\n"<line_sep>resourceString<augadd>" default_action {\n"<line_sep>resourceString<augadd>" type = \"forward\"\n"<line_sep>resourceString<augadd>" target_group_arn = \"${aws_lb_target_group.waferALBTG.arn}\"\n"<line_sep>resourceString<augadd>" }\n"<line_sep>resourceString<augadd>"}\n\n"<line_sep>listTemp=[]<line_sep>listTemp.append("ALB_DNS_Name")<line_sep>listTemp.append("ALB DNS Name")<line_sep>listTemp.append("aws_lb.waferALB.dns_name")<line_sep>resourcesList.append(listTemp)<block_end># Let's check also if there's an API Gateway endpoint associated with the Web ACL. <try_stmt><block_start>rApi=wafClient.list_resources_for_web_acl(WebACLId=AclId ResourceType="API_GATEWAY")<block_end><except_stmt><block_start>function.abort(log template "list_resources_for_web_acl(API)")<block_end><if_stmt>len(rApi['ResourceArns'])<g>0<block_start>log.write(function.getFormattedDateTime()+"Found at least one API Gateway endpoint associated with this Web ACL. Creating equivalent resource...\n")<line_sep>log.write(function.getFormattedDateTime()+"Do not forget to change the API Gateway Integration method type to something different than 'MOCK'!\n")<line_sep>print("Found at least one API Gateway endpoint associated with this Web ACL. 
Creating equivalent resource...")<line_sep>resourceString<augadd>"resource \"aws_api_gateway_rest_api\" \"waferAPI\" {\n"<line_sep>resourceString<augadd>" name = \"waferAPI\"\n"<line_sep>resourceString<augadd>" description = \"WAFER API\"\n"<line_sep>resourceString<augadd>"}\n\n"<line_sep>resourceString<augadd>"resource \"aws_api_gateway_resource\" \"waferAPIResource\" {\n"<line_sep>resourceString<augadd>" rest_api_id = \"${aws_api_gateway_rest_api.waferAPI.id}\"\n"<line_sep>resourceString<augadd>" parent_id = \"${aws_api_gateway_rest_api.waferAPI.root_resource_id}\"\n"<line_sep>resourceString<augadd>" path_part = \"WAFER\"\n"<line_sep>resourceString<augadd>"}\n\n"<line_sep>resourceString<augadd>"resource \"aws_api_gateway_method\" \"waferMethod\" {\n"<line_sep>resourceString<augadd>" rest_api_id = \"${aws_api_gateway_rest_api.waferAPI.id}\"\n"<line_sep>resourceString<augadd>" resource_id = \"${aws_api_gateway_resource.waferAPIResource.id}\"\n"<line_sep>resourceString<augadd>" http_method = \"GET\"\n"<line_sep>resourceString<augadd>" authorization = \"NONE\"\n"<line_sep>resourceString<augadd>"}\n\n"<line_sep>resourceString<augadd>"resource \"aws_api_gateway_deployment\" \"waferDeployment\" {\n"<line_sep>resourceString<augadd>" depends_on = [\"aws_api_gateway_integration.waferIntegration\"]\n"<line_sep>resourceString<augadd>" rest_api_id = \"${aws_api_gateway_rest_api.waferAPI.id}\"\n"<line_sep>resourceString<augadd>" stage_name = \"test\"\n"<line_sep>resourceString<augadd>"}\n\n"<line_sep>resourceString<augadd>"resource \"aws_api_gateway_stage\" \"waferStage\" {\n"<line_sep>resourceString<augadd>" stage_name = \"waferStage\"\n"<line_sep>resourceString<augadd>" rest_api_id = \"${aws_api_gateway_rest_api.waferAPI.id}\"\n"<line_sep>resourceString<augadd>" deployment_id = \"${aws_api_gateway_deployment.waferDeployment.id}\"\n"<line_sep>resourceString<augadd>"}\n\n"<line_sep>resourceString<augadd>"resource \"aws_api_gateway_integration\" \"waferIntegration\" {\n"<line_sep>resourceString<augadd>" rest_api_id = \"${aws_api_gateway_rest_api.waferAPI.id}\"\n"<line_sep>resourceString<augadd>" resource_id = \"${aws_api_gateway_resource.waferAPIResource.id}\"\n"<line_sep>resourceString<augadd>" http_method = \"${aws_api_gateway_method.waferMethod.http_method}\"\n"<line_sep>resourceString<augadd>" integration_http_method = \"GET\"\n"<line_sep>resourceString<augadd>" type = \"MOCK\"\n"<line_sep>resourceString<augadd>"}\n\n"<line_sep>listTemp=[]<line_sep>listTemp.append("API_Gateway_Invoke_URL")<line_sep>listTemp.append("API Gateway Invoke URL")<line_sep>listTemp.append("aws_api_gateway_stage.waferStage.invoke_url")<line_sep>resourcesList.append(listTemp)<block_end><block_end><else_stmt># It's a global WAF, so, we can check if there's a CloudFront distribution associated with the Web ACL. <block_start><try_stmt><block_start>cloudFront=boto3.client('cloudfront')<line_sep>rCfn=cloudFront.list_distributions_by_web_acl_id(WebACLId=AclId)<block_end><except_stmt><block_start>function.abort(log template "list_distributions_by_web_acl_id(CloudFront)")<block_end><if_stmt>rCfn['DistributionList']['Quantity']<g>0<block_start>log.write(function.getFormattedDateTime()+"Found at least one CloudFront distribution associated with this Web ACL. Creating equivalent resource...\n")<line_sep>print("Found at least one CloudFront distribution associated with this Web ACL. Creating equivalent resource...")<line_sep># We need to create an ALB first and then use it as the origin for the CloudFront distribution. 
resourceString<augadd>"resource \"aws_vpc\" \"waferVPC\" {\n"<line_sep>resourceString<augadd>" cidr_block = \"10.10.0.0/16\"\n\n"<line_sep>resourceString<augadd>" tags = {\n"<line_sep>resourceString<augadd>" Name = \"WAFER\"\n"<line_sep>resourceString<augadd>" }\n"<line_sep>resourceString<augadd>"}\n\n"<line_sep>resourceString<augadd>"resource \"aws_subnet\" \"waferSubnet1\" {\n"<line_sep>resourceString<augadd>" vpc_id = \"${aws_vpc.waferVPC.id}\"\n"<line_sep>resourceString<augadd>" availability_zone = \"us-east-1a\"\n"<line_sep>resourceString<augadd>" cidr_block = \"10.10.1.0/24\"\n\n"<line_sep>resourceString<augadd>" tags = {\n"<line_sep>resourceString<augadd>" Name = \"WAFER\"\n"<line_sep>resourceString<augadd>" }\n"<line_sep>resourceString<augadd>"}\n\n"<line_sep>resourceString<augadd>"resource \"aws_subnet\" \"waferSubnet2\" {\n"<line_sep>resourceString<augadd>" vpc_id = \"${aws_vpc.waferVPC.id}\"\n"<line_sep>resourceString<augadd>" availability_zone = \"us-east-1b\"\n"<line_sep>resourceString<augadd>" cidr_block = \"10.10.2.0/24\"\n\n"<line_sep>resourceString<augadd>" tags = {\n"<line_sep>resourceString<augadd>" Name = \"WAFER\"\n"<line_sep>resourceString<augadd>" }\n"<line_sep>resourceString<augadd>"}\n\n"<line_sep>resourceString<augadd>"resource \"aws_internet_gateway\" \"waferIGW\" {\n"<line_sep>resourceString<augadd>" vpc_id = \"${aws_vpc.waferVPC.id}\"\n\n"<line_sep>resourceString<augadd>" tags = {\n"<line_sep>resourceString<augadd>" Name = \"WAFER\"\n"<line_sep>resourceString<augadd>" }\n"<line_sep>resourceString<augadd>"}\n\n"<line_sep>resourceString<augadd>"resource \"aws_route_table\" \"waferRT\" {\n"<line_sep>resourceString<augadd>" vpc_id = \"${aws_vpc.waferVPC.id}\"\n\n"<line_sep>resourceString<augadd>" route {\n"<line_sep>resourceString<augadd>" cidr_block = \"0.0.0.0/0\"\n"<line_sep>resourceString<augadd>" gateway_id = \"${aws_internet_gateway.waferIGW.id}\"\n"<line_sep>resourceString<augadd>" }\n\n"<line_sep>resourceString<augadd>" tags = {\n"<line_sep>resourceString<augadd>" Name = \"WAFER\"\n"<line_sep>resourceString<augadd>" }\n"<line_sep>resourceString<augadd>"}\n\n"<line_sep>resourceString<augadd>"resource \"aws_route_table_association\" \"waferRTAssociation1\" {\n"<line_sep>resourceString<augadd>" subnet_id = \"${aws_subnet.waferSubnet1.id}\"\n"<line_sep>resourceString<augadd>" route_table_id = \"${aws_route_table.waferRT.id}\"\n"<line_sep>resourceString<augadd>"}\n\n"<line_sep>resourceString<augadd>"resource \"aws_route_table_association\" \"waferRTAssociation2\" {\n"<line_sep>resourceString<augadd>" subnet_id = \"${aws_subnet.waferSubnet2.id}\"\n"<line_sep>resourceString<augadd>" route_table_id = \"${aws_route_table.waferRT.id}\"\n"<line_sep>resourceString<augadd>"}\n\n"<line_sep>resourceString<augadd>"resource \"aws_security_group\" \"waferALBSG\" {\n"<line_sep>resourceString<augadd>" name = \"waferALBSG\"\n"<line_sep>resourceString<augadd>" description = \"Allow HTTP inbound traffic\"\n"<line_sep>resourceString<augadd>" vpc_id = \"${aws_vpc.waferVPC.id}\"\n"<line_sep>resourceString<augadd>" ingress {\n"<line_sep>resourceString<augadd>" from_port = 80\n"<line_sep>resourceString<augadd>" to_port = 80\n"<line_sep>resourceString<augadd>" protocol = \"tcp\"\n"<line_sep>resourceString<augadd>" cidr_blocks = [ \"0.0.0.0/0\" ]\n"<line_sep>resourceString<augadd>" }\n\n"<line_sep>resourceString<augadd>" egress {\n"<line_sep>resourceString<augadd>" from_port = 0\n"<line_sep>resourceString<augadd>" to_port = 0\n"<line_sep>resourceString<augadd>" protocol = 
\"-1\"\n"<line_sep>resourceString<augadd>" cidr_blocks = [ \"0.0.0.0/0\" ]\n"<line_sep>resourceString<augadd>" }\n\n"<line_sep>resourceString<augadd>" tags = {\n"<line_sep>resourceString<augadd>" Name = \"WAFER\"\n"<line_sep>resourceString<augadd>" }\n"<line_sep>resourceString<augadd>"}\n\n"<line_sep>resourceString<augadd>"resource \"aws_lb\" \"waferALB\" {\n"<line_sep>resourceString<augadd>" name = \"waferALB\"\n"<line_sep>resourceString<augadd>" internal = false\n"<line_sep>resourceString<augadd>" load_balancer_type = \"application\"\n"<line_sep>resourceString<augadd>" security_groups = [\"${aws_security_group.waferALBSG.id}\"]\n"<line_sep>resourceString<augadd>" subnets = [\"${aws_subnet.waferSubnet1.id}\", \"${aws_subnet.waferSubnet2.id}\"]\n\n"<line_sep>resourceString<augadd>" enable_cross_zone_load_balancing = true\n\n"<line_sep>resourceString<augadd>" tags = {\n"<line_sep>resourceString<augadd>" Name = \"WAFER\"\n"<line_sep>resourceString<augadd>" }\n"<line_sep>resourceString<augadd>"}\n\n"<line_sep>resourceString<augadd>"resource \"aws_lb_target_group\" \"waferALBTG\" {\n"<line_sep>resourceString<augadd>" name = \"waferALBTG\"\n"<line_sep>resourceString<augadd>" port = 80\n"<line_sep>resourceString<augadd>" protocol = \"HTTP\"\n"<line_sep>resourceString<augadd>" vpc_id = \"${aws_vpc.waferVPC.id}\"\n"<line_sep>resourceString<augadd>"}\n\n"<line_sep>resourceString<augadd>"resource \"aws_lb_listener\" \"waferALBListener\" {\n"<line_sep>resourceString<augadd>" load_balancer_arn = \"${aws_lb.waferALB.arn}\"\n"<line_sep>resourceString<augadd>" port = \"80\"\n"<line_sep>resourceString<augadd>" protocol = \"HTTP\"\n\n"<line_sep>resourceString<augadd>" default_action {\n"<line_sep>resourceString<augadd>" type = \"forward\"\n"<line_sep>resourceString<augadd>" target_group_arn = \"${aws_lb_target_group.waferALBTG.arn}\"\n"<line_sep>resourceString<augadd>" }\n"<line_sep>resourceString<augadd>"}\n\n"<line_sep>listTemp=[]<line_sep>listTemp.append("ALB_DNS_Name")<line_sep>listTemp.append("ALB DNS Name")<line_sep>listTemp.append("aws_lb.waferALB.dns_name")<line_sep>resourcesList.append(listTemp)<line_sep># Time to create the CloudFront distribution. 
resourceString<augadd>"resource \"aws_cloudfront_distribution\" \"waferCFN\" {\n"<line_sep>resourceString<augadd>" comment = \"WAFER CloudFront Distribution\"\n"<line_sep>resourceString<augadd>" enabled = true\n"<line_sep>resourceString<augadd>" web_acl_id = \"${aws_waf_web_acl.web_acl.id}\"\n\n"<line_sep>resourceString<augadd>" origin {\n"<line_sep>resourceString<augadd>" domain_name = \"${aws_lb.waferALB.dns_name}\"\n"<line_sep>resourceString<augadd>" origin_id = \"ELB-${aws_lb.waferALB.name}\"\n\n"<line_sep>resourceString<augadd>" custom_origin_config {\n"<line_sep>resourceString<augadd>" http_port = 80\n"<line_sep>resourceString<augadd>" https_port = 443\n"<line_sep>resourceString<augadd>" origin_protocol_policy = \"http-only\"\n"<line_sep>resourceString<augadd>" origin_ssl_protocols = [\"TLSv1\", \"TLSv1.1\", \"TLSv1.2\", \"SSLv3\"]\n"<line_sep>resourceString<augadd>" }\n"<line_sep>resourceString<augadd>" }\n\n"<line_sep>resourceString<augadd>" default_cache_behavior {\n"<line_sep>resourceString<augadd>" allowed_methods = [\"GET\", \"HEAD\", \"OPTIONS\", \"PUT\", \"POST\", \"PATCH\", \"DELETE\"]\n"<line_sep>resourceString<augadd>" cached_methods = [\"GET\", \"HEAD\"]\n"<line_sep>resourceString<augadd>" target_origin_id = \"ELB-${aws_lb.waferALB.name}\"\n\n"<line_sep>resourceString<augadd>" forwarded_values {\n"<line_sep>resourceString<augadd>" query_string = true\n"<line_sep>resourceString<augadd>" headers = [\"*\"]\n"<line_sep>resourceString<augadd>" cookies {\n"<line_sep>resourceString<augadd>" forward = \"all\"\n"<line_sep>resourceString<augadd>" }\n"<line_sep>resourceString<augadd>" }\n\n"<line_sep>resourceString<augadd>" viewer_protocol_policy = \"allow-all\"\n"<line_sep>resourceString<augadd>" }\n\n"<line_sep>resourceString<augadd>" viewer_certificate {\n"<line_sep>resourceString<augadd>" cloudfront_default_certificate = true\n"<line_sep>resourceString<augadd>" }\n\n"<line_sep>resourceString<augadd>" restrictions {\n"<line_sep>resourceString<augadd>" geo_restriction {\n"<line_sep>resourceString<augadd>" restriction_type = \"none\"\n"<line_sep>resourceString<augadd>" }\n"<line_sep>resourceString<augadd>" }\n"<line_sep>resourceString<augadd>"}\n\n"<line_sep>listTemp=[]<line_sep>listTemp.append("CloudFront_Distribution_Domain_Name")<line_sep>listTemp.append("CloudFront Distribution Name")<line_sep>listTemp.append("aws_cloudfront_distribution.waferCFN.domain_name")<line_sep>resourcesList.append(listTemp)<block_end><block_end><return>([resourcesList resourceString])<block_end>
<import_stmt>torch<import_stmt>torch.nn.functional<as>F<import_from_stmt>.losses Loss<class_stmt>BootstrapLatent(Loss)<block_start><def_stmt>__init__ self<block_start>super(BootstrapLatent self).__init__()<block_end><def_stmt>compute self anchor sample pos_mask neg_mask=<none> *args **kwargs<arrow>torch.FloatTensor<block_start>anchor=F.normalize(anchor dim=-1 p=2)<line_sep>sample=F.normalize(sample dim=-1 p=2)<line_sep>[email protected]()<line_sep>loss=(similarity<times>pos_mask).sum(dim=-1)<line_sep><return>loss.mean()<block_end><block_end>
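# Illustrative sketch (hedged, not from the original source): inlines the
# math of BootstrapLatent.compute with toy tensors, since the class's
# relative import path depends on the surrounding package.
import torch
import torch.nn.functional as F

anchor = torch.randn(8, 16)    # e.g. online-network projections
sample = torch.randn(8, 16)    # e.g. target-network projections
pos_mask = torch.eye(8)        # i-th anchor is positive for the i-th sample

anchor = F.normalize(anchor, dim=-1, p=2)
sample = F.normalize(sample, dim=-1, p=2)
similarity = anchor @ sample.t()
loss = (similarity * pos_mask).sum(dim=-1).mean()
print(loss)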
# Copyright (c) 2013 - 2020 <NAME> and Contributors. # This file is part of YAWAST which is released under the MIT license. # See the LICENSE file or go to https://yawast.org/license/ for full license details. <try_stmt><block_start><import_from_stmt>._version __version__<line_sep>__all__=["__version__"]<del_stmt>_version# remove to avoid confusion with __version__ <block_end><except_stmt>Exception# if we get here, something is very wrong - running under python2? <block_start><pass><block_end>
<import_stmt>tensorflow<as>tf<line_sep># Training samples path, change to your local path training_samples_file_path=tf.keras.utils.get_file("trainingSamples.csv" "file:///Users/zhewang/Workspace/SparrowRecSys/src/main"<concat>"/resources/webroot/sampledata/trainingSamples.csv")<line_sep># Test samples path, change to your local path test_samples_file_path=tf.keras.utils.get_file("testSamples.csv" "file:///Users/zhewang/Workspace/SparrowRecSys/src/main"<concat>"/resources/webroot/sampledata/testSamples.csv")<line_sep># load sample as tf dataset <def_stmt>get_dataset file_path<block_start>dataset=tf.data.experimental.make_csv_dataset(file_path batch_size=12 label_name='label' na_value="0" num_epochs=1 ignore_errors=<true>)<line_sep><return>dataset<block_end># split as test dataset and training dataset train_dataset=get_dataset(training_samples_file_path)<line_sep>test_dataset=get_dataset(test_samples_file_path)<line_sep># genre features vocabulary genre_vocab=['Film-Noir' 'Action' 'Adventure' 'Horror' 'Romance' 'War' 'Comedy' 'Western' 'Documentary' 'Sci-Fi' 'Drama' 'Thriller' 'Crime' 'Fantasy' 'Animation' 'IMAX' 'Mystery' 'Children' 'Musical']<line_sep>GENRE_FEATURES={'userGenre1':genre_vocab 'userGenre2':genre_vocab 'userGenre3':genre_vocab 'userGenre4':genre_vocab 'userGenre5':genre_vocab 'movieGenre1':genre_vocab 'movieGenre2':genre_vocab 'movieGenre3':genre_vocab}<line_sep># all categorical features categorical_columns=[]<for_stmt>feature,vocab GENRE_FEATURES.items()<block_start>cat_col=tf.feature_column.categorical_column_with_vocabulary_list(key=feature vocabulary_list=vocab)<line_sep>emb_col=tf.feature_column.embedding_column(cat_col 10)<line_sep>categorical_columns.append(emb_col)<block_end># movie id embedding feature movie_col=tf.feature_column.categorical_column_with_identity(key='movieId' num_buckets=1001)<line_sep>movie_emb_col=tf.feature_column.embedding_column(movie_col 10)<line_sep>categorical_columns.append(movie_emb_col)<line_sep># user id embedding feature user_col=tf.feature_column.categorical_column_with_identity(key='userId' num_buckets=30001)<line_sep>user_emb_col=tf.feature_column.embedding_column(user_col 10)<line_sep>categorical_columns.append(user_emb_col)<line_sep># all numerical features numerical_columns=[tf.feature_column.numeric_column('releaseYear') tf.feature_column.numeric_column('movieRatingCount') tf.feature_column.numeric_column('movieAvgRating') tf.feature_column.numeric_column('movieRatingStddev') tf.feature_column.numeric_column('userRatingCount') tf.feature_column.numeric_column('userAvgRating') tf.feature_column.numeric_column('userRatingStddev')]<line_sep># embedding + MLP model architecture model=tf.keras.Sequential([tf.keras.layers.DenseFeatures(numerical_columns+categorical_columns) tf.keras.layers.Dense(128 activation='relu') tf.keras.layers.Dense(128 activation='relu') tf.keras.layers.Dense(1 activation='sigmoid') ])<line_sep># compile the model, set loss function, optimizer and evaluation metrics model.compile(loss='binary_crossentropy' optimizer='adam' metrics=['accuracy' tf.keras.metrics.AUC(curve='ROC') tf.keras.metrics.AUC(curve='PR')])<line_sep># train the model model.fit(train_dataset epochs=5)<line_sep># evaluate the model test_loss,test_accuracy,test_roc_auc,test_pr_auc=model.evaluate(test_dataset)<line_sep>print('\n\nTest Loss {}, Test Accuracy {}, Test ROC AUC {}, Test PR AUC {}'.format(test_loss test_accuracy test_roc_auc test_pr_auc))<line_sep># print some predict results 
predictions=model.predict(test_dataset)<for_stmt>prediction,goodRating zip(predictions[:12] list(test_dataset)[0][1][:12])<block_start>print("Predicted good rating: {:.2%}".format(prediction[0]) " | Actual rating label: " ("Good Rating"<if>bool(goodRating)<else>"Bad Rating"))<block_end>
# encoding: UTF-8 <import_from_stmt>urllib.parse urlencode<import_from_stmt>bs4 BeautifulSoup<import_stmt>datetime<import_stmt>time<import_from_stmt>opendatatools.common RestAgent<import_stmt>pandas<as>pd<import_stmt>json<import_stmt>math<import_stmt>random<class_stmt>AMACAgent(RestAgent)<block_start><def_stmt>__init__ self<block_start>RestAgent.__init__(self)<line_sep>self.add_headers({# 请求头 'Accept':'application/json, text/javascript, */*; q=0.01' 'Accept-Encoding':'gzip, deflate' 'Accept-Language':'zh-CN,zh;q=0.9' 'Cache-Control':'max-age=0' 'Connection':'keep-alive' 'Content-Length':'2' 'Content-Type':'application/json' 'Host':'gs.amac.org.cn' 'Origin':'http://gs.amac.org.cn' 'Referer':'http://gs.amac.org.cn/amac-infodisc/res/pof/fund/index.html' 'User-Agent':'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36' 'X-Requested-With':'XMLHttpRequest'})<line_sep>self.fund_list_url='http://gs.amac.org.cn/amac-infodisc/api/pof/fund?'<line_sep>self.fund_base_url='http://gs.amac.org.cn/amac-infodisc/res/pof/fund/'<line_sep>self.manager_list_url='http://gs.amac.org.cn/amac-infodisc/api/pof/manager?'# 请求 url 所携带参数的前面部分 self.manager_base_url='http://gs.amac.org.cn/amac-infodisc/res/pof/manager/'# 基金 url 前面的部分 self.company_title_list=['私募基金管理人名称' '管理人详细信息网址' '管理人类型' '成立时间' '备案时间' '机构诚信信息' '基金管理人全称(中文)' '基金管理人全称(英文)' '登记编号' '组织机构代码' '登记时间' '成立时间' '注册地址' '办公地址' '注册资本(万元)(人民币)' '实缴资本(万元)(人民币)' '企业性质' '注册资本实缴比例' '机构类型' '业务类型' '全职员工人数' '取得基金从业人数' '机构网址' ]<line_sep>self.fund_title_list=['基金名称' '基金编号' '成立时间' '备案时间' '基金备案阶段' '基金类型' '币种' '基金管理人名称' '管理类型' '托管人名称' '运作状态' '基金信息最后更新时间' '基金协会特别提示(针对基金)']<block_end><def_stmt>get_total_record self list_url<block_start>params={'rand':random.random() 'page':1 'size':10 }<line_sep>url=list_url+urlencode(params)<line_sep>resp=self.do_request(url json={} method="POST")<line_sep>result=json.loads(resp)<line_sep><return>result['totalElements']<block_end><def_stmt>get_page self list_url page<block_start>'''爬取第xx页信息'''<line_sep># url 携带参数,设置了每页显示 100 条信息 params={'rand':0.3248183083707361 'page':page 'size':1000 }<line_sep>url=list_url+urlencode(params)<line_sep>resp=self.do_request(url json={} method="POST")<line_sep><return>json.loads(resp)<block_end><def_stmt>parse_fund_page self r_json<block_start><if_stmt>r_json<block_start>items=r_json.get('content')<for_stmt>item items<block_start>info={}<line_sep>info['id']=item.get('id')<line_sep>info['基金名称']=item.get('fundName')<line_sep>info['基金详细信息网址']=self.fund_base_url+item.get('id')+".html"<line_sep>info['基金状态']=item.get('workingState')<line_sep>info['私募基金管理人名称']=item.get('managerName')<line_sep>info['管理人类型']=item.get('managerType')<line_sep>establishDate=item.get('establishDate')<line_sep>info['成立时间']=str(datetime.datetime.fromtimestamp(establishDate/1000).date())<if>establishDate<else>''<line_sep># 成立时间有可能为空,防止这种情况而报错 putOnRecordDate=item.get('putOnRecordDate')<line_sep>info['备案时间']=str(datetime.datetime.fromtimestamp(putOnRecordDate/1000).date())<if>putOnRecordDate<else>''<line_sep><yield>info<block_end><block_end><block_end><def_stmt>parse_manager_page self r_json<block_start><if_stmt>r_json<block_start>items=r_json.get('content')<for_stmt>item 
items<block_start>info={}<line_sep>info['id']=item.get('id')<line_sep>info['私募基金管理人名称']=item.get('managerName')<line_sep>info['管理人详细信息网址']=self.manager_base_url+item.get('id')+".html"<line_sep>info['管理人类型']=item.get('primaryInvestType')<line_sep>establishDate=item.get('establishDate')<line_sep>info['成立时间']=str(datetime.datetime.fromtimestamp(establishDate/1000).date())<if>establishDate<else>''<line_sep># 成立时间有可能为空,防止这种情况而报错 registerDate=item.get('registerDate')<line_sep>info['备案时间']=str(datetime.datetime.fromtimestamp(registerDate/1000).date())<if>registerDate<else>''<line_sep><yield>info<block_end><block_end><block_end><def_stmt>get_detail self url<block_start>resp=self.do_request(url method="GET" encoding="utf-8")<line_sep><return>resp<block_end><def_stmt>parse_manager_detail self html<block_start>soup=BeautifulSoup(html "html5lib")<line_sep>tables=soup.find_all('table')<line_sep>info={}<for_stmt>table tables<block_start><if_stmt>table.has_attr("class")<and>"table-info"<in>table['class']<block_start>rows=table.findAll('tr')<for_stmt>row rows<block_start>cols=row.findAll('td')<if_stmt>len(cols)<ge>2<block_start>title=cols[0].text<line_sep>content=cols[1].text<line_sep>title=title.replace(":" "")<line_sep>content=content.replace("\n" "")<line_sep>content=content.strip()<if_stmt>title<in>self.company_title_list<block_start>info[title]=content<block_end><block_end><if_stmt>len(cols)<ge>4<block_start>title=cols[2].text<line_sep>content=cols[3].text<line_sep>title=title.replace(":" "")<line_sep>content=content.replace("\n" "")<line_sep>content=content.strip()<if_stmt>title<in>self.company_title_list<block_start>info[title]=content<block_end><block_end><block_end><block_end><block_end><return>info<block_end><def_stmt>parse_fund_detail self html<block_start>soup=BeautifulSoup(html "html5lib")<line_sep>tables=soup.find_all('table')<line_sep>info={}<for_stmt>table tables<block_start><if_stmt>table.has_attr("class")<and>"table-info"<in>table['class']<block_start>rows=table.findAll('tr')<for_stmt>row rows<block_start>cols=row.findAll('td')<if_stmt>len(cols)<ge>2<block_start>title=cols[0].text<line_sep>content=cols[1].text<line_sep>title=title.replace(":" "")<line_sep>content=content.replace("\n" "")<line_sep>content=content.strip()<if_stmt>title<in>self.fund_title_list<block_start>info[title]=content<block_end><block_end><if_stmt>len(cols)<ge>4<block_start>title=cols[2].text<line_sep>content=cols[3].text<line_sep>title=title.replace(":" "")<line_sep>content=content.replace("\n" "")<line_sep>content=content.strip()<if_stmt>title<in>self.fund_title_list<block_start>info[title]=content<block_end><block_end><block_end><block_end><block_end><return>info<block_end><def_stmt>get_company_list self<block_start>total_record=self.get_total_record(self.manager_list_url)<line_sep>total_page=math.ceil(total_record/1000)<line_sep>print(total_record total_page)<line_sep>lis_json=[]<for_stmt>page range(1 total_page)<block_start>print("page=" page)<line_sep>r_json=self.get_page(self.manager_list_url page)<line_sep>results=self.parse_manager_page(r_json)<for_stmt>result results<block_start>lis_json.append(result)<block_end><block_end><return>pd.DataFrame(lis_json)<block_end><def_stmt>get_company_detail self company_id<block_start>url=self.manager_base_url+company_id+".html"<line_sep>html=self.get_detail(url)<line_sep>info=self.parse_manager_detail(html)<line_sep><return>info<block_end><def_stmt>get_fund_list 
self<block_start>total_record=self.get_total_record(self.fund_list_url)<line_sep>total_page=math.ceil(total_record/1000)<line_sep>print(total_record total_page)<line_sep>lis_json=[]<for_stmt>page range(1 total_page)<block_start>print("page=" page)<line_sep>r_json=self.get_page(self.fund_list_url page)<line_sep>results=self.parse_fund_page(r_json)<for_stmt>result results<block_start>lis_json.append(result)<block_end><block_end><return>pd.DataFrame(lis_json)<block_end><def_stmt>get_fund_detail self fund_id<block_start>url=self.fund_base_url+fund_id+".html"<line_sep>html=self.get_detail(url)<line_sep>info=self.parse_fund_detail(html)<line_sep><return>info<block_end><block_end><if_stmt>__name__<eq>'__main__'<block_start>agent=AMACAgent()<line_sep>#df = agent.get_company_list() #print(df) #result = agent.get_company_detail("101000004390") #print(result) #df = agent.get_fund_list() #print(df) result=agent.get_fund_detail('351000130305')<line_sep>print(result)<block_end>
<import_stmt>argparse<import_stmt>os<import_stmt>subprocess<import_stmt>bddl<def_stmt>main <block_start>parser=argparse.ArgumentParser()<line_sep>parser.add_argument("--max_trials" type=int default=1 help="Maximum number of trials to try sampling.")<line_sep>parser.add_argument("--num_initializations" type=int default=1 help="Number of initialization per PDDL per scene.")<line_sep>parser.add_argument("--start_initialization" type=int default=0 help="Starting idx for initialization")<line_sep>args=parser.parse_args()<line_sep>condition_dir=os.path.join(os.path.dirname(bddl.__file__) "activity_conditions")<for_stmt>task sorted(os.listdir(condition_dir))<block_start>task_dir=os.path.join(condition_dir task)<if_stmt>os.path.isdir(task_dir)<block_start><for_stmt>task_id_file sorted(os.listdir(task_dir))<block_start>task_id=task_id_file.replace("problem" "")[0]<if_stmt>task_id<ne>"0"<block_start><continue><block_end>subprocess.call("python sampling_saver.py --task {} --task_id {} --max_trials {} --num_initializations {} --start_initialization {}".format(task task_id args.max_trials args.num_initializations args.start_initialization ) shell=<true> )<block_end><block_end><block_end><block_end><if_stmt>__name__<eq>"__main__"<block_start>main()<block_end>
<import_from_stmt>. fields# noqa
<import_from_stmt>mlflow.entities._mlflow_object _MLflowObject<import_from_stmt>mlflow.protos.service_pb2 Metric<as>ProtoMetric<class_stmt>Metric(_MLflowObject)<block_start>""" Metric object. """<def_stmt>__init__ self key value timestamp step<block_start>self._key=key<line_sep>self._value=value<line_sep>self._timestamp=timestamp<line_sep>self._step=step<block_end>@property<def_stmt>key self<block_start>"""String key corresponding to the metric name."""<line_sep><return>self._key<block_end>@property<def_stmt>value self<block_start>"""Float value of the metric."""<line_sep><return>self._value<block_end>@property<def_stmt>timestamp self<block_start>"""Metric timestamp as an integer (milliseconds since the Unix epoch)."""<line_sep><return>self._timestamp<block_end>@property<def_stmt>step self<block_start>"""Integer metric step (x-coordinate)."""<line_sep><return>self._step<block_end><def_stmt>to_proto self<block_start>metric=ProtoMetric()<line_sep>metric.key=self.key<line_sep>metric.value=self.value<line_sep>metric.timestamp=self.timestamp<line_sep>metric.step=self.step<line_sep><return>metric<block_end>@classmethod<def_stmt>from_proto cls proto<block_start><return>cls(proto.key proto.value proto.timestamp proto.step)<block_end><block_end>
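# Illustrative sketch (hedged, not from the original source): round-trips a
# Metric through its protobuf form, assuming the entity is exposed as
# mlflow.entities.Metric in the installed mlflow version.
from mlflow.entities import Metric

m = Metric(key="accuracy", value=0.93, timestamp=1600000000000, step=5)
proto = m.to_proto()                  # protobuf message with the same fields
restored = Metric.from_proto(proto)
assert (restored.key, restored.value, restored.step) == (m.key, m.value, m.step)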
""" Utilities for various geometric operations. """<import_stmt>numpy<as>np<import_stmt>pytorch3d<import_stmt>torch<import_stmt>torch.nn.functional<as>F<import_from_stmt>pytorch3d.utils ico_sphere<def_stmt>random_rotation device=<none><block_start>quat=torch.randn(4 device=device)<line_sep>quat<augdiv>quat.norm()<line_sep><return>pytorch3d.transforms.quaternion_to_matrix(quat)<block_end><def_stmt>rot6d_to_matrix rot_6d<block_start>""" Convert 6D rotation representation to 3x3 rotation matrix. Reference: <NAME> al., "On the Continuity of Rotation Representations in Neural Networks", CVPR 2019 Args: rot_6d (B x 6): Batch of 6D Rotation representation. Returns: Rotation matrices (B x 3 x 3). """<line_sep>rot_6d=rot_6d.view(-1 3 2)<line_sep>a1=rot_6d[: : 0]<line_sep>a2=rot_6d[: : 1]<line_sep>b1=F.normalize(a1)<line_sep>b2=F.normalize(a2-torch.einsum("bi,bi->b" b1 a2).unsqueeze(-1)<times>b1)<line_sep>b3=torch.cross(b1 b2)<line_sep><return>torch.stack((b1 b2 b3) dim=-1)<block_end><def_stmt>matrix_to_rot6d rotmat<block_start>""" Convert rotation matrix to 6D rotation representation. Args: rotmat (B x 3 x 3): Batch of rotation matrices. Returns: 6D Rotations (B x 3 x 2). """<line_sep><return>rotmat.view(-1 3 3)[: : :2]<block_end><def_stmt>spherical_to_cartesian theta phi radius=1.0<block_start>""" Converts from spherical coordinates to cartesian coordinates. Spherical coordinates are defined according to the physics convention (theta elevation, phi azimuth). https://en.wikipedia.org/wiki/Spherical_coordinate_system#Cartesian_coordinates Args: theta (tensor): elevation. phi (tensor): azimuth. radius (tensor): radius. Defaults to 1. Returns: (x, y, z) """<line_sep>x=radius<times>torch.sin(theta)<times>torch.cos(phi)<line_sep>y=radius<times>torch.sin(theta)<times>torch.sin(phi)<line_sep>z=radius<times>torch.cos(theta)<line_sep><return>x y z<block_end><def_stmt>cartesian_to_spherical x y z<block_start>""" Converts spherical coordinates to cartesian coordinates. Args: x (tensor). y (tensor). z (tensor). Returns: (theta, phi) """<line_sep>theta=torch.arccos(z)<line_sep>phi=torch.atan2(y x)<line_sep><return>theta phi<block_end><def_stmt>create_sphere level=4 device=<none><block_start>""" Creates a unit ico-sphere. """<line_sep>mesh=ico_sphere(level=level device=device)<line_sep><return>mesh.verts_padded()[0] mesh.faces_padded()[0]<block_end><def_stmt>unwrap_uv_map height=256 width=256<block_start>""" Samples spherical coordinates to unwrap a UV map. Args: height (int). width (int). Returns: Spherical coordinates (H,W,3). """<line_sep>theta_=torch.linspace(0 np.pi height)<line_sep>phi_=torch.linspace(-np.pi np.pi width)<line_sep>theta,phi=torch.meshgrid(theta_ phi_)<line_sep>x,y,z=spherical_to_cartesian(theta phi)<line_sep><return>torch.dstack((x y z))<block_end>
# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 # For details: https://github.com/gaogaotiantian/watchpoints/blob/master/NOTICE.txt <import_stmt>ast<import_stmt>sys<def_stmt>ast_parse_node node<block_start>""" :param ast.Node node: an ast node representing an expression of variable :return ast.Node: an ast node for: _watchpoints_obj = var if <var is a local variable>: # watch(a) _watchpoints_localvar = "a" elif <var is a subscript>: # watch(a[3]) _watchpoints_parent = a _watchpoints_subscr = 3 elif <var is an attribute>: # watch(a.b) _watchpoints_parent = a _watchpoints_attr = "b" """<line_sep>root=ast.Module(body=[ast.Assign(targets=[ast.Name(id="_watchpoints_obj" ctx=ast.Store())] value=node)] type_ignores=[])<if_stmt>type(node)<is>ast.Name<block_start>root.body.append(ast.Assign(targets=[ast.Name(id="_watchpoints_localvar" ctx=ast.Store())] value=ast.Constant(value=node.id)))<block_end><elif_stmt>type(node)<is>ast.Subscript<block_start>root.body.append(ast.Assign(targets=[ast.Name(id="_watchpoints_parent" ctx=ast.Store())] value=node.value))<if_stmt>sys.version_info.minor<le>8<and>type(node.slice)<is>ast.Index<block_start>value_node=node.slice.value<block_end><elif_stmt>sys.version_info.minor<ge>9<and>type(node.slice)<is><not>ast.Slice<block_start>value_node=node.slice<block_end><else_stmt><block_start><raise>ValueError("Slice is not supported!")<block_end>root.body.append(ast.Assign(targets=[ast.Name(id="_watchpoints_subscr" ctx=ast.Store())] value=value_node))<block_end><elif_stmt>type(node)<is>ast.Attribute<block_start>root.body.append(ast.Assign(targets=[ast.Name(id="_watchpoints_parent" ctx=ast.Store())] value=node.value))<line_sep>root.body.append(ast.Assign(targets=[ast.Name(id="_watchpoints_attr" ctx=ast.Store())] value=ast.Constant(value=node.attr)))<block_end>ast.fix_missing_locations(root)<line_sep><return>root<block_end>
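# Illustrative sketch (hedged, not from the original source): feeds an
# attribute expression through ast_parse_node (defined above) and prints the
# generated assignments described in its docstring.
import ast

expr_node = ast.parse("a.b", mode="eval").body      # an ast.Attribute node
generated = ast_parse_node(expr_node)
print(ast.dump(generated))
# compile(generated, "<watchpoints>", "exec") could then be exec'd in a frame
# where `a` exists, binding _watchpoints_obj, _watchpoints_parent and
# _watchpoints_attr there.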
# Copyright (c) 2016 EMC Corporation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. <class_stmt>StoropsException(Exception)<block_start>message='Storops Error.'<block_end><class_stmt>VNXException(StoropsException)<block_start>message="VNX Error."<block_end><class_stmt>VNXStorageGroupError(VNXException)<block_start><pass><block_end><class_stmt>VNXAttachAluError(VNXException)<block_start><pass><block_end><class_stmt>VNXAluAlreadyAttachedError(VNXAttachAluError)<block_start>message=('LUN already exists in the specified storage group' 'Requested LUN has already been added to this Storage Group')<block_end><class_stmt>VNXDetachAluError(VNXStorageGroupError)<block_start><pass><block_end><class_stmt>VNXDetachAluNotFoundError(VNXDetachAluError)<block_start>message='No such Host LUN in this Storage Group'<block_end><class_stmt>VNXCreateStorageGroupError(VNXStorageGroupError)<block_start><pass><block_end><class_stmt>VNXStorageGroupNameInUseError(VNXCreateStorageGroupError)<block_start>message='Storage Group name already in use'<block_end><class_stmt>VNXNoHluAvailableError(VNXStorageGroupError)<block_start><pass><block_end><class_stmt>VNXMigrationError(VNXException)<block_start><pass><block_end><class_stmt>VNXLunNotMigratingError(VNXException)<block_start><pass><block_end><class_stmt>VNXLunSyncCompletedError(VNXMigrationError)<block_start>error_code=0x714a8021<block_end><class_stmt>VNXTargetNotReadyError(VNXMigrationError)<block_start>message='The destination LUN is not available for migration'<block_end><class_stmt>VNXSnapError(VNXException)<block_start><pass><block_end><class_stmt>VNXDeleteAttachedSnapError(VNXSnapError)<block_start>error_code=0x716d8003<block_end><class_stmt>VNXCreateSnapError(VNXException)<block_start>message='Cannot create the snapshot.'<block_end><class_stmt>VNXAttachSnapError(VNXSnapError)<block_start>message='Cannot attach the snapshot.'<block_end><class_stmt>VNXDetachSnapError(VNXSnapError)<block_start>message='Cannot detach the snapshot.'<block_end><class_stmt>VNXSnapAlreadyMountedError(VNXSnapError)<block_start>error_code=0x716d8055<block_end><class_stmt>VNXSnapNameInUseError(VNXSnapError)<block_start>error_code=0x716d8005<block_end><class_stmt>VNXSnapNotExistsError(VNXSnapError)<block_start>message='The specified snapshot does not exist.'<block_end><class_stmt>VNXLunError(VNXException)<block_start><pass><block_end><class_stmt>VNXCreateLunError(VNXLunError)<block_start><pass><block_end><class_stmt>VNXLunNameInUseError(VNXCreateLunError)<block_start>error_code=0x712d8d04<block_end><class_stmt>VNXLunExtendError(VNXLunError)<block_start><pass><block_end><class_stmt>VNXLunExpandSizeError(VNXLunExtendError)<block_start>error_code=0x712d8e04<block_end><class_stmt>VNXLunPreparingError(VNXLunError)<block_start>error_code=0x712d8e0e<block_end><class_stmt>VNXLunNotFoundError(VNXLunError)<block_start>message='Could not retrieve the specified (pool 
lun).'<block_end><class_stmt>VNXDeleteLunError(VNXLunError)<block_start><pass><block_end><class_stmt>VNXLunUsedByFeatureError(VNXLunError)<block_start><pass><block_end><class_stmt>VNXCompressionError(VNXLunError)<block_start><pass><block_end><class_stmt>VNXCompressionAlreadyEnabledError(VNXCompressionError)<block_start>message='Compression on the specified LUN is already turned on.'<block_end><class_stmt>VNXConsistencyGroupError(VNXException)<block_start><pass><block_end><class_stmt>VNXCreateConsistencyGroupError(VNXConsistencyGroupError)<block_start><pass><block_end><class_stmt>VNXConsistencyGroupNameInUseError(VNXCreateConsistencyGroupError)<block_start>error_code=0x716d8021<block_end><class_stmt>VNXConsistencyGroupNotFoundError(VNXConsistencyGroupError)<block_start>message='Cannot find the consistency group'<block_end><class_stmt>VNXPingNodeError(VNXException)<block_start><pass><block_end><class_stmt>VNXMirrorException(VNXException)<block_start><pass><block_end><class_stmt>VNXMirrorNameInUseError(VNXMirrorException)<block_start>message='Mirror name already in use'<block_end><class_stmt>VNXMirrorPromotePrimaryError(VNXMirrorException)<block_start>message='Cannot remove or promote a primary image.'<block_end><class_stmt>VNXMirrorNotFoundError(VNXMirrorException)<block_start>message='Mirror not found'<block_end><class_stmt>VNXMirrorGroupNameInUseError(VNXMirrorException)<block_start>message='Mirror Group name already in use'<block_end><class_stmt>VNXMirrorGroupNotFoundError(VNXMirrorException)<block_start>message='Unable to locate the specified group'<block_end><class_stmt>VNXMirrorGroupAlreadyMemberError(VNXMirrorException)<block_start>message='The mirror is already a member of a group'<block_end><class_stmt>VNXMirrorGroupMirrorNotMemberError(VNXMirrorException)<block_start>message='The specified mirror is not a member of the group'<block_end><class_stmt>VNXMirrorGroupAlreadyPromotedError(VNXMirrorException)<block_start>message='The Consistency Group has no secondary images to promote'<block_end>
<import_stmt>torch<import_stmt>warnings<import_stmt>ConfigSpace<import_stmt>ConfigSpace.hyperparameters<as>CSH<import_stmt>ConfigSpace.conditions<as>CSC<import_from_stmt>autoPyTorch.utils.config_space_hyperparameter get_hyperparameter<import_from_stmt>autoPyTorch.components.preprocessing.preprocessor_base PreprocessorBase<class_stmt>FastICA(PreprocessorBase)<block_start><def_stmt>__init__ self hyperparameter_config<block_start>self.algorithm=hyperparameter_config['algorithm']<line_sep>self.whiten=hyperparameter_config['whiten']<line_sep>self.fun=hyperparameter_config['fun']<line_sep>self.n_components=<none><if_stmt>(self.whiten)<block_start>self.n_components=hyperparameter_config['n_components']<block_end><block_end><def_stmt>fit self X Y<block_start><import_stmt>sklearn.decomposition<line_sep>self.preprocessor=sklearn.decomposition.FastICA(n_components=self.n_components algorithm=self.algorithm fun=self.fun whiten=self.whiten)<line_sep># Make the RuntimeWarning an Exception! <with_stmt>warnings.catch_warnings()<block_start>warnings.filterwarnings("error" message='array must not contain infs or NaNs')<try_stmt><block_start><return>self.preprocessor.fit(X)<block_end><except_stmt>ValueError<as>e<block_start><if_stmt>'array must not contain infs or NaNs'<in>e.args[0]<block_start><raise>ValueError("Bug in scikit-learn: https://github.com/scikit-learn/scikit-learn/pull/2738")<block_end><block_end><block_end><block_end><def_stmt>transform self X<block_start><if_stmt>self.preprocessor<is><none><block_start><raise>NotImplementedError()<block_end><return>self.preprocessor.transform(X)<block_end>@staticmethod<def_stmt>get_hyperparameter_search_space dataset_info=<none> n_components=(10 2000) algorithm=('parallel' 'deflation') whiten=(<true> <false>) fun=('logcosh' 'exp' 'cube') <block_start>cs=ConfigSpace.ConfigurationSpace()<line_sep>n_components_hp=get_hyperparameter(CSH.UniformIntegerHyperparameter "n_components" n_components)<line_sep>algorithm_hp=get_hyperparameter(CSH.CategoricalHyperparameter 'algorithm' algorithm)<line_sep>whiten_hp=get_hyperparameter(CSH.CategoricalHyperparameter 'whiten' whiten)<line_sep>fun_hp=get_hyperparameter(CSH.CategoricalHyperparameter 'fun' fun)<if_stmt><true><in>whiten<block_start>cs.add_hyperparameters([n_components_hp algorithm_hp whiten_hp fun_hp])<line_sep>cs.add_condition(CSC.EqualsCondition(n_components_hp whiten_hp <true>))<block_end><return>cs<block_end><block_end>
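# Illustrative sketch (hedged, not from the original source): the wrapper is
# normally instantiated by autoPyTorch from a sampled configuration, but its
# __init__ only needs a plain dict, so a direct call looks roughly like this
# (the config values below are just an example).
import numpy as np

config = {"algorithm": "parallel", "whiten": True, "fun": "logcosh", "n_components": 4}
ica = FastICA(config)              # the wrapper class defined above
X = np.random.randn(200, 10)
ica.fit(X, None)
X_reduced = ica.transform(X)       # shape (200, 4)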
<import_from_stmt>rotkehlchen.externalapis.etherscan _hashes_tuple_to_list<def_stmt>test_hashes_tuple_to_list <block_start>hashes={('0x1' 1) ('0x2' 2) ('0x3' 3) ('0x4' 4) ('0x5' 5)}<assert_stmt>_hashes_tuple_to_list(hashes)<eq>['0x1' '0x2' '0x3' '0x4' '0x5']<block_end>
<import_from_stmt>. constants<class_stmt>DoesNotExist(Exception)<block_start><pass><block_end><class_stmt>ReadError(Exception)<block_start><pass><block_end><class_stmt>WriteError(Exception)<block_start><pass><block_end><class_stmt>RepositoryConfigurationError(Exception)<block_start><pass><block_end><class_stmt>IncompatibleRepositoryVersion(Exception)<block_start><pass><block_end><class_stmt>CorruptedRepositorySpec(Exception)<block_start><pass><block_end><class_stmt>ConfigNotFound(Exception)<block_start><pass><block_end>
<import_stmt>json<import_stmt>subprocess<import_stmt>random<import_stmt>logging<line_sep>logger=logging.getLogger(__name__)<class_stmt>KubernetesExec(object)<block_start><def_stmt>__init__ self rcname cmd='sh' namespace="default" container=<none> kind="rc"<block_start>self.rcname=rcname<line_sep>self.namespace=namespace<line_sep>self.command=cmd<line_sep>self.kind=kind<line_sep>self.container=container<block_end><def_stmt>call self tty=<true><block_start>rc=self._getrc()<line_sep>selector=self._getselector(rc)<line_sep>logger.info("selector: %s" selector)<line_sep>pods=self._getpods(selector)<line_sep>podname=random.choice(pods)['metadata']['name']<line_sep>cmd=['exec' '--namespace' self.namespace podname]<if_stmt>tty<block_start>cmd.append("-ti")<block_end><if_stmt>self.container<is><not><none><block_start>cmd<augadd>['-c' self.container]<block_end>command=['kubectl']+cmd+["--"]+self.command.split(" ")<line_sep><return>subprocess.call(command)<block_end><def_stmt>_getpods self selector<block_start>cmd=['get' "pods" "-l" selector '-o' 'json']<line_sep>podslist=json.loads(self._call(cmd))<line_sep>pods=podslist['items']<line_sep><return>pods<block_end><def_stmt>_getselector self rc<block_start>s=<none><line_sep>items=rc['spec']['selector']<if_stmt>'matchLabels'<in>items<block_start>items=items['matchLabels']<block_end><for_stmt>k,v items.iteritems()<block_start><if_stmt>s<is><none><block_start>s="%s=%s"%(k v)<block_end><else_stmt><block_start>s<augadd>",%s=%s"%(k v)<block_end><block_end><return>s<block_end><def_stmt>_getrc self<block_start>cmd=['get' self.kind self.rcname '-o' 'json']<line_sep><return>json.loads(self._call(cmd))<block_end><def_stmt>_call self cmd dry=<false><block_start>command=['kubectl']+cmd+["--namespace" self.namespace]<line_sep><return>subprocess.check_output(command stderr=subprocess.STDOUT)<block_end><block_end>
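# Illustrative sketch (hedged, not from the original source): opens a shell
# in a random pod selected by the "web" deployment, assuming kubectl is on
# PATH and configured for the right cluster. Note the class uses
# dict.iteritems(), so as written it targets Python 2.
ke = KubernetesExec("web", cmd="sh", namespace="default", kind="deployment")
ke.call(tty=True)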
# -*- coding: utf-8 -*- <import_from_stmt>folium.elements JSCSSMixin<import_from_stmt>folium.map Marker<import_from_stmt>folium.utilities parse_options<import_from_stmt>jinja2 Template<class_stmt>BoatMarker(JSCSSMixin Marker)<block_start>"""Add a Marker in the shape of a boat. Parameters ---------- location: tuple of length 2, default None The latitude and longitude of the marker. If None, then the middle of the map is used. heading: int, default 0 Heading of the boat to an angle value between 0 and 360 degrees wind_heading: int, default None Heading of the wind to an angle value between 0 and 360 degrees If None, then no wind is represented. wind_speed: int, default 0 Speed of the wind in knots. https://github.com/thomasbrueggemann/leaflet.boatmarker """<line_sep>_template=Template(u""" {% macro script(this, kwargs) %} var {{ this.get_name() }} = L.boatMarker( {{ this.location|tojson }}, {{ this.options|tojson }} ).addTo({{ this._parent.get_name() }}); {% if this.wind_heading is not none -%} {{ this.get_name() }}.setHeadingWind( {{ this.heading }}, {{ this.wind_speed }}, {{ this.wind_heading }} ); {% else -%} {{this.get_name()}}.setHeading({{this.heading}}); {% endif -%} {% endmacro %} """)<line_sep>default_js=[('markerclusterjs' 'https://unpkg.com/leaflet.boatmarker/leaflet.boatmarker.min.js') ]<def_stmt>__init__ self location popup=<none> icon=<none> heading=0 wind_heading=<none> wind_speed=0 **kwargs<block_start>super(BoatMarker self).__init__(location popup=popup icon=icon)<line_sep>self._name='BoatMarker'<line_sep>self.heading=heading<line_sep>self.wind_heading=wind_heading<line_sep>self.wind_speed=wind_speed<line_sep>self.options=parse_options(**kwargs)<block_end><block_end>
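# Illustrative sketch (hedged, not from the original source): BoatMarker
# behaves like any folium Marker, so it can be added straight to a Map;
# parameter names follow the docstring above.
import folium
from folium.plugins import BoatMarker

m = folium.Map(location=[59.9, 10.7], zoom_start=6)
BoatMarker(location=[59.9, 10.7], heading=45,
           wind_heading=150, wind_speed=10).add_to(m)
m.save("boat_marker.html")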
"""Test Pandoc module"""<line_sep>#----------------------------------------------------------------------------- # Copyright (C) 2014 The IPython Development Team # # Distributed under the terms of the BSD License. The full license is in # the file COPYING, distributed as part of this software. #----------------------------------------------------------------------------- #----------------------------------------------------------------------------- # Imports #----------------------------------------------------------------------------- <import_stmt>os<import_stmt>warnings<import_from_stmt>...tests.utils onlyif_cmds_exist<import_from_stmt>nbconvert.tests.base TestsBase<import_from_stmt>.. pandoc<line_sep>#----------------------------------------------------------------------------- # Classes and functions #----------------------------------------------------------------------------- <class_stmt>TestPandoc(TestsBase)<block_start>"""Collection of Pandoc tests"""<def_stmt>__init__ self *args **kwargs<block_start>super().__init__(*args **kwargs)<line_sep>self.original_env=os.environ.copy()<block_end><def_stmt>setUp self<block_start>super().setUp()<line_sep>pandoc.check_pandoc_version._cached=<none><block_end>@onlyif_cmds_exist('pandoc')<def_stmt>test_pandoc_available self<block_start>""" Test behaviour that pandoc functions raise PandocMissing as documented """<line_sep>pandoc.clean_cache()<line_sep>os.environ["PATH"]=""<with_stmt>self.assertRaises(pandoc.PandocMissing)<block_start>pandoc.get_pandoc_version()<block_end><with_stmt>self.assertRaises(pandoc.PandocMissing)<block_start>pandoc.check_pandoc_version()<block_end><with_stmt>self.assertRaises(pandoc.PandocMissing)<block_start>pandoc.pandoc("" "markdown" "html")<block_end># original_env["PATH"] should contain pandoc os.environ["PATH"]=self.original_env["PATH"]<with_stmt>warnings.catch_warnings(record=<true>)<as>w<block_start>pandoc.get_pandoc_version()<line_sep>pandoc.check_pandoc_version()<line_sep>pandoc.pandoc("" "markdown" "html")<block_end>self.assertEqual(w [])<block_end>@onlyif_cmds_exist('pandoc')<def_stmt>test_minimal_version self<block_start>original_minversion=pandoc._minimal_version<line_sep>pandoc._minimal_version="120.0"<with_stmt>warnings.catch_warnings(record=<true>)<as>w# call it twice to verify the cached value is used <block_start><assert_stmt><not>pandoc.check_pandoc_version()<assert_stmt><not>pandoc.check_pandoc_version()<block_end># only one warning after two calls, due to cache self.assertEqual(len(w) 1)<line_sep># clear cache pandoc.check_pandoc_version._cached=<none><line_sep>pandoc._minimal_version=pandoc.get_pandoc_version()<assert_stmt>pandoc.check_pandoc_version()<block_end><block_end><def_stmt>pandoc_function_raised_missing f *args **kwargs<block_start><try_stmt><block_start>f(*args **kwargs)<block_end><except_stmt>pandoc.PandocMissing<block_start><return><true><block_end><else_stmt><block_start><return><false><block_end><block_end>
# Copyright © 2019 The vt-py authors. All Rights Reserved. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. <import_stmt>aiohttp<import_stmt>asyncio<import_stmt>base64<import_stmt>json<import_stmt>io<import_from_stmt>.error APIError<import_from_stmt>.feed Feed<import_from_stmt>.iterator Iterator<import_from_stmt>.object Object<import_from_stmt>.utils make_sync<import_from_stmt>.version __version__<line_sep>__all__=['Client' 'ClientResponse' 'url_id']<line_sep>_API_HOST='https://www.virustotal.com'<line_sep># All API endpoints start with this prefix, you don't need to include the # prefix in the paths you request as it's prepended automatically. _ENDPOINT_PREFIX='/api/v3'<line_sep># AppEngine server decides whether or not it should serve gzipped content # based on Accept-Encoding and User-Agent. Non-standard UAs are not served # with gzipped content unless it contains the string "gzip" somewhere. # See: https://cloud.google.com/appengine/kb/#compression _USER_AGENT_FMT='{agent}; vtpy {version}; gzip'<def_stmt>url_id url<block_start>"""Generates the object ID for an URL. The ID generated by this function can be used in calls that expect a URL ID like `client.get_object('/urls/<id>')` """<line_sep><return>base64.urlsafe_b64encode(url.encode()).decode().strip("=")<block_end><class_stmt>ClientResponse<block_start>"""Class representing the HTTP responses returned by the client. This class is just a thing wrapper around `aiohttp.ClientResponse <https://aiohttp.readthedocs.io/en/stable/client_reference.html#aiohttp.ClientResponse>`_ that allows using it in both asynchronous and synchronous mode. Instances of this class have all the attributes that you can find in `aiohttp.ClientResponse`, like `version`, `status`, `method`, `url`, and so on. Methods in `aiohttp.ClientResponse` that return a coroutine have two flavors in this class: synchronous and asynchronous. For example, `aiohttp.ClientResponse.read()` becomes `vt.ClientResponse.read_async()`, and `vt.ClientResponse.read()` is the synchronous version of `vt.ClientResponse.read_async()`. 
Find more information about attributes and methods in `aiohttp.ClientResponse` in: https://aiohttp.readthedocs.io/en/stable/client_reference.html#aiohttp.ClientResponse """<def_stmt>__init__ self aiohttp_resp<block_start>self._aiohttp_resp=aiohttp_resp<block_end><def_stmt>__getattr__ self attr<block_start><return>getattr(self._aiohttp_resp attr)<block_end>@property<def_stmt>content self<block_start><return>StreamReader(self._aiohttp_resp.content)<block_end><async_keyword><def_stmt>_get_chunked_response self<block_start>buffer=b""<async_keyword><for_stmt>data,_ self.content.iter_chunks()<block_start>buffer<augadd>data<block_end><return>buffer<block_end><async_keyword><def_stmt>read_async self<block_start><if_stmt>self.headers.get('Transfer-encoding')<eq>'chunked'<block_start><return><await>self._get_chunked_response()<block_end><else_stmt><block_start><return><await>self._aiohttp_resp.read()<block_end><block_end><def_stmt>read self<block_start><return>make_sync(self.read_async())<block_end><async_keyword><def_stmt>json_async self<block_start><if_stmt>self.headers.get('Transfer-encoding')<eq>'chunked'<block_start>response_content=<await>self._get_chunked_response()<line_sep><return>json.loads(response_content)<block_end><else_stmt><block_start><return><await>self._aiohttp_resp.json()<block_end><block_end><def_stmt>json self<block_start><return>make_sync(self.json_async())<block_end><async_keyword><def_stmt>text_async self<block_start><if_stmt>self.headers.get('Transfer-encoding')<eq>'chunked'<block_start>response_content=<await>self._get_chunked_response()<line_sep><return>response_content.decode(self._aiohttp_resp.get_encoding())<block_end><else_stmt><block_start><return><await>self._aiohttp_resp.text()<block_end><block_end><def_stmt>text self<block_start><return>make_sync(self.text_async())<block_end><block_end><class_stmt>StreamReader<block_start>"""Class representing the content stream of an HTTP response. This class is just a thin wrapper around `aiohttp.StreamReader <https://aiohttp.readthedocs.io/en/stable/streams.html#aiohttp.StreamReader>`_ that allows using it in both asynchronous and synchronous mode. Instances of this class have all the methods that you can find in `aiohttp.StreamReader`, like `read()`, `readany()`, etc. Methods in `aiohttp.StreamReader` come in two flavors in this class: synchronous and asynchronous. For example, `read()` and `read_async`, where `read` is the synchronous one and `read_async` is the asynchronous one. 
Find more information about attributes and methods in `aiohttp.StreamReader` in: https://aiohttp.readthedocs.io/en/stable/streams.html#aiohttp.StreamReader """<def_stmt>__init__ self aiohttp_stream_reader<block_start>self._aiohttp_stream_reader=aiohttp_stream_reader<block_end><def_stmt>__getattr__ self attr<block_start><return>getattr(self._aiohttp_stream_reader attr)<block_end><async_keyword><def_stmt>read_async self n=-1<block_start><return><await>self._aiohttp_stream_reader.read(n)<block_end><def_stmt>read self n=-1<block_start><return>make_sync(self.read_async(n))<block_end><async_keyword><def_stmt>readany_async self<block_start><return><await>self._aiohttp_stream_reader.readany()<block_end><def_stmt>readany self<block_start><return>make_sync(self.readany_async())<block_end><async_keyword><def_stmt>readexactly_async self n<block_start><return><await>self._aiohttp_stream_reader.readexactly(n)<block_end><def_stmt>readexactly self n<block_start><return>make_sync(self.readexactly_async(n))<block_end><async_keyword><def_stmt>readline_async self<block_start><return><await>self._aiohttp_stream_reader.readline()<block_end><def_stmt>readline self<block_start><return>make_sync(self.readline_async())<block_end><async_keyword><def_stmt>readchunk_async self<block_start><return><await>self._aiohttp_stream_reader.readchunk()<block_end><def_stmt>readchunk self<block_start><return>make_sync(self.readchunk_async())<block_end><block_end><class_stmt>Client<block_start>"""Client for interacting with VirusTotal. :param apikey: Your VirusTotal API key. :param agent: A string that identifies your application. :param host: By default https://www.virustotal.com, it can be changed for testing purposes. :param trust_env: Get proxies information from HTTP_PROXY/HTTPS_PROXY environment variables if the parameter is True (False by default). :param timeout: A int that determines the number of seconds to wait for a request to timeout (300 by default). :param proxy: A string indicating the proxy to use for requests made by the client (None by default). 
:type apikey: str :type agent: str :type host: str :type trust_env: bool :type timeout: int :type proxy: str """<def_stmt>__init__ self apikey agent="unknown" host=<none> trust_env=<false> timeout=300 proxy=<none><block_start>"""Initialize the client with the provided API key."""<if_stmt><not>isinstance(apikey str)<block_start><raise>ValueError('API key must be a string')<block_end><if_stmt><not>apikey<block_start><raise>ValueError('API key can not be an empty string')<block_end>self._host=host<or>_API_HOST<line_sep>self._apikey=apikey<line_sep>self._agent=agent<line_sep>self._session=<none><line_sep>self._trust_env=trust_env<line_sep>self._timeout=timeout<line_sep>self._proxy=proxy<block_end><def_stmt>_full_url self path *args<block_start><try_stmt><block_start>path=path.format(*args)<block_end><except_stmt>IndexError<block_start><raise>ValueError('Not enough arguments to fill all placeholders in path')<block_end><if_stmt>path.startswith('http')<block_start><return>path<block_end><return>self._host+_ENDPOINT_PREFIX+path<block_end><def_stmt>_get_session self<block_start><if_stmt><not>self._session<block_start>self._session=aiohttp.ClientSession(connector=aiohttp.TCPConnector(ssl=<false>) headers={'X-Apikey':self._apikey 'Accept-Encoding':'gzip' 'User-Agent':_USER_AGENT_FMT.format_map({'agent':self._agent 'version':__version__})} trust_env=self._trust_env timeout=aiohttp.ClientTimeout(total=self._timeout))<block_end><return>self._session<block_end><async_keyword><def_stmt>__aenter__ self<block_start><return>self<block_end><async_keyword><def_stmt>__aexit__ self type value traceback<block_start><await>self.close_async()<block_end><def_stmt>__enter__ self<block_start><return>self<block_end><def_stmt>__exit__ self type value traceback<block_start>self.close()<block_end><def_stmt>_extract_data_from_json self json_response<block_start><if_stmt><not>'data'<in>json_response<block_start><raise>ValueError('response does not returns a data field')<block_end><return>json_response['data']<block_end><async_keyword><def_stmt>_response_to_json self response<block_start>error=<await>self.get_error_async(response)<if_stmt>error<block_start><raise>error<block_end><return><await>response.json_async()<block_end><async_keyword><def_stmt>_response_to_object self response<block_start>json_response=<await>self._response_to_json(response)<try_stmt><block_start><return>Object.from_dict(self._extract_data_from_json(json_response))<block_end><except_stmt>ValueError<as>err<block_start><raise>ValueError(f'response is not an object: {err}')<block_end><block_end><async_keyword><def_stmt>close_async self<block_start>"""Like :func:`close` but returns a coroutine."""<if_stmt>self._session<block_start><await>self._session.close()<line_sep>self._session=<none><block_end><block_end><def_stmt>close self<block_start>"""Closes the client. When the client is not needed anymore it should be closed for releasing resources like TCP connections. """<line_sep><return>make_sync(self.close_async())<block_end><def_stmt>delete self path *path_args<block_start>"""Sends a DELETE request to a given API endpoint. :param path: Path to API endpoint, can contain format placeholders {}. :param path_args: A variable number of arguments that are put into any placeholders used in path. :type path: str :returns: An instance of :class:`ClientResponse`. 
"""<line_sep><return>make_sync(self.delete_async(path *path_args))<block_end><async_keyword><def_stmt>delete_async self path *path_args<block_start>"""Like :func:`delete` but returns a coroutine."""<line_sep><return>ClientResponse(<await>self._get_session().delete(self._full_url(path *path_args) proxy=self._proxy))<block_end><def_stmt>download_file self hash file<block_start>"""Downloads a file given its hash (SHA-256, SHA-1 or MD5). The file identified by the hash will be written to the provided file object. The file object must be opened in write binary mode ('wb'). :param hash: File hash. :param file: A file object where the downloaded file will be written to. :type hash: str :type file: file-like object """<line_sep><return>make_sync(self.download_file_async(hash file))<block_end><async_keyword><def_stmt>download_file_async self hash file<block_start>"""Like :func:`download_file` but returns a coroutine."""<line_sep>response=<await>self.get_async(f'/files/{hash}/download')<line_sep>error=<await>self.get_error_async(response)<if_stmt>error<block_start><raise>error<block_end><while_stmt><true><block_start>chunk=<await>response.content.read_async(1024<times>1024)<if_stmt><not>chunk<block_start><break><block_end>file.write(chunk)<block_end><block_end><def_stmt>feed self feed_type cursor=<none><block_start>"""Returns an iterator for a VirusTotal feed. This functions returns an iterator that allows to retrieve a continuous stream of files as they are scanned by VirusTotal. See the documentation for the :class:`Feed` class for more details. :param feed_type: One of the supported feed types enumerated in :class:`FeedType`. :param cursor: An optional cursor indicating where to start. This argument can be a string in the format 'YYYMMDDhhmm' indicating the date and time of the first package that will be retrieved. :type hash: :class:`vt.FeedType` :type cursor: str """<line_sep><return>Feed(self feed_type cursor=cursor)<block_end><def_stmt>get self path *path_args params=<none><block_start>"""Sends a GET request to a given API endpoint. This is a low-level function that returns a raw HTTP response, no error checking nor response parsing is performed. See :func:`get_json`, :func:`get_data` and :func:`get_object` for higher-level functions. :param path: Path to API endpoint, can contain format placeholders {}. :param path_args: A variable number of arguments that are put into any placeholders used in path. :param params: Parameters sent in the request. :type path: str :type params: dict :returns: An instance of :class:`ClientResponse`. """<line_sep><return>make_sync(self.get_async(path *path_args params=params))<block_end><async_keyword><def_stmt>get_async self path *path_args params=<none><block_start>"""Like :func:`get` but returns a coroutine."""<line_sep><return>ClientResponse(<await>self._get_session().get(self._full_url(path *path_args) params=params proxy=self._proxy))<block_end><def_stmt>get_data self path *path_args params=<none><block_start>"""Sends a GET request to a given API endpoint and returns response's data. Most VirusTotal API responses are JSON-encoded with the following format:: {"data": <response data>} This function parses the server's response and return only the data, if the response is not in the expected format an exception is raised. For endpoints where the data is a VirusTotal object you can use :func:`get_object` instead. :param path: Path to API endpoint, can contain format placeholders {}. 
:param path_args: A variable number of arguments that are put into any placeholders used in path. :param params: Parameters sent in the request. :type path: str :type params: dict :returns: Whatever the server returned in the response's data field, it may be a dict, list, string or some other Python type, depending on the endpoint called. """<line_sep><return>make_sync(self.get_data_async(path *path_args params=params))<block_end><async_keyword><def_stmt>get_data_async self path *path_args params=<none><block_start>"""Like :func:`get_data` but returns a coroutine."""<line_sep>json_response=<await>self.get_json_async(path *path_args params=params)<line_sep><return>self._extract_data_from_json(json_response)<block_end><async_keyword><def_stmt>get_error_async self response<block_start>"""Given a :class:`ClientResponse` returns a :class:`APIError` This function checks if the response from the VirusTotal backend was an error and returns the appropriate :class:`APIError` or None if no error occurred. :param response: A :class:`ClientResponse` instance. :returns: An instance of :class:`APIError` or None. """<if_stmt>response.status<eq>200<block_start><return><none><block_end><if_stmt>response.status<ge>400<and>response.status<le>499<block_start><if_stmt>response.content_type<eq>'application/json'<block_start>json_response=<await>response.json_async()<line_sep>error=json_response.get('error')<if_stmt>error<block_start><return>APIError.from_dict(error)<block_end><block_end><return>APIError('ClientError' <await>response.text_async())<block_end><return>APIError('ServerError' <await>response.text_async())<block_end><def_stmt>get_json self path *path_args params=<none><block_start>"""Sends a GET request to a given API endpoint and parses the response. Most VirusTotal API responses are JSON-encoded. This function parses the JSON, check for errors, and return the server response as a dictionary. :param path: Path to API endpoint, can contain format placeholders {}. :param path_args: A variable number of arguments that are put into any placeholders used in path. :param params: Parameters sent in the request. :type path: str :type params: dict :returns: A dictionary with the backend's response. """<line_sep><return>make_sync(self.get_json_async(path *path_args params=params))<block_end><async_keyword><def_stmt>get_json_async self path *path_args params=<none><block_start>"""Like :func:`get_json` but returns a coroutine."""<line_sep>response=<await>self.get_async(path *path_args params=params)<line_sep><return><await>self._response_to_json(response)<block_end><def_stmt>get_object self path *path_args params=<none><block_start>"""Sends a GET request to a given API endpoint and returns an object. The endpoint specified must return an object, not a collection. This means that get_object can be used with endpoints like /files/{file_id} and /urls/{url_id}, which return an individual object but not with /comments, which returns a collection of objects. :param path: Path to API endpoint, can contain format placeholders {}. :param path_args: A variable number of arguments that are put into any placeholders used in path. :param params: Parameters sent in the request. :type path: str :type params: dict :returns: An instance of :class:`Object`. 
"""<line_sep><return>make_sync(self.get_object_async(path *path_args params=params))<block_end><async_keyword><def_stmt>get_object_async self path *path_args params=<none><block_start>"""Like :func:`get_object` but returns a coroutine."""<line_sep>response=<await>self.get_async(path *path_args params=params)<line_sep><return><await>self._response_to_object(response)<block_end><def_stmt>patch self path *path_args data=<none><block_start>"""Sends a PATCH request to a given API endpoint. This is a low-level function that returns a raw HTTP response, no error checking nor response parsing is performed. See :func:`patch_object` for a higher-level function. :param path: Path to API endpoint, can contain format placeholders {}. :param path_args: A variable number of arguments that are put into any placeholders used in path. :param data: Data sent in the request body. :type path: str :type data: A string or bytes :returns: An instance of :class:`ClientResponse`. """<line_sep><return>make_sync(self.patch_async(path *path_args data))<block_end><async_keyword><def_stmt>patch_async self path *path_args data=<none><block_start>"""Like :func:`patch` but returns a coroutine."""<line_sep><return>ClientResponse(<await>self._get_session().patch(self._full_url(path *path_args) data=data proxy=self._proxy))<block_end><def_stmt>patch_object self path *path_args obj<block_start>"""Sends a PATCH request for modifying an object. This function modifies an object. The endpoint must be one that identifies an object, like /intelligence/hunting_rulesets/{id}. :param path: Path to API endpoint, can contain format placeholders {}. :param path_args: A variable number of arguments that are put into any placeholders used in path. :param obj: Object that has been modified. :type path: str :type obj: :class:`Object` :returns: An instance of :class:`Object` representing the same object after the changes has been applied. """<line_sep><return>make_sync(self.patch_object_async(path *path_args obj=obj))<block_end><async_keyword><def_stmt>patch_object_async self path *path_args obj<block_start>"""Like :func:`patch_object` but returns a coroutine."""<line_sep>data=json.dumps({'data':obj.to_dict(modified_attributes_only=<true>)})<line_sep>response=<await>self.patch_async(path *path_args data=data)<line_sep><return><await>self._response_to_object(response)<block_end><def_stmt>post self path *path_args data=<none><block_start>"""Sends a POST request to a given API endpoint. This is a low-level function that returns a raw HTTP response, no error checking nor response parsing is performed. See :func:`post_object` for a higher-level function. :param path: Path to API endpoint, can contain format placeholders {}. :param path_args: A variable number of arguments that are put into any placeholders used in path. :param data: Data sent in the request body. :type path: str :type data: A string or bytes :returns: An instance of :class:`ClientResponse`. """<line_sep><return>make_sync(self.post_async(path *path_args data=data))<block_end><async_keyword><def_stmt>post_async self path *path_args data=<none><block_start>"""Like :func:`post` but returns a coroutine."""<line_sep><return>ClientResponse(<await>self._get_session().post(self._full_url(path *path_args) data=data proxy=self._proxy))<block_end><def_stmt>post_object self path *path_args obj<block_start>"""Sends a POST request for creating an object. This function create a new object. The endpoint must be one that identifies a collection, like /intelligence/hunting_rulesets. 
:param path: Path to API endpoint. :param path_args: A variable number of arguments that are put into any placeholders used in path. :param obj: Instance :class:`Object` with the type expected by the API endpoint. :type path: str :type obj: :class:`Object` :returns: An instance of :class:`Object` representing the new object. """<line_sep><return>make_sync(self.post_object_async(path *path_args obj=obj))<block_end><async_keyword><def_stmt>post_object_async self path *path_args obj<block_start>"""Like :func:`post_object` but returns a coroutine."""<line_sep>data=json.dumps({'data':obj.to_dict()})<line_sep>response=<await>self.post_async(path *path_args data=data)<line_sep><return><await>self._response_to_object(response)<block_end><def_stmt>iterator self path *path_args params=<none> cursor=<none> limit=<none> batch_size=0<block_start>"""Returns an iterator for the collection specified by the given path. The endpoint specified by path must return a collection of objects. An example of such an endpoint are /comments and /intelligence/search. :param path: Path to API endpoint returning a collection. :param path_args: A variable number of arguments that are put into any placeholders used in path. :param params: Additional parameters passed to the endpoint. :param cursor: Cursor for resuming the iteration at the point it was left previously. A cursor can be obtained with Iterator.cursor(). This cursor is not the same one returned by the VirusTotal API. :param limit: Maximum number of objects that will be returned by the iterator. If a limit is not provided the iterator continues until it reaches the last object in the collection. :param batch_size: Maximum number of objects retrieved on each call to the endpoint. If not provided the server will decide how many objects to return. :type path: str :type params: dict :type cursor: str :type limit: int :type batch_size: int :returns: An instance of :class:`Iterator`. """<line_sep><return>Iterator(self self._full_url(path *path_args) params=params cursor=cursor limit=limit batch_size=batch_size)<block_end><def_stmt>scan_file self file wait_for_completion=<false><block_start>"""Scans a file. :param file: File to be scanned. :param wait_for_completion: If True the function doesn't return until the analysis has been completed. :type file: File-like object. :type wait_for_completion: bool :returns: An instance of :class:`Object` of analysis type. """<line_sep><return>make_sync(self.scan_file_async(file wait_for_completion=wait_for_completion))<block_end><async_keyword><def_stmt>scan_file_async self file wait_for_completion=<false><block_start>"""Like :func:`scan_file` but returns a coroutine."""<if_stmt><not>isinstance(file io.IOBase)<block_start><raise>TypeError(f'Expected a file to be a file object, got {type(file)}')<block_end># The snippet below could be replaced with this simpler code: # # form_data = aiohttp.FormData() # form_data.add_field('file', file) # # However, aiohttp.FormData assumes that the server supports RFC 5987 and # send a Content-Disposition like: # # 'form-data; name="file"; filename="foobar"; filename*=UTF-8''foobar # # AppEngine's upload handler doesn't like the filename*=UTF-8''foobar field # and fails with this Content-Disposition header. 
part=aiohttp.get_payload(file)<line_sep>filename=file.name<if>hasattr(file 'name')<else>'unknown'<line_sep>disposition=f'form-data; name="file"; filename="{filename}"'<line_sep>part.headers['Content-Disposition']=disposition<line_sep>form_data=aiohttp.MultipartWriter('form-data')<line_sep>form_data.append_payload(part)<line_sep>upload_url=<await>self.get_data_async('/files/upload_url')<line_sep>response=ClientResponse(<await>self._get_session().post(upload_url data=form_data proxy=self._proxy))<line_sep>analysis=<await>self._response_to_object(response)<if_stmt>wait_for_completion<block_start>analysis=<await>self._wait_for_analysis_completion(analysis)<block_end><return>analysis<block_end><def_stmt>scan_url self url wait_for_completion=<false><block_start>"""Scans a URL. :param url: The URL to be scanned. :param wait_for_completion: If True the function doesn't return until the analysis has been completed. :type url: str :type wait_for_completion: bool :returns: An instance of :class:`Object` of analysis type. """<line_sep><return>make_sync(self.scan_url_async(url wait_for_completion=wait_for_completion))<block_end><async_keyword><def_stmt>scan_url_async self url wait_for_completion=<false><block_start>"""Like :func:`scan_url` but returns a coroutine."""<line_sep>form_data=aiohttp.FormData()<line_sep>form_data.add_field('url' url)<line_sep>response=ClientResponse(<await>self._get_session().post(self._full_url('/urls') data=form_data proxy=self._proxy))<line_sep>analysis=<await>self._response_to_object(response)<if_stmt>wait_for_completion<block_start>analysis=<await>self._wait_for_analysis_completion(analysis)<block_end><return>analysis<block_end><async_keyword><def_stmt>_wait_for_analysis_completion self analysis<block_start><while_stmt><true><block_start>analysis=<await>self.get_object_async('/analyses/{}' analysis.id)<if_stmt>analysis.status<eq>'completed'<block_start><break><block_end><await>asyncio.sleep(20)<block_end><return>analysis<block_end><block_end>
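# A minimal usage sketch for the Client defined above; it only exercises methods
# implemented in this module (get_object, scan_url, close) plus the url_id() helper.
# The package name `vt` is taken from the docstrings above, while the API key and the
# example URL are placeholders, and analysis.status is the same field the module polls
# in _wait_for_analysis_completion().
<import_stmt>vt<line_sep>client=vt.Client("<your-api-key>")<line_sep># Fetch the report object for a URL; url_id() builds the identifier for /urls/{id}. url_report=client.get_object("/urls/{}" vt.url_id("http://www.example.com"))<line_sep># Submit the URL for scanning and block until the analysis is completed. analysis=client.scan_url("http://www.example.com" wait_for_completion=<true>)<line_sep>print(analysis.status)<line_sep>client.close()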
<import_from_stmt>typing List Tuple Set Dict Optional cast<import_from_stmt>..types TealType<import_from_stmt>..ast Expr Return Seq ScratchSlot SubroutineDefinition SubroutineDeclaration <import_from_stmt>..ir Mode TealComponent TealOp TealBlock TealSimpleBlock<import_from_stmt>..errors TealInputError TealInternalError<import_from_stmt>.sort sortBlocks<import_from_stmt>.flatten flattenBlocks flattenSubroutines<import_from_stmt>.scratchslots assignScratchSlotsToSubroutines<import_from_stmt>.subroutines findRecursionPoints spillLocalSlotsDuringRecursion resolveSubroutines <import_from_stmt>.constants createConstantBlocks<line_sep>MAX_TEAL_VERSION=5<line_sep>MIN_TEAL_VERSION=2<line_sep>DEFAULT_TEAL_VERSION=MIN_TEAL_VERSION<class_stmt>CompileOptions<block_start><def_stmt>__init__ self * mode:Mode=Mode.Signature version:int=DEFAULT_TEAL_VERSION <arrow><none><block_start>self.mode=mode<line_sep>self.version=version<line_sep>self.currentSubroutine:Optional[SubroutineDefinition]=<none><line_sep>self.breakBlocksStack:List[List[TealSimpleBlock]]=[]<line_sep>self.continueBlocksStack:List[List[TealSimpleBlock]]=[]<block_end><def_stmt>setSubroutine self subroutine:Optional[SubroutineDefinition]<arrow><none><block_start>self.currentSubroutine=subroutine<block_end><def_stmt>enterLoop self<arrow><none><block_start>self.breakBlocksStack.append([])<line_sep>self.continueBlocksStack.append([])<block_end><def_stmt>isInLoop self<arrow>bool<block_start><return>len(self.breakBlocksStack)<ne>0<block_end><def_stmt>addLoopBreakBlock self block:TealSimpleBlock<arrow><none><block_start><if_stmt>len(self.breakBlocksStack)<eq>0<block_start><raise>TealInternalError("Cannot add break block when no loop is active")<block_end>self.breakBlocksStack[-1].append(block)<block_end><def_stmt>addLoopContinueBlock self block:TealSimpleBlock<arrow><none><block_start><if_stmt>len(self.continueBlocksStack)<eq>0<block_start><raise>TealInternalError("Cannot add continue block when no loop is active")<block_end>self.continueBlocksStack[-1].append(block)<block_end><def_stmt>exitLoop self<arrow>Tuple[List[TealSimpleBlock] List[TealSimpleBlock]]<block_start><if_stmt>len(self.breakBlocksStack)<eq>0<or>len(self.continueBlocksStack)<eq>0<block_start><raise>TealInternalError("Cannot exit loop when no loop is active")<block_end><return>(self.breakBlocksStack.pop() self.continueBlocksStack.pop())<block_end><block_end><def_stmt>verifyOpsForVersion teal:List[TealComponent] version:int<block_start>"""Verify that all TEAL operations are allowed in the specified version. Args: teal: Code to check. mode: The version to check against. Raises: TealInputError: if teal contains an operation not allowed in version. """<for_stmt>stmt teal<block_start><if_stmt>isinstance(stmt TealOp)<block_start>op=stmt.getOp()<if_stmt>op.min_version<g>version<block_start><raise>TealInputError("Op not supported in TEAL version {}: {}. Minimum required version is {}".format(version op op.min_version))<block_end><block_end><block_end><block_end><def_stmt>verifyOpsForMode teal:List[TealComponent] mode:Mode<block_start>"""Verify that all TEAL operations are allowed in mode. Args: teal: Code to check. mode: The mode to check against. Raises: TealInputError: if teal contains an operation not allowed in mode. 
"""<for_stmt>stmt teal<block_start><if_stmt>isinstance(stmt TealOp)<block_start>op=stmt.getOp()<if_stmt><not>op.mode&mode<block_start><raise>TealInputError("Op not supported in {} mode: {}".format(mode.name op))<block_end><block_end><block_end><block_end><def_stmt>compileSubroutine ast:Expr options:CompileOptions subroutineMapping:Dict[Optional[SubroutineDefinition] List[TealComponent]] subroutineGraph:Dict[SubroutineDefinition Set[SubroutineDefinition]] subroutineBlocks:Dict[Optional[SubroutineDefinition] TealBlock] <arrow><none><block_start>currentSubroutine=(cast(SubroutineDeclaration ast).subroutine<if>isinstance(ast SubroutineDeclaration)<else><none>)<if_stmt><not>ast.has_return()<block_start><if_stmt>ast.type_of()<eq>TealType.none<block_start>ast=Seq([ast Return()])<block_end><else_stmt><block_start>ast=Return(ast)<block_end><block_end>options.setSubroutine(currentSubroutine)<line_sep>start,end=ast.__teal__(options)<line_sep>start.addIncoming()<line_sep>start.validateTree()<line_sep>start=TealBlock.NormalizeBlocks(start)<line_sep>start.validateTree()<line_sep>order=sortBlocks(start end)<line_sep>teal=flattenBlocks(order)<line_sep>verifyOpsForVersion(teal options.version)<line_sep>verifyOpsForMode(teal options.mode)<line_sep>subroutineMapping[currentSubroutine]=teal<line_sep>subroutineBlocks[currentSubroutine]=start<line_sep>referencedSubroutines:Set[SubroutineDefinition]=set()<for_stmt>stmt teal<block_start><for_stmt>subroutine stmt.getSubroutines()<block_start>referencedSubroutines.add(subroutine)<block_end><block_end><if_stmt>currentSubroutine<is><not><none><block_start>subroutineGraph[currentSubroutine]=referencedSubroutines<block_end>newSubroutines=referencedSubroutines-subroutineMapping.keys()<for_stmt>subroutine sorted(newSubroutines key=<lambda>subroutine:subroutine.id)<block_start>compileSubroutine(subroutine.getDeclaration() options subroutineMapping subroutineGraph subroutineBlocks )<block_end><block_end><def_stmt>compileTeal ast:Expr mode:Mode * version:int=DEFAULT_TEAL_VERSION assembleConstants:bool=<false> <arrow>str<block_start>"""Compile a PyTeal expression into TEAL assembly. Args: ast: The PyTeal expression to assemble. mode: The mode of the program to assemble. Must be Signature or Application. version (optional): The TEAL version used to assemble the program. This will determine which expressions and fields are able to be used in the program and how expressions compile to TEAL opcodes. Defaults to 2 if not included. assembleConstants (optional): When true, the compiler will produce a program with fully assembled constants, rather than using the pseudo-ops `int`, `byte`, and `addr`. These constants will be assembled in the most space-efficient way, so enabling this may reduce the compiled program's size. Enabling this option requires a minimum TEAL version of 3. Defaults to false. Returns: A TEAL assembly program compiled from the input expression. Raises: TealInputError: if an operation in ast is not supported by the supplied mode and version. TealInternalError: if an internal error is encounter during compilation. """<if_stmt>(<not>(MIN_TEAL_VERSION<le>version<le>MAX_TEAL_VERSION)<or>type(version)<is><not>int)<block_start><raise>TealInputError("Unsupported TEAL version: {}. 
Expected an integer in the range [{}, {}]".format(version MIN_TEAL_VERSION MAX_TEAL_VERSION))<block_end>options=CompileOptions(mode=mode version=version)<line_sep>subroutineMapping:Dict[Optional[SubroutineDefinition] List[TealComponent]]=dict()<line_sep>subroutineGraph:Dict[SubroutineDefinition Set[SubroutineDefinition]]=dict()<line_sep>subroutineBlocks:Dict[Optional[SubroutineDefinition] TealBlock]=dict()<line_sep>compileSubroutine(ast options subroutineMapping subroutineGraph subroutineBlocks)<line_sep>localSlotAssignments=assignScratchSlotsToSubroutines(subroutineMapping subroutineBlocks)<line_sep>spillLocalSlotsDuringRecursion(version subroutineMapping subroutineGraph localSlotAssignments)<line_sep>subroutineLabels=resolveSubroutines(subroutineMapping)<line_sep>teal=flattenSubroutines(subroutineMapping subroutineLabels)<if_stmt>assembleConstants<block_start><if_stmt>version<l>3<block_start><raise>TealInternalError("The minimum TEAL version required to enable assembleConstants is 3. The current version is {}".format(version))<block_end>teal=createConstantBlocks(teal)<block_end>lines=["#pragma version {}".format(version)]<line_sep>lines<augadd>[i.assemble()<for>i teal]<line_sep><return>"\n".join(lines)<block_end>
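# A usage sketch for compileTeal() as defined above. Mode and the version bounds come
# from this module; `Int` and the top-level `pyteal` package name are assumptions about
# the surrounding library rather than something defined in this file.
<import_from_stmt>pyteal compileTeal Mode Int<line_sep># Compile a trivial always-approve expression for Application mode at TEAL version 5. teal_source=compileTeal(Int(1) mode=Mode.Application version=5)<line_sep># The result is TEAL assembly text, starting with the "#pragma version 5" line that # compileTeal() prepends. print(teal_source)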
# # Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # """An example that processes streaming NYC Taxi data with SqlTransform. This example reads from the PubSub NYC Taxi stream described in https://github.com/googlecodelabs/cloud-dataflow-nyc-taxi-tycoon, aggregates the data in 15s windows using SqlTransform, and writes the output to a user-defined PubSub topic. Java 8 must be available to run this pipeline, and the --experiments=use_runner_v2 flag must be passed when running on Dataflow. Docker must also be available to run this pipeline locally. """<line_sep># pytype: skip-file <import_stmt>json<import_stmt>logging<import_stmt>apache_beam<as>beam<import_from_stmt>apache_beam.options.pipeline_options PipelineOptions<import_from_stmt>apache_beam.transforms.sql SqlTransform<def_stmt>run output_topic pipeline_args<block_start>pipeline_options=PipelineOptions(pipeline_args save_main_session=<true> streaming=<true>)<with_stmt>beam.Pipeline(options=pipeline_options)<as>pipeline<block_start>_=(pipeline|beam.io.ReadFromPubSub(topic='projects/pubsub-public-data/topics/taxirides-realtime' timestamp_attribute="ts").with_output_types(bytes)|"Parse JSON payload"<rshift>beam.Map(json.loads)# Use beam.Row to create a schema-aware PCollection |"Create beam Row"<rshift>beam.Map(<lambda>x:beam.Row(ride_status=str(x['ride_status']) passenger_count=int(x['passenger_count'])))# SqlTransform will computes result within an existing window |"15s fixed windows"<rshift>beam.WindowInto(beam.window.FixedWindows(15))# Aggregate drop offs and pick ups that occur within each 15s window |SqlTransform(""" SELECT ride_status, COUNT(*) AS num_rides, SUM(passenger_count) AS total_passengers FROM PCOLLECTION WHERE NOT ride_status = 'enroute' GROUP BY ride_status""")# SqlTransform yields python objects with attributes corresponding to # the outputs of the query. # Collect those attributes, as well as window information, into a dict |"Assemble Dictionary"<rshift>beam.Map(<lambda>row window=beam.DoFn.WindowParam:{"ride_status":row.ride_status "num_rides":row.num_rides "total_passengers":row.total_passengers "window_start":window.start.to_rfc3339() "window_end":window.end.to_rfc3339()})|"Convert to JSON"<rshift>beam.Map(json.dumps)|"UTF-8 encode"<rshift>beam.Map(<lambda>s:s.encode("utf-8"))|beam.io.WriteToPubSub(topic=output_topic))<block_end><block_end><if_stmt>__name__<eq>'__main__'<block_start>logging.getLogger().setLevel(logging.INFO)<import_stmt>argparse<line_sep>parser=argparse.ArgumentParser()<line_sep>parser.add_argument('--output_topic' dest='output_topic' required=<true> help=('Cloud PubSub topic to write to (e.g. 
'<concat>'projects/my-project/topics/my-topic), must be created prior to '<concat>'running the pipeline.'))<line_sep>known_args,pipeline_args=parser.parse_known_args()<line_sep>run(known_args.output_topic pipeline_args)<block_end>
<import_stmt>torch<import_stmt>torch.nn<as>nn<import_stmt>torchvision<import_from_stmt>functools reduce<def_stmt>Conv3x3BNReLU in_channels out_channels stride groups<block_start><return>nn.Sequential(nn.Conv2d(in_channels=in_channels out_channels=out_channels kernel_size=3 stride=stride padding=1 groups=groups) nn.BatchNorm2d(out_channels) nn.ReLU6(inplace=<true>))<block_end><def_stmt>Conv1x1BNReLU in_channels out_channels<block_start><return>nn.Sequential(nn.Conv2d(in_channels=in_channels out_channels=out_channels kernel_size=1 stride=1) nn.BatchNorm2d(out_channels) nn.ReLU6(inplace=<true>))<block_end><def_stmt>Conv1x1BN in_channels out_channels<block_start><return>nn.Sequential(nn.Conv2d(in_channels=in_channels out_channels=out_channels kernel_size=1 stride=1) nn.BatchNorm2d(out_channels))<block_end><class_stmt>InvertedResidual(nn.Module)<block_start><def_stmt>__init__ self in_channels out_channels stride expansion_factor=6<block_start>super(InvertedResidual self).__init__()<line_sep>self.stride=stride<line_sep>mid_channels=(in_channels<times>expansion_factor)<line_sep>self.bottleneck=nn.Sequential(Conv1x1BNReLU(in_channels mid_channels) Conv3x3BNReLU(mid_channels mid_channels stride groups=mid_channels) Conv1x1BN(mid_channels out_channels))<if_stmt>self.stride<eq>1<block_start>self.shortcut=Conv1x1BN(in_channels out_channels)<block_end><block_end><def_stmt>forward self x<block_start>out=self.bottleneck(x)<line_sep>out=(out+self.shortcut(x))<if>self.stride<eq>1<else>out<line_sep><return>out<block_end><block_end><class_stmt>MobileNetV2(nn.Module)<block_start><def_stmt>__init__ self num_classes=1000<block_start>super(MobileNetV2 self).__init__()<line_sep>self.first_conv=Conv3x3BNReLU(3 32 2 groups=1)<line_sep>self.layer1=self.make_layer(in_channels=32 out_channels=16 stride=1 block_num=1)<line_sep>self.layer2=self.make_layer(in_channels=16 out_channels=24 stride=2 block_num=2)<line_sep>self.layer3=self.make_layer(in_channels=24 out_channels=32 stride=2 block_num=3)<line_sep>self.layer4=self.make_layer(in_channels=32 out_channels=64 stride=2 block_num=4)<line_sep>self.layer5=self.make_layer(in_channels=64 out_channels=96 stride=1 block_num=3)<line_sep>self.layer6=self.make_layer(in_channels=96 out_channels=160 stride=2 block_num=3)<line_sep>self.layer7=self.make_layer(in_channels=160 out_channels=320 stride=1 block_num=1)<line_sep>self.last_conv=Conv1x1BNReLU(320 1280)<line_sep>self.avgpool=nn.AvgPool2d(kernel_size=7 stride=1)<line_sep>self.dropout=nn.Dropout(p=0.2)<line_sep>self.linear=nn.Linear(in_features=1280 out_features=num_classes)<block_end><def_stmt>make_layer self in_channels out_channels stride block_num<block_start>layers=[]<line_sep>layers.append(InvertedResidual(in_channels out_channels stride))<for_stmt>i range(1 block_num)<block_start>layers.append(InvertedResidual(out_channels out_channels 1))<block_end><return>nn.Sequential(*layers)<block_end><def_stmt>init_params self<block_start><for_stmt>m self.modules()<block_start><if_stmt>isinstance(m nn.Conv2d)<block_start>nn.init.kaiming_normal_(m.weight)<line_sep>nn.init.constant_(m.bias 0)<block_end><elif_stmt>isinstance(m nn.Linear)<or>isinstance(m nn.BatchNorm2d)<block_start>nn.init.constant_(m.weight 1)<line_sep>nn.init.constant_(m.bias 0)<block_end><block_end><block_end><def_stmt>forward self 
x<block_start>x=self.first_conv(x)<line_sep>x=self.layer1(x)<line_sep>x=self.layer2(x)<line_sep>x=self.layer3(x)<line_sep>x=self.layer4(x)<line_sep>x=self.layer5(x)<line_sep>x=self.layer6(x)<line_sep>x=self.layer7(x)<line_sep>x=self.last_conv(x)<line_sep>x=self.avgpool(x)<line_sep>x=x.view(x.size(0) -1)<line_sep>x=self.dropout(x)<line_sep>out=self.linear(x)<line_sep><return>out<block_end><block_end><if_stmt>__name__<eq>'__main__'<block_start>model=MobileNetV2()<line_sep># model = torchvision.models.MobileNetV2() print(model)<line_sep>input=torch.randn(1 3 224 224)<line_sep>out=model(input)<line_sep>print(out.shape)<block_end>
# # Copyright (c) 2021 Airbyte, Inc., all rights reserved. # <import_stmt>time<import_from_stmt>source_freshdesk.utils CallCredit<def_stmt>test_consume_one <block_start>"""Multiple consumptions of 1 cred will reach the limit"""<line_sep>credit=CallCredit(balance=3 reload_period=1)<line_sep>ts_1=time.time()<for_stmt>i range(4)<block_start>credit.consume(1)<block_end>ts_2=time.time()<assert_stmt>1<le>ts_2-ts_1<l>2<block_end><def_stmt>test_consume_many <block_start>"""Consumptions of N creds will reach the limit and decrease the balance"""<line_sep>credit=CallCredit(balance=3 reload_period=1)<line_sep>ts_1=time.time()<line_sep>credit.consume(1)<line_sep>credit.consume(3)<line_sep>ts_2=time.time()<line_sep># the balance has already decreased, so a single cred is enough to reach the limit credit.consume(1)<line_sep>ts_3=time.time()<assert_stmt>1<le>ts_2-ts_1<l>2<assert_stmt>1<le>ts_3-ts_2<l>2<block_end>
""" Copyright (c) 2018-2021 Intel Corporation Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """<import_stmt>pytest<line_sep>pytest.importorskip('accuracy_checker.launcher.onnx_launcher')<import_stmt>cv2<import_stmt>numpy<as>np<import_from_stmt>accuracy_checker.launcher.launcher create_launcher<import_from_stmt>accuracy_checker.config ConfigError<def_stmt>old_onnxrunitme models_dir<block_start><import_stmt>onnxruntime<as>rt<line_sep>sess=rt.InferenceSession(str(models_dir/"samplenet.onnx"))<try_stmt><block_start>sess.get_providers()<line_sep><return><false><block_end><except_stmt>AttributeError<block_start><return><true><block_end><block_end><def_stmt>get_onnx_test_model models_dir device=<none> ep=<none><block_start>config={"framework":"onnx_runtime" "model":str(models_dir/"samplenet.onnx") "adapter":"classification" }<if_stmt>device<is><not><none><block_start>config['device']=device<block_end><if_stmt>ep<is><not><none><block_start>config['execution_providers']=ep<block_end><return>create_launcher(config)<block_end><class_stmt>TestONNXRuntimeLauncher<block_start><def_stmt>test_launcher_creates self models_dir<block_start>launcher=get_onnx_test_model(models_dir)<assert_stmt>launcher.inputs['data']<eq>[1 3 32 32]<assert_stmt>launcher.output_blob<eq>'fc3'<block_end><def_stmt>test_infer self data_dir models_dir<block_start>onnx_test_model=get_onnx_test_model(models_dir)<line_sep>_,_,h,w=onnx_test_model.inputs['data']<line_sep>img_raw=cv2.imread(str(data_dir/'1.jpg'))<line_sep>img_rgb=cv2.cvtColor(img_raw cv2.COLOR_BGR2RGB)<line_sep>img_resized=cv2.resize(img_rgb (w h))<line_sep>input_blob=np.transpose([img_resized] (0 3 1 2))<line_sep>res=onnx_test_model.predict([{'data':input_blob.astype(np.float32)}] [{}])<assert_stmt>np.argmax(res[0]['fc3'])<eq>7<block_end><def_stmt>test_infer_with_execution_provider self data_dir models_dir<block_start><if_stmt>old_onnxrunitme(models_dir)<block_start>pytest.skip(reason="onnxruntime does not support EP")<block_end>onnx_test_model=get_onnx_test_model(models_dir ep=['CPUExecutionProvider'])<line_sep>_,_,h,w=onnx_test_model.inputs['data']<line_sep>img_raw=cv2.imread(str(data_dir/'1.jpg'))<line_sep>img_rgb=cv2.cvtColor(img_raw cv2.COLOR_BGR2RGB)<line_sep>img_resized=cv2.resize(img_rgb (w h))<line_sep>input_blob=np.transpose([img_resized] (0 3 1 2))<line_sep>res=onnx_test_model.predict([{'data':input_blob.astype(np.float32)}] [{}])<assert_stmt>np.argmax(res[0]['fc3'])<eq>7<block_end><def_stmt>test_auto_model_search self models_dir<block_start>config={"framework":"onnx_runtime" "model":models_dir }<line_sep>launcher=create_launcher(config 'samplenet')<assert_stmt>launcher.model<eq>models_dir/"samplenet.onnx"<block_end><block_end>@pytest.mark.usefixtures('mock_path_exists')<class_stmt>TestONNXRuntimeLauncherConfig<block_start><def_stmt>test_missed_model_in_create_onnx_launcher_raises_config_error_exception 
self<block_start>config={'framework':'onnx_runtime'}<with_stmt>pytest.raises(ConfigError)<block_start>create_launcher(config)<block_end><block_end><def_stmt>test_unsupported_device_in_create_onnx_launcher_raises_config_error_exception self<block_start>config={'framework':'onnx_runtime' 'model':'model.onnx' 'device':'UNSUPPORTED'}<with_stmt>pytest.raises(ConfigError)<block_start>create_launcher(config)<block_end><block_end><block_end>
# Copyright 2015-2016 F5 Networks Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # <import_from_stmt>f5.multi_device.cluster TrustDomain<import_from_stmt>f5.multi_device.exceptions DeviceAlreadyInTrustDomain<import_from_stmt>f5.multi_device.exceptions DeviceNotTrusted<import_stmt>mock<import_stmt>pytest<class_stmt>MockDeviceInfo(object)<block_start><def_stmt>__init__ self name<block_start>self.name=name<line_sep>self.selfDevice='true'<line_sep>self.managementIp='1.1.1.1'<block_end><block_end>@pytest.fixture<def_stmt>BigIPs <block_start>mock_bigips=[]<for_stmt>bigip range(4)<block_start>mock_bigip=mock.MagicMock()<line_sep>mock_bigip.__name='me'<line_sep>mock_bigip.tm.cm.devices.get_collection.return_value=[MockDeviceInfo('test')]<line_sep>mock_bigip.tm.cm.devices.get_collection.__name__='test'<line_sep>mock_bigips.append(mock_bigip)<block_end><return>mock_bigips<block_end>@pytest.fixture<def_stmt>TrustDomainCreateNew BigIPs<block_start>mock_bigips=BigIPs<line_sep>td=TrustDomain()<line_sep><return>td mock_bigips<block_end><def_stmt>test_validate_device_not_trusted TrustDomainCreateNew<block_start>td,mock_bigips=TrustDomainCreateNew<with_stmt>pytest.raises(DeviceNotTrusted)<as>ex<block_start>td.devices=mock_bigips<line_sep>td.validate()<block_end><assert_stmt>"'test' is not trusted by 'test', which trusts: []"<in>str(ex.value)<block_end>@mock.patch('f5.multi_device.trust_domain.TrustDomain._set_attributes')@mock.patch('f5.multi_device.trust_domain.TrustDomain.validate')<def_stmt>test___init__ mock_set_attr mock_validate BigIPs<block_start>mock_bigips=BigIPs<line_sep>td=TrustDomain(devices=mock_bigips)<assert_stmt>td._set_attributes.call_args<eq>mock.call(devices=mock_bigips)<block_end><def_stmt>test__set_attributes BigIPs<block_start>mock_bigips=BigIPs<line_sep>td=TrustDomain()<line_sep>td._set_attributes(devices=mock_bigips partition='test')<assert_stmt>td.devices<eq>mock_bigips<assert_stmt>td.partition<eq>'test'<assert_stmt>td.device_group_name<eq>'device_trust_group'<assert_stmt>td.device_group_type<eq>'sync-only'<block_end>@mock.patch('f5.multi_device.trust_domain.TrustDomain._add_trustee')@mock.patch('f5.multi_device.trust_domain.pollster')<def_stmt>test_create mock_add_trustee mock_pollster TrustDomainCreateNew<block_start>td,mock_bigips=TrustDomainCreateNew<line_sep>td.create(devices=mock_bigips partition='test')<assert_stmt>td.devices<eq>mock_bigips<assert_stmt>td.partition<eq>'test'<assert_stmt>td._add_trustee.call_args_list<eq>[mock.call(mock_bigips[1]) mock.call(mock_bigips[2]) mock.call(mock_bigips[3])]<block_end>@mock.patch('f5.multi_device.trust_domain.TrustDomain._add_trustee')@mock.patch('f5.multi_device.trust_domain.pollster')@mock.patch('f5.multi_device.trust_domain.TrustDomain._remove_trustee')<def_stmt>test_teardown mock_add_trustee mock_pollster mock_rem_trustee TrustDomainCreateNew<block_start>td,mock_bigips=TrustDomainCreateNew<line_sep>td.create(devices=mock_bigips 
partition='test')<line_sep>td.teardown()<assert_stmt>td.domain<eq>{}<assert_stmt>td._remove_trustee.call_args_list<eq>[mock.call(mock_bigips[0]) mock.call(mock_bigips[1]) mock.call(mock_bigips[2]) mock.call(mock_bigips[3])]<block_end>@mock.patch('f5.multi_device.trust_domain.get_device_info')@mock.patch('f5.multi_device.trust_domain.TrustDomain._modify_trust')<def_stmt>test__add_trustee mock_dev_info mock_mod_trust TrustDomainCreateNew<block_start>td,mock_bigips=TrustDomainCreateNew<line_sep>td._set_attributes(devices=mock_bigips partition='test')<line_sep>td._add_trustee(mock_bigips[1])<assert_stmt>td._modify_trust.call_args<eq>mock.call(mock_bigips[0] td._get_add_trustee_cmd mock_bigips[1])<block_end>@mock.patch('f5.multi_device.trust_domain.TrustDomain._modify_trust')<def_stmt>test__add_trustee_already_in_domain mock_mod_trust TrustDomainCreateNew<block_start>td,mock_bigips=TrustDomainCreateNew<line_sep>td._set_attributes(devices=mock_bigips partition='test')<line_sep>td.domain={'test':'device'}<with_stmt>pytest.raises(DeviceAlreadyInTrustDomain)<as>ex<block_start>td._add_trustee(mock_bigips[1])<block_end><assert_stmt>"Device: 'test' is already in this trust domain"<in>str(ex.value)<block_end>
<import_stmt>logging<import_from_stmt>pathlib Path<import_from_stmt>typing Dict Any<import_stmt>pytoml<as>toml<line_sep>_LOGGER=logging.getLogger(__name__)<line_sep>CONF_NAME="sdk_packaging.toml"<line_sep>_SECTION="packaging"<line_sep># Default conf _CONFIG={"package_name":"packagename" "package_nspkg":"packagenspkg" "package_pprint_name":"MyService Management" "package_doc_id":"" "is_stable":<false> "is_arm":<true> "need_msrestazure":<false> # track2 does not need it anymore in setup.py "need_azuremgmtcore":<true> }<def_stmt>read_conf folder:Path<arrow>Dict[str Any]<block_start>conf_path=folder/CONF_NAME<if_stmt><not>conf_path.exists()<block_start><return>{}<block_end><with_stmt>open(conf_path "rb")<as>fd<block_start><return>toml.load(fd)[_SECTION]<block_end><block_end><def_stmt>build_default_conf folder:Path package_name:str<arrow><none><block_start>conf_path=folder/CONF_NAME<if_stmt>conf_path.exists()<block_start>_LOGGER.info("Skipping default conf since the file exists")<line_sep><return><block_end>_LOGGER.info("Build default conf for %s" package_name)<line_sep>conf={_SECTION:_CONFIG.copy()}<line_sep>conf[_SECTION]["package_name"]=package_name<line_sep>conf[_SECTION]["package_nspkg"]=package_name[:package_name.rindex("-")]+"-nspkg"<with_stmt>open(conf_path "w")<as>fd<block_start>toml.dump(conf fd)<block_end><block_end>
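# A usage sketch for the two helpers defined above (read_conf and build_default_conf);
# the SDK folder path and package name below are placeholders, not values taken from
# the original code.
<import_from_stmt>pathlib Path<line_sep>folder=Path("sdk/compute/azure-mgmt-compute")<line_sep># Writes sdk_packaging.toml with the defaults; the nspkg name is derived from the # package name up to its last "-" (here "azure-mgmt-nspkg"). build_default_conf(folder "azure-mgmt-compute")<line_sep># Reads the [packaging] section back as a plain dict. conf=read_conf(folder)<line_sep>print(conf["package_name"] conf["is_arm"])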
<import_from_stmt>tests.base_unittest BaseUnitTest<import_from_stmt>mock patch<import_from_stmt>pypokerengine.engine.round_manager RoundManager<import_from_stmt>pypokerengine.engine.game_evaluator GameEvaluator<import_from_stmt>pypokerengine.engine.poker_constants PokerConstants<as>Const<import_from_stmt>pypokerengine.engine.player Player<import_from_stmt>pypokerengine.engine.pay_info PayInfo<import_from_stmt>pypokerengine.engine.card Card<import_from_stmt>pypokerengine.engine.deck Deck<import_from_stmt>pypokerengine.engine.table Table<class_stmt>RoundManagerTest(BaseUnitTest)<block_start><def_stmt>setUp self<block_start><pass><block_end><def_stmt>test_collect_blind self<block_start>state,_=self.__start_round()<line_sep>players=state["table"].seats.players<line_sep>sb_amount=5<line_sep>self.eq(100-sb_amount players[0].stack)<line_sep>self.eq(100-sb_amount<times>2 players[1].stack)<line_sep>self.eq("SMALLBLIND" players[0].action_histories[-1]["action"])<line_sep>self.eq("BIGBLIND" players[1].action_histories[-1]["action"])<line_sep>self.eq(sb_amount players[0].pay_info.amount)<line_sep>self.eq(sb_amount<times>2 players[1].pay_info.amount)<block_end><def_stmt>test_collect_ante self<block_start>ante=10<line_sep>sb_amount=5<line_sep>table=self.__setup_table()<line_sep>state,_=RoundManager.start_new_round(1 sb_amount ante table)<line_sep>players=state["table"].seats.players<line_sep>self.eq(100-sb_amount-ante players[0].stack)<line_sep>self.eq(100-sb_amount<times>2-ante players[1].stack)<line_sep>self.eq(100-ante players[2].stack)<line_sep>self.eq("ANTE" players[0].action_histories[0]["action"])<line_sep>self.eq("ANTE" players[1].action_histories[0]["action"])<line_sep>self.eq("ANTE" players[2].action_histories[0]["action"])<line_sep>self.eq(sb_amount+ante players[0].pay_info.amount)<line_sep>self.eq(sb_amount<times>2+ante players[1].pay_info.amount)<line_sep>self.eq(ante players[2].pay_info.amount)<line_sep>self.eq(sb_amount+sb_amount<times>2+ante<times>3 GameEvaluator.create_pot(players)[0]["amount"])<block_end><def_stmt>test_collect_ante_skip_loser self<block_start>ante=10<line_sep>sb_amount=5<line_sep>table=self.__setup_table()<line_sep>table.seats.players[2].stack=0<line_sep>table.seats.players[2].pay_info.status=PayInfo.FOLDED<line_sep>state,_=RoundManager.start_new_round(1 sb_amount ante table)<line_sep>players=state["table"].seats.players<line_sep>self.eq(sb_amount+sb_amount<times>2+ante<times>2 GameEvaluator.create_pot(players)[0]["amount"])<block_end><def_stmt>test_deal_holecard self<block_start>state,_=self.__start_round()<line_sep>players=state["table"].seats.players<line_sep>self.eq([Card.from_id(1) Card.from_id(2)] players[0].hole_card)<line_sep>self.eq([Card.from_id(3) Card.from_id(4)] players[1].hole_card)<block_end><def_stmt>test_message_after_start_round self<block_start><with_stmt>patch('pypokerengine.engine.message_builder.MessageBuilder.build_round_start_message' return_value="hoge") patch('pypokerengine.engine.message_builder.MessageBuilder.build_street_start_message' return_value="fuga") patch('pypokerengine.engine.message_builder.MessageBuilder.build_ask_message' return_value="bar")<block_start>_,msgs=self.__start_round()<line_sep>self.eq(("uuid0" "hoge") msgs[0])<line_sep>self.eq(("uuid1" "hoge") msgs[1])<line_sep>self.eq(("uuid2" "hoge") msgs[2])<line_sep>self.eq((-1 "fuga") msgs[3])<line_sep>self.eq(("uuid2" "bar") msgs[4])<block_end><block_end><def_stmt>test_state_after_start_round self<block_start>state,msgs=self.__start_round()<line_sep>self.eq(2 
state["next_player"])<line_sep>self.eq("SMALLBLIND" state["table"].seats.players[0].action_histories[0]["action"])<line_sep>self.eq("BIGBLIND" state["table"].seats.players[1].action_histories[0]["action"])<block_end><def_stmt>test_message_after_apply_action self<block_start><with_stmt>patch('pypokerengine.engine.message_builder.MessageBuilder.build_round_start_message' return_value="hoge") patch('pypokerengine.engine.message_builder.MessageBuilder.build_street_start_message' return_value="fuga") patch('pypokerengine.engine.message_builder.MessageBuilder.build_ask_message' return_value="bar") patch('pypokerengine.engine.message_builder.MessageBuilder.build_game_update_message' return_value="boo")<block_start>state,_=self.__start_round()<line_sep>_,msgs=RoundManager.apply_action(state "call" 10)<line_sep>self.eq((-1 "boo") msgs[0])<line_sep>self.eq(("uuid0" "bar") msgs[1])<block_end><block_end><def_stmt>test_state_after_apply_call self<block_start>state,_=self.__start_round()<line_sep>state,_=RoundManager.apply_action(state "call" 10)<line_sep>self.eq(0 state["next_player"])<line_sep>self.eq("CALL" state["table"].seats.players[2].action_histories[0]["action"])<block_end><def_stmt>test_state_after_apply_raise self<block_start>state,_=self.__start_round()<line_sep>state,_=RoundManager.apply_action(state "raise" 15)<line_sep>self.eq(0 state["next_player"])<line_sep>self.eq("RAISE" state["table"].seats.players[2].action_histories[0]["action"])<block_end><def_stmt>test_message_after_forward_to_flop self<block_start><with_stmt>patch('pypokerengine.engine.message_builder.MessageBuilder.build_street_start_message' return_value="fuga") patch('pypokerengine.engine.message_builder.MessageBuilder.build_ask_message' return_value="bar") patch('pypokerengine.engine.message_builder.MessageBuilder.build_game_update_message' return_value="boo")<block_start>state,_=self.__start_round()<line_sep>state,_=RoundManager.apply_action(state "fold" 0)<line_sep>state,_=RoundManager.apply_action(state "call" 10)<line_sep>_,msgs=RoundManager.apply_action(state "call" 10)<line_sep>self.eq((-1 "boo") msgs[0])<line_sep>self.eq((-1 "fuga") msgs[1])<line_sep>self.eq(("uuid0" "bar") msgs[2])<block_end><block_end><def_stmt>test_state_after_forward_to_flop self<block_start>state,_=self.__start_round()<line_sep>state,_=RoundManager.apply_action(state "fold" 0)<line_sep>state,_=RoundManager.apply_action(state "call" 10)<line_sep>state,_=RoundManager.apply_action(state "call" 10)<line_sep>self.eq(Const.Street.FLOP state["street"])<line_sep>self.eq(0 state["next_player"])<line_sep>self.eq([Card.from_id(cid)<for>cid range(7 10)] state["table"].get_community_card())<line_sep>fetch_player=<lambda>uuid:[p<for>p state["table"].seats.players<if>p.uuid<eq>uuid][0]<line_sep>self.true(all(map(<lambda>p:len(p.action_histories)<eq>0 state["table"].seats.players)))<line_sep>self.eq(2 len(fetch_player("uuid0").round_action_histories[Const.Street.PREFLOP]))<line_sep>self.eq(2 len(fetch_player("uuid1").round_action_histories[Const.Street.PREFLOP]))<line_sep>self.eq(1 len(fetch_player("uuid2").round_action_histories[Const.Street.PREFLOP]))<line_sep>self.assertIsNone(fetch_player("uuid0").round_action_histories[Const.Street.TURN])<block_end><def_stmt>test_state_after_forward_to_turn self<block_start>state,_=self.__start_round()<line_sep>state,_=RoundManager.apply_action(state "fold" 0)<line_sep>state,_=RoundManager.apply_action(state "call" 10)<line_sep>state,_=RoundManager.apply_action(state "call" 
10)<line_sep>state,_=RoundManager.apply_action(state "call" 0)<line_sep>state,msgs=RoundManager.apply_action(state "call" 0)<line_sep>self.eq(Const.Street.TURN state["street"])<line_sep>self.eq([Card.from_id(cid)<for>cid range(7 11)] state["table"].get_community_card())<line_sep>self.eq(3 len(msgs))<line_sep>fetch_player=<lambda>uuid:[p<for>p state["table"].seats.players<if>p.uuid<eq>uuid][0]<line_sep>self.true(all(map(<lambda>p:len(p.action_histories)<eq>0 state["table"].seats.players)))<line_sep>self.eq(2 len(fetch_player("uuid0").round_action_histories[Const.Street.PREFLOP]))<line_sep>self.eq(2 len(fetch_player("uuid1").round_action_histories[Const.Street.PREFLOP]))<line_sep>self.eq(1 len(fetch_player("uuid2").round_action_histories[Const.Street.PREFLOP]))<line_sep>self.eq(1 len(fetch_player("uuid0").round_action_histories[Const.Street.FLOP]))<line_sep>self.eq(1 len(fetch_player("uuid1").round_action_histories[Const.Street.FLOP]))<line_sep>self.eq(0 len(fetch_player("uuid2").round_action_histories[Const.Street.FLOP]))<line_sep>self.assertIsNone(fetch_player("uuid0").round_action_histories[Const.Street.TURN])<block_end><def_stmt>test_state_after_forward_to_river self<block_start>state,_=self.__start_round()<line_sep>state,_=RoundManager.apply_action(state "fold" 0)<line_sep>state,_=RoundManager.apply_action(state "call" 10)<line_sep>state,_=RoundManager.apply_action(state "call" 10)<line_sep>state,_=RoundManager.apply_action(state "call" 0)<line_sep>state,_=RoundManager.apply_action(state "call" 0)<line_sep>state,_=RoundManager.apply_action(state "call" 0)<line_sep>state,msgs=RoundManager.apply_action(state "call" 0)<line_sep>self.eq(Const.Street.RIVER state["street"])<line_sep>self.eq([Card.from_id(cid)<for>cid range(7 12)] state["table"].get_community_card())<line_sep>self.eq(3 len(msgs))<line_sep>fetch_player=<lambda>uuid:[p<for>p state["table"].seats.players<if>p.uuid<eq>uuid][0]<line_sep>self.true(all(map(<lambda>p:len(p.action_histories)<eq>0 state["table"].seats.players)))<line_sep>self.eq(2 len(fetch_player("uuid0").round_action_histories[Const.Street.PREFLOP]))<line_sep>self.eq(2 len(fetch_player("uuid1").round_action_histories[Const.Street.PREFLOP]))<line_sep>self.eq(1 len(fetch_player("uuid2").round_action_histories[Const.Street.PREFLOP]))<line_sep>self.eq(1 len(fetch_player("uuid0").round_action_histories[Const.Street.FLOP]))<line_sep>self.eq(1 len(fetch_player("uuid1").round_action_histories[Const.Street.FLOP]))<line_sep>self.eq(0 len(fetch_player("uuid2").round_action_histories[Const.Street.FLOP]))<line_sep>self.eq(1 len(fetch_player("uuid0").round_action_histories[Const.Street.TURN]))<line_sep>self.eq(1 len(fetch_player("uuid1").round_action_histories[Const.Street.TURN]))<line_sep>self.eq(0 len(fetch_player("uuid2").round_action_histories[Const.Street.TURN]))<line_sep>self.assertIsNone(fetch_player("uuid0").round_action_histories[Const.Street.RIVER])<block_end><def_stmt>test_state_after_showdown self<block_start>mock_return=[1 0]<times>3<with_stmt>patch('pypokerengine.engine.hand_evaluator.HandEvaluator.eval_hand' side_effect=mock_return) patch('pypokerengine.engine.message_builder.MessageBuilder.build_round_result_message' return_value="bogo")<block_start>state,_=self.__start_round()<line_sep>state,_=RoundManager.apply_action(state "fold" 0)<line_sep>state,_=RoundManager.apply_action(state "call" 10)<line_sep>state,_=RoundManager.apply_action(state "call" 10)<line_sep>state,_=RoundManager.apply_action(state "call" 0)<line_sep>state,_=RoundManager.apply_action(state "call" 
0)<line_sep>state,_=RoundManager.apply_action(state "call" 0)<line_sep>state,_=RoundManager.apply_action(state "call" 0)<line_sep>state,_=RoundManager.apply_action(state "call" 0)<line_sep>state,_=RoundManager.apply_action(state "call" 0)<line_sep>self.eq(Const.Street.FINISHED state["street"])<line_sep>self.eq(110 state["table"].seats.players[0].stack)<line_sep>self.eq(90 state["table"].seats.players[1].stack)<line_sep>self.eq(100 state["table"].seats.players[2].stack)<line_sep>self.true(all(map(<lambda>p:len(p.action_histories)<eq>0 state["table"].seats.players)))<line_sep>self.true(all(map(<lambda>p:p.round_action_histories<eq>[<none>]<times>4 state["table"].seats.players)))<block_end><block_end><def_stmt>test_message_after_showdown self<block_start>mock_return=[1 0]<times>3<with_stmt>patch('pypokerengine.engine.hand_evaluator.HandEvaluator.eval_hand' side_effect=mock_return) patch('pypokerengine.engine.message_builder.MessageBuilder.build_game_update_message' return_value="boo") patch('pypokerengine.engine.message_builder.MessageBuilder.build_round_result_message' return_value="foo")<block_start>state,_=self.__start_round()<line_sep>state,_=RoundManager.apply_action(state "fold" 0)<line_sep>state,_=RoundManager.apply_action(state "call" 10)<line_sep>state,_=RoundManager.apply_action(state "call" 10)<line_sep>state,_=RoundManager.apply_action(state "call" 0)<line_sep>state,_=RoundManager.apply_action(state "call" 0)<line_sep>state,_=RoundManager.apply_action(state "call" 0)<line_sep>state,_=RoundManager.apply_action(state "call" 0)<line_sep>state,_=RoundManager.apply_action(state "call" 0)<line_sep>_,msgs=RoundManager.apply_action(state "call" 0)<line_sep>self.eq((-1 "boo") msgs[0])<line_sep>self.eq((-1 "foo") msgs[1])<block_end><block_end><def_stmt>test_table_reset_after_showdown self<block_start>mock_return=[1 0]<times>3<with_stmt>patch('pypokerengine.engine.hand_evaluator.HandEvaluator.eval_hand' side_effect=mock_return) patch('pypokerengine.engine.message_builder.MessageBuilder.build_game_update_message' return_value="boo") patch('pypokerengine.engine.message_builder.MessageBuilder.build_round_result_message' return_value="foo")<block_start>state,_=self.__start_round()<line_sep>state,_=RoundManager.apply_action(state "fold" 0)<line_sep>state,_=RoundManager.apply_action(state "call" 10)<line_sep>state,_=RoundManager.apply_action(state "call" 10)<line_sep>state,_=RoundManager.apply_action(state "call" 0)<line_sep>state,_=RoundManager.apply_action(state "call" 0)<line_sep>state,_=RoundManager.apply_action(state "call" 0)<line_sep>state,_=RoundManager.apply_action(state "call" 0)<line_sep>state,_=RoundManager.apply_action(state "call" 0)<line_sep>state,_=RoundManager.apply_action(state "call" 0)<line_sep>table=state["table"]<line_sep>player=state["table"].seats.players[0]<line_sep>self.eq(52 table.deck.size())<line_sep>self.eq([] table.get_community_card())<line_sep>self.eq([] player.hole_card)<line_sep>self.eq([] player.action_histories)<line_sep>self.eq(PayInfo.PAY_TILL_END player.pay_info.status)<block_end><block_end><def_stmt>test_message_skip_when_only_one_player_is_active self<block_start>state,_=self.__start_round()<line_sep>state,_=RoundManager.apply_action(state "fold" 0)<line_sep>state,msgs=RoundManager.apply_action(state "fold" 0)<line_sep>self.eq(Const.Street.FINISHED state["street"])<line_sep>self.false("street_start_message"<in>[msg["message"]["message_type"]<for>_,msg msgs])<block_end><def_stmt>test_ask_player_target_when_dealer_btn_player_is_folded 
self<block_start>state,_=self.__start_round()<line_sep>state,_=RoundManager.apply_action(state "call" 10)<line_sep>state,_=RoundManager.apply_action(state "call" 10)<line_sep>state,_=RoundManager.apply_action(state "call" 10)<line_sep>state,_=RoundManager.apply_action(state "fold" 10)<line_sep>state,_=RoundManager.apply_action(state "call" 0)<line_sep>state,msgs=RoundManager.apply_action(state "call" 0)<line_sep>self.eq("uuid1" msgs[-1][0])<block_end><def_stmt>test_skip_asking_to_allin_player self<block_start>state,_=self.__start_round()<line_sep># Round 1 state,_=RoundManager.apply_action(state "call" 10)<line_sep>state,_=RoundManager.apply_action(state "fold" 0)<line_sep>state,_=RoundManager.apply_action(state "call" 10)<line_sep>state,_=RoundManager.apply_action(state "raise" 50)<line_sep>state,_=RoundManager.apply_action(state "call" 50)<line_sep>state,_=RoundManager.apply_action(state "fold" 0)<line_sep>self.eq([95 40 165] [p.stack<for>p state["table"].seats.players])<line_sep># Round 2 state["table"].shift_dealer_btn()<line_sep>state["table"].set_blind_pos(1 2)<line_sep>state,_=RoundManager.start_new_round(2 5 0 state["table"])<line_sep>state,_=RoundManager.apply_action(state "raise" 40)<line_sep>state,_=RoundManager.apply_action(state "call" 40)<line_sep>state,_=RoundManager.apply_action(state "raise" 70)<line_sep>state,msgs=RoundManager.apply_action(state "call" 70)<line_sep>self.eq([25 0 95] [p.stack<for>p state["table"].seats.players])<line_sep>self.eq(1 state["street"])<line_sep>self.eq("uuid2" msgs[-1][0])<block_end><def_stmt>test_when_only_one_player_is_waiting_ask self<block_start>state,_=self.__start_round()<line_sep># Round 1 state,_=RoundManager.apply_action(state "call" 10)<line_sep>state,_=RoundManager.apply_action(state "fold" 0)<line_sep>state,_=RoundManager.apply_action(state "call" 10)<line_sep>state,_=RoundManager.apply_action(state "call" 0)<line_sep>state,_=RoundManager.apply_action(state "raise" 50)<line_sep>state,_=RoundManager.apply_action(state "call" 50)<line_sep>state,_=RoundManager.apply_action(state "fold" 0)<line_sep>self.eq([95 40 165] [p.stack<for>p state["table"].seats.players])<line_sep># Round 2 state["table"].shift_dealer_btn()<line_sep>state,_=RoundManager.start_new_round(2 5 0 state["table"])<line_sep>state,_=RoundManager.apply_action(state "raise" 40)<line_sep>state,_=RoundManager.apply_action(state "call" 40)<line_sep>state,_=RoundManager.apply_action(state "raise" 70)<line_sep>state,_=RoundManager.apply_action(state "call" 70)<line_sep>state,_=RoundManager.apply_action(state "call" 0)<line_sep>state,_=RoundManager.apply_action(state "raise" 10)<line_sep>state,_=RoundManager.apply_action(state "call" 10)<line_sep>state,_=RoundManager.apply_action(state "raise" 85)<line_sep>state,_=RoundManager.apply_action(state "call" 85)<block_end><def_stmt>test_ask_big_blind_in_preflop self<block_start>state,_=self.__start_round()<line_sep>state,_=RoundManager.apply_action(state "call" 10)<line_sep>state,msg=RoundManager.apply_action(state "call" 10)<line_sep>self.eq("uuid1" msg[-1][0])<line_sep>self.eq(Const.Street.PREFLOP state["street"])<block_end><def_stmt>test_everyone_agree_logic_regression self<block_start>players=[Player("uuid%d"%i 100)<for>i range(4)]<line_sep>players[0].stack=150<line_sep>players[1].stack=150<line_sep>players[2].stack=50<line_sep>players[3].stack=50<line_sep>deck=Deck(cheat=<true> cheat_card_ids=range(1 53))<line_sep>table=Table(cheat_deck=deck)<for_stmt>player 
players<block_start>table.seats.sitdown(player)<block_end>table.dealer_btn=3<line_sep>table.set_blind_pos(0 1)<line_sep>state,_=RoundManager.start_new_round(1 5 0 table)<line_sep>state,_=RoundManager.apply_action(state "raise" 15)<line_sep>state,_=RoundManager.apply_action(state "raise" 20)<line_sep>state,_=RoundManager.apply_action(state "raise" 25)<line_sep>state,_=RoundManager.apply_action(state "raise" 30)<line_sep>state,_=RoundManager.apply_action(state "raise" 50)<line_sep>state,_=RoundManager.apply_action(state "call" 50)<line_sep>state,_=RoundManager.apply_action(state "raise" 125)<line_sep>state,_=RoundManager.apply_action(state "call" 125)<line_sep>state,_=RoundManager.apply_action(state "fold" 0)<line_sep>state,_=RoundManager.apply_action(state "fold" 0)<line_sep>self.eq(Const.Street.FINISHED state["street"])<block_end><def_stmt>test_add_amount_calculationl_when_raise_on_ante self<block_start>table=self.__setup_table()<line_sep>pot_amount=<lambda>state:GameEvaluator.create_pot(state["table"].seats.players)[0]["amount"]<line_sep>stack_check=<lambda>expected state:self.eq(expected [p.stack<for>p state["table"].seats.players])<line_sep>start_state,_=RoundManager.start_new_round(1 10 5 table)<line_sep>self.eq(45 pot_amount(start_state))<line_sep>stack_check([85 75 95] start_state)<line_sep>folded_state,_=RoundManager.apply_action(start_state "fold" 0)<line_sep>called_state,_=RoundManager.apply_action(folded_state "call" 20)<line_sep>self.eq(55 pot_amount(called_state))<line_sep>stack_check([85 75 95] start_state)<line_sep>called_state,_=RoundManager.apply_action(start_state "call" 20)<line_sep>self.eq(20 called_state["table"].seats.players[2].action_histories[-1]["paid"])<line_sep>self.eq(65 pot_amount(called_state))<line_sep>raised_state,_=RoundManager.apply_action(start_state "raise" 30)<line_sep>self.eq(30 raised_state["table"].seats.players[2].action_histories[-1]["paid"])<line_sep>self.eq(75 pot_amount(raised_state))<block_end><def_stmt>test_deepcopy_state self<block_start>table=self.__setup_table()<line_sep>original=RoundManager._RoundManager__gen_initial_state(2 5 table)<line_sep>copied=RoundManager._RoundManager__deep_copy_state(original)<line_sep>check=<lambda>key:self.eq(original[key] copied[key])<line_sep>[check(key)<for>key ["round_count" "small_blind_amount" "street" "next_player"]]<block_end><def_stmt>__start_round self<block_start>table=self.__setup_table()<line_sep>round_count=1<line_sep>small_blind_amount=5<line_sep>ante=0<line_sep><return>RoundManager.start_new_round(round_count small_blind_amount ante table)<block_end><def_stmt>__setup_table self<block_start>players=[Player("uuid%d"%i 100)<for>i range(3)]<line_sep>deck=Deck(cheat=<true> cheat_card_ids=range(1 53))<line_sep>table=Table(cheat_deck=deck)<for_stmt>player players<block_start>table.seats.sitdown(player)<block_end>table.dealer_btn=2<line_sep>table.set_blind_pos(0 1)<line_sep><return>table<block_end><block_end>
<import_stmt>asyncio<import_stmt>async_timeout<import_stmt>pytest<import_from_stmt>kopf._cogs.structs.bodies RawBody RawEvent<import_from_stmt>kopf._cogs.structs.references Insights<import_from_stmt>kopf._core.reactor.observation process_discovered_namespace_event<async_keyword><def_stmt>test_initial_listing_is_ignored <block_start>insights=Insights()<line_sep>e1=RawEvent(type=<none> object=RawBody(metadata={'name':'ns1'}))<async_keyword><def_stmt>delayed_injection delay:float<block_start><await>asyncio.sleep(delay)<line_sep><await>process_discovered_namespace_event(insights=insights raw_event=e1 namespaces=['ns*'])<block_end>task=asyncio.create_task(delayed_injection(0))<with_stmt>pytest.raises(asyncio.TimeoutError)<block_start><async_keyword><with_stmt>async_timeout.timeout(0.1)<as>timeout<block_start><async_keyword><with_stmt>insights.revised<block_start><await>insights.revised.wait()<block_end><block_end><block_end><await>task<assert_stmt>timeout.expired<assert_stmt><not>insights.namespaces<block_end>@pytest.mark.parametrize('etype' ['ADDED' 'MODIFIED'])<async_keyword><def_stmt>test_followups_for_addition timer etype<block_start>insights=Insights()<line_sep>e1=RawEvent(type=etype object=RawBody(metadata={'name':'ns1'}))<async_keyword><def_stmt>delayed_injection delay:float<block_start><await>asyncio.sleep(delay)<line_sep><await>process_discovered_namespace_event(insights=insights raw_event=e1 namespaces=['ns*'])<block_end>task=asyncio.create_task(delayed_injection(0.1))<async_keyword><with_stmt>timer async_timeout.timeout(1)<block_start><async_keyword><with_stmt>insights.revised<block_start><await>insights.revised.wait()<block_end><block_end><await>task<assert_stmt>0.1<l>timer.seconds<l>0.11<assert_stmt>insights.namespaces<eq>{'ns1'}<block_end>@pytest.mark.parametrize('etype' ['DELETED'])<async_keyword><def_stmt>test_followups_for_deletion timer etype<block_start>insights=Insights()<line_sep>insights.namespaces.add('ns1')<line_sep>e1=RawEvent(type=etype object=RawBody(metadata={'name':'ns1'}))<async_keyword><def_stmt>delayed_injection delay:float<block_start><await>asyncio.sleep(delay)<line_sep><await>process_discovered_namespace_event(insights=insights raw_event=e1 namespaces=['ns*'])<block_end>task=asyncio.create_task(delayed_injection(0.1))<async_keyword><with_stmt>timer async_timeout.timeout(1)<block_start><async_keyword><with_stmt>insights.revised<block_start><await>insights.revised.wait()<block_end><block_end><await>task<assert_stmt>0.1<l>timer.seconds<l>0.11<assert_stmt><not>insights.namespaces<block_end>
<import_stmt>logging<import_stmt>numpy<as>np<import_stmt>tensorflow<as>tf<import_from_stmt>typing Callable Dict Optional Tuple Union<import_from_stmt>alibi_detect.cd.base BaseMMDDrift<import_from_stmt>alibi_detect.utils.tensorflow.distance mmd2_from_kernel_matrix<import_from_stmt>alibi_detect.utils.tensorflow.kernels GaussianRBF<line_sep>logger=logging.getLogger(__name__)<class_stmt>MMDDriftTF(BaseMMDDrift)<block_start><def_stmt>__init__ self x_ref:Union[np.ndarray list] p_val:float=.05 preprocess_x_ref:bool=<true> update_x_ref:Optional[Dict[str int]]=<none> preprocess_fn:Optional[Callable]=<none> kernel:Callable=GaussianRBF sigma:Optional[np.ndarray]=<none> configure_kernel_from_x_ref:bool=<true> n_permutations:int=100 input_shape:Optional[tuple]=<none> data_type:Optional[str]=<none><arrow><none><block_start>""" Maximum Mean Discrepancy (MMD) data drift detector using a permutation test. Parameters ---------- x_ref Data used as reference distribution. p_val p-value used for the significance of the permutation test. preprocess_x_ref Whether to already preprocess and store the reference data. update_x_ref Reference data can optionally be updated to the last n instances seen by the detector or via reservoir sampling with size n. For the former, the parameter equals {'last': n} while for reservoir sampling {'reservoir_sampling': n} is passed. preprocess_fn Function to preprocess the data before computing the data drift metrics. kernel Kernel used for the MMD computation, defaults to Gaussian RBF kernel. sigma Optionally set the GaussianRBF kernel bandwidth. Can also pass multiple bandwidth values as an array. The kernel evaluation is then averaged over those bandwidths. configure_kernel_from_x_ref Whether to already configure the kernel bandwidth from the reference data. n_permutations Number of permutations used in the permutation test. input_shape Shape of input data. data_type Optionally specify the data type (tabular, image or time-series). Added to metadata. """<line_sep>super().__init__(x_ref=x_ref p_val=p_val preprocess_x_ref=preprocess_x_ref update_x_ref=update_x_ref preprocess_fn=preprocess_fn sigma=sigma configure_kernel_from_x_ref=configure_kernel_from_x_ref n_permutations=n_permutations input_shape=input_shape data_type=data_type)<line_sep>self.meta.update({'backend':'tensorflow'})<line_sep># initialize kernel <if_stmt>isinstance(sigma np.ndarray)<block_start>sigma=tf.convert_to_tensor(sigma)<block_end>self.kernel=kernel(sigma)<if>kernel<eq>GaussianRBF<else>kernel<line_sep># compute kernel matrix for the reference data <if_stmt>self.infer_sigma<or>isinstance(sigma tf.Tensor)<block_start>self.k_xx=self.kernel(self.x_ref self.x_ref infer_sigma=self.infer_sigma)<line_sep>self.infer_sigma=<false><block_end><else_stmt><block_start>self.k_xx,self.infer_sigma=<none> <true><block_end><block_end><def_stmt>kernel_matrix self x:Union[np.ndarray tf.Tensor] y:Union[np.ndarray tf.Tensor]<arrow>tf.Tensor<block_start>""" Compute and return full kernel matrix between arrays x and y. 
"""<line_sep>k_xy=self.kernel(x y self.infer_sigma)<line_sep>k_xx=self.k_xx<if>self.k_xx<is><not><none><and>self.update_x_ref<is><none><else>self.kernel(x x)<line_sep>k_yy=self.kernel(y y)<line_sep>kernel_mat=tf.concat([tf.concat([k_xx k_xy] 1) tf.concat([tf.transpose(k_xy (1 0)) k_yy] 1)] 0)<line_sep><return>kernel_mat<block_end><def_stmt>score self x:Union[np.ndarray list]<arrow>Tuple[float float np.ndarray]<block_start>""" Compute the p-value resulting from a permutation test using the maximum mean discrepancy as a distance measure between the reference data and the data to be tested. Parameters ---------- x Batch of instances. Returns ------- p-value obtained from the permutation test, the MMD^2 between the reference and test set and the MMD^2 values from the permutation test. """<line_sep>x_ref,x=self.preprocess(x)<line_sep># compute kernel matrix, MMD^2 and apply permutation test using the kernel matrix n=x.shape[0]<line_sep>kernel_mat=self.kernel_matrix(x_ref x)<line_sep>kernel_mat=kernel_mat-tf.linalg.diag(tf.linalg.diag_part(kernel_mat))# zero diagonal mmd2=mmd2_from_kernel_matrix(kernel_mat n permute=<false> zero_diag=<false>).numpy()<line_sep>mmd2_permuted=np.array([mmd2_from_kernel_matrix(kernel_mat n permute=<true> zero_diag=<false>).numpy()<for>_ range(self.n_permutations)])<line_sep>p_val=(mmd2<le>mmd2_permuted).mean()<line_sep><return>p_val mmd2 mmd2_permuted<block_end><block_end>
<import_stmt>unittest<import_stmt>textwrap<import_stmt>copy<import_stmt>pickle<import_stmt>email<import_stmt>email.message<import_from_stmt>email policy<import_from_stmt>email.headerregistry HeaderRegistry<import_from_stmt>test.test_email TestEmailBase parameterize<line_sep>@parameterize<class_stmt>TestPickleCopyHeader(TestEmailBase)<block_start>header_factory=HeaderRegistry()<line_sep>unstructured=header_factory('subject' 'this is a test')<line_sep>header_params={'subject':('subject' 'this is a test') 'from':('from' '<EMAIL>') 'to':('to' 'a: <EMAIL>, <EMAIL>;, <EMAIL>') 'date':('date' 'Tue, 29 May 2012 09:24:26 +1000') }<def_stmt>header_as_deepcopy self name value<block_start>header=self.header_factory(name value)<line_sep>h=copy.deepcopy(header)<line_sep>self.assertEqual(str(h) str(header))<block_end><def_stmt>header_as_pickle self name value<block_start>header=self.header_factory(name value)<for_stmt>proto range(pickle.HIGHEST_PROTOCOL+1)<block_start>p=pickle.dumps(header proto)<line_sep>h=pickle.loads(p)<line_sep>self.assertEqual(str(h) str(header))<block_end><block_end><block_end>@parameterize<class_stmt>TestPickleCopyMessage(TestEmailBase)# Message objects are a sequence, so we have to make them a one-tuple in # msg_params so they get passed to the parameterized test method as a # single argument instead of as a list of headers. <block_start>msg_params={}<line_sep># Note: there will be no custom header objects in the parsed message. msg_params['parsed']=(email.message_from_string(textwrap.dedent("""\ Date: Tue, 29 May 2012 09:24:26 +1000 From: <EMAIL> To: <EMAIL> Subject: help I think I forgot the ring. """) policy=policy.default) )<line_sep>msg_params['created']=(email.message.Message(policy=policy.default) )<line_sep>msg_params['created'][0]['Date']='Tue, 29 May 2012 09:24:26 +1000'<line_sep>msg_params['created'][0]['From']='<EMAIL>'<line_sep>msg_params['created'][0]['To']='<EMAIL>'<line_sep>msg_params['created'][0]['Subject']='help'<line_sep>msg_params['created'][0].set_payload('I think I forgot the ring.')<def_stmt>msg_as_deepcopy self msg<block_start>msg2=copy.deepcopy(msg)<line_sep>self.assertEqual(msg2.as_string() msg.as_string())<block_end><def_stmt>msg_as_pickle self msg<block_start><for_stmt>proto range(pickle.HIGHEST_PROTOCOL+1)<block_start>p=pickle.dumps(msg proto)<line_sep>msg2=pickle.loads(p)<line_sep>self.assertEqual(msg2.as_string() msg.as_string())<block_end><block_end><block_end><if_stmt>__name__<eq>'__main__'<block_start>unittest.main()<block_end>
<import_stmt>common<import_from_stmt>nanpy.arduinotree ArduinoTree<import_from_stmt>nose.tools eq_<import_from_stmt>nose.tools ok_<def_stmt>setup <block_start>common.setup()<block_end><def_stmt>test <block_start>a=ArduinoTree()<line_sep>eq_(a.core.digitalPinToBitMask(2) 4)<line_sep>eq_(a.core.digitalPinToPort(2) 4)<line_sep>eq_(a.core.digitalPinToTimer(2) 0)<line_sep>eq_(a.core.analogInPinToBit(2) 2)<block_end><def_stmt>test_ports <block_start>a=ArduinoTree()<line_sep>eq_(a.core.portInputRegister(0) 0)# NOT_A_PORT eq_(a.core.portInputRegister(1) 0)# NOT_A_PORT eq_(a.core.portInputRegister(2) 35)# PINB eq_(a.core.portInputRegister(3) 38)# PINC eq_(a.core.portInputRegister(4) 41)# PIND eq_(a.core.portModeRegister(0) 0)# NOT_A_PORT eq_(a.core.portModeRegister(1) 0)# NOT_A_PORT eq_(a.core.portModeRegister(2) 36)# DDRB eq_(a.core.portModeRegister(3) 39)# DDRC eq_(a.core.portModeRegister(4) 42)# DDRD eq_(a.core.portOutputRegister(0) 0)# NOT_A_PORT eq_(a.core.portOutputRegister(1) 0)# NOT_A_PORT eq_(a.core.portOutputRegister(2) 37)# PORTB eq_(a.core.portOutputRegister(3) 40)# PORTC eq_(a.core.portOutputRegister(4) 43)<block_end># PORTD
<import_stmt>tensorflow<as>tf<import_stmt>numpy<as>np<import_from_stmt>path_explain.utils set_up_environment<import_from_stmt>path_explain.path_explainer_tf PathExplainerTF<import_from_stmt>preprocess higgs_dataset<import_from_stmt>train build_model<import_from_stmt>absl app<import_from_stmt>absl flags<line_sep>FLAGS=flags.FLAGS<line_sep>flags.DEFINE_integer('num_examples' 10000 'Number of inputs to run attributions on')<line_sep>flags.DEFINE_integer('num_samples' 300 'Number of samples to use when computing attributions')<def_stmt>interpret argv=<none><block_start>set_up_environment(visible_devices=FLAGS.visible_devices)<line_sep>train_set,test_set,vald_set=higgs_dataset(batch_size=FLAGS.batch_size num_parallel_calls=8 buffer_size=10000 seed=0 scale=<true> include_vald=<true>)<line_sep>print('Loading model...')<line_sep>model=build_model(weight_decay=FLAGS.weight_decay num_layers=FLAGS.num_layers hidden_units=FLAGS.hidden_units for_interpretation=<true>)<line_sep>model.load_weights('model.h5' by_name=<true>)<line_sep>print('Gathering inputs...')<line_sep>training_iters=int(10000/FLAGS.batch_size)<line_sep>training_samples=[]<for_stmt>i,(x_batch _) enumerate(train_set)<block_start>training_samples.append(x_batch)<if_stmt>i<ge>training_iters<block_start><break><block_end><block_end>training_samples=tf.concat(training_samples axis=0)<line_sep>input_samples=[]<line_sep>true_labels=[]<line_sep>pred_output=[]<line_sep>num_accumulated=0<for_stmt>x_batch,label_batch test_set<block_start>pred_labels=model(x_batch)<line_sep>correct_mask=(pred_labels[: 0].numpy()<g>0.5).astype(int)<eq>label_batch<line_sep>input_samples.append(x_batch.numpy()[correct_mask])<line_sep>pred_output.append(pred_labels.numpy()[correct_mask 0])<line_sep>true_labels.append(label_batch.numpy()[correct_mask])<line_sep>num_accumulated<augadd>np.sum(correct_mask)<if_stmt>num_accumulated<ge>FLAGS.num_examples<block_start><break><block_end><block_end>input_samples=np.concatenate(input_samples axis=0).astype(np.float32)<line_sep>true_labels=np.concatenate(true_labels axis=0)<line_sep>pred_output=np.concatenate(pred_output axis=0)<line_sep>np.save('input_samples.npy' input_samples)<line_sep>np.save('pred_output.npy' pred_output)<line_sep>np.save('true_labels.npy' true_labels)<line_sep>explainer=PathExplainerTF(model)<line_sep>print('Computing attributions...')<line_sep>attributions=explainer.attributions(inputs=input_samples baseline=np.zeros((1 input_samples.shape[1]) dtype=np.float32) batch_size=FLAGS.batch_size num_samples=FLAGS.num_samples use_expectation=<false> output_indices=0 verbose=<true>)<line_sep>np.save('attributions.npy' attributions)<line_sep>print('Computing interactions...')<line_sep>interactions=explainer.interactions(inputs=input_samples baseline=np.zeros((1 input_samples.shape[1]) dtype=np.float32) batch_size=FLAGS.batch_size num_samples=FLAGS.num_samples use_expectation=<false> output_indices=0 verbose=<true>)<line_sep>np.save('interactions.npy' interactions)<block_end><if_stmt>__name__<eq>'__main__'<block_start>app.run(interpret)<block_end>
# Copyright (c) ZenML GmbH 2020. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at: # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express # or implied. See the License for the specific language governing # permissions and limitations under the License. """Base Class for all ZenML datasources"""<import_stmt>json<import_stmt>os<import_from_stmt>abc abstractmethod<import_from_stmt>typing Text Dict Optional Callable<import_from_stmt>uuid uuid4<import_from_stmt>zenml.enums GDPComponent<import_from_stmt>zenml.exceptions AlreadyExistsException<import_from_stmt>zenml.exceptions EmptyDatasourceException<import_from_stmt>zenml.exceptions InitializationException<import_from_stmt>zenml.logger get_logger<import_from_stmt>zenml.metadata ZenMLMetadataStore<import_from_stmt>zenml.repo Repository ArtifactStore<import_from_stmt>zenml.standards standard_keys<as>keys<import_from_stmt>zenml.utils path_utils<import_from_stmt>zenml.utils source_utils<import_from_stmt>zenml.utils.analytics_utils CREATE_DATASOURCE<import_from_stmt>zenml.utils.analytics_utils track<line_sep># from zenml.utils.post_training.post_training_utils import \ # view_schema, get_feature_spec_from_schema, \ # convert_raw_dataset_to_pandas, view_statistics <import_from_stmt>zenml.utils.print_utils to_pretty_string PrintStyles<line_sep>logger=get_logger(__name__)<class_stmt>BaseDatasource<block_start>"""Base class for all ZenML datasources. Every ZenML datasource should override this class. """<def_stmt>__init__ self name:Text _id:Text=<none> backend=<none> metadata_store:Optional[ZenMLMetadataStore]=<none> artifact_store:Optional[ArtifactStore]=<none> commits:Optional[Dict]=<none> *args **kwargs<block_start>""" Construct the datasource. Args: name (str): name of datasource schema (dict): schema of datasource _id: unique ID (for internal use) """<if_stmt>_id# Its loaded from config <block_start>self._id=_id<line_sep>logger.debug(f'Datasource {name} loaded.')<block_end><else_stmt># If none, then this is assumed to be 'new'. Check dupes. 
<block_start>all_names=Repository.get_instance().get_datasource_names()<if_stmt>any(d<eq>name<for>d all_names)<block_start><raise>AlreadyExistsException(name=name resource_type='datasource')<block_end>self._id=str(uuid4())<line_sep>track(event=CREATE_DATASOURCE)<line_sep>logger.info(f'Datasource {name} created.')<block_end># Metadata store <if_stmt>metadata_store<block_start>self.metadata_store:ZenMLMetadataStore=metadata_store<block_end><else_stmt># use default <block_start><try_stmt><block_start>self.metadata_store:ZenMLMetadataStore=Repository.get_instance().get_default_metadata_store()<block_end><except_stmt>InitializationException<block_start>self.metadata_store=<none><block_end><block_end># Default to local <if_stmt>backend<is><none><block_start><import_from_stmt>zenml.backends.orchestrator OrchestratorBaseBackend<line_sep>self.backend=OrchestratorBaseBackend()<block_end><else_stmt><block_start>self.backend=backend<block_end># Artifact store <if_stmt>artifact_store<block_start>self.artifact_store=artifact_store<block_end><else_stmt># use default <block_start><try_stmt><block_start>self.artifact_store=Repository.get_instance().get_default_artifact_store()<block_end><except_stmt>InitializationException<block_start>self.metadata_store=<none><block_end><block_end><if_stmt>commits<is><none><block_start>self.commits={}<block_end><else_stmt><block_start>self.commits=commits<block_end>self.name=name<line_sep>self._immutable=<false><line_sep>self._source=source_utils.resolve_class(self.__class__)<line_sep>self._source_args=json.dumps(kwargs)<block_end><def_stmt>__str__ self<block_start><return>to_pretty_string(self.to_config())<block_end><def_stmt>__repr__ self<block_start><return>to_pretty_string(self.to_config() style=PrintStyles.PPRINT)<block_end>@property<def_stmt>is_empty self<block_start><if_stmt>self.commits<block_start><return><false><block_end><return><true><block_end>@property<def_stmt>n_datapoints self# """Gets total number of datapoints in datasource""" # pipeline = self._get_one_pipeline() # data_files = self._get_data_file_paths(pipeline) # return sum(1 for _ in tf.data.TFRecordDataset(data_files, # compression_type='GZIP')) <block_start><raise>NotImplementedError<block_end>@abstractmethod<def_stmt>process self output_path:Text make_beam_pipeline:Callable=<none><block_start><pass><block_end><def_stmt>commit self<block_start><import_from_stmt>zenml.pipelines.data_pipeline DataPipeline<line_sep>data_pipeline=DataPipeline(enable_cache=<false> backend=self.backend metadata_store=self.metadata_store artifact_store=self.artifact_store datasource=self)<line_sep>data_pipeline.run()<line_sep>commit_id=data_pipeline.pipeline_name.split('_')[2]<line_sep>self.commits[commit_id]=data_pipeline.pipeline_name.split('_')[1]<line_sep><return>commit_id<block_end><def_stmt>_assert_commit_id self commit_id:Text<block_start>"""Asserts commit_id is in self.commits"""<if_stmt>commit_id<not><in>self.commits<block_start><raise>AssertionError(f'There is no such commit_id as {commit_id} in the '<concat>f'datasource {self.name}')<block_end><block_end>@classmethod<def_stmt>from_config cls config:Dict<block_start>""" Convert from Data Step config to ZenML Datasource object. Data step is also populated and configuration set to parameters set in the config file. Args: config: a DataStep config in dict-form (probably loaded from YAML). 
"""<if_stmt>keys.DatasourceKeys.SOURCE<not><in>config[keys.PipelineKeys.DATASOURCE]<block_start><return><none><block_end># can be empty # this is the data step config block source=config[keys.PipelineKeys.DATASOURCE][keys.DatasourceKeys.SOURCE]<line_sep>datasource_class=source_utils.load_source_path_class(source)<line_sep>datasource_name=config[keys.PipelineKeys.DATASOURCE][keys.DatasourceKeys.NAME]<line_sep>_id=config[keys.PipelineKeys.DATASOURCE][keys.DatasourceKeys.ID]<line_sep>args=json.loads(config[keys.PipelineKeys.DATASOURCE][keys.DatasourceKeys.ARGS])<line_sep># start with artifact store artifact_store=ArtifactStore(config[keys.PipelineKeys.DATASOURCE][keys.DatasourceKeys.ARTIFACT_STORE])<line_sep># metadata store metadata_store:ZenMLMetadataStore=ZenMLMetadataStore.from_config(config=config[keys.PipelineKeys.DATASOURCE][keys.DatasourceKeys.METADATA_STORE])<line_sep># backend <import_from_stmt>zenml.backends.orchestrator OrchestratorBaseBackend<line_sep>backend=OrchestratorBaseBackend.from_config(config=config[keys.PipelineKeys.DATASOURCE][keys.DatasourceKeys.BACKEND])<line_sep># resolve commits data_pipeline_names=metadata_store.get_data_pipeline_names_from_datasource_name(datasource_name)<line_sep># ugly hack to recompile the commit times commits={}<if_stmt>data_pipeline_names<block_start>commits={x.split('_')[2]:x.split('_')[1]<for>x data_pipeline_names}<block_end>obj=datasource_class(name=datasource_name _id=_id commits=commits backend=backend metadata_store=metadata_store artifact_store=artifact_store **args)<line_sep>obj._immutable=<true><line_sep><return>obj<block_end><def_stmt>to_config self<block_start>"""Converts datasource to ZenML config block."""<line_sep><return>{keys.DatasourceKeys.NAME:self.name keys.DatasourceKeys.SOURCE:self._source keys.DatasourceKeys.ARGS:self._source_args keys.DatasourceKeys.ID:self._id keys.DatasourceKeys.METADATA_STORE:self.metadata_store.to_config() keys.DatasourceKeys.ARTIFACT_STORE:self.artifact_store.path keys.DatasourceKeys.BACKEND:self.backend.to_config()}<block_end><def_stmt>get_latest_commit self<block_start>a=[k<for>k,v sorted(self.commits.items() key=<lambda>item:item[1])]<if_stmt>a<block_start><return>a[-1]<block_end><block_end><def_stmt>get_first_commit self<block_start>a=[k<for>k,v sorted(self.commits.items() key=<lambda>item:item[1])]<if_stmt>a<block_start><return>a[0]<block_end><block_end><def_stmt>get_data_pipeline_from_commit self commit_id:Text<block_start><import_from_stmt>zenml.pipelines.data_pipeline DataPipeline<line_sep>self._assert_commit_id(commit_id)<line_sep>repo:Repository=Repository.get_instance()<line_sep>name=DataPipeline.get_name_from_pipeline_name(DataPipeline.PIPELINE_TYPE+'_'+self.commits[commit_id]+'_'+commit_id)<line_sep><return>repo.get_pipeline_by_name(name)<block_end><def_stmt>_get_one_pipeline self<block_start>"""Gets representative pipeline from all pipelines associated."""<if_stmt>self.commits<block_start><return>self.get_data_pipeline_from_commit(list(self.commits.keys())[0])<block_end><raise>EmptyDatasourceException<block_end><def_stmt>_get_data_file_paths self pipeline<block_start>""" Gets path where data is stored as list of file paths. 
Args: pipeline: a pipeline with this datasource embedded """<if_stmt>pipeline.datasource._id<ne>self._id<block_start><raise>AssertionError('This pipeline does not belong to this '<concat>'datasource.')<block_end># Take any pipeline and get the datagen data_uri=os.path.join(pipeline.get_artifacts_uri_by_component(GDPComponent.DataGen.name)[0] 'Split-examples')<line_sep>data_files=path_utils.list_dir(data_uri)<line_sep><return>data_files<block_end><def_stmt>sample_data self sample_size:int=100000<block_start>""" Samples data from datasource as a pandas DataFrame. Args: sample_size: # of rows to sample. """<line_sep># pipeline = self._get_one_pipeline() # data_files = self._get_data_file_paths(pipeline) # # schema_uri = pipeline.get_artifacts_uri_by_component( # GDPComponent.DataSchema.name)[0] # spec = get_feature_spec_from_schema(schema_uri) # # dataset = tf.data.TFRecordDataset(data_files, compression_type='GZIP') # return convert_raw_dataset_to_pandas(dataset, spec, sample_size) <raise>NotImplementedError<block_end># TODO [High]: Completely hacked code to get this to work <def_stmt>get_artifact_uri_by_component_and_commit_id self commit_id:Text component_name:Text<block_start>""" Gets the artifact URI by component and commit id. Args: commit_id: component_name: """<import_from_stmt>zenml.pipelines.data_pipeline DataPipeline<line_sep>store=self.metadata_store.store<line_sep>run_contexts=store.get_contexts_by_type(ZenMLMetadataStore.RUN_TYPE_NAME)<line_sep>run_contexts=[x<for>x run_contexts<if>x.name.startswith(DataPipeline.PIPELINE_TYPE)]<line_sep># now filter to the datasource name through executions commit_context=<none><for_stmt>c run_contexts<block_start>es=store.get_executions_by_context(c.id)<for_stmt>e es<block_start><if_stmt>'name'<in>e.custom_properties<and>e.custom_properties['name'].string_value<eq>self.name<block_start><if_stmt>commit_id<in>c.name<block_start>commit_context=c<block_end><block_end><block_end><block_end><if_stmt>commit_context<is><none><block_start><raise>AssertionError(f'Commit {commit_id} not found in metadata store for '<concat>f'datasource: {self.name}')<block_end># First get the context of the component and its artifacts component_context=[c<for>c store.get_contexts_by_type(ZenMLMetadataStore.NODE_TYPE_NAME)<if>c.name.endswith(component_name)][0]<line_sep>component_artifacts=store.get_artifacts_by_context(component_context.id)<line_sep># Second, get the context of the particular pipeline and its artifacts pipeline_artifacts=store.get_artifacts_by_context(commit_context.id)<line_sep># Figure out the matching ids and get URIs <return>[a.uri<for>a component_artifacts<if>a.id<in>[p.id<for>p pipeline_artifacts]]<block_end># def view_schema(self, commit_id: Text = None): # """ # View schema of data flowing in pipeline. # # Args: # commit_id: used to specify which commit's schema to use, if None # uses latest # """ # if commit_id is None: # commit_id = self.get_latest_commit() # self._assert_commit_id(commit_id) # # pipeline = self.get_data_pipeline_from_commit(commit_id) # uri = pipeline.get_artifacts_uri_by_component( # GDPComponent.DataSchema.name)[0] # view_schema(uri) # # def view_statistics(self, commit_id: Text = None, port: int = None, # magic: bool = False): # """ # View statistics of data flowing in pipeline. # # Args: # port (int): Port at which to launch the statistics facet. 
# commit_id: used to specify which commit's schema to use, if None # uses latest # magic (bool): Whether to display within a jupyter notebook or not # """ # if commit_id is None: # commit_id = self.get_latest_commit() # self._assert_commit_id(commit_id) # pipeline = self.get_data_pipeline_from_commit(commit_id) # uri = pipeline.get_artifacts_uri_by_component( # GDPComponent.DataStatistics.name)[0] # view_statistics(uri, port=port, magic=magic) <block_end>
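# Illustrative sketch (not part of the original class above): BaseDatasource is abstract, so a
# concrete datasource has to supply process(). The subclass below is hypothetical; its name,
# the `path` argument and the empty process() body are assumptions. It only shows how
# constructor kwargs end up serialized via _source_args and how commit() would be driven.
class IllustrativeCSVDatasource(BaseDatasource):
    def __init__(self, name, path=None, **kwargs):
        # extra kwargs (here `path`) are captured by the base class and serialized to config
        super(IllustrativeCSVDatasource, self).__init__(name, path=path, **kwargs)
        self.path = path

    def process(self, output_path, make_beam_pipeline=None):
        # hypothetical body: read self.path and write processed examples under output_path
        pass

# ds = IllustrativeCSVDatasource(name='my_csv_data', path='/tmp/train.csv')
# commit_id = ds.commit()  # runs a DataPipeline and records the commit id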
<import_stmt>networkx<import_stmt>pandas<as>pd<import_from_stmt>math isnan<import_from_stmt>gtfspy route_types<import_from_stmt>gtfspy.util wgs84_distance graph_node_attrs<import_from_stmt>warnings warn<line_sep>ALL_STOP_TO_STOP_LINK_ATTRIBUTES=["capacity_estimate" "duration_min" "duration_max" "duration_median" "duration_avg" "n_vehicles" "route_types" "d" "distance_shape" "route_I_counts"]<line_sep>DEFAULT_STOP_TO_STOP_LINK_ATTRIBUTES=["n_vehicles" "duration_avg" "d" "route_I_counts"]<def_stmt>walk_transfer_stop_to_stop_network gtfs max_link_distance=<none><block_start>""" Construct the walk network. If OpenStreetMap-based walking distances have been computed, then those are used as the distance. Otherwise, the great circle distances ("d") is used. Parameters ---------- gtfs: gtfspy.GTFS max_link_distance: int, optional If given, all walking transfers with great circle distance longer than this limit (expressed in meters) will be omitted. Returns ------- net: networkx.DiGraph edges have attributes d: straight-line distance between stops d_walk: distance along the road/tracks/.. """<if_stmt>max_link_distance<is><none><block_start>max_link_distance=1000<block_end>net=networkx.Graph()<line_sep>_add_stops_to_net(net gtfs.get_table("stops"))<line_sep>stop_distances=gtfs.get_table("stop_distances")<if_stmt>stop_distances["d_walk"][0]<is><none><block_start>osm_distances_available=<false><line_sep>warn("Warning: OpenStreetMap-based walking distances have not been computed, using euclidean distances instead."<concat>"Ignore this warning if running unit tests.")<block_end><else_stmt><block_start>osm_distances_available=<true><block_end><for_stmt>stop_distance_tuple stop_distances.itertuples()<block_start>from_node=stop_distance_tuple.from_stop_I<line_sep>to_node=stop_distance_tuple.to_stop_I<if_stmt>osm_distances_available<block_start><if_stmt>stop_distance_tuple.d_walk<g>max_link_distance<or>isnan(stop_distance_tuple.d_walk)<block_start><continue><block_end>data={'d':stop_distance_tuple.d 'd_walk':stop_distance_tuple.d_walk}<block_end><else_stmt><block_start><if_stmt>stop_distance_tuple.d<g>max_link_distance<block_start><continue><block_end>data={'d':stop_distance_tuple.d}<block_end>net.add_edge(from_node to_node **data)<block_end><return>net<block_end><def_stmt>stop_to_stop_network_for_route_type gtfs route_type link_attributes=<none> start_time_ut=<none> end_time_ut=<none><block_start>""" Get a stop-to-stop network describing a single mode of travel. Parameters ---------- gtfs : gtfspy.GTFS route_type : int See gtfspy.route_types.TRANSIT_ROUTE_TYPES for the list of possible types. 
link_attributes: list[str], optional defaulting to use the following link attributes: "n_vehicles" : Number of vehicles passed "duration_min" : minimum travel time between stops "duration_max" : maximum travel time between stops "duration_median" : median travel time between stops "duration_avg" : average travel time between stops "d" : distance along straight line (wgs84_distance) "distance_shape" : minimum distance along shape "capacity_estimate" : approximate capacity passed through the stop "route_I_counts" : dict from route_I to counts start_time_ut: int start time of the time span (in unix time) end_time_ut: int end time of the time span (in unix time) Returns ------- net: networkx.DiGraph A directed graph Directed graph """<if_stmt>link_attributes<is><none><block_start>link_attributes=DEFAULT_STOP_TO_STOP_LINK_ATTRIBUTES<block_end><assert_stmt>(route_type<in>route_types.TRANSIT_ROUTE_TYPES)<line_sep>stops_dataframe=gtfs.get_stops_for_route_type(route_type)<line_sep>net=networkx.DiGraph()<line_sep>_add_stops_to_net(net stops_dataframe)<line_sep>events_df=gtfs.get_transit_events(start_time_ut=start_time_ut end_time_ut=end_time_ut route_type=route_type)<if_stmt>len(net.nodes())<l>2<block_start><assert_stmt>events_df.shape[0]<eq>0<block_end># group events by links, and loop over them (i.e. each link): link_event_groups=events_df.groupby(['from_stop_I' 'to_stop_I'] sort=<false>)<for_stmt>key,link_events link_event_groups<block_start>from_stop_I,to_stop_I=key<assert_stmt>isinstance(link_events pd.DataFrame)<line_sep># 'dep_time_ut' 'arr_time_ut' 'shape_id' 'route_type' 'trip_I' 'duration' 'from_seq' 'to_seq' <if_stmt>link_attributes<is><none><block_start>net.add_edge(from_stop_I to_stop_I)<block_end><else_stmt><block_start>link_data={}<if_stmt>"duration_min"<in>link_attributes<block_start>link_data['duration_min']=float(link_events['duration'].min())<block_end><if_stmt>"duration_max"<in>link_attributes<block_start>link_data['duration_max']=float(link_events['duration'].max())<block_end><if_stmt>"duration_median"<in>link_attributes<block_start>link_data['duration_median']=float(link_events['duration'].median())<block_end><if_stmt>"duration_avg"<in>link_attributes<block_start>link_data['duration_avg']=float(link_events['duration'].mean())<block_end># statistics on numbers of vehicles: <if_stmt>"n_vehicles"<in>link_attributes<block_start>link_data['n_vehicles']=int(link_events.shape[0])<block_end><if_stmt>"capacity_estimate"<in>link_attributes<block_start>link_data['capacity_estimate']=route_types.ROUTE_TYPE_TO_APPROXIMATE_CAPACITY[route_type]<times>int(link_events.shape[0])<block_end><if_stmt>"d"<in>link_attributes<block_start>from_lat=graph_node_attrs(net from_stop_I)['lat']<line_sep>from_lon=graph_node_attrs(net from_stop_I)['lon']<line_sep>to_lat=graph_node_attrs(net to_stop_I)['lat']<line_sep>to_lon=graph_node_attrs(net to_stop_I)['lon']<line_sep>distance=wgs84_distance(from_lat from_lon to_lat to_lon)<line_sep>link_data['d']=int(distance)<block_end><if_stmt>"distance_shape"<in>link_attributes<block_start><assert_stmt>"shape_id"<in>link_events.columns.values<line_sep>found=<none><for_stmt>i,shape_id enumerate(link_events["shape_id"].values)<block_start><if_stmt>shape_id<is><not><none><block_start>found=i<line_sep><break><block_end><block_end><if_stmt>found<is><none><block_start>link_data["distance_shape"]=<none><block_end><else_stmt><block_start>link_event=link_events.iloc[found]<line_sep>distance=gtfs.get_shape_distance_between_stops(link_event["trip_I"] int(link_event["from_seq"]) 
int(link_event["to_seq"]))<line_sep>link_data['distance_shape']=distance<block_end><block_end><if_stmt>"route_I_counts"<in>link_attributes<block_start>link_data["route_I_counts"]=link_events.groupby("route_I").size().to_dict()<block_end>net.add_edge(from_stop_I to_stop_I **link_data)<block_end><block_end><return>net<block_end><def_stmt>stop_to_stop_networks_by_type gtfs<block_start>""" Compute stop-to-stop networks for all travel modes (route_types). Parameters ---------- gtfs: gtfspy.GTFS Returns ------- dict: dict[int, networkx.DiGraph] keys should be one of route_types.ALL_ROUTE_TYPES (i.e. GTFS route_types) """<line_sep>route_type_to_network=dict()<for_stmt>route_type route_types.ALL_ROUTE_TYPES<block_start><if_stmt>route_type<eq>route_types.WALK<block_start>net=walk_transfer_stop_to_stop_network(gtfs)<block_end><else_stmt><block_start>net=stop_to_stop_network_for_route_type(gtfs route_type)<block_end>route_type_to_network[route_type]=net<block_end><assert_stmt>len(route_type_to_network)<eq>len(route_types.ALL_ROUTE_TYPES)<line_sep><return>route_type_to_network<block_end><def_stmt>combined_stop_to_stop_transit_network gtfs start_time_ut=<none> end_time_ut=<none><block_start>""" Compute stop-to-stop networks for all travel modes and combine them into a single network. The modes of transport are encoded to a single network. The network consists of multiple links corresponding to each travel mode. Walk mode is not included. Parameters ---------- gtfs: gtfspy.GTFS Returns ------- net: networkx.MultiDiGraph keys should be one of route_types.TRANSIT_ROUTE_TYPES (i.e. GTFS route_types) """<line_sep>multi_di_graph=networkx.MultiDiGraph()<for_stmt>route_type route_types.TRANSIT_ROUTE_TYPES<block_start>graph=stop_to_stop_network_for_route_type(gtfs route_type start_time_ut=start_time_ut end_time_ut=end_time_ut)<for_stmt>from_node,to_node,data graph.edges(data=<true>)<block_start>data['route_type']=route_type<block_end>multi_di_graph.add_edges_from(graph.edges(data=<true>))<line_sep>multi_di_graph.add_nodes_from(graph.nodes(data=<true>))<block_end><return>multi_di_graph<block_end><def_stmt>_add_stops_to_net net stops<block_start>""" Add nodes to the network from the pandas dataframe describing (a part of the) stops table in the GTFS database. Parameters ---------- net: networkx.Graph stops: pandas.DataFrame """<for_stmt>stop stops.itertuples()<block_start>data={"lat":stop.lat "lon":stop.lon "name":stop.name}<line_sep>net.add_node(stop.stop_I **data)<block_end><block_end><def_stmt>temporal_network gtfs start_time_ut=<none> end_time_ut=<none> route_type=<none><block_start>""" Compute the temporal network of the data, and return it as a pandas.DataFrame Parameters ---------- gtfs : gtfspy.GTFS start_time_ut: int | None start time of the time span (in unix time) end_time_ut: int | None end time of the time span (in unix time) route_type: int | None Specifies which mode of public transport are included, or whether all modes should be included. The int should be one of the standard GTFS route_types: (see also gtfspy.route_types.TRANSIT_ROUTE_TYPES ) If route_type is not specified, all modes are included. 
Returns ------- events_df: pandas.DataFrame Columns: departure_stop, arrival_stop, departure_time_ut, arrival_time_ut, route_type, route_I, trip_I """<line_sep>events_df=gtfs.get_transit_events(start_time_ut=start_time_ut end_time_ut=end_time_ut route_type=route_type)<line_sep>events_df.drop('to_seq' 1 inplace=<true>)<line_sep>events_df.drop('shape_id' 1 inplace=<true>)<line_sep>events_df.drop('duration' 1 inplace=<true>)<line_sep>events_df.drop('route_id' 1 inplace=<true>)<line_sep>events_df.rename(columns={'from_seq':"seq"} inplace=<true>)<line_sep><return>events_df<block_end><def_stmt>route_to_route_network gtfs walking_threshold start_time end_time<block_start>""" Creates networkx graph where the nodes are bus routes and a edge indicates that there is a possibility to transfer between the routes :param gtfs: :param walking_threshold: :param start_time: :param end_time: :return: """<line_sep>graph=networkx.Graph()<line_sep>routes=gtfs.get_table("routes")<for_stmt>i routes.itertuples()<block_start>graph.add_node(i.route_id type=i.type color=route_types.ROUTE_TYPE_TO_COLOR[i.type])<block_end>query="""SELECT stop1.route_id AS route_id1, stop1.type, stop2.route_id AS route_id2, stop2.type FROM (SELECT * FROM stop_distances WHERE d_walk < %s) sd, (SELECT * FROM stop_times, trips, routes WHERE stop_times.trip_I=trips.trip_I AND trips.route_I=routes.route_I AND stop_times.dep_time_ds > %s AND stop_times.dep_time_ds < %s) stop1, (SELECT * FROM stop_times, trips, routes WHERE stop_times.trip_I=trips.trip_I AND trips.route_I=routes.route_I AND stop_times.dep_time_ds > %s AND stop_times.dep_time_ds < %s) stop2 WHERE sd.from_stop_I = stop1.stop_I AND sd.to_stop_I = stop2.stop_I AND stop1.route_id != stop2.route_id GROUP BY stop1.route_id, stop2.route_id"""%(walking_threshold start_time end_time start_time end_time)<line_sep>df=gtfs.execute_custom_query_pandas(query)<for_stmt>items df.itertuples()<block_start>graph.add_edge(items.route_id1 items.route_id2)<block_end>graph.remove_nodes_from(networkx.isolates(graph))<line_sep><return>graph<block_end># def cluster_network_stops(stop_to_stop_net, distance): # """ # Aggregate graph by grouping nodes that are within a specified distance. # The ids of the nodes are tuples of the original stop_Is. # # Parameters # ---------- # network: networkx.DiGraph # distance: float # group all nodes within this distance. # # Returns # ------- # graph: networkx.Graph # """ # pass # def aggregate__network(self, graph, distance): # """ # See to_aggregate_line_graph for documentation # """ # raise NotImplementedError("this is not working fully yet") # assert distance <= 1000, "only works with distances below 1000 meters" # nodes = set(graph.nodes()) # # node_distance_graph = networkx.Graph() # # stop_distances = self.get_table("stop_distances") # stop_pairs = stop_distances[stop_distances['d'] <= distance] # stop_pairs = zip(stop_pairs['from_stop_I'], stop_pairs['to_stop_I']) # for node in nodes: # node_distance_graph.add_node(node) # for node, another_node in stop_pairs: # if (node in nodes) and (another_node in nodes): # node_distance_graph.add_edge(node, another_node) # # node_group_iter = networkx.connected_components(node_distance_graph) # # aggregate_graph = networkx.Graph() # old_node_to_new_node = {} # for node_group in node_group_iter: # new_node_id = tuple(node for node in node_group) # lats = [] # lons = [] # names = [] # for node in node_group: # if node not in graph: # # some stops may not part of the original node line graph # # (e.g. 
if some lines are not considered, or there are extra stops in stops table) # continue # old_node_to_new_node[node] = new_node_id # lats.append(graph.node[node]['lat']) # lons.append(graph.node[node]['lon']) # names.append(graph.node[node]['name']) # new_lat = numpy.mean(lats) # new_lon = numpy.mean(lons) # attr_dict = { # "lat": new_lat, # "lon": new_lon, # "names": names # } # aggregate_graph.add_node(new_node_id, attr_dict=attr_dict) # # for from_node, to_node, data in graph.edges(data=True): # new_from_node = old_node_to_new_node[from_node] # new_to_node = old_node_to_new_node[to_node] # if aggregate_graph.has_edge(new_from_node, new_to_node): # edge_data = aggregate_graph.get_edge_data(new_from_node, new_to_node) # edge_data['route_ids'].append(data['route_ids']) # else: # aggregate_graph.add_edge(new_from_node, new_to_node, route_ids=data['route_ids']) # return aggregate_graph
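# Illustrative usage sketch (not part of the original module above): the functions here all
# take a gtfspy GTFS handle. The sqlite file name below is hypothetical, and BUS is assumed
# to be one of the constants listed in route_types.TRANSIT_ROUTE_TYPES.
from gtfspy.gtfs import GTFS

g = GTFS('city_feed.sqlite')  # hypothetical, already-imported GTFS database
walk_net = walk_transfer_stop_to_stop_network(g, max_link_distance=500)
bus_net = stop_to_stop_network_for_route_type(g, route_types.BUS)
all_modes = combined_stop_to_stop_transit_network(g)
print(walk_net.number_of_edges(), bus_net.number_of_edges(), all_modes.number_of_edges())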
# ---------------------------------------------------------------------------- # CLASSES: nightly # # Test Case: domainbounds.py # # Tests: libsim - connecting to simulation and retrieving data from it. # mesh - 3D rectilinear mesh # # Programmer: <NAME> # Date: June 17, 2014 # # Modifications: # # ---------------------------------------------------------------------------- # Create our simulation object. sim=TestSimulation("domainbounds" "domainbounds.sim2")<line_sep># Test that we can start and connect to the simulation. started,connected=TestSimStartAndConnect("domainbounds00" sim)<line_sep># Perform our tests. <if_stmt>connected# Make sure the metadata is right. <block_start>TestSimMetaData("domainbounds01" sim.metadata())<line_sep>AddPlot("Subset" "Domains")<line_sep>DrawPlots()<line_sep>v=GetView3D()<line_sep>v.viewNormal=(0.672727 0.569817 0.471961)<line_sep>v.viewUp=(-0.252634 0.776445 -0.57733)<line_sep>SetView3D(v)<line_sep>Test("domainbounds02")<line_sep>DeleteAllPlots()<line_sep>AddPlot("Pseudocolor" "zonal")<line_sep>DrawPlots()<line_sep>Test("domainbounds03")<line_sep>DeleteAllPlots()<block_end># Close down the simulation. <if_stmt>started<block_start>sim.endsim()<block_end>Exit()<line_sep>
<import_stmt>time<import_stmt>os<import_from_stmt>datetime datetime<import_from_stmt>app db ConnectedAgents ConnectedDomAgents auth extraModules AutomaticModuleExecution<import_from_stmt>flask jsonify send_from_directory Blueprint Response render_template request escape<import_from_stmt>pywebpush webpush WebPushException<import_from_stmt>database.models Registration Agent Module DomCommand DashboardRegistration<import_from_stmt>sqlalchemy.orm joinedload<line_sep>dashboard=Blueprint('dashboard' __name__)<line_sep>AGENT_TIMEOUT=8<line_sep>@dashboard.after_request<def_stmt>applySecurityHeaders response#style-src 'self'; <block_start>response.headers["Content-Security-Policy"]="script-src 'self'; img-src 'self'; font-src 'self'; media-src 'self'; frame-src 'self'; frame-ancestors 'none'"<line_sep>response.headers["X-Frame-Options"]="deny"<line_sep>response.headers["X-Xss-Protection"]="1; mode=block"<line_sep>response.headers["Referrer-Policy"]="same-origin"<line_sep><return>response<block_end>@dashboard.before_request<def_stmt>contentTypeCSRFProtection <block_start><if_stmt>request.method<eq>'POST'<block_start><if_stmt><not>request.content_type<eq>'application/json'<block_start><return>Response("" 404)<block_end><block_end><block_end>@dashboard.route('/')@auth.login_required<def_stmt>servedashboard <block_start><return>render_template('index.html')<block_end>@dashboard.route('/sw.js')@auth.login_required<def_stmt>sw <block_start>vapidPub=os.popen("vapid --applicationServerKey | cut -d' ' -f5").read().strip()<line_sep>res=render_template('dashboard_notifications.js' vapidPub=vapidPub)<line_sep><return>res {'Content-Type':'application/javascript'}<block_end>@dashboard.route('/modules')@auth.login_required<def_stmt>getModules <block_start><return>jsonify({'modules':extraModules['modules'] 'autoLoadedModules':AutomaticModuleExecution})<block_end>@dashboard.route('/agents')@auth.login_required<def_stmt>getAgents <block_start>activeAgents()<line_sep><return>jsonify({'active':ConnectedAgents 'dormant':dormantAgents()})<block_end>@dashboard.route('/agent/<agentID>' methods=['GET'])@auth.login_required<def_stmt>getAgent agentID<block_start><if_stmt>agentID<ne><none><block_start>agent=db.session().query(Agent).filter(Agent.id<eq>agentID).first()<if_stmt>agent<is><not><none><block_start>result=Agent.to_json(agent)<line_sep>registration=db.session.query(Registration).filter(Registration.agentId<eq>agent.id).order_by(Registration.id.desc()).first()<line_sep>result['push']=str(registration<is><not><none>).lower()<line_sep>result['active']='true'<if>agent.id<in>ConnectedAgents<else>'false'<line_sep>result['domActive']='true'<if>agent.id<in>ConnectedDomAgents<else>'false'<line_sep>result['user_agent']=escape(agent.user_agent)<line_sep>modules=db.session().query(Module).filter(Module.agentId<eq>agentID Module.processed<eq>1).all()<if_stmt>len(modules)<ne>0<block_start>result['modules']={}<for_stmt>module modules<block_start>result['modules'][module.name]=escape(module.results)<block_end><block_end>dom_commands=db.session().query(DomCommand).filter(DomCommand.agentId<eq>agentID DomCommand.processed<eq>1).order_by(DomCommand.id.desc()).limit(3).all()<if_stmt>len(dom_commands)<ne>0<block_start>result['dom_commands']={}<for_stmt>dom_command dom_commands<block_start>result['dom_commands'][escape(dom_command.command)]=escape(dom_command.result)<block_end><block_end><return>jsonify(result)<block_end><block_end><return>Response("" 404)<block_end>@dashboard.route('/automodule/<moduleName>' 
methods=['POST'])@auth.login_required<def_stmt>autoLoadModule moduleName<block_start>checkModule(moduleName)<if_stmt>moduleName<in>AutomaticModuleExecution<block_start><return>Response("" 404)<block_end>AutomaticModuleExecution.append(moduleName)<line_sep><return>""<block_end>@dashboard.route('/automodule/<moduleName>' methods=['DELETE'])@auth.login_required<def_stmt>deleteAutoLoadModule moduleName<block_start>checkModule(moduleName)<if_stmt>moduleName<not><in>AutomaticModuleExecution<block_start><return>Response("" 404)<block_end>AutomaticModuleExecution.remove(moduleName)<line_sep><return>""<block_end>@dashboard.route('/agent/<agentID>' methods=['DELETE'])@auth.login_required<def_stmt>deleteAgent agentID<block_start><if_stmt>agentID<is><none><block_start><return>Response("" 404)<block_end>agent=db.session().query(Agent).filter(Agent.id<eq>agentID).first()<if_stmt>agent<is><none><block_start><return>Response("" 404)<block_end>db.session().delete(agent)<line_sep>db.session().commit()<line_sep><return>""<block_end>@dashboard.route('/module/<moduleName>/<agentID>' methods=['POST'])@auth.login_required<def_stmt>createModule moduleName agentID<block_start>module=loadAgentModule(moduleName agentID)<if_stmt>module<is><not><none># already loaded <block_start><return>Response("" 404)<block_end>module=Module(<none> agentID moduleName '' 0 datetime.now())<line_sep>db.session().add(module)<line_sep>db.session().commit()<line_sep><return>""<block_end>@dashboard.route('/module/<moduleName>/<agentID>' methods=['DELETE'])@auth.login_required<def_stmt>removeModule moduleName agentID<block_start>module=loadAgentModule(moduleName agentID)<if_stmt>module<is><not><none><block_start>db.session().delete(module)<line_sep>db.session().commit()<line_sep><return>""<block_end><return>Response("" 404)<block_end># Send command to be executed to Dashboard @dashboard.route('/dom/<agentID>' methods=['POST'])@auth.login_required<def_stmt>sendDomJS agentID<block_start>body=request.get_json(silent=<true>)<if_stmt>body<and>body['js']<block_start>dom_command=DomCommand(<none> agentID body['js'] <none> 0 datetime.now())<line_sep>db.session().add(dom_command)<line_sep>db.session().commit()<line_sep>longpoll_counter=0<while_stmt><true><block_start>time.sleep(0.5)<line_sep>longpoll_counter<augadd>1<if_stmt>(longpoll_counter<g>8)# wait up to 4 seconds for response <block_start><return>Response("" 404)<block_end>dom_results=db.session().query(DomCommand).filter(DomCommand.agentId<eq>agentID DomCommand.processed<eq>1 DomCommand.id<eq>dom_command.id).order_by(DomCommand.id.desc()).limit(3).all()<if_stmt>len(dom_results)<ne>0<block_start>result={}<for_stmt>cmd_result dom_results<block_start>result['cmd']=cmd_result.command<line_sep>result['result']=cmd_result.result<block_end><return>jsonify(result)<block_end><else_stmt><block_start><continue><block_end><block_end><block_end><return>Response("" 404)<block_end># API to get the results of any command. 
Not used at the moment @dashboard.route('/dom/result/<agentID>/<cmdID>' methods=['GET'])@auth.login_required<def_stmt>sendDomCmdResult agentID cmdID<block_start>dom_commands=db.session().query(DomCommand).filter(DomCommand.agentId<eq>agentID DomCommand.processed<eq>1 DomCommand.id<eq>cmdID).order_by(DomCommand.id.desc()).limit(3).all()<if_stmt>len(dom_commands)<ne>0<block_start>result={}<for_stmt>dom_command dom_commands<block_start>result['cmd']=dom_command.command<line_sep>result['result']=dom_command.result<block_end><return>jsonify(result)<block_end><return>Response("" 404)<block_end>@dashboard.route('/push/<agentId>' methods=['POST'])@auth.login_required<def_stmt>push agentId<block_start>registration=db.session.query(Registration).filter(Registration.agentId<eq>agentId).order_by(Registration.id.desc()).first()<if_stmt>registration<is><none><block_start><return>Response("" 404)<block_end><else_stmt><block_start><try_stmt><block_start>webpush(subscription_info={"endpoint":registration.endpoint "keys":{"p256dh":registration.authKey "auth":registration.authSecret}} data="" vapid_private_key="./private_key.pem" vapid_claims={"sub":"mailto:<EMAIL>" })<block_end><except_stmt>WebPushException<as>ex<block_start>print(ex)<line_sep><return>Response("" 404)<block_end><block_end><return>""<block_end>@dashboard.route('/registration' methods=['POST'])@auth.login_required<def_stmt>registration <block_start>body=request.get_json(silent=<true>)<if_stmt>body<and>body['endpoint']<and>body['key']<and>body['authSecret']<block_start>dashboard_registration=DashboardRegistration(<none> body['endpoint'] body['key'] body['authSecret'])<line_sep>db.session.add(dashboard_registration)<line_sep>db.session.commit()<line_sep><return>""<block_end><return>Response("" 404)<block_end><def_stmt>activeAgents <block_start>now=time.time()<line_sep>agentsToRemove={}<line_sep># remove DOM agents that timed out <for_stmt>agentID ConnectedDomAgents<block_start><if_stmt>(now-ConnectedDomAgents[agentID]['last_seen'])<g>AGENT_TIMEOUT<block_start>agentsToRemove[agentID]=ConnectedDomAgents[agentID]<block_end><block_end><for_stmt>agentID agentsToRemove<block_start><del_stmt>ConnectedDomAgents[agentID]<block_end>agentsToRemove={}<line_sep># remove SW agents that timed out <for_stmt>agentID ConnectedAgents<block_start><if_stmt>(now-ConnectedAgents[agentID]['last_seen'])<g>AGENT_TIMEOUT<block_start>agentsToRemove[agentID]=ConnectedAgents[agentID]<block_end>ConnectedAgents[agentID]['domActive']='true'<if>agentID<in>ConnectedDomAgents<else>'false'<block_end><for_stmt>agentID agentsToRemove<block_start><del_stmt>ConnectedAgents[agentID]<block_end><block_end><def_stmt>dormantAgents <block_start>agents=db.session().query(Agent).options(joinedload('registration')).filter(Agent.id.notin_(ConnectedAgents.keys())).all()<line_sep>results={}<for_stmt>agent agents<block_start>results[agent.id]=Agent.to_json(agent)<line_sep>results[agent.id]['push']=str(agent.registration<is><not><none>).lower()<line_sep>results[agent.id]['active']='false'<line_sep>results[agent.id]['domActive']='true'<if>agent.id<in>ConnectedDomAgents<else>'false'<block_end><return>results<block_end><def_stmt>loadAgentModule moduleName agentID<block_start>checkModule(moduleName)<line_sep><return>db.session.query(Module).filter(Module.agentId<eq>agentID Module.name<eq>moduleName).order_by(Module.id.desc()).first()<block_end><def_stmt>checkModule moduleName<block_start><if_stmt>moduleName<not><in>extraModules['modules']<block_start><return>Response("" 404)<block_end><block_end>
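The dashboard blueprint above exposes a small JSON API: agent listings, module scheduling, DOM command dispatch with a short long-poll for the result, and Web Push registration. The sketch below, written against the routes defined above, shows how a client might drive that API with the requests library; the base URL, the credentials, the agent id "1", and the module name "screenshot" are illustrative assumptions, and the authentication scheme behind auth.login_required is defined outside this file, so the HTTP basic auth shown here is only an assumption.

import requests

BASE = "http://127.0.0.1:5000"       # hypothetical dashboard address
CREDS = ("operator", "secret")       # hypothetical credentials for auth.login_required

# List connected and dormant agents (GET /agents).
agents = requests.get(f"{BASE}/agents", auth=CREDS).json()
print(agents["active"], agents["dormant"])

# Schedule a module for automatic execution (POST /automodule/<moduleName>).
# contentTypeCSRFProtection() answers 404 to any POST that is not
# application/json, so send a JSON body rather than a form body.
requests.post(f"{BASE}/automodule/screenshot", json={}, auth=CREDS)

# Send JavaScript to an agent's DOM context (POST /dom/<agentID>).
# sendDomJS() long-polls for roughly four seconds; a 404 means no result
# arrived in time, not necessarily that the command was dropped.
resp = requests.post(f"{BASE}/dom/1", json={"js": "document.title"}, auth=CREDS)
if resp.ok:
    print(resp.json())               # {"cmd": "...", "result": "..."}

One behavioural detail worth noting: checkModule() returns a Response that none of its callers propagate, so an unknown module name is not actually rejected by /automodule/<moduleName> or /module/<moduleName>/<agentID>; a client that needs strict validation should first compare the name against the list returned by GET /modules.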
<import_stmt>itertools<import_from_stmt>collections OrderedDict<import_from_stmt>...error GraphQLError<import_from_stmt>...language ast<import_from_stmt>...language.printer print_ast<import_from_stmt>...pyutils.pair_set PairSet<import_from_stmt>...type.definition GraphQLInterfaceType GraphQLList GraphQLNonNull GraphQLObjectType get_named_type is_leaf_type <import_from_stmt>...utils.type_comparators is_equal_type<import_from_stmt>...utils.type_from_ast type_from_ast<import_from_stmt>.base ValidationRule<class_stmt>OverlappingFieldsCanBeMerged(ValidationRule)<block_start>__slots__=('_compared_fragments' '_cached_fields_and_fragment_names' )<def_stmt>__init__ self context<block_start>super(OverlappingFieldsCanBeMerged self).__init__(context)<line_sep># A memoization for when two fragments are compared "between" each other for # conflicts. Two fragments may be compared many times, so memoizing this can # dramatically improve the performance of this validator. self._compared_fragments=PairSet()<line_sep># A cache for the "field map" and list of fragment names found in any given # selection set. Selection sets may be asked for this information multiple # times, so this improves the performance of this validator. self._cached_fields_and_fragment_names={}<block_end><def_stmt>leave_SelectionSet self node key parent path ancestors# Note: we validate on the reverse traversal so deeper conflicts will be # caught first, for correct calculation of mutual exclusivity and for # clearer error messages. # field_map = _collect_field_asts_and_defs( # self.context, # self.context.get_parent_type(), # node # ) # conflicts = _find_conflicts(self.context, False, field_map, self.compared_set) <block_start>conflicts=_find_conflicts_within_selection_set(self.context self._cached_fields_and_fragment_names self._compared_fragments self.context.get_parent_type() node)<for_stmt>(reason_name reason),fields1,fields2 conflicts<block_start>self.context.report_error(GraphQLError(self.fields_conflict_message(reason_name reason) list(fields1)+list(fields2)))<block_end><block_end>@staticmethod<def_stmt>same_type type1 type2<block_start><return>is_equal_type(type1 type2)<line_sep># return type1.is_same_type(type2) <block_end>@classmethod<def_stmt>fields_conflict_message cls reason_name reason<block_start><return>('Fields "{}" conflict because {}. '<concat>'Use different aliases on the fields to fetch both if this was '<concat>'intentional.').format(reason_name cls.reason_message(reason))<block_end>@classmethod<def_stmt>reason_message cls reason<block_start><if_stmt>isinstance(reason list)<block_start><return>' and '.join('subfields "{}" conflict because {}'.format(reason_name cls.reason_message(sub_reason))<for>reason_name,sub_reason reason)<block_end><return>reason<block_end><block_end># Algorithm: # # Conflicts occur when two fields exist in a query which will produce the same # response name, but represent differing values, thus creating a conflict. # The algorithm below finds all conflicts via making a series of comparisons # between fields. In order to compare as few fields as possible, this makes # a series of comparisons "within" sets of fields and "between" sets of fields. # # Given any selection set, a collection produces both a set of fields by # also including all inline fragments, as well as a list of fragments # referenced by fragment spreads. # # A) Each selection set represented in the document first compares "within" its # collected set of fields, finding any conflicts between every pair of # overlapping fields. 
# Note: This is the only time that the fields "within" a set are compared # to each other. After this only fields "between" sets are compared. # # B) Also, if any fragment is referenced in a selection set, then a # comparison is made "between" the original set of fields and the # referenced fragment. # # C) Also, if multiple fragments are referenced, then comparisons # are made "between" each referenced fragment. # # D) When comparing "between" a set of fields and a referenced fragment, first # a comparison is made between each field in the original set of fields and # each field in the referenced set of fields. # # E) Also, if any fragment is referenced in the referenced selection set, # then a comparison is made "between" the original set of fields and the # referenced fragment (recursively referring to step D). # # F) When comparing "between" two fragments, first a comparison is made between # each field in the first referenced set of fields and each field in the # second referenced set of fields. # # G) Also, any fragments referenced by the first must be compared to the # second, and any fragments referenced by the second must be compared to the # first (recursively referring to step F). # # H) When comparing two fields, if both have selection sets, then a comparison # is made "between" both selection sets, first comparing the set of fields in # the first selection set with the set of fields in the second. # # I) Also, if any fragment is referenced in either selection set, then a # comparison is made "between" the other set of fields and the # referenced fragment. # # J) Also, if two fragments are referenced in both selection sets, then a # comparison is made "between" the two fragments. <def_stmt>_find_conflicts_within_selection_set context cached_fields_and_fragment_names compared_fragments parent_type selection_set<block_start>"""Find all conflicts found "within" a selection set, including those found via spreading in fragments. Called when visiting each SelectionSet in the GraphQL Document. """<line_sep>conflicts=[]<line_sep>field_map,fragment_names=_get_fields_and_fragments_names(context cached_fields_and_fragment_names parent_type selection_set)<line_sep># (A) Find all conflicts "within" the fields of this selection set. # Note: this is the *only place* `collect_conflicts_within` is called. _collect_conflicts_within(context conflicts cached_fields_and_fragment_names compared_fragments field_map)<line_sep># (B) Then collect conflicts between these fields and those represented by # each spread fragment name found. <for_stmt>i,fragment_name enumerate(fragment_names)<block_start>_collect_conflicts_between_fields_and_fragment(context conflicts cached_fields_and_fragment_names compared_fragments <false> field_map fragment_name )<line_sep># (C) Then compare this fragment with all other fragments found in this # selection set to collect conflicts within fragments spread together. # This compares each item in the list of fragment names to every other item # in that same list (except for itself). 
<for_stmt>other_fragment_name fragment_names[i+1:]<block_start>_collect_conflicts_between_fragments(context conflicts cached_fields_and_fragment_names compared_fragments <false> fragment_name other_fragment_name )<block_end><block_end><return>conflicts<block_end><def_stmt>_collect_conflicts_between_fields_and_fragment context conflicts cached_fields_and_fragment_names compared_fragments are_mutually_exclusive field_map fragment_name<block_start>fragment=context.get_fragment(fragment_name)<if_stmt><not>fragment<block_start><return><none><block_end>field_map2,fragment_names2=_get_referenced_fields_and_fragment_names(context cached_fields_and_fragment_names fragment)<line_sep># (D) First collect any conflicts between the provided collection of fields # and the collection of fields represented by the given fragment. _collect_conflicts_between(context conflicts cached_fields_and_fragment_names compared_fragments are_mutually_exclusive field_map field_map2)<line_sep># (E) Then collect any conflicts between the provided collection of fields # and any fragment names found in the given fragment. <for_stmt>fragment_name2 fragment_names2<block_start>_collect_conflicts_between_fields_and_fragment(context conflicts cached_fields_and_fragment_names compared_fragments are_mutually_exclusive field_map fragment_name2)<block_end><block_end># Collect all conflicts found between two fragments, including via spreading in # any nested fragments <def_stmt>_collect_conflicts_between_fragments context conflicts cached_fields_and_fragment_names compared_fragments are_mutually_exclusive fragment_name1 fragment_name2<block_start>fragment1=context.get_fragment(fragment_name1)<line_sep>fragment2=context.get_fragment(fragment_name2)<if_stmt><not>fragment1<or><not>fragment2<block_start><return><none><block_end># No need to compare a fragment to itself. <if_stmt>fragment1<eq>fragment2<block_start><return><none><block_end># Memoize so two fragments are not compared for conflicts more than once. <if_stmt>compared_fragments.has(fragment_name1 fragment_name2 are_mutually_exclusive)<block_start><return><none><block_end>compared_fragments.add(fragment_name1 fragment_name2 are_mutually_exclusive)<line_sep>field_map1,fragment_names1=_get_referenced_fields_and_fragment_names(context cached_fields_and_fragment_names fragment1)<line_sep>field_map2,fragment_names2=_get_referenced_fields_and_fragment_names(context cached_fields_and_fragment_names fragment2)<line_sep># (F) First, collect all conflicts between these two collections of fields # (not including any nested fragments) _collect_conflicts_between(context conflicts cached_fields_and_fragment_names compared_fragments are_mutually_exclusive field_map1 field_map2)<line_sep># (G) Then collect conflicts between the first fragment and any nested # fragments spread in the second fragment. <for_stmt>_fragment_name2 fragment_names2<block_start>_collect_conflicts_between_fragments(context conflicts cached_fields_and_fragment_names compared_fragments are_mutually_exclusive fragment_name1 _fragment_name2)<block_end># (G) Then collect conflicts between the second fragment and any nested # fragments spread in the first fragment. 
<for_stmt>_fragment_name1 fragment_names1<block_start>_collect_conflicts_between_fragments(context conflicts cached_fields_and_fragment_names compared_fragments are_mutually_exclusive _fragment_name1 fragment_name2)<block_end><block_end><def_stmt>_find_conflicts_between_sub_selection_sets context cached_fields_and_fragment_names compared_fragments are_mutually_exclusive parent_type1 selection_set1 parent_type2 selection_set2<block_start>"""Find all conflicts found between two selection sets. Includes those found via spreading in fragments. Called when determining if conflicts exist between the sub-fields of two overlapping fields. """<line_sep>conflicts=[]<line_sep>field_map1,fragment_names1=_get_fields_and_fragments_names(context cached_fields_and_fragment_names parent_type1 selection_set1)<line_sep>field_map2,fragment_names2=_get_fields_and_fragments_names(context cached_fields_and_fragment_names parent_type2 selection_set2)<line_sep># (H) First, collect all conflicts between these two collections of field. _collect_conflicts_between(context conflicts cached_fields_and_fragment_names compared_fragments are_mutually_exclusive field_map1 field_map2)<line_sep># (I) Then collect conflicts between the first collection of fields and # those referenced by each fragment name associated with the second. <for_stmt>fragment_name2 fragment_names2<block_start>_collect_conflicts_between_fields_and_fragment(context conflicts cached_fields_and_fragment_names compared_fragments are_mutually_exclusive field_map1 fragment_name2)<block_end># (I) Then collect conflicts between the second collection of fields and # those referenced by each fragment name associated with the first. <for_stmt>fragment_name1 fragment_names1<block_start>_collect_conflicts_between_fields_and_fragment(context conflicts cached_fields_and_fragment_names compared_fragments are_mutually_exclusive field_map2 fragment_name1)<block_end># (J) Also collect conflicts between any fragment names by the first and # fragment names by the second. This compares each item in the first set of # names to each item in the second set of names. <for_stmt>fragment_name1 fragment_names1<block_start><for_stmt>fragment_name2 fragment_names2<block_start>_collect_conflicts_between_fragments(context conflicts cached_fields_and_fragment_names compared_fragments are_mutually_exclusive fragment_name1 fragment_name2)<block_end><block_end><return>conflicts<block_end><def_stmt>_collect_conflicts_within context conflicts cached_fields_and_fragment_names compared_fragments field_map<block_start>"""Collect all Conflicts "within" one collection of fields."""<line_sep># field map is a keyed collection, where each key represents a response # name and the value at that key is a list of all fields which provide that # response name. For every response name, if there are multiple fields, they # must be compared to find a potential conflict. <for_stmt>response_name,fields list(field_map.items())# This compares every field in the list to every other field in this list # (except to itself). If the list only has one item, nothing needs to # be compared. 
<block_start><for_stmt>i,field enumerate(fields)<block_start><for_stmt>other_field fields[i+1:]# within one collection is never mutually exclusive <block_start>conflict=_find_conflict(context cached_fields_and_fragment_names compared_fragments <false> response_name field other_field)<if_stmt>conflict<block_start>conflicts.append(conflict)<block_end><block_end><block_end><block_end><block_end><def_stmt>_collect_conflicts_between context conflicts cached_fields_and_fragment_names compared_fragments parent_fields_are_mutually_exclusive field_map1 field_map2<block_start>"""Collect all Conflicts between two collections of fields. This is similar to, but different from the `collect_conflicts_within` function above. This check assumes that `collect_conflicts_within` has already been called on each provided collection of fields. This is true because this validator traverses each individual selection set. """<line_sep># A field map is a keyed collection, where each key represents a response # name and the value at that key is a list of all fields which provide that # response name. For any response name which appears in both provided field # maps, each field from the first field map must be compared to every field # in the second field map to find potential conflicts. <for_stmt>response_name,fields1 list(field_map1.items())<block_start>fields2=field_map2.get(response_name)<if_stmt>fields2<block_start><for_stmt>field1 fields1<block_start><for_stmt>field2 fields2<block_start>conflict=_find_conflict(context cached_fields_and_fragment_names compared_fragments parent_fields_are_mutually_exclusive response_name field1 field2)<if_stmt>conflict<block_start>conflicts.append(conflict)<block_end><block_end><block_end><block_end><block_end><block_end><def_stmt>_find_conflict context cached_fields_and_fragment_names compared_fragments parent_fields_are_mutually_exclusive response_name field1 field2<block_start>"""Determines if there is a conflict between two particular fields."""<line_sep>parent_type1,ast1,def1=field1<line_sep>parent_type2,ast2,def2=field2<line_sep># If it is known that two fields could not possibly apply at the same # time, due to the parent types, then it is safe to permit them to diverge # in aliased field or arguments used as they will not present any ambiguity # by differing. # It is known that two parent types could never overlap if they are # different Object types. Interface or Union types might overlap - if not # in the current state of the schema, then perhaps in some future version, # thus may not safely diverge. are_mutually_exclusive=(parent_fields_are_mutually_exclusive<or>(parent_type1<ne>parent_type2<and>isinstance(parent_type1 GraphQLObjectType)<and>isinstance(parent_type2 GraphQLObjectType)))<line_sep># The return type for each field. type1=def1<and>def1.type<line_sep>type2=def2<and>def2.type<if_stmt><not>are_mutually_exclusive# Two aliases must refer to the same field. <block_start>name1=ast1.name.value<line_sep>name2=ast2.name.value<if_stmt>name1<ne>name2<block_start><return>((response_name '{} and {} are different fields'.format(name1 name2)) [ast1] [ast2])<block_end># Two field calls must have the same arguments. 
<if_stmt><not>_same_arguments(ast1.arguments ast2.arguments)<block_start><return>((response_name 'they have differing arguments') [ast1] [ast2])<block_end><block_end><if_stmt>type1<and>type2<and>do_types_conflict(type1 type2)<block_start><return>((response_name 'they return conflicting types {} and {}'.format(type1 type2)) [ast1] [ast2])<block_end># Collect and compare sub-fields. Use the same "visited fragment names" list # for both collections so fields in a fragment reference are never # compared to themselves. selection_set1=ast1.selection_set<line_sep>selection_set2=ast2.selection_set<if_stmt>selection_set1<and>selection_set2<block_start>conflicts=_find_conflicts_between_sub_selection_sets(context cached_fields_and_fragment_names compared_fragments are_mutually_exclusive get_named_type(type1) selection_set1 get_named_type(type2) selection_set2)<line_sep><return>_subfield_conflicts(conflicts response_name ast1 ast2)<block_end><block_end><def_stmt>_get_fields_and_fragments_names context cached_fields_and_fragment_names parent_type selection_set<block_start>cached=cached_fields_and_fragment_names.get(selection_set)<if_stmt><not>cached<block_start>ast_and_defs=OrderedDict()<line_sep>fragment_names=OrderedDict()<line_sep>_collect_fields_and_fragment_names(context parent_type selection_set ast_and_defs fragment_names)<line_sep>cached=[ast_and_defs list(fragment_names.keys())]<line_sep>cached_fields_and_fragment_names[selection_set]=cached<block_end><return>cached<block_end><def_stmt>_get_referenced_fields_and_fragment_names context cached_fields_and_fragment_names fragment<block_start>"""Given a reference to a fragment, return the represented collection of fields as well as a list of nested fragment names referenced via fragment spreads."""<line_sep># Short-circuit building a type from the AST if possible. 
cached=cached_fields_and_fragment_names.get(fragment.selection_set)<if_stmt>cached<block_start><return>cached<block_end>fragment_type=type_from_ast(context.get_schema() fragment.type_condition)<line_sep><return>_get_fields_and_fragments_names(context cached_fields_and_fragment_names fragment_type fragment.selection_set)<block_end><def_stmt>_collect_fields_and_fragment_names context parent_type selection_set ast_and_defs fragment_names<block_start><for_stmt>selection selection_set.selections<block_start><if_stmt>isinstance(selection ast.Field)<block_start>field_name=selection.name.value<if_stmt>isinstance(parent_type (GraphQLObjectType GraphQLInterfaceType))<block_start>field_def=parent_type.fields.get(field_name)<block_end><else_stmt><block_start>field_def=<none><block_end>response_name=selection.alias.value<if>selection.alias<else>field_name<if_stmt><not>ast_and_defs.get(response_name)<block_start>ast_and_defs[response_name]=[]<block_end>ast_and_defs[response_name].append([parent_type selection field_def])<block_end><elif_stmt>isinstance(selection ast.FragmentSpread)<block_start>fragment_names[selection.name.value]=<true><block_end><elif_stmt>isinstance(selection ast.InlineFragment)<block_start>type_condition=selection.type_condition<if_stmt>type_condition<block_start>inline_fragment_type=type_from_ast(context.get_schema() selection.type_condition)<block_end><else_stmt><block_start>inline_fragment_type=parent_type<block_end>_collect_fields_and_fragment_names(context inline_fragment_type selection.selection_set ast_and_defs fragment_names)<block_end><block_end><block_end><def_stmt>_subfield_conflicts conflicts response_name ast1 ast2<block_start>"""Given a series of Conflicts which occurred between two sub-fields, generate a single Conflict."""<if_stmt>conflicts<block_start><return>((response_name [conflict[0]<for>conflict conflicts]) tuple(itertools.chain([ast1] *[conflict[1]<for>conflict conflicts])) tuple(itertools.chain([ast2] *[conflict[2]<for>conflict conflicts])))<block_end><block_end><def_stmt>do_types_conflict type1 type2<block_start><if_stmt>isinstance(type1 GraphQLList)<block_start><if_stmt>isinstance(type2 GraphQLList)<block_start><return>do_types_conflict(type1.of_type type2.of_type)<block_end><return><true><block_end><if_stmt>isinstance(type2 GraphQLList)<block_start><if_stmt>isinstance(type1 GraphQLList)<block_start><return>do_types_conflict(type1.of_type type2.of_type)<block_end><return><true><block_end><if_stmt>isinstance(type1 GraphQLNonNull)<block_start><if_stmt>isinstance(type2 GraphQLNonNull)<block_start><return>do_types_conflict(type1.of_type type2.of_type)<block_end><return><true><block_end><if_stmt>isinstance(type2 GraphQLNonNull)<block_start><if_stmt>isinstance(type1 GraphQLNonNull)<block_start><return>do_types_conflict(type1.of_type type2.of_type)<block_end><return><true><block_end><if_stmt>is_leaf_type(type1)<or>is_leaf_type(type2)<block_start><return>type1<ne>type2<block_end><return><false><block_end><def_stmt>_same_value value1 value2<block_start><return>(<not>value1<and><not>value2)<or>print_ast(value1)<eq>print_ast(value2)<block_end><def_stmt>_same_arguments arguments1 arguments2# Check to see if they are empty arguments or nones. If they are, we can # bail out early. 
<block_start><if_stmt><not>(arguments1<or>arguments2)<block_start><return><true><block_end><if_stmt>len(arguments1)<ne>len(arguments2)<block_start><return><false><block_end>arguments2_values_to_arg={a.name.value:a<for>a arguments2}<for_stmt>argument1 arguments1<block_start>argument2=arguments2_values_to_arg.get(argument1.name.value)<if_stmt><not>argument2<block_start><return><false><block_end><if_stmt><not>_same_value(argument1.value argument2.value)<block_start><return><false><block_end><block_end><return><true><block_end>
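For orientation, here is a minimal, hedged sketch of how the rule defined above surfaces through graphql-core's public validate() entry point, which includes OverlappingFieldsCanBeMerged in its default rule set. The schema, the field name "name", its "surname" argument, and the alias "fido" are all illustrative, and the exact top-level import surface can differ slightly between graphql-core releases.

from graphql import (
    GraphQLArgument, GraphQLBoolean, GraphQLField, GraphQLObjectType,
    GraphQLSchema, GraphQLString, parse, validate,
)

# Hypothetical schema: Query.name(surname: Boolean): String
query_type = GraphQLObjectType(
    name="Query",
    fields={
        "name": GraphQLField(
            GraphQLString,
            args={"surname": GraphQLArgument(GraphQLBoolean)},
        )
    },
)
schema = GraphQLSchema(query=query_type)

# Both fields produce the response name "fido" but call the field with
# different arguments, so _find_conflict() reports a conflict.
document = parse("{ fido: name(surname: true) fido: name }")

for error in validate(schema, document):
    print(error.message)
    # As formatted by fields_conflict_message() above:
    # Fields "fido" conflict because they have differing arguments. Use
    # different aliases on the fields to fetch both if this was intentional.

When the divergence is found in sub-fields rather than on the fields themselves (step H), reason_message() nests the per-subfield reasons, producing messages of the form 'subfields "x" conflict because ...' joined with "and".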